From 84b80de3eb5a9882c9a05dd1cf3f271e58b1d2cf Mon Sep 17 00:00:00 2001
From: Ashish Agrawal
Date: Mon, 18 Apr 2022 14:24:54 -0700
Subject: [PATCH] Integrate Document Level Alerting changes (#410)

* Rebase to push doc level changes on latest main changes (#391)

* Document level alerting dev (#272)

Signed-off-by: Ashish Agrawal

* Add last run context to Monitor data model

Signed-off-by: Ashish Agrawal

* add Update Monitor function

Signed-off-by: Ashish Agrawal

* fix integ test

Signed-off-by: Ashish Agrawal

* Implemented draft of Finding data model, a new Input type, and some basic unit tests. (#260)

* Implemented draft of Finding data model, and some basic unit tests for it.

Signed-off-by: AWSHurneyt <79280347+AWSHurneyt@users.noreply.github.com>

* POC for doc-level-alerting (#277)

Signed-off-by: Sriram <59816283+skkosuri-amzn@users.noreply.github.com>

* Add connection to triggers for doc level alerting (#316)

Signed-off-by: Ashish Agrawal

* CRUD APIs integration Tests and validation "conflict resolved" (#362)

Signed-off-by: charliezhangaws

* Segregate monitor runner logic for separation of concerns (#363)

* Refactor monitor runner logic for separation of concerns and better testability.

Signed-off-by: Saurabh Singh

* Add action and alert flow and findings schema and additional fixes (#381)

Signed-off-by: Ashish Agrawal

* Finding Search API (#385)

* Findings search API based on Annie's work

Signed-off-by: Annie Lee

* Fix Search API and add IT tests

Signed-off-by: Ashish Agrawal

Co-authored-by: Annie Lee

* Fix integ tests and minor issues from doc level changes

Signed-off-by: Ashish Agrawal

Co-authored-by: Annie Lee <71157062+leeyun-amzn@users.noreply.github.com>
Co-authored-by: Daniel Doubrovkine (dB.)
Co-authored-by: AWSHurneyt <79280347+AWSHurneyt@users.noreply.github.com>
Co-authored-by: Sriram <59816283+skkosuri-amzn@users.noreply.github.com>
Co-authored-by: charliezhangaws
Co-authored-by: Saurabh Singh
Co-authored-by: Annie Lee

* Add Trigger condition resolver which parses and evaluates the Trigger expression. (#405)

Signed-off-by: Saurabh Singh

* percolate query implementation in doc-level alerting (#399)

Signed-off-by: Subhobrata Dey

* Finding Index rollover (#408)

* Finding Index rollover

Signed-off-by: jiahe zhang

* Apply fixes to make rollover work

Signed-off-by: Ashish Agrawal

Co-authored-by: jiahe zhang
Co-authored-by: Annie Lee <71157062+leeyun-amzn@users.noreply.github.com>
Co-authored-by: Daniel Doubrovkine (dB.)
Co-authored-by: AWSHurneyt <79280347+AWSHurneyt@users.noreply.github.com>
Co-authored-by: Sriram <59816283+skkosuri-amzn@users.noreply.github.com>
Co-authored-by: charliezhangaws
Co-authored-by: Saurabh Singh
Co-authored-by: Annie Lee
Co-authored-by: Saurabh Singh
Co-authored-by: Subhobrata Dey
---
 alerting/build.gradle | 1 +
 .../percolator/PercolateQueryBuilderExt.java | 671 +++++++++++++++++
 .../percolator/PercolatorFieldMapperExt.java | 584 +++++++++++++++
 .../percolator/PercolatorPluginExt.java | 79 ++
 .../org/opensearch/alerting/AlertService.kt | 29 +-
 .../org/opensearch/alerting/AlertingPlugin.kt | 37 +-
 .../alerting/BucketLevelMonitorRunner.kt | 333 +++++++++
 .../DocumentReturningMonitorRunner.kt | 460 ++++++++++++
 .../org/opensearch/alerting/MonitorRunner.kt | 687 ++----------------
 .../alerting/MonitorRunnerExecutionContext.kt | 44 ++
 .../alerting/MonitorRunnerService.kt | 263 +++++++
 .../alerting/QueryLevelMonitorRunner.kt | 88 +++
 .../org/opensearch/alerting/TriggerService.kt | 34 +-
 .../alerting/action/GetFindingsAction.kt | 15 +
 .../alerting/action/GetFindingsRequest.kt | 42 ++
 .../alerting/action/GetFindingsResponse.kt | 63 ++
 .../alerting/alerts/AlertIndices.kt | 279 +++++--
 .../opensearch/alerting/alerts/AlertMover.kt | 6 +-
 .../org/opensearch/alerting/model/Alert.kt | 53 +-
 .../alerting/model/AlertingConfigAccessor.kt | 12 +
 .../model/DocumentExecutionContext.kt | 14 +
 .../alerting/model/DocumentLevelTrigger.kt | 160 ++++
 .../model/DocumentLevelTriggerRunResult.kt | 66 ++
 .../org/opensearch/alerting/model/Finding.kt | 146 ++++
 .../alerting/model/FindingDocument.kt | 91 +++
 .../alerting/model/FindingWithDocs.kt | 85 +++
 .../org/opensearch/alerting/model/Monitor.kt | 30 +-
 .../org/opensearch/alerting/model/Trigger.kt | 2 +
 .../resthandler/RestGetFindingsAction.kt | 67 ++
 .../resthandler/RestIndexMonitorAction.kt | 28 +
 .../resthandler/RestSearchMonitorAction.kt | 4 +-
 .../DocumentLevelTriggerExecutionContext.kt | 44 ++
 .../alerting/settings/AlertingSettings.kt | 35 +
 .../LegacyOpenDistroAlertingSettings.kt | 6 +
 .../TransportAcknowledgeAlertAction.kt | 101 ++-
 .../transport/TransportDeleteMonitorAction.kt | 23 +
 .../TransportExecuteMonitorAction.kt | 80 +-
 .../transport/TransportGetAlertsAction.kt | 2 +-
 .../transport/TransportGetFindingsAction.kt | 169 +++++
 .../transport/TransportIndexMonitorAction.kt | 97 +++
 .../parsers/ExpressionParser.kt | 12 +
 .../parsers/TriggerExpressionParser.kt | 53 ++
 .../parsers/TriggerExpressionRPNBaseParser.kt | 114 +++
 .../resolvers/TriggerExpression.kt | 32 +
 .../resolvers/TriggerExpressionRPNResolver.kt | 103 +++
 .../resolvers/TriggerExpressionResolver.kt | 12 +
 .../tokens/ExpressionToken.kt | 8 +
 .../tokens/TriggerExpressionConstant.kt | 26 +
 .../tokens/TriggerExpressionOperator.kt | 20 +
 .../tokens/TriggerExpressionToken.kt | 11 +
 .../opensearch/alerting/util/AlertingUtils.kt | 33 +
 .../alerting/util/DocLevelMonitorQueries.kt | 115 +++
 .../opensearch/alerting/util/IndexUtils.kt | 13 +-
 .../alerting/alerts/alert_mapping.json | 16 +
 .../alerting/alerts/finding_mapping.json | 56 ++
 .../alerting/org.opensearch.alerting.txt | 17 +
 .../org/opensearch/alerting/ADTestHelpers.kt | 2 +-
 .../org/opensearch/alerting/AccessRoles.kt | 5 +
 .../opensearch/alerting/AlertServiceTests.kt | 5 +
 .../alerting/AlertingRestTestCase.kt | 144 +++-
 .../alerting/DocumentMonitorRunnerIT.kt | 137 ++++
 ...rRunnerIT.kt => MonitorRunnerServiceIT.kt} | 14 +-
 .../org/opensearch/alerting/TestHelpers.kt | 131 +++-
 .../action/AcknowledgeAlertResponseTests.kt | 4 +-
 .../alerting/action/GetAlertsResponseTests.kt | 8 +-
 .../action/GetMonitorResponseTests.kt | 1 +
 .../action/IndexMonitorResponseTests.kt | 1 +
 .../alerting/alerts/AlertIndicesIT.kt | 185 ++++-
 .../model/DocLevelMonitorInputTests.kt | 67 ++
 .../opensearch/alerting/model/FindingTests.kt | 39 +
 .../alerting/model/WriteableTests.kt | 30 +
 .../alerting/resthandler/FindingsRestApiIT.kt | 143 ++++
 .../alerting/resthandler/MonitorRestApiIT.kt | 127 +++-
 .../SecureEmailAccountRestApiIT.kt | 5 +
 .../resthandler/SecureEmailGroupsRestApiIT.kt | 5 +
 .../settings/AlertingSettingsTests.kt | 5 +
 .../settings/DestinationSettingsTests.kt | 5 +
 .../TriggerExpressionParserTests.kt | 76 ++
 .../TriggerExpressionResolverTests.kt | 124 ++++
 .../core/model/ClusterMetricsInput.kt | 12 +-
 .../core/model/DocLevelMonitorInput.kt | 111 +++
 .../alerting/core/model/DocLevelQuery.kt | 106 +++
 .../opensearch/alerting/core/model/Input.kt | 36 +-
 .../alerting/core/model/ScheduledJob.kt | 1 +
 .../alerting/core/model/SearchInput.kt | 2 +-
 .../resources/mappings/doc-level-queries.json | 13 +
 .../resources/mappings/scheduled-jobs.json | 6 +-
 87 files changed, 6385 insertions(+), 836 deletions(-)
 create mode 100644 alerting/src/main/java/org/opensearch/percolator/PercolateQueryBuilderExt.java
 create mode 100644 alerting/src/main/java/org/opensearch/percolator/PercolatorFieldMapperExt.java
 create mode 100644 alerting/src/main/java/org/opensearch/percolator/PercolatorPluginExt.java
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/DocumentReturningMonitorRunner.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerExecutionContext.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerService.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/QueryLevelMonitorRunner.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsAction.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsRequest.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsResponse.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentExecutionContext.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentLevelTrigger.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/model/Finding.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/model/FindingDocument.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/model/FindingWithDocs.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetFindingsAction.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/parsers/ExpressionParser.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionParser.kt
 create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionRPNBaseParser.kt
 create mode 100644
alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpression.kt create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/ExpressionToken.kt create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionConstant.kt create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionOperator.kt create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionToken.kt create mode 100644 alerting/src/main/kotlin/org/opensearch/alerting/util/DocLevelMonitorQueries.kt create mode 100644 alerting/src/main/resources/org/opensearch/alerting/alerts/finding_mapping.json create mode 100644 alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt rename alerting/src/test/kotlin/org/opensearch/alerting/{MonitorRunnerIT.kt => MonitorRunnerServiceIT.kt} (99%) create mode 100644 alerting/src/test/kotlin/org/opensearch/alerting/model/DocLevelMonitorInputTests.kt create mode 100644 alerting/src/test/kotlin/org/opensearch/alerting/model/FindingTests.kt create mode 100644 alerting/src/test/kotlin/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt create mode 100644 alerting/src/test/kotlin/org/opensearch/alerting/triggeraction/TriggerExpressionParserTests.kt create mode 100644 alerting/src/test/kotlin/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt create mode 100644 core/src/main/kotlin/org/opensearch/alerting/core/model/DocLevelMonitorInput.kt create mode 100644 core/src/main/kotlin/org/opensearch/alerting/core/model/DocLevelQuery.kt create mode 100644 core/src/main/resources/mappings/doc-level-queries.json diff --git a/alerting/build.gradle b/alerting/build.gradle index a4cf559e1..89a19841a 100644 --- a/alerting/build.gradle +++ b/alerting/build.gradle @@ -56,6 +56,7 @@ configurations.testImplementation { dependencies { compileOnly "org.opensearch.plugin:opensearch-scripting-painless-spi:${versions.opensearch}" + api "org.opensearch.plugin:percolator-client:${opensearch_version}" // OpenSearch Nanny state implementation "org.jetbrains.kotlin:kotlin-stdlib:${kotlin_version}" diff --git a/alerting/src/main/java/org/opensearch/percolator/PercolateQueryBuilderExt.java b/alerting/src/main/java/org/opensearch/percolator/PercolateQueryBuilderExt.java new file mode 100644 index 000000000..33f67641e --- /dev/null +++ b/alerting/src/main/java/org/opensearch/percolator/PercolateQueryBuilderExt.java @@ -0,0 +1,671 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.percolator; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexReaderContext; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.ReaderUtil; +import org.apache.lucene.index.memory.MemoryIndex; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.apache.lucene.search.join.BitSetProducer; +import org.apache.lucene.store.ByteBuffersDirectory; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BitDocIdSet; +import org.apache.lucene.util.BitSet; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.SetOnce; +import org.opensearch.OpenSearchException; +import org.opensearch.ResourceNotFoundException; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.action.get.GetRequest; +import org.opensearch.common.ParseField; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.InputStreamStreamInput; +import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.NamedXContentRegistry; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.index.analysis.FieldNameAnalyzer; +import org.opensearch.index.fielddata.IndexFieldData; +import org.opensearch.index.fielddata.IndexFieldDataCache; +import org.opensearch.index.mapper.DocumentMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.ParseContext; +import org.opensearch.index.mapper.ParsedDocument; +import org.opensearch.index.mapper.SourceToParse; +import org.opensearch.index.query.AbstractQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryRewriteContext; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.QueryShardException; +import org.opensearch.index.query.Rewriteable; +import 
org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.indices.breaker.NoneCircuitBreakerService; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.opensearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.opensearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; + +/** + * This is a stop-gap solution & will be removed in future. + */ +// TODO for cleanup +public class PercolateQueryBuilderExt extends AbstractQueryBuilder { + public static final String NAME = "percolate_ext"; + + static final ParseField DOCUMENT_FIELD = new ParseField("document"); + static final ParseField DOCUMENTS_FIELD = new ParseField("documents"); + private static final ParseField NAME_FIELD = new ParseField("name"); + private static final ParseField QUERY_FIELD = new ParseField("field"); + private static final ParseField INDEXED_DOCUMENT_FIELD_INDEX = new ParseField("index"); + private static final ParseField INDEXED_DOCUMENT_FIELD_ID = new ParseField("id"); + private static final ParseField INDEXED_DOCUMENT_FIELD_ROUTING = new ParseField("routing"); + private static final ParseField INDEXED_DOCUMENT_FIELD_PREFERENCE = new ParseField("preference"); + private static final ParseField INDEXED_DOCUMENT_FIELD_VERSION = new ParseField("version"); + + private final String field; + private String name; + private final List documents; + private final XContentType documentXContentType; + + private final String indexedDocumentIndex; + private final String indexedDocumentId; + private final String indexedDocumentRouting; + private final String indexedDocumentPreference; + private final Long indexedDocumentVersion; + private final Supplier documentSupplier; + + /** + * Creates a percolator query builder instance for percolating a provided document. + * + * @param field The field that contains the percolator query + * @param document The binary blob containing document to percolate + * @param documentXContentType The content type of the binary blob containing the document to percolate + */ + public PercolateQueryBuilderExt(String field, BytesReference document, XContentType documentXContentType) { + this(field, Collections.singletonList(document), documentXContentType); + } + + /** + * Creates a percolator query builder instance for percolating a provided document. 
+ * + * @param field The field that contains the percolator query + * @param documents The binary blob containing document to percolate + * @param documentXContentType The content type of the binary blob containing the document to percolate + */ + public PercolateQueryBuilderExt(String field, List documents, XContentType documentXContentType) { + if (field == null) { + throw new IllegalArgumentException("[field] is a required argument"); + } + if (documents == null) { + throw new IllegalArgumentException("[document] is a required argument"); + } + this.field = field; + this.documents = documents; + this.documentXContentType = Objects.requireNonNull(documentXContentType); + indexedDocumentIndex = null; + indexedDocumentId = null; + indexedDocumentRouting = null; + indexedDocumentPreference = null; + indexedDocumentVersion = null; + this.documentSupplier = null; + } + + /** + * Creates a percolator query builder instance for percolating a document in a remote index. + * + * @param field The field that contains the percolator query + * @param indexedDocumentIndex The index containing the document to percolate + * @param indexedDocumentId The id of the document to percolate + * @param indexedDocumentRouting The routing value for the document to percolate + * @param indexedDocumentPreference The preference to use when fetching the document to percolate + * @param indexedDocumentVersion The expected version of the document to percolate + */ + public PercolateQueryBuilderExt( + String field, + String indexedDocumentIndex, + String indexedDocumentId, + String indexedDocumentRouting, + String indexedDocumentPreference, + Long indexedDocumentVersion + ) { + if (field == null) { + throw new IllegalArgumentException("[field] is a required argument"); + } + if (indexedDocumentIndex == null) { + throw new IllegalArgumentException("[index] is a required argument"); + } + if (indexedDocumentId == null) { + throw new IllegalArgumentException("[id] is a required argument"); + } + this.field = field; + this.indexedDocumentIndex = indexedDocumentIndex; + this.indexedDocumentId = indexedDocumentId; + this.indexedDocumentRouting = indexedDocumentRouting; + this.indexedDocumentPreference = indexedDocumentPreference; + this.indexedDocumentVersion = indexedDocumentVersion; + this.documents = Collections.emptyList(); + this.documentXContentType = null; + this.documentSupplier = null; + } + + protected PercolateQueryBuilderExt(String field, Supplier documentSupplier) { + if (field == null) { + throw new IllegalArgumentException("[field] is a required argument"); + } + this.field = field; + this.documents = Collections.emptyList(); + this.documentXContentType = null; + this.documentSupplier = documentSupplier; + indexedDocumentIndex = null; + indexedDocumentId = null; + indexedDocumentRouting = null; + indexedDocumentPreference = null; + indexedDocumentVersion = null; + } + + /** + * Read from a stream. 
+ */ + PercolateQueryBuilderExt(StreamInput in) throws IOException { + super(in); + field = in.readString(); + name = in.readOptionalString(); + if (in.getVersion().before(Version.V_2_0_0)) { + String documentType = in.readOptionalString(); + if (documentType != null) { + throw new IllegalStateException("documentType must be null"); + } + } + indexedDocumentIndex = in.readOptionalString(); + if (in.getVersion().before(Version.V_2_0_0)) { + String indexedDocumentType = in.readOptionalString(); + if (indexedDocumentType != null) { + throw new IllegalStateException("indexedDocumentType must be null"); + } + } + + indexedDocumentId = in.readOptionalString(); + indexedDocumentRouting = in.readOptionalString(); + indexedDocumentPreference = in.readOptionalString(); + if (in.readBoolean()) { + indexedDocumentVersion = in.readVLong(); + } else { + indexedDocumentVersion = null; + } + documents = in.readList(StreamInput::readBytesReference); + if (documents.isEmpty() == false) { + documentXContentType = in.readEnum(XContentType.class); + } else { + documentXContentType = null; + } + documentSupplier = null; + } + + /** + * Sets the name used for identification purposes in _percolator_document_slot response field + * when multiple percolate queries have been specified in the main query. + */ + public PercolateQueryBuilderExt setName(String name) { + this.name = name; + return this; + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + if (documentSupplier != null) { + throw new IllegalStateException("supplier must be null, can't serialize suppliers, missing a rewriteAndFetch?"); + } + out.writeString(field); + out.writeOptionalString(name); + if (out.getVersion().before(Version.V_2_0_0)) { + // In 7x, typeless percolate queries are represented by null documentType values + out.writeOptionalString(null); + } + out.writeOptionalString(indexedDocumentIndex); + if (out.getVersion().before(Version.V_2_0_0)) { + // In 7x, typeless percolate queries are represented by null indexedDocumentType values + out.writeOptionalString(null); + } + out.writeOptionalString(indexedDocumentId); + out.writeOptionalString(indexedDocumentRouting); + out.writeOptionalString(indexedDocumentPreference); + if (indexedDocumentVersion != null) { + out.writeBoolean(true); + out.writeVLong(indexedDocumentVersion); + } else { + out.writeBoolean(false); + } + out.writeVInt(documents.size()); + for (BytesReference document : documents) { + out.writeBytesReference(document); + } + if (documents.isEmpty() == false) { + out.writeEnum(documentXContentType); + } + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + builder.field(QUERY_FIELD.getPreferredName(), field); + if (name != null) { + builder.field(NAME_FIELD.getPreferredName(), name); + } + if (documents.isEmpty() == false) { + builder.startArray(DOCUMENTS_FIELD.getPreferredName()); + for (BytesReference document : documents) { + try ( + XContentParser parser = XContentHelper.createParser( + NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, + document + ) + ) { + parser.nextToken(); + builder.generator().copyCurrentStructure(parser); + } + } + builder.endArray(); + } + if (indexedDocumentIndex != null || indexedDocumentId != null) { + if (indexedDocumentIndex != null) { + builder.field(INDEXED_DOCUMENT_FIELD_INDEX.getPreferredName(), indexedDocumentIndex); + } + if (indexedDocumentId != null) { + 
builder.field(INDEXED_DOCUMENT_FIELD_ID.getPreferredName(), indexedDocumentId); + } + if (indexedDocumentRouting != null) { + builder.field(INDEXED_DOCUMENT_FIELD_ROUTING.getPreferredName(), indexedDocumentRouting); + } + if (indexedDocumentPreference != null) { + builder.field(INDEXED_DOCUMENT_FIELD_PREFERENCE.getPreferredName(), indexedDocumentPreference); + } + if (indexedDocumentVersion != null) { + builder.field(INDEXED_DOCUMENT_FIELD_VERSION.getPreferredName(), indexedDocumentVersion); + } + } + printBoostAndQueryName(builder); + builder.endObject(); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, args -> { + String field = (String) args[0]; + BytesReference document = (BytesReference) args[1]; + @SuppressWarnings("unchecked") + List documents = (List) args[2]; + String indexedDocId = (String) args[3]; + String indexedDocIndex = (String) args[4]; + String indexDocRouting = (String) args[5]; + String indexDocPreference = (String) args[6]; + Long indexedDocVersion = (Long) args[7]; + if (indexedDocId != null) { + return new PercolateQueryBuilderExt(field, indexedDocIndex, indexedDocId, indexDocRouting, indexDocPreference, indexedDocVersion); + } else if (document != null) { + return new PercolateQueryBuilderExt(field, Collections.singletonList(document), XContentType.JSON); + } else { + return new PercolateQueryBuilderExt(field, documents, XContentType.JSON); + } + }); + static { + PARSER.declareString(constructorArg(), QUERY_FIELD); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> parseDocument(p), DOCUMENT_FIELD); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> parseDocument(p), DOCUMENTS_FIELD); + PARSER.declareString(optionalConstructorArg(), INDEXED_DOCUMENT_FIELD_ID); + PARSER.declareString(optionalConstructorArg(), INDEXED_DOCUMENT_FIELD_INDEX); + PARSER.declareString(optionalConstructorArg(), INDEXED_DOCUMENT_FIELD_ROUTING); + PARSER.declareString(optionalConstructorArg(), INDEXED_DOCUMENT_FIELD_PREFERENCE); + PARSER.declareLong(optionalConstructorArg(), INDEXED_DOCUMENT_FIELD_VERSION); + PARSER.declareString(PercolateQueryBuilderExt::setName, NAME_FIELD); + PARSER.declareString(PercolateQueryBuilderExt::queryName, AbstractQueryBuilder.NAME_FIELD); + PARSER.declareFloat(PercolateQueryBuilderExt::boost, BOOST_FIELD); + PARSER.declareRequiredFieldSet( + DOCUMENT_FIELD.getPreferredName(), + DOCUMENTS_FIELD.getPreferredName(), + INDEXED_DOCUMENT_FIELD_ID.getPreferredName() + ); + PARSER.declareExclusiveFieldSet( + DOCUMENT_FIELD.getPreferredName(), + DOCUMENTS_FIELD.getPreferredName(), + INDEXED_DOCUMENT_FIELD_ID.getPreferredName() + ); + } + + private static BytesReference parseDocument(XContentParser parser) throws IOException { + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + builder.copyCurrentStructure(parser); + builder.flush(); + return BytesReference.bytes(builder); + } + } + + public static PercolateQueryBuilderExt fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + protected boolean doEquals(PercolateQueryBuilderExt other) { + return Objects.equals(field, other.field) + && Objects.equals(documents, other.documents) + && Objects.equals(indexedDocumentIndex, other.indexedDocumentIndex) + && Objects.equals(documentSupplier, other.documentSupplier) + && Objects.equals(indexedDocumentId, other.indexedDocumentId); + + } + + @Override + protected int doHashCode() { + return Objects.hash(field, documents, 
indexedDocumentIndex, indexedDocumentId, documentSupplier); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) { + if (documents.isEmpty() == false) { + return this; + } else if (documentSupplier != null) { + final BytesReference source = documentSupplier.get(); + if (source == null) { + return this; // not executed yet + } else { + PercolateQueryBuilderExt rewritten = new PercolateQueryBuilderExt( + field, + Collections.singletonList(source), + XContentHelper.xContentType(source) + ); + if (name != null) { + rewritten.setName(name); + } + return rewritten; + } + } + GetRequest getRequest = new GetRequest(indexedDocumentIndex, indexedDocumentId); + getRequest.preference("_local"); + getRequest.routing(indexedDocumentRouting); + getRequest.preference(indexedDocumentPreference); + if (indexedDocumentVersion != null) { + getRequest.version(indexedDocumentVersion); + } + SetOnce documentSupplier = new SetOnce<>(); + queryShardContext.registerAsyncAction((client, listener) -> { + client.get(getRequest, ActionListener.wrap(getResponse -> { + if (getResponse.isExists() == false) { + throw new ResourceNotFoundException( + "indexed document [{}/{}] couldn't be found", + indexedDocumentIndex, + indexedDocumentId + ); + } + if (getResponse.isSourceEmpty()) { + throw new IllegalArgumentException( + "indexed document [" + indexedDocumentIndex + "/" + indexedDocumentId + "] source disabled" + ); + } + documentSupplier.set(getResponse.getSourceAsBytesRef()); + listener.onResponse(null); + }, listener::onFailure)); + }); + + PercolateQueryBuilderExt rewritten = new PercolateQueryBuilderExt(field, documentSupplier::get); + if (name != null) { + rewritten.setName(name); + } + return rewritten; + } + + @Override + protected Query doToQuery(QueryShardContext context) throws IOException { + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[percolate] queries cannot be executed when '" + ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to false." 
+            );
+        }
+
+        // Call nowInMillis() so that this query becomes un-cacheable since we
+        // can't be sure that it doesn't use now or scripts
+        context.nowInMillis();
+
+        if (documents.isEmpty()) {
+            throw new IllegalStateException("no document to percolate");
+        }
+
+        MappedFieldType fieldType = context.fieldMapper(field);
+        if (fieldType == null) {
+            throw new QueryShardException(context, "field [" + field + "] does not exist");
+        }
+
+        if (!(fieldType instanceof PercolatorFieldMapperExt.PercolatorFieldType)) {
+            throw new QueryShardException(
+                context,
+                "expected field [" + field + "] to be of type [percolator], but is of type [" + fieldType.typeName() + "]"
+            );
+        }
+
+        final List<ParsedDocument> docs = new ArrayList<>();
+        final DocumentMapper docMapper;
+        final MapperService mapperService = context.getMapperService();
+        String type = mapperService.documentMapper().type();
+        docMapper = mapperService.documentMapper();
+        for (BytesReference document : documents) {
+            docs.add(docMapper.parse(new SourceToParse(context.index().getName(), "_temp_id", document, documentXContentType)));
+        }
+
+        FieldNameAnalyzer fieldNameAnalyzer = (FieldNameAnalyzer) docMapper.mappers().indexAnalyzer();
+        // Need this custom impl because FieldNameAnalyzer is strict and the percolator sometimes isn't when
+        // 'index.percolator.map_unmapped_fields_as_string' is enabled:
+        Analyzer analyzer = new DelegatingAnalyzerWrapper(Analyzer.PER_FIELD_REUSE_STRATEGY) {
+            @Override
+            protected Analyzer getWrappedAnalyzer(String fieldName) {
+                Analyzer analyzer = fieldNameAnalyzer.analyzers().get(fieldName);
+                if (analyzer != null) {
+                    return analyzer;
+                } else {
+                    return context.getIndexAnalyzers().getDefaultIndexAnalyzer();
+                }
+            }
+        };
+        final IndexSearcher docSearcher;
+        final boolean excludeNestedDocuments;
+        if (docs.size() > 1 || docs.get(0).docs().size() > 1) {
+            assert docs.size() != 1 || docMapper.hasNestedObjects();
+            docSearcher = createMultiDocumentSearcher(analyzer, docs);
+            excludeNestedDocuments = docMapper.hasNestedObjects()
+                && docs.stream().map(ParsedDocument::docs).mapToInt(List::size).anyMatch(size -> size > 1);
+        } else {
+            MemoryIndex memoryIndex = MemoryIndex.fromDocument(docs.get(0).rootDoc(), analyzer, true, false);
+            docSearcher = memoryIndex.createSearcher();
+            docSearcher.setQueryCache(null);
+            excludeNestedDocuments = false;
+        }
+
+        PercolatorFieldMapperExt.PercolatorFieldType pft = (PercolatorFieldMapperExt.PercolatorFieldType) fieldType;
+        String name = pft.name();
+        QueryShardContext percolateShardContext = wrap(context);
+        PercolatorFieldMapperExt.configureContext(percolateShardContext, pft.mapUnmappedFieldsAsText);
+        PercolateQuery.QueryStore queryStore = createStore(pft.queryBuilderField, percolateShardContext);
+
+        return pft.percolateQuery(name, queryStore, documents, docSearcher, excludeNestedDocuments, context.indexVersionCreated());
+    }
+
+    public String getField() {
+        return field;
+    }
+
+    public List<BytesReference> getDocuments() {
+        return documents;
+    }
+
+    // pkg-private for testing
+    XContentType getXContentType() {
+        return documentXContentType;
+    }
+
+    public String getQueryName() {
+        return name;
+    }
+
+    static IndexSearcher createMultiDocumentSearcher(Analyzer analyzer, Collection<ParsedDocument> docs) {
+        Directory directory = new ByteBuffersDirectory();
+        try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig(analyzer))) {
+            // Indexing in order here, so that the user provided order matches with the docid sequencing:
+            Iterable<ParseContext.Document> iterable = () -> docs.stream().map(ParsedDocument::docs).flatMap(Collection::stream).iterator();
+            indexWriter.addDocuments(iterable);
+
+            DirectoryReader directoryReader = DirectoryReader.open(indexWriter);
+            assert directoryReader.leaves().size() == 1 : "Expected single leaf, but got [" + directoryReader.leaves().size() + "]";
+            final IndexSearcher slowSearcher = new IndexSearcher(directoryReader);
+            slowSearcher.setQueryCache(null);
+            return slowSearcher;
+        } catch (IOException e) {
+            throw new OpenSearchException("Failed to create index for percolator with nested document ", e);
+        }
+    }
+
+    static PercolateQuery.QueryStore createStore(MappedFieldType queryBuilderFieldType, QueryShardContext context) {
+        Version indexVersion = context.indexVersionCreated();
+        NamedWriteableRegistry registry = context.getWriteableRegistry();
+        return ctx -> {
+            LeafReader leafReader = ctx.reader();
+            BinaryDocValues binaryDocValues = leafReader.getBinaryDocValues(queryBuilderFieldType.name());
+            if (binaryDocValues == null) {
+                return docId -> null;
+            }
+            return docId -> {
+                if (binaryDocValues.advanceExact(docId)) {
+                    BytesRef qbSource = binaryDocValues.binaryValue();
+                    try (InputStream in = new ByteArrayInputStream(qbSource.bytes, qbSource.offset, qbSource.length)) {
+                        try (
+                            StreamInput input = new NamedWriteableAwareStreamInput(
+                                new InputStreamStreamInput(in, qbSource.length),
+                                registry
+                            )
+                        ) {
+                            input.setVersion(indexVersion);
+                            // Query builder's content is stored via BinaryFieldMapper, which has a custom encoding
+                            // to encode multiple binary values into a single binary doc values field.
+                            // This is the reason we first need to read the number of values and
+                            // then the length of the field value in bytes.
+                            int numValues = input.readVInt();
+                            assert numValues == 1;
+                            int valueLength = input.readVInt();
+                            assert valueLength > 0;
+                            QueryBuilder queryBuilder = input.readNamedWriteable(QueryBuilder.class);
+                            assert in.read() == -1;
+                            queryBuilder = Rewriteable.rewrite(queryBuilder, context);
+                            return queryBuilder.toQuery(context);
+                        }
+                    }
+                } else {
+                    return null;
+                }
+            };
+        };
+    }
+
+    static QueryShardContext wrap(QueryShardContext shardContext) {
+        return new QueryShardContext(shardContext) {
+
+            @Override
+            public IndexReader getIndexReader() {
+                // The reader that matters in this context is not the reader of the shard but
+                // the reader of the MemoryIndex. We just use `null` for simplicity.
+ return null; + } + + @Override + public BitSetProducer bitsetFilter(Query query) { + return context -> { + final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); + final IndexSearcher searcher = new IndexSearcher(topLevelContext); + searcher.setQueryCache(null); + final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); + final Scorer s = weight.scorer(context); + + if (s != null) { + return new BitDocIdSet(BitSet.of(s.iterator(), context.reader().maxDoc())).bits(); + } else { + return null; + } + }; + } + + @Override + @SuppressWarnings("unchecked") + public > IFD getForField(MappedFieldType fieldType) { + IndexFieldData.Builder builder = fieldType.fielddataBuilder( + shardContext.getFullyQualifiedIndex().getName(), + shardContext::lookup + ); + IndexFieldDataCache cache = new IndexFieldDataCache.None(); + CircuitBreakerService circuitBreaker = new NoneCircuitBreakerService(); + return (IFD) builder.build(cache, circuitBreaker); + } + }; + } +} \ No newline at end of file diff --git a/alerting/src/main/java/org/opensearch/percolator/PercolatorFieldMapperExt.java b/alerting/src/main/java/org/opensearch/percolator/PercolatorFieldMapperExt.java new file mode 100644 index 000000000..3a8fc8a22 --- /dev/null +++ b/alerting/src/main/java/org/opensearch/percolator/PercolatorFieldMapperExt.java @@ -0,0 +1,584 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.percolator; + +import org.apache.lucene.document.BinaryRange; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.sandbox.search.CoveringQuery; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LongValuesSource; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermInSetQuery; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; +import org.opensearch.Version; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.common.ParsingException; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.hash.MurmurHash3; +import org.opensearch.common.io.stream.OutputStreamStreamOutput; +import org.opensearch.common.lucene.search.Queries; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentLocation; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.index.mapper.BinaryFieldMapper; +import org.opensearch.index.mapper.FieldMapper; +import org.opensearch.index.mapper.KeywordFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.Mapper; +import org.opensearch.index.mapper.MapperParsingException; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.mapper.ParametrizedFieldMapper; +import org.opensearch.index.mapper.ParseContext; +import org.opensearch.index.mapper.RangeFieldMapper; +import org.opensearch.index.mapper.RangeType; +import org.opensearch.index.mapper.SourceValueFetcher; +import org.opensearch.index.mapper.TextSearchInfo; +import org.opensearch.index.mapper.ValueFetcher; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.BoostingQueryBuilder; +import org.opensearch.index.query.ConstantScoreQueryBuilder; +import org.opensearch.index.query.DisMaxQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.QueryShardException; +import org.opensearch.index.query.Rewriteable; +import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import static org.opensearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; + +/** + * This is a stop-gap solution & will be removed in future. 
+ */
+// TODO for cleanup
+public class PercolatorFieldMapperExt extends ParametrizedFieldMapper {
+
+    static final Setting<Boolean> INDEX_MAP_UNMAPPED_FIELDS_AS_TEXT_SETTING = Setting.boolSetting(
+        "index.percolator.map_unmapped_fields_as_text",
+        false,
+        Setting.Property.IndexScope
+    );
+    static final String CONTENT_TYPE = "percolator_ext";
+
+    static final byte FIELD_VALUE_SEPARATOR = 0; // nul code point
+    static final String EXTRACTION_COMPLETE = "complete";
+    static final String EXTRACTION_PARTIAL = "partial";
+    static final String EXTRACTION_FAILED = "failed";
+
+    static final String EXTRACTED_TERMS_FIELD_NAME = "extracted_terms";
+    static final String EXTRACTION_RESULT_FIELD_NAME = "extraction_result";
+    static final String QUERY_BUILDER_FIELD_NAME = "query_builder_field";
+    static final String RANGE_FIELD_NAME = "range_field";
+    static final String MINIMUM_SHOULD_MATCH_FIELD_NAME = "minimum_should_match_field";
+
+    @Override
+    public ParametrizedFieldMapper.Builder getMergeBuilder() {
+        return new Builder(simpleName(), queryShardContext).init(this);
+    }
+
+    static class Builder extends ParametrizedFieldMapper.Builder {
+
+        private final Parameter<Map<String, String>> meta = Parameter.metaParam();
+
+        private final Supplier<QueryShardContext> queryShardContext;
+
+        Builder(String fieldName, Supplier<QueryShardContext> queryShardContext) {
+            super(fieldName);
+            this.queryShardContext = queryShardContext;
+        }
+
+        @Override
+        protected List<Parameter<?>> getParameters() {
+            return Arrays.asList(meta);
+        }
+
+        @Override
+        public PercolatorFieldMapperExt build(BuilderContext context) {
+            PercolatorFieldType fieldType = new PercolatorFieldType(buildFullName(context), meta.getValue());
+            context.path().add(name());
+            KeywordFieldMapper extractedTermsField = createExtractQueryFieldBuilder(EXTRACTED_TERMS_FIELD_NAME, context);
+            fieldType.queryTermsField = extractedTermsField.fieldType();
+            KeywordFieldMapper extractionResultField = createExtractQueryFieldBuilder(EXTRACTION_RESULT_FIELD_NAME, context);
+            fieldType.extractionResultField = extractionResultField.fieldType();
+            BinaryFieldMapper queryBuilderField = createQueryBuilderFieldBuilder(context);
+            fieldType.queryBuilderField = queryBuilderField.fieldType();
+            // Range field is of type ip, because that matches closest with BinaryRange field. Otherwise we would
+            // have to introduce a new field type...
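+            // BinaryRange stores fixed-width 16-byte bounds (BinaryRange.BYTES), and the ip range type shares
+            // that representation; encodeRange(...) below fills each 16-byte half with a MurmurHash3 of the
+            // field name and overwrites its tail with the encoded min/max, so any range type can reuse this field.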
+            RangeFieldMapper rangeFieldMapper = createExtractedRangeFieldBuilder(RANGE_FIELD_NAME, RangeType.IP, context);
+            fieldType.rangeField = rangeFieldMapper.fieldType();
+            NumberFieldMapper minimumShouldMatchFieldMapper = createMinimumShouldMatchField(context);
+            fieldType.minimumShouldMatchField = minimumShouldMatchFieldMapper.fieldType();
+            fieldType.mapUnmappedFieldsAsText = getMapUnmappedFieldAsText(context.indexSettings());
+
+            context.path().remove();
+            return new PercolatorFieldMapperExt(
+                name(),
+                fieldType,
+                multiFieldsBuilder.build(this, context),
+                copyTo.build(),
+                queryShardContext,
+                extractedTermsField,
+                extractionResultField,
+                queryBuilderField,
+                rangeFieldMapper,
+                minimumShouldMatchFieldMapper,
+                getMapUnmappedFieldAsText(context.indexSettings())
+            );
+        }
+
+        private static boolean getMapUnmappedFieldAsText(Settings indexSettings) {
+            return INDEX_MAP_UNMAPPED_FIELDS_AS_TEXT_SETTING.get(indexSettings);
+        }
+
+        static KeywordFieldMapper createExtractQueryFieldBuilder(String name, BuilderContext context) {
+            KeywordFieldMapper.Builder queryMetadataFieldBuilder = new KeywordFieldMapper.Builder(name);
+            queryMetadataFieldBuilder.docValues(false);
+            return queryMetadataFieldBuilder.build(context);
+        }
+
+        static BinaryFieldMapper createQueryBuilderFieldBuilder(BuilderContext context) {
+            BinaryFieldMapper.Builder builder = new BinaryFieldMapper.Builder(QUERY_BUILDER_FIELD_NAME, true);
+            return builder.build(context);
+        }
+
+        static RangeFieldMapper createExtractedRangeFieldBuilder(String name, RangeType rangeType, BuilderContext context) {
+            RangeFieldMapper.Builder builder = new RangeFieldMapper.Builder(
+                name,
+                rangeType,
+                true,
+                hasIndexCreated(context.indexSettings()) ? context.indexCreatedVersion() : null
+            );
+            // For now no doc values, because in processQuery(...) only the Lucene range fields get added:
+            builder.docValues(false);
+            return builder.build(context);
+        }
+
+        static NumberFieldMapper createMinimumShouldMatchField(BuilderContext context) {
+            NumberFieldMapper.Builder builder = NumberFieldMapper.Builder.docValuesOnly(
+                MINIMUM_SHOULD_MATCH_FIELD_NAME,
+                NumberFieldMapper.NumberType.INTEGER
+            );
+            return builder.build(context);
+        }
+
+    }
+
+    static class TypeParser implements FieldMapper.TypeParser {
+
+        @Override
+        public Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+            return new Builder(name, parserContext.queryShardContextSupplier());
+        }
+    }
+
+    static class PercolatorFieldType extends MappedFieldType {
+
+        MappedFieldType queryTermsField;
+        MappedFieldType extractionResultField;
+        MappedFieldType queryBuilderField;
+        MappedFieldType minimumShouldMatchField;
+
+        RangeFieldMapper.RangeFieldType rangeField;
+        boolean mapUnmappedFieldsAsText;
+
+        private PercolatorFieldType(String name, Map<String, String> meta) {
+            super(name, false, false, false, TextSearchInfo.NONE, meta);
+        }
+
+        @Override
+        public String typeName() {
+            return CONTENT_TYPE;
+        }
+
+        @Override
+        public Query termQuery(Object value, QueryShardContext context) {
+            throw new QueryShardException(context, "Percolator fields are not searchable directly, use a percolate query instead");
+        }
+
+        @Override
+        public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) {
+            return SourceValueFetcher.identity(name(), context, format);
+        }
+
+        Query percolateQuery(
+            String name,
+            PercolateQuery.QueryStore queryStore,
+            List<BytesReference> documents,
+            IndexSearcher searcher,
+            boolean excludeNestedDocuments,
+            Version indexVersion
+        ) throws IOException {
+            IndexReader indexReader = searcher.getIndexReader();
+            Tuple<Query, Boolean> t = createCandidateQuery(indexReader, indexVersion);
+            Query candidateQuery = t.v1();
+            boolean canUseMinimumShouldMatchField = t.v2();
+
+            Query verifiedMatchesQuery;
+            // We can only skip the MemoryIndex verification when percolating a single, non-nested document. We cannot
+            // skip MemoryIndex verification when percolating multiple documents, because when terms and
+            // ranges are extracted from an IndexReader backed by a RamDirectory holding multiple documents we do
+            // not know to which document the terms belong and for certain queries we incorrectly emit candidate
+            // matches as actual matches.
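+            // For example, a percolated plain `term` query is fully extracted at index time and marked
+            // EXTRACTION_COMPLETE, so a single-document percolation can accept it here without re-running
+            // it against the MemoryIndex; queries whose extraction failed always go through verification.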
+            if (canUseMinimumShouldMatchField && indexReader.maxDoc() == 1) {
+                verifiedMatchesQuery = new TermQuery(new Term(extractionResultField.name(), EXTRACTION_COMPLETE));
+            } else {
+                verifiedMatchesQuery = new MatchNoDocsQuery("multiple or nested docs or CoveringQuery could not be used");
+            }
+            Query filter = null;
+            if (excludeNestedDocuments) {
+                filter = Queries.newNonNestedFilter();
+            }
+            return new PercolateQuery(name, queryStore, documents, candidateQuery, searcher, filter, verifiedMatchesQuery);
+        }
+
+        Tuple<Query, Boolean> createCandidateQuery(IndexReader indexReader, Version indexVersion) throws IOException {
+            Tuple<List<BytesRef>, Map<String, List<byte[]>>> t = extractTermsAndRanges(indexReader);
+            List<BytesRef> extractedTerms = t.v1();
+            Map<String, List<byte[]>> encodedPointValuesByField = t.v2();
+            // `1 + ` is needed to take into account the EXTRACTION_FAILED should clause
+            boolean canUseMinimumShouldMatchField = 1 + extractedTerms.size() + encodedPointValuesByField.size() <= BooleanQuery
+                .getMaxClauseCount();
+
+            List<Query> subQueries = new ArrayList<>();
+            for (Map.Entry<String, List<byte[]>> entry : encodedPointValuesByField.entrySet()) {
+                String rangeFieldName = entry.getKey();
+                List<byte[]> encodedPointValues = entry.getValue();
+                byte[] min = encodedPointValues.get(0);
+                byte[] max = encodedPointValues.get(1);
+                Query query = BinaryRange.newIntersectsQuery(rangeField.name(), encodeRange(rangeFieldName, min, max));
+                subQueries.add(query);
+            }
+
+            BooleanQuery.Builder candidateQuery = new BooleanQuery.Builder();
+            if (canUseMinimumShouldMatchField) {
+                LongValuesSource valuesSource = LongValuesSource.fromIntField(minimumShouldMatchField.name());
+                for (BytesRef extractedTerm : extractedTerms) {
+                    subQueries.add(new TermQuery(new Term(queryTermsField.name(), extractedTerm)));
+                }
+                candidateQuery.add(new CoveringQuery(subQueries, valuesSource), BooleanClause.Occur.SHOULD);
+            } else {
+                candidateQuery.add(new TermInSetQuery(queryTermsField.name(), extractedTerms), BooleanClause.Occur.SHOULD);
+                for (Query subQuery : subQueries) {
+                    candidateQuery.add(subQuery, BooleanClause.Occur.SHOULD);
+                }
+            }
+            // include extractionResultField:failed, because docs with this term have no extractedTermsField
+            // and otherwise we would fail to return these docs. Docs that failed query term extraction
+            // always need to be verified by MemoryIndex:
+            candidateQuery.add(new TermQuery(new Term(extractionResultField.name(), EXTRACTION_FAILED)), BooleanClause.Occur.SHOULD);
+            return new Tuple<>(candidateQuery.build(), canUseMinimumShouldMatchField);
+        }
+
+        // This was extracted from the method above, because otherwise it is difficult to test what terms are included in
+        // the query in case a CoveringQuery is used (it does not have a getter to retrieve the clauses)
+        Tuple<List<BytesRef>, Map<String, List<byte[]>>> extractTermsAndRanges(IndexReader indexReader) throws IOException {
+            List<BytesRef> extractedTerms = new ArrayList<>();
+            Map<String, List<byte[]>> encodedPointValuesByField = new HashMap<>();
+
+            LeafReader reader = indexReader.leaves().get(0).reader();
+            for (FieldInfo info : reader.getFieldInfos()) {
+                Terms terms = reader.terms(info.name);
+                if (terms != null) {
+                    BytesRef fieldBr = new BytesRef(info.name);
+                    TermsEnum tenum = terms.iterator();
+                    for (BytesRef term = tenum.next(); term != null; term = tenum.next()) {
+                        BytesRefBuilder builder = new BytesRefBuilder();
+                        builder.append(fieldBr);
+                        builder.append(FIELD_VALUE_SEPARATOR);
+                        builder.append(term);
+                        extractedTerms.add(builder.toBytesRef());
+                    }
+                }
+                if (info.getPointIndexDimensionCount() == 1) { // not != 0 because range fields are not supported
+                    PointValues values = reader.getPointValues(info.name);
+                    List<byte[]> encodedPointValues = new ArrayList<>();
+                    encodedPointValues.add(values.getMinPackedValue().clone());
+                    encodedPointValues.add(values.getMaxPackedValue().clone());
+                    encodedPointValuesByField.put(info.name, encodedPointValues);
+                }
+            }
+            return new Tuple<>(extractedTerms, encodedPointValuesByField);
+        }
+
+    }
+
+    private final Supplier<QueryShardContext> queryShardContext;
+    private final KeywordFieldMapper queryTermsField;
+    private final KeywordFieldMapper extractionResultField;
+    private final BinaryFieldMapper queryBuilderField;
+    private final NumberFieldMapper minimumShouldMatchFieldMapper;
+    private final RangeFieldMapper rangeFieldMapper;
+    private final boolean mapUnmappedFieldsAsText;
+
+    PercolatorFieldMapperExt(
+        String simpleName,
+        MappedFieldType mappedFieldType,
+        MultiFields multiFields,
+        CopyTo copyTo,
+        Supplier<QueryShardContext> queryShardContext,
+        KeywordFieldMapper queryTermsField,
+        KeywordFieldMapper extractionResultField,
+        BinaryFieldMapper queryBuilderField,
+        RangeFieldMapper rangeFieldMapper,
+        NumberFieldMapper minimumShouldMatchFieldMapper,
+        boolean mapUnmappedFieldsAsText
+    ) {
+        super(simpleName, mappedFieldType, multiFields, copyTo);
+        this.queryShardContext = queryShardContext;
+        this.queryTermsField = queryTermsField;
+        this.extractionResultField = extractionResultField;
+        this.queryBuilderField = queryBuilderField;
+        this.minimumShouldMatchFieldMapper = minimumShouldMatchFieldMapper;
+        this.rangeFieldMapper = rangeFieldMapper;
+        this.mapUnmappedFieldsAsText = mapUnmappedFieldsAsText;
+    }
+
+    @Override
+    public void parse(ParseContext context) throws IOException {
+        QueryShardContext queryShardContext = this.queryShardContext.get();
+        if (context.doc().getField(queryBuilderField.name()) != null) {
+            // If a percolator query has been defined in an array object then multiple percolator queries
+            // could be provided. In order to prevent this we fail if we try to parse more than one query
+            // for the current document.
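+            // e.g. a document whose percolator field appears inside an array of objects would otherwise
+            // carry several queries; parsing the second one lands here and is rejected.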
+            throw new IllegalArgumentException("a document can only contain one percolator query");
+        }
+
+        configureContext(queryShardContext, isMapUnmappedFieldAsText());
+
+        XContentParser parser = context.parser();
+        QueryBuilder queryBuilder = parseQueryBuilder(parser, parser.getTokenLocation());
+        verifyQuery(queryBuilder);
+        // Fetching of terms, shapes and indexed scripts happen during this rewrite:
+        PlainActionFuture<QueryBuilder> future = new PlainActionFuture<>();
+        Rewriteable.rewriteAndFetch(queryBuilder, queryShardContext, future);
+        queryBuilder = future.actionGet();
+
+        Version indexVersion = context.mapperService().getIndexSettings().getIndexVersionCreated();
+        createQueryBuilderField(indexVersion, queryBuilderField, queryBuilder, context);
+
+        QueryBuilder queryBuilderForProcessing = queryBuilder.rewrite(new QueryShardContext(queryShardContext));
+        Query query = queryBuilderForProcessing.toQuery(queryShardContext);
+        processQuery(query, context);
+    }
+
+    static void createQueryBuilderField(Version indexVersion, BinaryFieldMapper qbField, QueryBuilder queryBuilder, ParseContext context)
+        throws IOException {
+        try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) {
+            try (OutputStreamStreamOutput out = new OutputStreamStreamOutput(stream)) {
+                out.setVersion(indexVersion);
+                out.writeNamedWriteable(queryBuilder);
+                byte[] queryBuilderAsBytes = stream.toByteArray();
+                qbField.parse(context.createExternalValueContext(queryBuilderAsBytes));
+            }
+        }
+    }
+
+    private static final FieldType INDEXED_KEYWORD = new FieldType();
+    static {
+        INDEXED_KEYWORD.setTokenized(false);
+        INDEXED_KEYWORD.setOmitNorms(true);
+        INDEXED_KEYWORD.setIndexOptions(IndexOptions.DOCS);
+        INDEXED_KEYWORD.freeze();
+    }
+
+    void processQuery(Query query, ParseContext context) {
+        ParseContext.Document doc = context.doc();
+        PercolatorFieldType pft = (PercolatorFieldType) this.fieldType();
+        QueryAnalyzer.Result result;
+        Version indexVersion = context.mapperService().getIndexSettings().getIndexVersionCreated();
+        result = QueryAnalyzer.analyze(query, indexVersion);
+        if (result == QueryAnalyzer.Result.UNKNOWN) {
+            doc.add(new Field(pft.extractionResultField.name(), EXTRACTION_FAILED, INDEXED_KEYWORD));
+            return;
+        }
+        for (QueryAnalyzer.QueryExtraction extraction : result.extractions) {
+            if (extraction.term != null) {
+                BytesRefBuilder builder = new BytesRefBuilder();
+                builder.append(new BytesRef(extraction.field()));
+                builder.append(FIELD_VALUE_SEPARATOR);
+                builder.append(extraction.bytes());
+                doc.add(new Field(queryTermsField.name(), builder.toBytesRef(), INDEXED_KEYWORD));
+            } else if (extraction.range != null) {
+                byte[] min = extraction.range.lowerPoint;
+                byte[] max = extraction.range.upperPoint;
+                doc.add(new BinaryRange(rangeFieldMapper.name(), encodeRange(extraction.range.fieldName, min, max)));
+            }
+        }
+
+        if (result.matchAllDocs) {
+            doc.add(new Field(extractionResultField.name(), EXTRACTION_FAILED, INDEXED_KEYWORD));
+            if (result.verified) {
+                doc.add(new Field(extractionResultField.name(), EXTRACTION_COMPLETE, INDEXED_KEYWORD));
+            }
+        } else if (result.verified) {
+            doc.add(new Field(extractionResultField.name(), EXTRACTION_COMPLETE, INDEXED_KEYWORD));
+        } else {
+            doc.add(new Field(extractionResultField.name(), EXTRACTION_PARTIAL, INDEXED_KEYWORD));
+        }
+
+        createFieldNamesField(context);
+        doc.add(new NumericDocValuesField(minimumShouldMatchFieldMapper.name(), result.minimumShouldMatch));
+    }
+
+    static void configureContext(QueryShardContext context, boolean mapUnmappedFieldsAsString) {
+        // This means that fields in the query need to exist in the mapping prior to registering this query
+        // The reason this is required is that if a field doesn't exist then the query assumes defaults, which may be undesired.
+        //
+        // Even worse, when fields mentioned in percolator queries do get added to the mapping after the queries have been registered,
+        // then the percolator queries don't work as expected any more.
+        //
+        // Query parsing can't introduce new fields in mappings (which happens when registering a percolator query),
+        // because field type can't be inferred from queries (like documents do) so the best option here is to disallow
+        // the usage of unmapped fields in percolator queries to avoid unexpected behaviour
+        //
+        // if index.percolator.map_unmapped_fields_as_string is set to true, query can contain unmapped fields which will be mapped
+        // as an analyzed string.
+        context.setAllowUnmappedFields(false);
+        context.setMapUnmappedFieldAsString(mapUnmappedFieldsAsString);
+    }
+
+    static QueryBuilder parseQueryBuilder(XContentParser parser, XContentLocation location) {
+        try {
+            return parseInnerQueryBuilder(parser);
+        } catch (IOException e) {
+            throw new ParsingException(location, "Failed to parse", e);
+        }
+    }
+
+    @Override
+    public Iterator<Mapper> iterator() {
+        return Arrays.asList(
+            queryTermsField,
+            extractionResultField,
+            queryBuilderField,
+            minimumShouldMatchFieldMapper,
+            rangeFieldMapper
+        ).iterator();
+    }
+
+    @Override
+    protected void parseCreateField(ParseContext context) {
+        throw new UnsupportedOperationException("should not be invoked");
+    }
+
+    @Override
+    protected String contentType() {
+        return CONTENT_TYPE;
+    }
+
+    boolean isMapUnmappedFieldAsText() {
+        return mapUnmappedFieldsAsText;
+    }
+
+    /**
+     * Fails if a percolator contains an unsupported query.
+     * The following queries are not supported:
+     * 1) a has_child query
+     * 2) a has_parent query
+     */
+    static void verifyQuery(QueryBuilder queryBuilder) {
+        if (queryBuilder.getName().equals("has_child")) {
+            throw new IllegalArgumentException("the [has_child] query is unsupported inside a percolator query");
+        } else if (queryBuilder.getName().equals("has_parent")) {
+            throw new IllegalArgumentException("the [has_parent] query is unsupported inside a percolator query");
+        } else if (queryBuilder instanceof BoolQueryBuilder) {
+            BoolQueryBuilder boolQueryBuilder = (BoolQueryBuilder) queryBuilder;
+            List<QueryBuilder> clauses = new ArrayList<>();
+            clauses.addAll(boolQueryBuilder.filter());
+            clauses.addAll(boolQueryBuilder.must());
+            clauses.addAll(boolQueryBuilder.mustNot());
+            clauses.addAll(boolQueryBuilder.should());
+            for (QueryBuilder clause : clauses) {
+                verifyQuery(clause);
+            }
+        } else if (queryBuilder instanceof ConstantScoreQueryBuilder) {
+            verifyQuery(((ConstantScoreQueryBuilder) queryBuilder).innerQuery());
+        } else if (queryBuilder instanceof FunctionScoreQueryBuilder) {
+            verifyQuery(((FunctionScoreQueryBuilder) queryBuilder).query());
+        } else if (queryBuilder instanceof BoostingQueryBuilder) {
+            verifyQuery(((BoostingQueryBuilder) queryBuilder).negativeQuery());
+            verifyQuery(((BoostingQueryBuilder) queryBuilder).positiveQuery());
+        } else if (queryBuilder instanceof DisMaxQueryBuilder) {
+            DisMaxQueryBuilder disMaxQueryBuilder = (DisMaxQueryBuilder) queryBuilder;
+            for (QueryBuilder innerQueryBuilder : disMaxQueryBuilder.innerQueries()) {
+                verifyQuery(innerQueryBuilder);
+            }
+        }
+    }
+
+    static byte[] encodeRange(String rangeFieldName, byte[] minEncoded, byte[] maxEncoded) {
+        assert minEncoded.length == maxEncoded.length;
+        byte[] bytes = new byte[BinaryRange.BYTES * 2];
+
+        // First compute the hash for the field name and write the full hash into the byte array
+        BytesRef fieldAsBytesRef = new BytesRef(rangeFieldName);
+        MurmurHash3.Hash128 hash = new MurmurHash3.Hash128();
+        MurmurHash3.hash128(fieldAsBytesRef.bytes, fieldAsBytesRef.offset, fieldAsBytesRef.length, 0, hash);
+        ByteBuffer bb = ByteBuffer.wrap(bytes);
+        bb.putLong(hash.h1).putLong(hash.h2).putLong(hash.h1).putLong(hash.h2);
+        assert bb.position() == bb.limit();
+
+        // Secondly, overwrite the min and max encoded values in the byte array
+        // This way we are able to reuse as much as possible from the hash for any range type.
+        int offset = BinaryRange.BYTES - minEncoded.length;
+        System.arraycopy(minEncoded, 0, bytes, offset, minEncoded.length);
+        System.arraycopy(maxEncoded, 0, bytes, BinaryRange.BYTES + offset, maxEncoded.length);
+        return bytes;
+    }
+}
\ No newline at end of file
diff --git a/alerting/src/main/java/org/opensearch/percolator/PercolatorPluginExt.java b/alerting/src/main/java/org/opensearch/percolator/PercolatorPluginExt.java
new file mode 100644
index 000000000..4ad0ca7c4
--- /dev/null
+++ b/alerting/src/main/java/org/opensearch/percolator/PercolatorPluginExt.java
@@ -0,0 +1,79 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.percolator;
+
+import org.opensearch.common.settings.Setting;
+import org.opensearch.index.mapper.Mapper;
+import org.opensearch.plugins.ExtensiblePlugin;
+import org.opensearch.plugins.MapperPlugin;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.plugins.SearchPlugin;
+import org.opensearch.search.fetch.FetchSubPhase;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static java.util.Collections.singletonList;
+import static java.util.Collections.singletonMap;
+
+/**
+ * This is a stop-gap solution and will be removed in a future release.
+ */
+// TODO for cleanup
+public class PercolatorPluginExt extends Plugin implements MapperPlugin, SearchPlugin, ExtensiblePlugin {
+    @Override
+    public List<QuerySpec<?>> getQueries() {
+        return singletonList(new QuerySpec<>(PercolateQueryBuilderExt.NAME, PercolateQueryBuilderExt::new, PercolateQueryBuilderExt::fromXContent));
+    }
+
+    @Override
+    public List<FetchSubPhase> getFetchSubPhases(FetchPhaseConstructionContext context) {
+        return Arrays.asList(new PercolatorMatchedSlotSubFetchPhase(), new PercolatorHighlightSubFetchPhase(context.getHighlighters()));
+    }
+
+    @Override
+    public List<Setting<?>> getSettings() {
+        return Arrays.asList(PercolatorFieldMapperExt.INDEX_MAP_UNMAPPED_FIELDS_AS_TEXT_SETTING);
+    }
+
+    @Override
+    public Map<String, Mapper.TypeParser> getMappers() {
+        return singletonMap(PercolatorFieldMapperExt.CONTENT_TYPE, new PercolatorFieldMapperExt.TypeParser());
+    }
+
+    @Override
+    public void loadExtensions(ExtensionLoader loader) {
+        ExtensiblePlugin.super.loadExtensions(loader);
+    }
+}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt b/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt
index c241fdaf1..638ba078b 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt
@@ -22,6 +22,7 @@ import org.opensearch.alerting.model.ActionRunResult
 import org.opensearch.alerting.model.AggregationResultBucket
 import org.opensearch.alerting.model.Alert
 import org.opensearch.alerting.model.BucketLevelTrigger
+import org.opensearch.alerting.model.DocumentLevelTriggerRunResult
 import org.opensearch.alerting.model.Monitor
 import org.opensearch.alerting.model.QueryLevelTriggerRunResult
 import org.opensearch.alerting.model.Trigger
@@ -29,6 +30,7 @@ import org.opensearch.alerting.model.action.AlertCategory
 import org.opensearch.alerting.opensearchapi.firstFailureOrNull
 import org.opensearch.alerting.opensearchapi.retry
 import org.opensearch.alerting.opensearchapi.suspendUntil
+import org.opensearch.alerting.script.DocumentLevelTriggerExecutionContext
 import org.opensearch.alerting.script.QueryLevelTriggerExecutionContext
 import org.opensearch.alerting.util.IndexUtils
 import org.opensearch.alerting.util.getBucketKeysHash
@@ -166,6 +168,29 @@ class AlertService(
         }
     }
 
+    // TODO: clean this up so it follows the proper alert management for doc monitors
+    fun composeDocLevelAlert(
+        findings: List<String>,
+        relatedDocIds: List<String>,
+        ctx: DocumentLevelTriggerExecutionContext,
+        result: DocumentLevelTriggerRunResult,
+        alertError: AlertError?
+    ): Alert {
+        val currentTime = Instant.now()
+
+        val actionExecutionResults = result.actionResults.map {
+            ActionExecutionResult(it.key, it.value.executionTime, if (it.value.throttled) 1 else 0)
+        }
+
+        val alertState = if (alertError == null) Alert.State.ACTIVE else Alert.State.ERROR
+        return Alert(
+            monitor = ctx.monitor, trigger = ctx.trigger, startTime = currentTime,
+            lastNotificationTime = currentTime, state = alertState, errorMessage = alertError?.message,
+            actionExecutionResults = actionExecutionResults, schemaVersion = IndexUtils.alertIndexSchemaVersion,
+            findingIds = findings, relatedDocIds = relatedDocIds
+        )
+    }
+
     fun updateActionResultsForBucketLevelAlert(
         currentAlert: Alert,
         actionResults: Map<String, ActionRunResult>,
@@ -295,8 +320,8 @@ class AlertService(
             DeleteRequest(AlertIndices.ALERT_INDEX, alert.id)
                 .routing(alert.monitorId),
             // Only add completed alert to history index if history is enabled
-            if (alertIndices.isHistoryEnabled()) {
-                IndexRequest(AlertIndices.HISTORY_WRITE_INDEX)
+            if (alertIndices.isAlertHistoryEnabled()) {
+                IndexRequest(AlertIndices.ALERT_HISTORY_WRITE_INDEX)
                     .routing(alert.monitorId)
                     .source(alert.toXContentWithUser(XContentFactory.jsonBuilder()))
                     .id(alert.id)
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/AlertingPlugin.kt b/alerting/src/main/kotlin/org/opensearch/alerting/AlertingPlugin.kt
index 2e74e13bc..f16ed2d31 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/AlertingPlugin.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/AlertingPlugin.kt
@@ -17,6 +17,7 @@ import org.opensearch.alerting.action.GetAlertsAction
 import org.opensearch.alerting.action.GetDestinationsAction
 import org.opensearch.alerting.action.GetEmailAccountAction
 import org.opensearch.alerting.action.GetEmailGroupAction
+import org.opensearch.alerting.action.GetFindingsAction
 import org.opensearch.alerting.action.GetMonitorAction
 import org.opensearch.alerting.action.IndexDestinationAction
 import org.opensearch.alerting.action.IndexEmailAccountAction
@@ -32,6 +33,7 @@ import org.opensearch.alerting.core.ScheduledJobIndices
 import org.opensearch.alerting.core.action.node.ScheduledJobsStatsAction
 import org.opensearch.alerting.core.action.node.ScheduledJobsStatsTransportAction
 import org.opensearch.alerting.core.model.ClusterMetricsInput
+import org.opensearch.alerting.core.model.DocLevelMonitorInput
 import org.opensearch.alerting.core.model.ScheduledJob
 import org.opensearch.alerting.core.model.SearchInput
 import org.opensearch.alerting.core.resthandler.RestScheduledJobStatsHandler
@@ -39,6 +41,7 @@ import org.opensearch.alerting.core.schedule.JobScheduler
 import org.opensearch.alerting.core.settings.LegacyOpenDistroScheduledJobSettings
 import org.opensearch.alerting.core.settings.ScheduledJobSettings
 import org.opensearch.alerting.model.BucketLevelTrigger
+import org.opensearch.alerting.model.DocumentLevelTrigger
 import org.opensearch.alerting.model.Monitor
 import org.opensearch.alerting.model.QueryLevelTrigger
 import org.opensearch.alerting.resthandler.RestAcknowledgeAlertAction
@@ -51,6 +54,7 @@ import org.opensearch.alerting.resthandler.RestGetAlertsAction
 import org.opensearch.alerting.resthandler.RestGetDestinationsAction
 import
org.opensearch.alerting.resthandler.RestGetEmailAccountAction import org.opensearch.alerting.resthandler.RestGetEmailGroupAction +import org.opensearch.alerting.resthandler.RestGetFindingsAction import org.opensearch.alerting.resthandler.RestGetMonitorAction import org.opensearch.alerting.resthandler.RestIndexDestinationAction import org.opensearch.alerting.resthandler.RestIndexEmailAccountAction @@ -74,6 +78,7 @@ import org.opensearch.alerting.transport.TransportGetAlertsAction import org.opensearch.alerting.transport.TransportGetDestinationsAction import org.opensearch.alerting.transport.TransportGetEmailAccountAction import org.opensearch.alerting.transport.TransportGetEmailGroupAction +import org.opensearch.alerting.transport.TransportGetFindingsSearchAction import org.opensearch.alerting.transport.TransportGetMonitorAction import org.opensearch.alerting.transport.TransportIndexDestinationAction import org.opensearch.alerting.transport.TransportIndexEmailAccountAction @@ -82,6 +87,7 @@ import org.opensearch.alerting.transport.TransportIndexMonitorAction import org.opensearch.alerting.transport.TransportSearchEmailAccountAction import org.opensearch.alerting.transport.TransportSearchEmailGroupAction import org.opensearch.alerting.transport.TransportSearchMonitorAction +import org.opensearch.alerting.util.DocLevelMonitorQueries import org.opensearch.alerting.util.destinationmigration.DestinationMigrationCoordinator import org.opensearch.client.Client import org.opensearch.cluster.metadata.IndexNameExpressionResolver @@ -102,8 +108,8 @@ import org.opensearch.index.IndexModule import org.opensearch.painless.spi.PainlessExtension import org.opensearch.painless.spi.Whitelist import org.opensearch.painless.spi.WhitelistLoader +import org.opensearch.percolator.PercolatorPluginExt import org.opensearch.plugins.ActionPlugin -import org.opensearch.plugins.Plugin import org.opensearch.plugins.ReloadablePlugin import org.opensearch.plugins.ScriptPlugin import org.opensearch.plugins.SearchPlugin @@ -122,7 +128,7 @@ import java.util.function.Supplier * It also adds [Monitor.XCONTENT_REGISTRY], [SearchInput.XCONTENT_REGISTRY], [QueryLevelTrigger.XCONTENT_REGISTRY], * [BucketLevelTrigger.XCONTENT_REGISTRY], [ClusterMetricsInput.XCONTENT_REGISTRY] to the [NamedXContentRegistry] so that we are able to deserialize the custom named objects. 
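 * [DocLevelMonitorInput.XCONTENT_REGISTRY] and [DocumentLevelTrigger.XCONTENT_REGISTRY] are likewise added below
 * so that the new document-level monitor inputs and triggers deserialize the same way.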
 */
-internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, ReloadablePlugin, SearchPlugin, Plugin() {
+internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, ReloadablePlugin, SearchPlugin, PercolatorPluginExt() {
 
     override fun getContextWhitelists(): Map<ScriptContext<*>, List<Whitelist>> {
         val whitelist = WhitelistLoader.loadFromResourceFiles(javaClass, "org.opensearch.alerting.txt")
@@ -140,13 +146,15 @@ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, R
         @JvmField val EMAIL_GROUP_BASE_URI = "$DESTINATION_BASE_URI/email_groups"
         @JvmField val LEGACY_OPENDISTRO_EMAIL_ACCOUNT_BASE_URI = "$LEGACY_OPENDISTRO_DESTINATION_BASE_URI/email_accounts"
         @JvmField val LEGACY_OPENDISTRO_EMAIL_GROUP_BASE_URI = "$LEGACY_OPENDISTRO_DESTINATION_BASE_URI/email_groups"
+        @JvmField val FINDING_BASE_URI = "/_plugins/_alerting/findings"
         @JvmField val ALERTING_JOB_TYPES = listOf("monitor")
     }
 
-    lateinit var runner: MonitorRunner
+    lateinit var runner: MonitorRunnerService
     lateinit var scheduler: JobScheduler
     lateinit var sweeper: JobSweeper
     lateinit var scheduledJobIndices: ScheduledJobIndices
+    lateinit var docLevelMonitorQueries: DocLevelMonitorQueries
     lateinit var threadPool: ThreadPool
     lateinit var alertIndices: AlertIndices
     lateinit var clusterService: ClusterService
@@ -180,7 +188,8 @@ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, R
             RestSearchEmailGroupAction(),
             RestGetEmailGroupAction(),
             RestGetDestinationsAction(),
-            RestGetAlertsAction()
+            RestGetAlertsAction(),
+            RestGetFindingsAction()
         )
     }
 
@@ -204,7 +213,9 @@ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, R
             ActionPlugin.ActionHandler(SearchEmailGroupAction.INSTANCE, TransportSearchEmailGroupAction::class.java),
             ActionPlugin.ActionHandler(DeleteEmailGroupAction.INSTANCE, TransportDeleteEmailGroupAction::class.java),
             ActionPlugin.ActionHandler(GetDestinationsAction.INSTANCE, TransportGetDestinationsAction::class.java),
-            ActionPlugin.ActionHandler(GetAlertsAction.INSTANCE, TransportGetAlertsAction::class.java)
+            ActionPlugin.ActionHandler(GetAlertsAction.INSTANCE, TransportGetAlertsAction::class.java),
+            ActionPlugin.ActionHandler(GetFindingsAction.INSTANCE, TransportGetFindingsSearchAction::class.java)
+        )
     }
 
@@ -212,9 +223,11 @@ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, R
         return listOf(
             Monitor.XCONTENT_REGISTRY,
             SearchInput.XCONTENT_REGISTRY,
+            DocLevelMonitorInput.XCONTENT_REGISTRY,
             QueryLevelTrigger.XCONTENT_REGISTRY,
             BucketLevelTrigger.XCONTENT_REGISTRY,
-            ClusterMetricsInput.XCONTENT_REGISTRY
+            ClusterMetricsInput.XCONTENT_REGISTRY,
+            DocumentLevelTrigger.XCONTENT_REGISTRY
         )
     }
 
@@ -234,7 +247,7 @@ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, R
         // Need to figure out how to use the OpenSearch DI classes rather than handwiring things here.
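        // DocLevelMonitorQueries (wired up below) is used to maintain the hidden percolator index
        // (ScheduledJob.DOC_LEVEL_QUERIES_INDEX) that stores each document-level monitor's queries;
        // the document-level runner percolates incoming documents against it to find matching queries.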
val settings = environment.settings() alertIndices = AlertIndices(settings, client, threadPool, clusterService) - runner = MonitorRunner + runner = MonitorRunnerService .registerClusterService(clusterService) .registerClient(client) .registerNamedXContentRegistry(xContentRegistry) @@ -248,12 +261,13 @@ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, R .registerConsumers() .registerDestinationSettings() scheduledJobIndices = ScheduledJobIndices(client.admin(), clusterService) + docLevelMonitorQueries = DocLevelMonitorQueries(client.admin(), clusterService) scheduler = JobScheduler(threadPool, runner) sweeper = JobSweeper(environment.settings(), client, clusterService, threadPool, xContentRegistry, scheduler, ALERTING_JOB_TYPES) destinationMigrationCoordinator = DestinationMigrationCoordinator(client, clusterService, threadPool) this.threadPool = threadPool this.clusterService = clusterService - return listOf(sweeper, scheduler, runner, scheduledJobIndices, destinationMigrationCoordinator) + return listOf(sweeper, scheduler, runner, scheduledJobIndices, docLevelMonitorQueries, destinationMigrationCoordinator) } override fun getSettings(): List> { @@ -310,7 +324,12 @@ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, R LegacyOpenDistroDestinationSettings.EMAIL_USERNAME, LegacyOpenDistroDestinationSettings.EMAIL_PASSWORD, LegacyOpenDistroDestinationSettings.ALLOW_LIST, - LegacyOpenDistroDestinationSettings.HOST_DENY_LIST + LegacyOpenDistroDestinationSettings.HOST_DENY_LIST, + AlertingSettings.FINDING_HISTORY_ENABLED, + AlertingSettings.FINDING_HISTORY_MAX_DOCS, + AlertingSettings.FINDING_HISTORY_INDEX_MAX_AGE, + AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD, + AlertingSettings.FINDING_HISTORY_RETENTION_PERIOD ) } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt b/alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt new file mode 100644 index 000000000..4c966664c --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt @@ -0,0 +1,333 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import kotlinx.coroutines.runBlocking +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.model.ActionRunResult +import org.opensearch.alerting.model.Alert +import org.opensearch.alerting.model.BucketLevelTrigger +import org.opensearch.alerting.model.BucketLevelTriggerRunResult +import org.opensearch.alerting.model.InputRunResults +import org.opensearch.alerting.model.Monitor +import org.opensearch.alerting.model.MonitorRunResult +import org.opensearch.alerting.model.action.ActionExecutionScope +import org.opensearch.alerting.model.action.AlertCategory +import org.opensearch.alerting.model.action.PerAlertActionScope +import org.opensearch.alerting.model.action.PerExecutionActionScope +import org.opensearch.alerting.opensearchapi.InjectorContextElement +import org.opensearch.alerting.script.BucketLevelTriggerExecutionContext +import org.opensearch.alerting.util.getActionExecutionPolicy +import org.opensearch.alerting.util.getBucketKeysHash +import org.opensearch.alerting.util.getCombinedTriggerRunResult +import java.time.Instant + +object BucketLevelMonitorRunner : MonitorRunner() { + private val logger = LogManager.getLogger(javaClass) + + override suspend fun runMonitor( + monitor: Monitor, + monitorCtx: MonitorRunnerExecutionContext, + periodStart: 
Instant,
+        periodEnd: Instant,
+        dryrun: Boolean
+    ): MonitorRunResult<BucketLevelTriggerRunResult> {
+        val roles = MonitorRunnerService.getRolesForMonitor(monitor)
+        logger.debug("Running monitor: ${monitor.name} with roles: $roles Thread: ${Thread.currentThread().name}")
+
+        if (periodStart == periodEnd) {
+            logger.warn("Start and end time are the same: $periodStart. This monitor will probably only run once.")
+        }
+
+        var monitorResult = MonitorRunResult<BucketLevelTriggerRunResult>(monitor.name, periodStart, periodEnd)
+        val currentAlerts = try {
+            monitorCtx.alertIndices!!.createOrUpdateAlertIndex()
+            monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex()
+            monitorCtx.alertService!!.loadCurrentAlertsForBucketLevelMonitor(monitor)
+        } catch (e: Exception) {
+            // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts
+            val id = if (monitor.id.trim().isEmpty()) "_na_" else monitor.id
+            logger.error("Error loading alerts for monitor: $id", e)
+            return monitorResult.copy(error = e)
+        }
+
+        /*
+         * Since the aggregation query can consist of multiple pages, each iteration of the do-while loop only has partial results
+         * from the runBucketLevelTrigger results, whereas currentAlerts has a complete view of the existing Alerts. This means that
+         * whether an Alert is new or de-duped can be determined locally within the do-while loop based on whether its key appears in
+         * currentAlerts. However, it cannot be guaranteed that an existing Alert is COMPLETED until all pages have been
+         * iterated over (since a bucket that did not appear in one page of the aggregation results could appear in a later page).
+         *
+         * To solve for this, currentAlerts acts as a list of "potentially completed alerts" throughout the execution.
+         * When categorizing the Alerts in each iteration, de-duped Alerts are removed from the currentAlerts map
+         * (for the Trigger being executed), and the Alerts left in currentAlerts after all pages have been iterated through can
+         * be marked as COMPLETED since they were never de-duped.
+         *
+         * Meanwhile, the nextAlerts map contains the Alerts that will exist at the end of this Monitor execution. It is a compilation
+         * across Triggers because, when executing actions at a PER_EXECUTION frequency, all of the Alerts are needed before executing
+         * Actions, which can only be done once all of the aggregation results (and Triggers, given the pagination logic) have been evaluated.
+         */
+        val triggerResults = mutableMapOf<String, BucketLevelTriggerRunResult>()
+        val triggerContexts = mutableMapOf<String, BucketLevelTriggerExecutionContext>()
+        val nextAlerts = mutableMapOf<String, MutableMap<AlertCategory, MutableList<Alert>>>()
+        var firstIteration = true
+        var firstPageOfInputResults = InputRunResults(listOf(), null)
+        do {
+            // TODO: Since a composite aggregation is being used for the input query, the total bucket count cannot be determined.
+            // If a setting is imposed that limits the buckets that can be processed for Bucket-Level Monitors, we'd need to iterate
+            // over the buckets until we hit that threshold. In that case, we'd want to exit the execution without creating any alerts
+            // since the buckets we iterate over before hitting the limit are not deterministic. Is there a better way to fail faster?
+            runBlocking(InjectorContextElement(monitor.id, monitorCtx.settings!!, monitorCtx.threadPool!!.threadContext, roles)) {
+                // Storing the first page of results in the case of paginated input results to prevent empty results
+                // in the final output of monitorResult, which occurs when all pages have been exhausted.
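+                // (Illustration with hypothetical values: if page 1 of the composite aggregation returns buckets
+                // [a, b] along with after_key = b, the next collectInputResults call resumes after b, and the
+                // surrounding do-while loop exits once afterKeysPresent() returns false.)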
+ // If it's favorable to return the last page, will need to check how to accomplish that with multiple aggregation paths + // with different page counts. + val inputResults = monitorCtx.inputService!!.collectInputResults( + monitor, + periodStart, + periodEnd, + monitorResult.inputResults + ) + if (firstIteration) { + firstPageOfInputResults = inputResults + firstIteration = false + } + monitorResult = monitorResult.copy(inputResults = inputResults) + } + + for (trigger in monitor.triggers) { + // The currentAlerts map is formed by iterating over the Monitor's Triggers as keys so null should not be returned here + val currentAlertsForTrigger = currentAlerts[trigger]!! + val triggerCtx = BucketLevelTriggerExecutionContext(monitor, trigger as BucketLevelTrigger, monitorResult) + triggerContexts[trigger.id] = triggerCtx + val triggerResult = monitorCtx.triggerService!!.runBucketLevelTrigger(monitor, trigger, triggerCtx) + triggerResults[trigger.id] = triggerResult.getCombinedTriggerRunResult(triggerResults[trigger.id]) + + /* + * If an error was encountered when running the trigger, it means that something went wrong when parsing the input results + * for the filtered buckets returned from the pipeline bucket selector injected into the input query. + * + * In this case, the returned aggregation result buckets are empty so the categorization of the Alerts that happens below + * should be skipped/invalidated since comparing the current Alerts to an empty result will lead the execution to believe + * that all Alerts have been COMPLETED. Not doing so would mean it would not be possible to propagate the error into the + * existing Alerts in a way the user can easily view them since they will have all been moved to the history index. + */ + if (triggerResults[trigger.id]?.error != null) continue + + // TODO: Should triggerResult's aggregationResultBucket be a list? If not, getCategorizedAlertsForBucketLevelMonitor can + // be refactored to use a map instead + val categorizedAlerts = monitorCtx.alertService!!.getCategorizedAlertsForBucketLevelMonitor( + monitor, trigger, currentAlertsForTrigger, triggerResult.aggregationResultBuckets.values.toList() + ).toMutableMap() + val dedupedAlerts = categorizedAlerts.getOrDefault(AlertCategory.DEDUPED, emptyList()) + var newAlerts = categorizedAlerts.getOrDefault(AlertCategory.NEW, emptyList()) + + /* + * Index de-duped and new Alerts here (if it's not a test Monitor) so they are available at the time the Actions are executed. + * + * The new Alerts have to be returned and saved back with their indexed doc ID to prevent duplicate documents + * when the Alerts are updated again after Action execution. + * + * Note: Index operations can fail for various reasons (such as write blocks on cluster), in such a case, the Actions + * will still execute with the Alert information in the ctx but the Alerts may not be visible. + */ + if (!dryrun && monitor.id != Monitor.NO_ID) { + monitorCtx.alertService!!.saveAlerts(dedupedAlerts, monitorCtx.retryPolicy!!, allowUpdatingAcknowledgedAlert = true) + newAlerts = monitorCtx.alertService!!.saveNewAlerts(newAlerts, monitorCtx.retryPolicy!!) 
+                }
+
+                // Store deduped and new Alerts to accumulate across pages
+                if (!nextAlerts.containsKey(trigger.id)) {
+                    nextAlerts[trigger.id] = mutableMapOf(
+                        AlertCategory.DEDUPED to mutableListOf<Alert>(),
+                        AlertCategory.NEW to mutableListOf(),
+                        AlertCategory.COMPLETED to mutableListOf()
+                    )
+                }
+                nextAlerts[trigger.id]?.get(AlertCategory.DEDUPED)?.addAll(dedupedAlerts)
+                nextAlerts[trigger.id]?.get(AlertCategory.NEW)?.addAll(newAlerts)
+            }
+        } while (monitorResult.inputResults.afterKeysPresent())
+
+        // The completed Alerts are whatever is left in currentAlerts.
+        // However, this is only done if there was no trigger error, since otherwise nextAlerts was not collected
+        // in favor of just using currentAlerts as-is.
+        currentAlerts.forEach { (trigger, keysToAlertsMap) ->
+            if (triggerResults[trigger.id]?.error == null)
+                nextAlerts[trigger.id]?.get(AlertCategory.COMPLETED)
+                    ?.addAll(monitorCtx.alertService!!.convertToCompletedAlerts(keysToAlertsMap))
+        }
+
+        for (trigger in monitor.triggers) {
+            val alertsToUpdate = mutableSetOf<Alert>()
+            val completedAlertsToUpdate = mutableSetOf<Alert>()
+            // Filter ACKNOWLEDGED Alerts from the deduped list so they do not have Actions executed for them.
+            // New Alerts are ignored since they cannot be acknowledged yet.
+            val dedupedAlerts = nextAlerts[trigger.id]?.get(AlertCategory.DEDUPED)
+                ?.filterNot { it.state == Alert.State.ACKNOWLEDGED }?.toMutableList()
+                ?: mutableListOf()
+            // Update nextAlerts so the filtered DEDUPED Alerts are reflected for PER_ALERT Action execution
+            nextAlerts[trigger.id]?.set(AlertCategory.DEDUPED, dedupedAlerts)
+            val newAlerts = nextAlerts[trigger.id]?.get(AlertCategory.NEW) ?: mutableListOf()
+            val completedAlerts = nextAlerts[trigger.id]?.get(AlertCategory.COMPLETED) ?: mutableListOf()
+
+            // Adding all the COMPLETED Alerts to a separate set and removing them if they get added
+            // to alertsToUpdate to ensure the Alert doc is updated at the end in either case
+            completedAlertsToUpdate.addAll(completedAlerts)
+
+            // All trigger contexts and results should be available at this point since all triggers were evaluated in the main do-while loop
+            val triggerCtx = triggerContexts[trigger.id]!!
+            val triggerResult = triggerResults[trigger.id]!!
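+            // The dispatch below runs Actions either once per categorized Alert (PER_ALERT) or once for the
+            // whole execution (PER_EXECUTION); defaultToPerExecutionAction() forces the latter when an error
+            // occurred or when the actionable Alert count exceeds the configured maximum.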
+ val monitorOrTriggerError = monitorResult.error ?: triggerResult.error + val shouldDefaultToPerExecution = defaultToPerExecutionAction( + monitorCtx, + monitorId = monitor.id, + triggerId = trigger.id, + totalActionableAlertCount = dedupedAlerts.size + newAlerts.size + completedAlerts.size, + monitorOrTriggerError = monitorOrTriggerError + ) + for (action in trigger.actions) { + // ActionExecutionPolicy should not be null for Bucket-Level Monitors since it has a default config when not set explicitly + val actionExecutionScope = action.getActionExecutionPolicy(monitor)!!.actionExecutionScope + if (actionExecutionScope is PerAlertActionScope && !shouldDefaultToPerExecution) { + for (alertCategory in actionExecutionScope.actionableAlerts) { + val alertsToExecuteActionsFor = nextAlerts[trigger.id]?.get(alertCategory) ?: mutableListOf() + for (alert in alertsToExecuteActionsFor) { + val actionCtx = getActionContextForAlertCategory( + alertCategory, alert, triggerCtx, monitorOrTriggerError + ) + // AggregationResultBucket should not be null here + val alertBucketKeysHash = alert.aggregationResultBucket!!.getBucketKeysHash() + if (!triggerResult.actionResultsMap.containsKey(alertBucketKeysHash)) { + triggerResult.actionResultsMap[alertBucketKeysHash] = mutableMapOf() + } + + // Keeping the throttled response separate from runAction for now since + // throttling is not supported for PER_EXECUTION + val actionResult = if (MonitorRunnerService.isActionActionable(action, alert)) { + this.runAction(action, actionCtx, monitorCtx, dryrun) + } else { + ActionRunResult(action.id, action.name, mapOf(), true, null, null) + } + + triggerResult.actionResultsMap[alertBucketKeysHash]?.set(action.id, actionResult) + alertsToUpdate.add(alert) + // Remove the alert from completedAlertsToUpdate in case it is present there since + // its update will be handled in the alertsToUpdate batch + completedAlertsToUpdate.remove(alert) + } + } + } else if (actionExecutionScope is PerExecutionActionScope || shouldDefaultToPerExecution) { + // If all categories of Alerts are empty, there is nothing to message on and we can skip the Action. + // If the error is not null, this is disregarded and the Action is executed anyway so the user can be notified. + if (monitorOrTriggerError == null && dedupedAlerts.isEmpty() && newAlerts.isEmpty() && completedAlerts.isEmpty()) + continue + + val actionCtx = triggerCtx.copy( + dedupedAlerts = dedupedAlerts, + newAlerts = newAlerts, + completedAlerts = completedAlerts, + error = monitorResult.error ?: triggerResult.error + ) + val actionResult = this.runAction(action, actionCtx, monitorCtx, dryrun) + // If there was an error during trigger execution then the Alerts to be updated are the current Alerts since the state + // was not changed. Otherwise, the Alerts to be updated are the sum of the deduped, new and completed Alerts. 
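+                    // (Action results are keyed by each Alert's aggregation bucket-keys hash below, so they can
+                    // be matched back to the right Alert doc when it is updated.)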
+ val alertsToIterate = if (monitorOrTriggerError == null) { + (dedupedAlerts + newAlerts + completedAlerts) + } else currentAlerts[trigger]?.map { it.value } ?: listOf() + // Save the Action run result for every Alert + for (alert in alertsToIterate) { + val alertBucketKeysHash = alert.aggregationResultBucket!!.getBucketKeysHash() + if (!triggerResult.actionResultsMap.containsKey(alertBucketKeysHash)) { + triggerResult.actionResultsMap[alertBucketKeysHash] = mutableMapOf() + } + triggerResult.actionResultsMap[alertBucketKeysHash]?.set(action.id, actionResult) + alertsToUpdate.add(alert) + // Remove the alert from completedAlertsToUpdate in case it is present there since + // its update will be handled in the alertsToUpdate batch + completedAlertsToUpdate.remove(alert) + } + } + } + + // Alerts are only added to alertsToUpdate after Action execution meaning the action results for it should be present + // in the actionResultsMap but returning a default value when accessing the map to be safe. + val updatedAlerts = alertsToUpdate.map { alert -> + val bucketKeysHash = alert.aggregationResultBucket!!.getBucketKeysHash() + val actionResults = triggerResult.actionResultsMap.getOrDefault(bucketKeysHash, emptyMap()) + monitorCtx.alertService!!.updateActionResultsForBucketLevelAlert( + alert.copy(lastNotificationTime = MonitorRunnerService.currentTime()), + actionResults, + // TODO: Update BucketLevelTriggerRunResult.alertError() to retrieve error based on the first failed Action + monitorResult.alertError() ?: triggerResult.alertError() + ) + } + + // Update Alerts with action execution results (if it's not a test Monitor). + // ACKNOWLEDGED Alerts should not be saved here since actions are not executed for them. + if (!dryrun && monitor.id != Monitor.NO_ID) { + monitorCtx.alertService!!.saveAlerts(updatedAlerts, monitorCtx.retryPolicy!!, allowUpdatingAcknowledgedAlert = false) + // Save any COMPLETED Alerts that were not covered in updatedAlerts + monitorCtx.alertService!!.saveAlerts( + completedAlertsToUpdate.toList(), + monitorCtx.retryPolicy!!, + allowUpdatingAcknowledgedAlert = false + ) + } + } + + return monitorResult.copy(inputResults = firstPageOfInputResults, triggerResults = triggerResults) + } + + private fun defaultToPerExecutionAction( + monitorCtx: MonitorRunnerExecutionContext, + monitorId: String, + triggerId: String, + totalActionableAlertCount: Int, + monitorOrTriggerError: Exception? + ): Boolean { + // If the monitorId or triggerResult has an error, then also default to PER_EXECUTION to communicate the error + if (monitorOrTriggerError != null) { + logger.debug( + "Trigger [$triggerId] in monitor [$monitorId] encountered an error. Defaulting to " + + "[${ActionExecutionScope.Type.PER_EXECUTION}] for action execution to communicate error." + ) + return true + } + + // If the MAX_ACTIONABLE_ALERT_COUNT is set to -1, consider it unbounded and proceed regardless of actionable Alert count + if (monitorCtx.maxActionableAlertCount < 0) return false + + // If the total number of Alerts to execute Actions on exceeds the MAX_ACTIONABLE_ALERT_COUNT setting then default to + // PER_EXECUTION for less intrusive Actions + if (totalActionableAlertCount > monitorCtx.maxActionableAlertCount) { + logger.debug( + "The total actionable alerts for trigger [$triggerId] in monitor [$monitorId] is [$totalActionableAlertCount] " + + "which exceeds the maximum of [${monitorCtx.maxActionableAlertCount}]. " + + "Defaulting to [${ActionExecutionScope.Type.PER_EXECUTION}] for action execution." 
+ ) + return true + } + + return false + } + + private fun getActionContextForAlertCategory( + alertCategory: AlertCategory, + alert: Alert, + ctx: BucketLevelTriggerExecutionContext, + error: Exception? + ): BucketLevelTriggerExecutionContext { + return when (alertCategory) { + AlertCategory.DEDUPED -> + ctx.copy(dedupedAlerts = listOf(alert), newAlerts = emptyList(), completedAlerts = emptyList(), error = error) + AlertCategory.NEW -> + ctx.copy(dedupedAlerts = emptyList(), newAlerts = listOf(alert), completedAlerts = emptyList(), error = error) + AlertCategory.COMPLETED -> + ctx.copy(dedupedAlerts = emptyList(), newAlerts = emptyList(), completedAlerts = listOf(alert), error = error) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/DocumentReturningMonitorRunner.kt b/alerting/src/main/kotlin/org/opensearch/alerting/DocumentReturningMonitorRunner.kt new file mode 100644 index 000000000..e430d8f67 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/DocumentReturningMonitorRunner.kt @@ -0,0 +1,460 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import org.apache.logging.log4j.LogManager +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.search.SearchAction +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.alerts.AlertIndices.Companion.FINDING_HISTORY_WRITE_INDEX +import org.opensearch.alerting.core.model.DocLevelMonitorInput +import org.opensearch.alerting.core.model.DocLevelQuery +import org.opensearch.alerting.core.model.ScheduledJob +import org.opensearch.alerting.model.Alert +import org.opensearch.alerting.model.DocumentExecutionContext +import org.opensearch.alerting.model.DocumentLevelTrigger +import org.opensearch.alerting.model.DocumentLevelTriggerRunResult +import org.opensearch.alerting.model.Finding +import org.opensearch.alerting.model.InputRunResults +import org.opensearch.alerting.model.Monitor +import org.opensearch.alerting.model.MonitorRunResult +import org.opensearch.alerting.opensearchapi.string +import org.opensearch.alerting.script.DocumentLevelTriggerExecutionContext +import org.opensearch.alerting.util.updateMonitor +import org.opensearch.client.Client +import org.opensearch.cluster.routing.ShardRouting +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.bytes.BytesReference +import org.opensearch.common.xcontent.ToXContent +import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.common.xcontent.XContentFactory +import org.opensearch.common.xcontent.XContentType +import org.opensearch.index.query.BoolQueryBuilder +import org.opensearch.index.query.QueryBuilders +import org.opensearch.percolator.PercolateQueryBuilderExt +import org.opensearch.rest.RestStatus +import org.opensearch.search.SearchHits +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.search.sort.SortOrder +import java.io.IOException +import java.time.Instant +import java.util.UUID +import kotlin.collections.HashMap +import kotlin.math.max + +object DocumentReturningMonitorRunner : MonitorRunner() { + private val logger = LogManager.getLogger(javaClass) + + override suspend fun runMonitor( + monitor: Monitor, + monitorCtx: MonitorRunnerExecutionContext, + periodStart: Instant, + periodEnd: Instant, + dryrun: Boolean + ): MonitorRunResult { + 
logger.info("Document-level-monitor is running ...") + var monitorResult = MonitorRunResult(monitor.name, periodStart, periodEnd) + + // TODO: is this needed from Charlie? + try { + monitorCtx.alertIndices!!.createOrUpdateAlertIndex() + monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex() + monitorCtx.alertIndices!!.createOrUpdateInitialFindingHistoryIndex() + } catch (e: Exception) { + val id = if (monitor.id.trim().isEmpty()) "_na_" else monitor.id + logger.error("Error loading alerts for monitor: $id", e) + return monitorResult.copy(error = e) + } + + try { + validate(monitor) + } catch (e: Exception) { + logger.info("Failed to start Document-level-monitor. Error: ${e.message}") + return monitorResult.copy(error = e) + } + + val docLevelMonitorInput = monitor.inputs[0] as DocLevelMonitorInput + val index = docLevelMonitorInput.indices[0] + val queries: List = docLevelMonitorInput.queries + + val isTempMonitor = dryrun || monitor.id == Monitor.NO_ID + var lastRunContext = monitor.lastRunContext.toMutableMap() + try { + if (lastRunContext.isNullOrEmpty()) { + lastRunContext = createRunContext(monitorCtx.clusterService!!, monitorCtx.client!!, index).toMutableMap() + } + } catch (e: Exception) { + logger.info("Failed to start Document-level-monitor $index. Error: ${e.message}") + return monitorResult.copy(error = e) + } + + val count: Int = lastRunContext["shards_count"] as Int + val updatedLastRunContext = lastRunContext.toMutableMap() + for (i: Int in 0 until count) { + val shard = i.toString() + val maxSeqNo: Long = getMaxSeqNo(monitorCtx.client!!, index, shard) + updatedLastRunContext[shard] = maxSeqNo + + // update lastRunContext if its a temp monitor as we only want to view the last bit of data then + // TODO: If dryrun, we should make it so we limit the search as this could still potentially give us lots of data + if (isTempMonitor) { + lastRunContext[shard] = max(-1, maxSeqNo - 1) + } + } + + val queryToDocIds = mutableMapOf>() + val docsToQueries = mutableMapOf>() + val docExecutionContext = DocumentExecutionContext(queries, lastRunContext, updatedLastRunContext) + val idQueryMap = mutableMapOf() + + val matchingDocs = getMatchingDocs(monitor, monitorCtx, docExecutionContext, index, dryrun) + + if (matchingDocs.isNotEmpty()) { + val matchedQueriesForDocs = getMatchedQueries(monitorCtx, matchingDocs.map { it.second }, monitor) + + matchedQueriesForDocs.forEach { hit -> + val (id, query) = Pair( + hit.id.replace("_${monitor.id}", ""), + ((hit.sourceAsMap["query"] as HashMap<*, *>)["query_string"] as HashMap<*, *>)["query"] + ) + val docLevelQuery = DocLevelQuery(id, id, query.toString()) + + val docIndices = hit.field("_percolator_document_slot").values.map { it.toString().toInt() } + docIndices.forEach { idx -> + if (queryToDocIds.containsKey(docLevelQuery)) { + queryToDocIds[docLevelQuery]?.add(matchingDocs[idx].first) + } else { + queryToDocIds[docLevelQuery] = mutableSetOf(matchingDocs[idx].first) + } + + if (docsToQueries.containsKey(matchingDocs[idx].first)) { + docsToQueries[matchingDocs[idx].first]?.add(id) + } else { + docsToQueries[matchingDocs[idx].first] = mutableListOf(id) + } + } + } + } + + val queryInputResults = queryToDocIds.mapKeys { it.key.id } + monitorResult = monitorResult.copy(inputResults = InputRunResults(listOf(queryInputResults))) + val queryIds = queries.map { + idQueryMap[it.id] = it + it.id + } + + val triggerResults = mutableMapOf() + monitor.triggers.forEach { + triggerResults[it.id] = runForEachDocTrigger( + monitorCtx, + monitorResult, 
+ it as DocumentLevelTrigger, + monitor, + idQueryMap, + docsToQueries, + queryToDocIds, + dryrun + ) + } + + // Don't update monitor if this is a test monitor + if (!isTempMonitor) { + + // TODO: Check for race condition against the update monitor api + // This does the update at the end in case of errors and makes sure all the queries are executed + val updatedMonitor = monitor.copy(lastRunContext = updatedLastRunContext) + // note: update has to called in serial for shards of a given index. + // make sure this is just updated for the specific query or at the end of all the queries + updateMonitor(monitorCtx.client!!, monitorCtx.xContentRegistry!!, monitorCtx.settings!!, updatedMonitor) + } + + // TODO: Update the Document as part of the Trigger and return back the trigger action result + return monitorResult.copy(triggerResults = triggerResults) + } + + private suspend fun runForEachDocTrigger( + monitorCtx: MonitorRunnerExecutionContext, + monitorResult: MonitorRunResult, + trigger: DocumentLevelTrigger, + monitor: Monitor, + idQueryMap: Map, + docsToQueries: Map>, + queryToDocIds: Map>, + dryrun: Boolean + ): DocumentLevelTriggerRunResult { + val triggerCtx = DocumentLevelTriggerExecutionContext(monitor, trigger) + val triggerResult = monitorCtx.triggerService!!.runDocLevelTrigger(monitor, trigger, queryToDocIds) + + logger.info("trigger results") + logger.info(triggerResult.triggeredDocs.toString()) + + val index = (monitor.inputs[0] as DocLevelMonitorInput).indices[0] + + // TODO: modify findings such that there is a finding per document + val findings = mutableListOf() + val findingDocPairs = mutableListOf>() + + // TODO: Implement throttling for findings + if (!dryrun && monitor.id != Monitor.NO_ID) { + docsToQueries.forEach { + val triggeredQueries = it.value.map { queryId -> idQueryMap[queryId]!! 
+                val findingId = createFindings(monitor, monitorCtx, index, triggeredQueries, listOf(it.key))
+                findings.add(findingId)
+
+                if (triggerResult.triggeredDocs.contains(it.key)) {
+                    findingDocPairs.add(Pair(findingId, it.key))
+                }
+            }
+        }
+
+        val actionCtx = triggerCtx.copy(
+            triggeredDocs = triggerResult.triggeredDocs,
+            relatedFindings = findings,
+            error = monitorResult.error ?: triggerResult.error
+        )
+
+        for (action in trigger.actions) {
+            triggerResult.actionResults[action.id] = this.runAction(action, actionCtx, monitorCtx, dryrun)
+        }
+
+        // TODO: Implement throttling for alerts
+        // Alerts are saved after the actions so that failures in the actions can be stated in the alert
+        if (!dryrun && monitor.id != Monitor.NO_ID) {
+            val alerts = mutableListOf<Alert>()
+            findingDocPairs.forEach {
+                val alert = monitorCtx.alertService!!.composeDocLevelAlert(
+                    listOf(it.first),
+                    listOf(it.second),
+                    triggerCtx,
+                    triggerResult,
+                    monitorResult.alertError() ?: triggerResult.alertError()
+                )
+                alerts.add(alert)
+            }
+            monitorCtx.retryPolicy?.let { monitorCtx.alertService!!.saveAlerts(alerts, it) }
+        }
+        return triggerResult
+    }
+
+    private fun createFindings(
+        monitor: Monitor,
+        monitorCtx: MonitorRunnerExecutionContext,
+        index: String,
+        docLevelQueries: List<DocLevelQuery>,
+        matchingDocIds: List<String>
+    ): String {
+        val finding = Finding(
+            id = UUID.randomUUID().toString(),
+            relatedDocIds = matchingDocIds,
+            monitorId = monitor.id,
+            monitorName = monitor.name,
+            index = index,
+            docLevelQueries = docLevelQueries,
+            timestamp = Instant.now()
+        )
+
+        val findingStr = finding.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS).string()
+        // TODO: change this to debug level.
+        logger.info("Findings: $findingStr")
+
+        // TODO: below is hardcoded temp code added only for testing; replace it with proper Findings index lifecycle management.
+        val indexRequest = IndexRequest(FINDING_HISTORY_WRITE_INDEX)
+            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            .source(findingStr, XContentType.JSON)
+            .id(finding.id)
+            .routing(finding.id)
+
+        monitorCtx.client!!.index(indexRequest).actionGet()
+        return finding.id
+    }
+
+    private fun validate(monitor: Monitor) {
+        if (monitor.inputs.size > 1) {
+            throw IOException("Only one input is supported with document-level-monitor.")
+        }
+
+        if (monitor.inputs[0].name() != DocLevelMonitorInput.DOC_LEVEL_INPUT_FIELD) {
+            throw IOException("Invalid input with document-level-monitor.")
+        }
+
+        val docLevelMonitorInput = monitor.inputs[0] as DocLevelMonitorInput
+        if (docLevelMonitorInput.indices.size > 1) {
+            throw IOException("Only one index is supported with document-level-monitor.")
+        }
+    }
+
+    fun createRunContext(clusterService: ClusterService, client: Client, index: String): HashMap<String, Any> {
+        val lastRunContext = HashMap<String, Any>()
+        lastRunContext["index"] = index
+        val count = getShardsCount(clusterService, index)
+        lastRunContext["shards_count"] = count
+
+        for (i: Int in 0 until count) {
+            val shard = i.toString()
+            val maxSeqNo: Long = getMaxSeqNo(client, index, shard)
+            lastRunContext[shard] = maxSeqNo
+        }
+        return lastRunContext
+    }
+
+    /**
+     * Get the current max seq number of the shard. We find it by searching the last document
+     * in the primary shard.
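+     * (Roughly: search the shard with preference "_shards:<n>", sorted by _seq_no descending with size 1,
+     * and read seqNo off the single hit; an empty shard yields -1.)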
+     */
+    private fun getMaxSeqNo(client: Client, index: String, shard: String): Long {
+        val request: SearchRequest = SearchRequest()
+            .indices(index)
+            .preference("_shards:$shard")
+            .source(
+                SearchSourceBuilder()
+                    .version(true)
+                    .sort("_seq_no", SortOrder.DESC)
+                    .seqNoAndPrimaryTerm(true)
+                    .query(QueryBuilders.matchAllQuery())
+                    .size(1)
+            )
+        val response: SearchResponse = client.search(request).actionGet()
+        if (response.status() !== RestStatus.OK) {
+            throw IOException("Failed to get max seq no for shard: $shard")
+        }
+        if (response.hits.hits.isEmpty())
+            return -1L
+
+        return response.hits.hits[0].seqNo
+    }
+
+    private fun getShardsCount(clusterService: ClusterService, index: String): Int {
+        val allShards: List<ShardRouting> = clusterService!!.state().routingTable().allShards(index)
+        return allShards.filter { it.primary() }.size
+    }
+
+    private fun getMatchingDocs(
+        monitor: Monitor,
+        monitorCtx: MonitorRunnerExecutionContext,
+        docExecutionCtx: DocumentExecutionContext,
+        index: String,
+        dryrun: Boolean
+    ): List<Pair<String, BytesReference>> {
+        val count: Int = docExecutionCtx.lastRunContext["shards_count"] as Int
+        val matchingDocs = mutableListOf<Pair<String, BytesReference>>()
+        for (i: Int in 0 until count) {
+            val shard = i.toString()
+            try {
+                logger.info("Monitor execution for shard: $shard")
+
+                val maxSeqNo: Long = docExecutionCtx.updatedLastRunContext[shard].toString().toLong()
+                logger.info("MaxSeqNo of shard_$shard is $maxSeqNo")
+
+                // If dryrun, use one less than the max sequence number (floored at -1) as the previous sequence number
+                val prevSeqNo = if (dryrun || monitor.id == Monitor.NO_ID)
+                    max(-1, maxSeqNo - 1)
+                else docExecutionCtx.lastRunContext[shard].toString().toLongOrNull()
+
+                if (dryrun) {
+                    logger.info("it is a dryrun")
+                }
+
+                logger.info("prevSeq: $prevSeqNo, maxSeq: $maxSeqNo")
+
+                val hits: SearchHits = searchShard(
+                    monitorCtx,
+                    index,
+                    shard,
+                    prevSeqNo,
+                    maxSeqNo,
+                    null
+                )
+                logger.info("Search hits for shard_$shard is: ${hits.hits.size}")
+
+                if (hits.hits.isNotEmpty()) {
+                    matchingDocs.addAll(getAllDocs(hits, monitor.id))
+                }
+            } catch (e: Exception) {
+                logger.info("Failed to run for shard $shard. Error: ${e.message}")
+            }
+        }
+        return matchingDocs
+    }
+
+    private fun searchShard(
+        monitorCtx: MonitorRunnerExecutionContext,
+        index: String,
+        shard: String,
+        prevSeqNo: Long?,
+        maxSeqNo: Long,
+        query: String?
+    ): SearchHits {
+        if (prevSeqNo?.equals(maxSeqNo) == true && maxSeqNo != 0L) {
+            return SearchHits.empty()
+        }
+        val boolQueryBuilder = BoolQueryBuilder()
+        boolQueryBuilder.filter(QueryBuilders.rangeQuery("_seq_no").gt(prevSeqNo).lte(maxSeqNo))
+
+        if (query != null) {
+            boolQueryBuilder.must(QueryBuilders.queryStringQuery(query))
+        }
+
+        val request: SearchRequest = SearchRequest()
+            .indices(index)
+            .preference("_shards:$shard")
+            .source(
+                SearchSourceBuilder()
+                    .version(true)
+                    .query(boolQueryBuilder)
+                    .size(10000) // fixme: make this configurable.
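+                    // The _seq_no range filter above restricts this search to the (prevSeqNo, maxSeqNo]
+                    // window, i.e. only documents indexed since the last checkpoint for this shard.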
+ ) + logger.info("Request: $request") + val response: SearchResponse = monitorCtx.client!!.search(request).actionGet() + if (response.status() !== RestStatus.OK) { + throw IOException("Failed to search shard: $shard") + } + return response.hits + } + + private fun getMatchedQueries( + monitorCtx: MonitorRunnerExecutionContext, + docs: List, + monitor: Monitor + ): SearchHits { + val boolQueryBuilder = BoolQueryBuilder() + + val percolateQueryBuilder = PercolateQueryBuilderExt("query", docs, XContentType.JSON) + if (monitor.id.isNotEmpty()) { + boolQueryBuilder.filter(QueryBuilders.matchQuery("monitor_id", monitor.id)) + } + boolQueryBuilder.filter(percolateQueryBuilder) + + val searchRequest = SearchRequest(ScheduledJob.DOC_LEVEL_QUERIES_INDEX) + val searchSourceBuilder = SearchSourceBuilder() + searchSourceBuilder.query(boolQueryBuilder) + searchRequest.source(searchSourceBuilder) + + val response: SearchResponse = monitorCtx.client!!.execute(SearchAction.INSTANCE, searchRequest).actionGet() + + if (response.status() !== RestStatus.OK) { + throw IOException("Failed to search percolate index: ${ScheduledJob.DOC_LEVEL_QUERIES_INDEX}") + } + return response.hits + } + + private fun getAllDocs(hits: SearchHits, monitorId: String): List> { + return hits.map { hit -> + val sourceMap = hit.sourceAsMap + + var xContentBuilder = XContentFactory.jsonBuilder().startObject() + sourceMap.forEach { (k, v) -> + xContentBuilder = xContentBuilder.field("${k}_$monitorId", v) + } + xContentBuilder = xContentBuilder.endObject() + + val sourceRef = BytesReference.bytes(xContentBuilder) + + Pair(hit.id, sourceRef) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunner.kt b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunner.kt index c5b6f8f1a..83137d14e 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunner.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunner.kt @@ -5,675 +5,85 @@ package org.opensearch.alerting -import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.Job -import kotlinx.coroutines.SupervisorJob -import kotlinx.coroutines.launch import kotlinx.coroutines.withContext -import org.apache.logging.log4j.LogManager -import org.opensearch.action.bulk.BackoffPolicy -import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.alerts.moveAlerts -import org.opensearch.alerting.core.JobRunner -import org.opensearch.alerting.core.model.ScheduledJob import org.opensearch.alerting.model.ActionRunResult -import org.opensearch.alerting.model.Alert import org.opensearch.alerting.model.AlertingConfigAccessor -import org.opensearch.alerting.model.BucketLevelTrigger -import org.opensearch.alerting.model.BucketLevelTriggerRunResult -import org.opensearch.alerting.model.InputRunResults import org.opensearch.alerting.model.Monitor import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.alerting.model.QueryLevelTrigger -import org.opensearch.alerting.model.QueryLevelTriggerRunResult import org.opensearch.alerting.model.action.Action -import org.opensearch.alerting.model.action.Action.Companion.MESSAGE -import org.opensearch.alerting.model.action.Action.Companion.MESSAGE_ID -import org.opensearch.alerting.model.action.Action.Companion.SUBJECT -import org.opensearch.alerting.model.action.ActionExecutionScope -import org.opensearch.alerting.model.action.AlertCategory -import org.opensearch.alerting.model.action.PerAlertActionScope -import 
org.opensearch.alerting.model.action.PerExecutionActionScope import org.opensearch.alerting.model.destination.Destination -import org.opensearch.alerting.model.destination.DestinationContextFactory -import org.opensearch.alerting.opensearchapi.InjectorContextElement -import org.opensearch.alerting.opensearchapi.retry -import org.opensearch.alerting.opensearchapi.withClosableContext -import org.opensearch.alerting.script.BucketLevelTriggerExecutionContext +import org.opensearch.alerting.script.DocumentLevelTriggerExecutionContext import org.opensearch.alerting.script.QueryLevelTriggerExecutionContext import org.opensearch.alerting.script.TriggerExecutionContext -import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_BACKOFF_COUNT -import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_BACKOFF_MILLIS -import org.opensearch.alerting.settings.AlertingSettings.Companion.DEFAULT_MAX_ACTIONABLE_ALERT_COUNT -import org.opensearch.alerting.settings.AlertingSettings.Companion.MAX_ACTIONABLE_ALERT_COUNT -import org.opensearch.alerting.settings.AlertingSettings.Companion.MOVE_ALERTS_BACKOFF_COUNT -import org.opensearch.alerting.settings.AlertingSettings.Companion.MOVE_ALERTS_BACKOFF_MILLIS -import org.opensearch.alerting.settings.DestinationSettings -import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST -import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST_NONE -import org.opensearch.alerting.settings.DestinationSettings.Companion.HOST_DENY_LIST -import org.opensearch.alerting.settings.DestinationSettings.Companion.loadDestinationSettings -import org.opensearch.alerting.settings.LegacyOpenDistroDestinationSettings.Companion.HOST_DENY_LIST_NONE import org.opensearch.alerting.util.destinationmigration.NotificationActionConfigs import org.opensearch.alerting.util.destinationmigration.NotificationApiUtils.Companion.getNotificationConfigInfo import org.opensearch.alerting.util.destinationmigration.createMessageContent import org.opensearch.alerting.util.destinationmigration.getTitle import org.opensearch.alerting.util.destinationmigration.publishLegacyNotification import org.opensearch.alerting.util.destinationmigration.sendNotification -import org.opensearch.alerting.util.getActionExecutionPolicy -import org.opensearch.alerting.util.getBucketKeysHash -import org.opensearch.alerting.util.getCombinedTriggerRunResult -import org.opensearch.alerting.util.isADMonitor import org.opensearch.alerting.util.isAllowed -import org.opensearch.alerting.util.isBucketLevelMonitor import org.opensearch.alerting.util.isTestAction -import org.opensearch.client.Client import org.opensearch.client.node.NodeClient -import org.opensearch.cluster.service.ClusterService import org.opensearch.common.Strings -import org.opensearch.common.component.AbstractLifecycleComponent -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.commons.notifications.model.NotificationConfigInfo -import org.opensearch.script.Script -import org.opensearch.script.ScriptService -import org.opensearch.script.TemplateScript -import org.opensearch.threadpool.ThreadPool import java.time.Instant -import kotlin.coroutines.CoroutineContext -object MonitorRunner : JobRunner, CoroutineScope, AbstractLifecycleComponent() { +abstract class MonitorRunner { - private val logger = LogManager.getLogger(javaClass) - - private lateinit var clusterService: ClusterService - private lateinit var client: 
+    private fun getAllDocs(hits: SearchHits, monitorId: String): List<Pair<String, BytesReference>> {
+        return hits.map { hit ->
+            val sourceMap = hit.sourceAsMap
+
+            var xContentBuilder = XContentFactory.jsonBuilder().startObject()
+            sourceMap.forEach { (k, v) ->
+                xContentBuilder = xContentBuilder.field("${k}_$monitorId", v)
+            }
+            xContentBuilder = xContentBuilder.endObject()
+
+            val sourceRef = BytesReference.bytes(xContentBuilder)
+
+            Pair(hit.id, sourceRef)
+        }
+    }
+}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunner.kt b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunner.kt
index c5b6f8f1a..83137d14e 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunner.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunner.kt
@@ -5,675 +5,85 @@
 package org.opensearch.alerting
 
-import kotlinx.coroutines.CoroutineScope
 import kotlinx.coroutines.Dispatchers
-import kotlinx.coroutines.Job
-import kotlinx.coroutines.SupervisorJob
-import kotlinx.coroutines.launch
 import kotlinx.coroutines.withContext
-import org.apache.logging.log4j.LogManager
-import org.opensearch.action.bulk.BackoffPolicy
-import org.opensearch.alerting.alerts.AlertIndices
-import org.opensearch.alerting.alerts.moveAlerts
-import org.opensearch.alerting.core.JobRunner
-import org.opensearch.alerting.core.model.ScheduledJob
 import org.opensearch.alerting.model.ActionRunResult
-import org.opensearch.alerting.model.Alert
 import org.opensearch.alerting.model.AlertingConfigAccessor
-import org.opensearch.alerting.model.BucketLevelTrigger
-import org.opensearch.alerting.model.BucketLevelTriggerRunResult
-import org.opensearch.alerting.model.InputRunResults
 import org.opensearch.alerting.model.Monitor
 import org.opensearch.alerting.model.MonitorRunResult
-import org.opensearch.alerting.model.QueryLevelTrigger
-import org.opensearch.alerting.model.QueryLevelTriggerRunResult
 import org.opensearch.alerting.model.action.Action
-import org.opensearch.alerting.model.action.Action.Companion.MESSAGE
-import org.opensearch.alerting.model.action.Action.Companion.MESSAGE_ID
-import org.opensearch.alerting.model.action.Action.Companion.SUBJECT
-import org.opensearch.alerting.model.action.ActionExecutionScope
-import org.opensearch.alerting.model.action.AlertCategory
-import org.opensearch.alerting.model.action.PerAlertActionScope
-import org.opensearch.alerting.model.action.PerExecutionActionScope
 import org.opensearch.alerting.model.destination.Destination
-import org.opensearch.alerting.model.destination.DestinationContextFactory
-import org.opensearch.alerting.opensearchapi.InjectorContextElement
-import org.opensearch.alerting.opensearchapi.retry
-import org.opensearch.alerting.opensearchapi.withClosableContext
-import org.opensearch.alerting.script.BucketLevelTriggerExecutionContext
+import org.opensearch.alerting.script.DocumentLevelTriggerExecutionContext
 import org.opensearch.alerting.script.QueryLevelTriggerExecutionContext
 import org.opensearch.alerting.script.TriggerExecutionContext
-import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_BACKOFF_COUNT
-import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_BACKOFF_MILLIS
-import org.opensearch.alerting.settings.AlertingSettings.Companion.DEFAULT_MAX_ACTIONABLE_ALERT_COUNT
-import org.opensearch.alerting.settings.AlertingSettings.Companion.MAX_ACTIONABLE_ALERT_COUNT
-import org.opensearch.alerting.settings.AlertingSettings.Companion.MOVE_ALERTS_BACKOFF_COUNT
-import org.opensearch.alerting.settings.AlertingSettings.Companion.MOVE_ALERTS_BACKOFF_MILLIS
-import org.opensearch.alerting.settings.DestinationSettings
-import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST
-import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST_NONE
-import org.opensearch.alerting.settings.DestinationSettings.Companion.HOST_DENY_LIST
-import org.opensearch.alerting.settings.DestinationSettings.Companion.loadDestinationSettings
-import org.opensearch.alerting.settings.LegacyOpenDistroDestinationSettings.Companion.HOST_DENY_LIST_NONE
 import org.opensearch.alerting.util.destinationmigration.NotificationActionConfigs
 import org.opensearch.alerting.util.destinationmigration.NotificationApiUtils.Companion.getNotificationConfigInfo
 import org.opensearch.alerting.util.destinationmigration.createMessageContent
 import org.opensearch.alerting.util.destinationmigration.getTitle
 import org.opensearch.alerting.util.destinationmigration.publishLegacyNotification
 import org.opensearch.alerting.util.destinationmigration.sendNotification
-import org.opensearch.alerting.util.getActionExecutionPolicy
-import org.opensearch.alerting.util.getBucketKeysHash
-import org.opensearch.alerting.util.getCombinedTriggerRunResult
-import org.opensearch.alerting.util.isADMonitor
 import org.opensearch.alerting.util.isAllowed
-import org.opensearch.alerting.util.isBucketLevelMonitor
 import org.opensearch.alerting.util.isTestAction
-import org.opensearch.client.Client
 import org.opensearch.client.node.NodeClient
-import org.opensearch.cluster.service.ClusterService
 import org.opensearch.common.Strings
-import org.opensearch.common.component.AbstractLifecycleComponent
-import org.opensearch.common.settings.Settings
-import org.opensearch.common.xcontent.NamedXContentRegistry
 import org.opensearch.commons.notifications.model.NotificationConfigInfo
-import org.opensearch.script.Script
-import org.opensearch.script.ScriptService
-import org.opensearch.script.TemplateScript
-import org.opensearch.threadpool.ThreadPool
 import java.time.Instant
-import kotlin.coroutines.CoroutineContext
 
-object MonitorRunner : JobRunner, CoroutineScope, AbstractLifecycleComponent() {
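+/**
+ * Base class for the runner of each monitor type (query-level, bucket-level, document-level).
+ * Concrete runners implement [runMonitor] and receive the shared services through a
+ * [MonitorRunnerExecutionContext] rather than the registered singletons the old object held.
+ */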
+abstract class MonitorRunner {
 
-    private val logger = LogManager.getLogger(javaClass)
-
-    private lateinit var clusterService: ClusterService
-    private lateinit var client: Client
-    private lateinit var xContentRegistry: NamedXContentRegistry
-    private lateinit var scriptService: ScriptService
-    private lateinit var settings: Settings
-    private lateinit var threadPool: ThreadPool
-    private lateinit var alertIndices: AlertIndices
-    private lateinit var inputService: InputService
-    private lateinit var triggerService: TriggerService
-    private lateinit var alertService: AlertService
-
-    @Volatile private lateinit var retryPolicy: BackoffPolicy
-    @Volatile private lateinit var moveAlertsRetryPolicy: BackoffPolicy
-
-    @Volatile private var allowList = ALLOW_LIST_NONE
-    @Volatile private var hostDenyList = HOST_DENY_LIST_NONE
-
-    @Volatile private lateinit var destinationSettings: Map<String, DestinationSettings.Companion.SecureDestinationSettings>
-    @Volatile private lateinit var destinationContextFactory: DestinationContextFactory
-
-    @Volatile private var maxActionableAlertCount = DEFAULT_MAX_ACTIONABLE_ALERT_COUNT
-
-    private lateinit var runnerSupervisor: Job
-    override val coroutineContext: CoroutineContext
-        get() = Dispatchers.Default + runnerSupervisor
-
-    fun registerClusterService(clusterService: ClusterService): MonitorRunner {
-        this.clusterService = clusterService
-        return this
-    }
-
-    fun registerClient(client: Client): MonitorRunner {
-        this.client = client
-        return this
-    }
-
-    fun registerNamedXContentRegistry(xContentRegistry: NamedXContentRegistry): MonitorRunner {
-        this.xContentRegistry = xContentRegistry
-        return this
-    }
-
-    fun registerScriptService(scriptService: ScriptService): MonitorRunner {
-        this.scriptService = scriptService
-        return this
-    }
-
-    fun registerSettings(settings: Settings): MonitorRunner {
-        this.settings = settings
-        return this
-    }
-
-    fun registerThreadPool(threadPool: ThreadPool): MonitorRunner {
-        this.threadPool = threadPool
-        return this
-    }
-
-    fun registerAlertIndices(alertIndices: AlertIndices): MonitorRunner {
-        this.alertIndices = alertIndices
-        return this
-    }
-
-    fun registerInputService(inputService: InputService): MonitorRunner {
-        this.inputService = inputService
-        return this
-    }
-
-    fun registerTriggerService(triggerService: TriggerService): MonitorRunner {
-        this.triggerService = triggerService
-        return this
-    }
-
-    fun registerAlertService(alertService: AlertService): MonitorRunner {
-        this.alertService = alertService
-        return this
-    }
-
-    // Must be called after registerClusterService and registerSettings in AlertingPlugin
-    fun registerConsumers(): MonitorRunner {
-        retryPolicy = BackoffPolicy.constantBackoff(ALERT_BACKOFF_MILLIS.get(settings), ALERT_BACKOFF_COUNT.get(settings))
-        clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_BACKOFF_MILLIS, ALERT_BACKOFF_COUNT) { millis, count ->
-            retryPolicy = BackoffPolicy.constantBackoff(millis, count)
-        }
-
-        moveAlertsRetryPolicy =
-            BackoffPolicy.exponentialBackoff(MOVE_ALERTS_BACKOFF_MILLIS.get(settings), MOVE_ALERTS_BACKOFF_COUNT.get(settings))
-        clusterService.clusterSettings.addSettingsUpdateConsumer(MOVE_ALERTS_BACKOFF_MILLIS, MOVE_ALERTS_BACKOFF_COUNT) { millis, count ->
-            moveAlertsRetryPolicy = BackoffPolicy.exponentialBackoff(millis, count)
-        }
-
-        allowList = ALLOW_LIST.get(settings)
-        clusterService.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) {
-            allowList = it
-        }
-
-        // Host deny list is not a dynamic setting so no consumer is registered but the variable is set here
-        hostDenyList = HOST_DENY_LIST.get(settings)
-
-        maxActionableAlertCount = MAX_ACTIONABLE_ALERT_COUNT.get(settings)
-        clusterService.clusterSettings.addSettingsUpdateConsumer(MAX_ACTIONABLE_ALERT_COUNT) {
-            maxActionableAlertCount = it
-        }
-
-        return this
-    }
-
-    // To be safe, call this last as it depends on a number of other components being registered beforehand (client, settings, etc.)
-    fun registerDestinationSettings(): MonitorRunner {
-        destinationSettings = loadDestinationSettings(settings)
-        destinationContextFactory = DestinationContextFactory(client, xContentRegistry, destinationSettings)
-        return this
-    }
-
-    // Updates destination settings when the reload API is called so that new keystore values are visible
-    fun reloadDestinationSettings(settings: Settings) {
-        destinationSettings = loadDestinationSettings(settings)
-
-        // Update destinationContextFactory as well since destinationSettings has been updated
-        destinationContextFactory.updateDestinationSettings(destinationSettings)
-    }
-
-    override fun doStart() {
-        runnerSupervisor = SupervisorJob()
-    }
-
-    override fun doStop() {
-        runnerSupervisor.cancel()
-    }
-
-    override fun doClose() { }
-
-    override fun postIndex(job: ScheduledJob) {
-        if (job !is Monitor) {
-            throw IllegalArgumentException("Invalid job type")
-        }
-
-        launch {
-            try {
-                moveAlertsRetryPolicy.retry(logger) {
-                    if (alertIndices.isInitialized()) {
-                        moveAlerts(client, job.id, job)
-                    }
-                }
-            } catch (e: Exception) {
-                logger.error("Failed to move active alerts for monitor [${job.id}].", e)
-            }
-        }
-    }
-
-    override fun postDelete(jobId: String) {
-        launch {
-            try {
-                moveAlertsRetryPolicy.retry(logger) {
-                    if (alertIndices.isInitialized()) {
-                        moveAlerts(client, jobId, null)
-                    }
-                }
-            } catch (e: Exception) {
-                logger.error("Failed to move active alerts for monitor [$jobId].", e)
-            }
-        }
-    }
-
-    override fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant) {
-        if (job !is Monitor) {
-            throw IllegalArgumentException("Invalid job type")
-        }
-
-        launch {
-            if (job.isBucketLevelMonitor()) {
-                runBucketLevelMonitor(job, periodStart, periodEnd)
-            } else {
-                runQueryLevelMonitor(job, periodStart, periodEnd)
-            }
-        }
-    }
-
-    suspend fun runQueryLevelMonitor(monitor: Monitor, periodStart: Instant, periodEnd: Instant, dryrun: Boolean = false):
-        MonitorRunResult<QueryLevelTriggerRunResult> {
-        val roles = getRolesForMonitor(monitor)
-        logger.debug("Running monitor: ${monitor.name} with roles: $roles Thread: ${Thread.currentThread().name}")
-
-        if (periodStart == periodEnd) {
-            logger.warn("Start and end time are the same: $periodStart. This monitor will probably only run once.")
-        }
-
-        var monitorResult = MonitorRunResult<QueryLevelTriggerRunResult>(monitor.name, periodStart, periodEnd)
-        val currentAlerts = try {
-            alertIndices.createOrUpdateAlertIndex()
-            alertIndices.createOrUpdateInitialHistoryIndex()
-            alertService.loadCurrentAlertsForQueryLevelMonitor(monitor)
-        } catch (e: Exception) {
-            // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts
-            val id = if (monitor.id.trim().isEmpty()) "_na_" else monitor.id
-            logger.error("Error loading alerts for monitor: $id", e)
-            return monitorResult.copy(error = e)
-        }
-        if (!isADMonitor(monitor)) {
-            withClosableContext(InjectorContextElement(monitor.id, settings, threadPool.threadContext, roles)) {
-                monitorResult = monitorResult.copy(inputResults = inputService.collectInputResults(monitor, periodStart, periodEnd))
-            }
-        } else {
-            monitorResult = monitorResult.copy(inputResults = inputService.collectInputResultsForADMonitor(monitor, periodStart, periodEnd))
-        }
-
-        val updatedAlerts = mutableListOf<Alert>()
-        val triggerResults = mutableMapOf<String, QueryLevelTriggerRunResult>()
-        for (trigger in monitor.triggers) {
-            val currentAlert = currentAlerts[trigger]
-            val triggerCtx = QueryLevelTriggerExecutionContext(monitor, trigger as QueryLevelTrigger, monitorResult, currentAlert)
-            val triggerResult = triggerService.runQueryLevelTrigger(monitor, trigger, triggerCtx)
-            triggerResults[trigger.id] = triggerResult
-
-            if (triggerService.isQueryLevelTriggerActionable(triggerCtx, triggerResult)) {
-                val actionCtx = triggerCtx.copy(error = monitorResult.error ?: triggerResult.error)
-                for (action in trigger.actions) {
-                    triggerResult.actionResults[action.id] = runAction(action, actionCtx, dryrun)
-                }
-            }
-
-            val updatedAlert = alertService.composeQueryLevelAlert(
-                triggerCtx, triggerResult,
-                monitorResult.alertError() ?: triggerResult.alertError()
-            )
-            if (updatedAlert != null) updatedAlerts += updatedAlert
-        }
-
-        // Don't save alerts if this is a test monitor
-        if (!dryrun && monitor.id != Monitor.NO_ID) {
-            alertService.saveAlerts(updatedAlerts, retryPolicy)
-        }
-        return monitorResult.copy(triggerResults = triggerResults)
-    }
-
-    // TODO: This method has grown very large with all the business logic that has been added.
-    //  Revisit this during refactoring and break it down to be more manageable.
-    suspend fun runBucketLevelMonitor(
+    abstract suspend fun runMonitor(
         monitor: Monitor,
+        monitorCtx: MonitorRunnerExecutionContext,
         periodStart: Instant,
         periodEnd: Instant,
-        dryrun: Boolean = false
-    ): MonitorRunResult<BucketLevelTriggerRunResult> {
-        val roles = getRolesForMonitor(monitor)
-        logger.debug("Running monitor: ${monitor.name} with roles: $roles Thread: ${Thread.currentThread().name}")
-
-        if (periodStart == periodEnd) {
-            logger.warn("Start and end time are the same: $periodStart. This monitor will probably only run once.")
-        }
-
-        var monitorResult = MonitorRunResult<BucketLevelTriggerRunResult>(monitor.name, periodStart, periodEnd)
-        val currentAlerts = try {
-            alertIndices.createOrUpdateAlertIndex()
-            alertIndices.createOrUpdateInitialHistoryIndex()
-            alertService.loadCurrentAlertsForBucketLevelMonitor(monitor)
-        } catch (e: Exception) {
-            // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts
-            val id = if (monitor.id.trim().isEmpty()) "_na_" else monitor.id
-            logger.error("Error loading alerts for monitor: $id", e)
-            return monitorResult.copy(error = e)
-        }
-
-        /*
-         * Since the aggregation query can consist of multiple pages, each iteration of the do-while loop only has partial results
-         * from the runBucketLevelTrigger results whereas the currentAlerts has a complete view of existing Alerts. This means that
-         * it can be confirmed if an Alert is new or de-duped local to the do-while loop if a key appears or doesn't appear in
-         * the currentAlerts. However, it cannot be guaranteed that an existing Alert is COMPLETED until all pages have been
-         * iterated over (since a bucket that did not appear in one page of the aggregation results, could appear in a later page).
-         *
-         * To solve for this, the currentAlerts will be acting as a list of "potentially completed alerts" throughout the execution.
-         * When categorizing the Alerts in each iteration, de-duped Alerts will be removed from the currentAlerts map
-         * (for the Trigger being executed) and the Alerts left in currentAlerts after all pages have been iterated through can
-         * be marked as COMPLETED since they were never de-duped.
-         *
-         * Meanwhile, the nextAlerts map will contain Alerts that will exist at the end of this Monitor execution. It is a compilation
-         * across Triggers because in the case of executing actions at a PER_EXECUTION frequency, all the Alerts are needed before executing
-         * Actions which can only be done once all of the aggregation results (and Triggers given the pagination logic) have been evaluated.
-         */
-        val triggerResults = mutableMapOf<String, BucketLevelTriggerRunResult>()
-        val triggerContexts = mutableMapOf<String, BucketLevelTriggerExecutionContext>()
-        val nextAlerts = mutableMapOf<String, MutableMap<AlertCategory, MutableList<Alert>>>()
-        var firstIteration = true
-        var firstPageOfInputResults = InputRunResults(listOf(), null)
-        do {
-            // TODO: Since a composite aggregation is being used for the input query, the total bucket count cannot be determined.
-            //  If a setting is imposed that limits buckets that can be processed for Bucket-Level Monitors, we'd need to iterate over
-            //  the buckets until we hit that threshold. In that case, we'd want to exit the execution without creating any alerts since the
-            //  buckets we iterate over before hitting the limit is not deterministic. Is there a better way to fail faster in this case?
-            withClosableContext(InjectorContextElement(monitor.id, settings, threadPool.threadContext, roles)) {
-                // Storing the first page of results in the case of pagination input results to prevent empty results
-                // in the final output of monitorResult which occurs when all pages have been exhausted.
-                // If it's favorable to return the last page, will need to check how to accomplish that with multiple aggregation paths
-                // with different page counts.
-                val inputResults = inputService.collectInputResults(monitor, periodStart, periodEnd, monitorResult.inputResults)
-                if (firstIteration) {
-                    firstPageOfInputResults = inputResults
-                    firstIteration = false
-                }
-                monitorResult = monitorResult.copy(inputResults = inputResults)
-            }
-
-            for (trigger in monitor.triggers) {
-                // The currentAlerts map is formed by iterating over the Monitor's Triggers as keys so null should not be returned here
-                val currentAlertsForTrigger = currentAlerts[trigger]!!
-                val triggerCtx = BucketLevelTriggerExecutionContext(monitor, trigger as BucketLevelTrigger, monitorResult)
-                triggerContexts[trigger.id] = triggerCtx
-                val triggerResult = triggerService.runBucketLevelTrigger(monitor, trigger, triggerCtx)
-                triggerResults[trigger.id] = triggerResult.getCombinedTriggerRunResult(triggerResults[trigger.id])
-
-                /*
-                 * If an error was encountered when running the trigger, it means that something went wrong when parsing the input results
-                 * for the filtered buckets returned from the pipeline bucket selector injected into the input query.
-                 *
-                 * In this case, the returned aggregation result buckets are empty so the categorization of the Alerts that happens below
-                 * should be skipped/invalidated since comparing the current Alerts to an empty result will lead the execution to believe
-                 * that all Alerts have been COMPLETED. Not doing so would mean it would not be possible to propagate the error into the
-                 * existing Alerts in a way the user can easily view them since they will have all been moved to the history index.
-                 */
-                if (triggerResults[trigger.id]?.error != null) continue
-
-                // TODO: Should triggerResult's aggregationResultBucket be a list? If not, getCategorizedAlertsForBucketLevelMonitor can
-                //  be refactored to use a map instead
-                val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(
-                    monitor, trigger, currentAlertsForTrigger, triggerResult.aggregationResultBuckets.values.toList()
-                ).toMutableMap()
-                val dedupedAlerts = categorizedAlerts.getOrDefault(AlertCategory.DEDUPED, emptyList())
-                var newAlerts = categorizedAlerts.getOrDefault(AlertCategory.NEW, emptyList())
-
-                /*
-                 * Index de-duped and new Alerts here (if it's not a test Monitor) so they are available at the time the Actions are executed.
-                 *
-                 * The new Alerts have to be returned and saved back with their indexed doc ID to prevent duplicate documents
-                 * when the Alerts are updated again after Action execution.
-                 *
-                 * Note: Index operations can fail for various reasons (such as write blocks on cluster), in such a case, the Actions
-                 * will still execute with the Alert information in the ctx but the Alerts may not be visible.
-                 */
-                if (!dryrun && monitor.id != Monitor.NO_ID) {
-                    alertService.saveAlerts(dedupedAlerts, retryPolicy, allowUpdatingAcknowledgedAlert = true)
-                    newAlerts = alertService.saveNewAlerts(newAlerts, retryPolicy)
-                }
-
-                // Store deduped and new Alerts to accumulate across pages
-                if (!nextAlerts.containsKey(trigger.id)) {
-                    nextAlerts[trigger.id] = mutableMapOf(
-                        AlertCategory.DEDUPED to mutableListOf<Alert>(),
-                        AlertCategory.NEW to mutableListOf(),
-                        AlertCategory.COMPLETED to mutableListOf()
-                    )
-                }
-                nextAlerts[trigger.id]?.get(AlertCategory.DEDUPED)?.addAll(dedupedAlerts)
-                nextAlerts[trigger.id]?.get(AlertCategory.NEW)?.addAll(newAlerts)
-            }
-        } while (monitorResult.inputResults.afterKeysPresent())
-
-        // The completed Alerts are whatever are left in the currentAlerts.
-        // However, this operation will only be done if there was no trigger error, since otherwise the nextAlerts were not collected
-        // in favor of just using the currentAlerts as-is.
-        currentAlerts.forEach { (trigger, keysToAlertsMap) ->
-            if (triggerResults[trigger.id]?.error == null)
-                nextAlerts[trigger.id]?.get(AlertCategory.COMPLETED)?.addAll(alertService.convertToCompletedAlerts(keysToAlertsMap))
-        }
-
-        for (trigger in monitor.triggers) {
-            val alertsToUpdate = mutableSetOf<Alert>()
-            val completedAlertsToUpdate = mutableSetOf<Alert>()
-            // Filter ACKNOWLEDGED Alerts from the deduped list so they do not have Actions executed for them.
-            // New Alerts are ignored since they cannot be acknowledged yet.
-            val dedupedAlerts = nextAlerts[trigger.id]?.get(AlertCategory.DEDUPED)
-                ?.filterNot { it.state == Alert.State.ACKNOWLEDGED }?.toMutableList()
-                ?: mutableListOf()
-            // Update nextAlerts so the filtered DEDUPED Alerts are reflected for PER_ALERT Action execution
-            nextAlerts[trigger.id]?.set(AlertCategory.DEDUPED, dedupedAlerts)
-            val newAlerts = nextAlerts[trigger.id]?.get(AlertCategory.NEW) ?: mutableListOf()
-            val completedAlerts = nextAlerts[trigger.id]?.get(AlertCategory.COMPLETED) ?: mutableListOf()
-
-            // Adding all the COMPLETED Alerts to a separate set and removing them if they get added
-            // to alertsToUpdate to ensure the Alert doc is updated at the end in either case
-            completedAlertsToUpdate.addAll(completedAlerts)
-
-            // All trigger contexts and results should be available at this point since all triggers were evaluated in the main do-while loop
-            val triggerCtx = triggerContexts[trigger.id]!!
-            val triggerResult = triggerResults[trigger.id]!!
-            val monitorOrTriggerError = monitorResult.error ?: triggerResult.error
-            val shouldDefaultToPerExecution = defaultToPerExecutionAction(
-                monitorId = monitor.id,
-                triggerId = trigger.id,
-                totalActionableAlertCount = dedupedAlerts.size + newAlerts.size + completedAlerts.size,
-                monitorOrTriggerError = monitorOrTriggerError
-            )
-            for (action in trigger.actions) {
-                // ActionExecutionPolicy should not be null for Bucket-Level Monitors since it has a default config when not set explicitly
-                val actionExecutionScope = action.getActionExecutionPolicy(monitor)!!.actionExecutionScope
-                if (actionExecutionScope is PerAlertActionScope && !shouldDefaultToPerExecution) {
-                    for (alertCategory in actionExecutionScope.actionableAlerts) {
-                        val alertsToExecuteActionsFor = nextAlerts[trigger.id]?.get(alertCategory) ?: mutableListOf()
-                        for (alert in alertsToExecuteActionsFor) {
-                            val actionCtx = getActionContextForAlertCategory(
-                                alertCategory, alert, triggerCtx, monitorOrTriggerError
-                            )
-                            // AggregationResultBucket should not be null here
-                            val alertBucketKeysHash = alert.aggregationResultBucket!!.getBucketKeysHash()
-                            if (!triggerResult.actionResultsMap.containsKey(alertBucketKeysHash)) {
-                                triggerResult.actionResultsMap[alertBucketKeysHash] = mutableMapOf()
-                            }
-
-                            // Keeping the throttled response separate from runAction for now since
-                            // throttling is not supported for PER_EXECUTION
-                            val actionResult = if (isActionActionable(action, alert)) {
-                                runAction(action, actionCtx, dryrun)
-                            } else {
-                                ActionRunResult(action.id, action.name, mapOf(), true, null, null)
-                            }
-
-                            triggerResult.actionResultsMap[alertBucketKeysHash]?.set(action.id, actionResult)
-                            alertsToUpdate.add(alert)
-                            // Remove the alert from completedAlertsToUpdate in case it is present there since
-                            // its update will be handled in the alertsToUpdate batch
-                            completedAlertsToUpdate.remove(alert)
-                        }
-                    }
-                } else if (actionExecutionScope is PerExecutionActionScope || shouldDefaultToPerExecution) {
-                    // If all categories of Alerts are empty, there is nothing to message on and we can skip the Action.
-                    // If the error is not null, this is disregarded and the Action is executed anyway so the user can be notified.
-                    if (monitorOrTriggerError == null && dedupedAlerts.isEmpty() && newAlerts.isEmpty() && completedAlerts.isEmpty())
-                        continue
-
-                    val actionCtx = triggerCtx.copy(
-                        dedupedAlerts = dedupedAlerts,
-                        newAlerts = newAlerts,
-                        completedAlerts = completedAlerts,
-                        error = monitorResult.error ?: triggerResult.error
-                    )
-                    val actionResult = runAction(action, actionCtx, dryrun)
-                    // If there was an error during trigger execution then the Alerts to be updated are the current Alerts since the state
-                    // was not changed. Otherwise, the Alerts to be updated are the sum of the deduped, new and completed Alerts.
-                    val alertsToIterate = if (monitorOrTriggerError == null) {
-                        (dedupedAlerts + newAlerts + completedAlerts)
-                    } else currentAlerts[trigger]?.map { it.value } ?: listOf()
-                    // Save the Action run result for every Alert
-                    for (alert in alertsToIterate) {
-                        val alertBucketKeysHash = alert.aggregationResultBucket!!.getBucketKeysHash()
-                        if (!triggerResult.actionResultsMap.containsKey(alertBucketKeysHash)) {
-                            triggerResult.actionResultsMap[alertBucketKeysHash] = mutableMapOf()
-                        }
-                        triggerResult.actionResultsMap[alertBucketKeysHash]?.set(action.id, actionResult)
-                        alertsToUpdate.add(alert)
-                        // Remove the alert from completedAlertsToUpdate in case it is present there since
-                        // its update will be handled in the alertsToUpdate batch
-                        completedAlertsToUpdate.remove(alert)
-                    }
-                }
-            }
-
-            // Alerts are only added to alertsToUpdate after Action execution meaning the action results for it should be present
-            // in the actionResultsMap but returning a default value when accessing the map to be safe.
-            val updatedAlerts = alertsToUpdate.map { alert ->
-                val bucketKeysHash = alert.aggregationResultBucket!!.getBucketKeysHash()
-                val actionResults = triggerResult.actionResultsMap.getOrDefault(bucketKeysHash, emptyMap())
-                alertService.updateActionResultsForBucketLevelAlert(
-                    alert.copy(lastNotificationTime = currentTime()),
-                    actionResults,
-                    // TODO: Update BucketLevelTriggerRunResult.alertError() to retrieve error based on the first failed Action
-                    monitorResult.alertError() ?: triggerResult.alertError()
-                )
-            }
-
-            // Update Alerts with action execution results (if it's not a test Monitor).
-            // ACKNOWLEDGED Alerts should not be saved here since actions are not executed for them.
-            if (!dryrun && monitor.id != Monitor.NO_ID) {
-                alertService.saveAlerts(updatedAlerts, retryPolicy, allowUpdatingAcknowledgedAlert = false)
-                // Save any COMPLETED Alerts that were not covered in updatedAlerts
-                alertService.saveAlerts(completedAlertsToUpdate.toList(), retryPolicy, allowUpdatingAcknowledgedAlert = false)
-            }
-        }
-
-        return monitorResult.copy(inputResults = firstPageOfInputResults, triggerResults = triggerResults)
-    }
-
-    private fun defaultToPerExecutionAction(
-        monitorId: String,
-        triggerId: String,
-        totalActionableAlertCount: Int,
-        monitorOrTriggerError: Exception?
-    ): Boolean {
-        // If the monitorId or triggerResult has an error, then also default to PER_EXECUTION to communicate the error
-        if (monitorOrTriggerError != null) {
-            logger.debug(
-                "Trigger [$triggerId] in monitor [$monitorId] encountered an error. Defaulting to " +
-                    "[${ActionExecutionScope.Type.PER_EXECUTION}] for action execution to communicate error."
-            )
-            return true
-        }
-
-        // If the MAX_ACTIONABLE_ALERT_COUNT is set to -1, consider it unbounded and proceed regardless of actionable Alert count
-        if (maxActionableAlertCount < 0) return false
-
-        // If the total number of Alerts to execute Actions on exceeds the MAX_ACTIONABLE_ALERT_COUNT setting then default to
-        // PER_EXECUTION for less intrusive Actions
-        if (totalActionableAlertCount > maxActionableAlertCount) {
-            logger.debug(
-                "The total actionable alerts for trigger [$triggerId] in monitor [$monitorId] is [$totalActionableAlertCount] " +
-                    "which exceeds the maximum of [$maxActionableAlertCount]. Defaulting to [${ActionExecutionScope.Type.PER_EXECUTION}] " +
-                    "for action execution."
-            )
-            return true
-        }
-
-        return false
-    }
-
-    private fun getRolesForMonitor(monitor: Monitor): List<String> {
-        /*
-         * We need to handle 3 cases:
-         * 1. Monitors created by older versions and never updated. These monitors wont have User details in the
-         * monitor object. `monitor.user` will be null. Insert `all_access, AmazonES_all_access` role.
-         * 2. Monitors are created when security plugin is disabled, these will have empty User object.
-         * (`monitor.user.name`, `monitor.user.roles` are empty )
-         * 3. Monitors are created when security plugin is enabled, these will have an User object.
-         */
-        return if (monitor.user == null) {
-            // fixme: discuss and remove hardcoded to settings?
-            // TODO: Remove "AmazonES_all_access" role?
-            settings.getAsList("", listOf("all_access", "AmazonES_all_access"))
-        } else {
-            monitor.user.roles
-        }
-    }
-
-    // TODO: Can this be updated to just use 'Instant.now()'?
-    //  'threadPool.absoluteTimeInMillis()' is referring to a cached value of System.currentTimeMillis() that by default updates every 200ms
-    private fun currentTime() = Instant.ofEpochMilli(threadPool.absoluteTimeInMillis())
-
-    private fun isActionActionable(action: Action, alert: Alert?): Boolean {
-        if (alert == null || action.throttle == null) {
-            return true
-        }
-        if (action.throttleEnabled) {
-            val result = alert.actionExecutionResults.firstOrNull { r -> r.actionId == action.id }
-            val lastExecutionTime: Instant? = result?.lastExecutionTime
-            val throttledTimeBound = currentTime().minus(action.throttle.value.toLong(), action.throttle.unit)
-            return (lastExecutionTime == null || lastExecutionTime.isBefore(throttledTimeBound))
-        }
-        return true
-    }
-
-    private fun getActionContextForAlertCategory(
-        alertCategory: AlertCategory,
-        alert: Alert,
-        ctx: BucketLevelTriggerExecutionContext,
-        error: Exception?
-    ): BucketLevelTriggerExecutionContext {
-        return when (alertCategory) {
-            AlertCategory.DEDUPED ->
-                ctx.copy(dedupedAlerts = listOf(alert), newAlerts = emptyList(), completedAlerts = emptyList(), error = error)
-            AlertCategory.NEW ->
-                ctx.copy(dedupedAlerts = emptyList(), newAlerts = listOf(alert), completedAlerts = emptyList(), error = error)
-            AlertCategory.COMPLETED ->
-                ctx.copy(dedupedAlerts = emptyList(), newAlerts = emptyList(), completedAlerts = listOf(alert), error = error)
-        }
-    }
-
-    private suspend fun runAction(action: Action, ctx: TriggerExecutionContext, dryrun: Boolean): ActionRunResult {
+        dryRun: Boolean
+    ): MonitorRunResult<*>
+
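+    /**
+     * Action execution shared by all monitor runners: compiles the subject/message templates and,
+     * unless this is a dry run or the action is throttled, sends the notification through the
+     * resolved Notification channel or legacy Destination.
+     */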
+    suspend fun runAction(
+        action: Action,
+        ctx: TriggerExecutionContext,
+        monitorCtx: MonitorRunnerExecutionContext,
+        dryrun: Boolean
+    ): ActionRunResult {
         return try {
-            if (ctx is QueryLevelTriggerExecutionContext && !isActionActionable(action, ctx.alert)) {
+            if (
+                (ctx is QueryLevelTriggerExecutionContext && !MonitorRunnerService.isActionActionable(action, ctx.alert)) ||
+                (ctx is DocumentLevelTriggerExecutionContext && !MonitorRunnerService.isActionActionable(action, ctx.alert))
+            ) {
                 return ActionRunResult(action.id, action.name, mapOf(), true, null, null)
             }
-
             val actionOutput = mutableMapOf<String, String>()
-            actionOutput[SUBJECT] = if (action.subjectTemplate != null) compileTemplate(action.subjectTemplate, ctx) else ""
-            actionOutput[MESSAGE] = compileTemplate(action.messageTemplate, ctx)
-            if (Strings.isNullOrEmpty(actionOutput[MESSAGE])) {
+            actionOutput[Action.SUBJECT] = if (action.subjectTemplate != null)
+                MonitorRunnerService.compileTemplate(action.subjectTemplate, ctx)
+            else ""
+            actionOutput[Action.MESSAGE] = MonitorRunnerService.compileTemplate(action.messageTemplate, ctx)
+            if (Strings.isNullOrEmpty(actionOutput[Action.MESSAGE])) {
                 throw IllegalStateException("Message content missing in the Destination with id: ${action.destinationId}")
             }
             if (!dryrun) {
                 // TODO: Inject user here so only Destination/Notifications that the user has permissions to are retrieved
                 withContext(Dispatchers.IO) {
-                    actionOutput[MESSAGE_ID] = getConfigAndSendNotification(action, actionOutput[SUBJECT], actionOutput[MESSAGE]!!)
+                    actionOutput[Action.MESSAGE_ID] = getConfigAndSendNotification(
+                        action,
+                        monitorCtx,
+                        actionOutput[Action.SUBJECT],
+                        actionOutput[Action.MESSAGE]!!
+                    )
                 }
             }
-            ActionRunResult(action.id, action.name, actionOutput, false, currentTime(), null)
+            ActionRunResult(action.id, action.name, actionOutput, false, MonitorRunnerService.currentTime(), null)
         } catch (e: Exception) {
-            ActionRunResult(action.id, action.name, mapOf(), false, currentTime(), e)
+            ActionRunResult(action.id, action.name, mapOf(), false, MonitorRunnerService.currentTime(), e)
         }
     }
 
-    private suspend fun getConfigAndSendNotification(action: Action, subject: String?, message: String): String {
-        val config = getConfigForNotificationAction(action)
-
+    protected suspend fun getConfigAndSendNotification(
+        action: Action,
+        monitorCtx: MonitorRunnerExecutionContext,
+        subject: String?,
+        message: String
+    ): String {
+        val config = getConfigForNotificationAction(action, monitorCtx)
         if (config.destination == null && config.channel == null) {
             throw IllegalStateException("Unable to find a Notification Channel or Destination config with id [${action.id}]")
         }
@@ -684,7 +94,7 @@ object MonitorRunner : JobRunner, CoroutineScope, AbstractLifecycleComponent() {
             return "test action"
         }
 
-        if (config.destination?.isAllowed(allowList) == false) {
+        if (config.destination?.isAllowed(monitorCtx.allowList) == false) {
             throw IllegalStateException(
                 "Monitor contains a Destination type that is not allowed: ${config.destination.type}"
             )
@@ -693,39 +103,36 @@ object MonitorRunner : JobRunner, CoroutineScope, AbstractLifecycleComponent() {
 
         var actionResponseContent = ""
         actionResponseContent = config.channel
            ?.sendNotification(
-                client,
+                monitorCtx.client!!,
                config.channel.getTitle(subject),
                config.channel.createMessageContent(subject, message)
            ) ?: actionResponseContent
 
         actionResponseContent = config.destination
-            ?.buildLegacyBaseMessage(subject, message, destinationContextFactory.getDestinationContext(config.destination))
-            ?.publishLegacyNotification(client)
+            ?.buildLegacyBaseMessage(subject, message, monitorCtx.destinationContextFactory!!.getDestinationContext(config.destination))
+            ?.publishLegacyNotification(monitorCtx.client!!)
             ?: actionResponseContent
 
         return actionResponseContent
     }
 
-    private fun compileTemplate(template: Script, ctx: TriggerExecutionContext): String {
-        return scriptService.compile(template, TemplateScript.CONTEXT)
-            .newInstance(template.params + mapOf("ctx" to ctx.asTemplateArg()))
-            .execute()
-    }
-
     /**
      * The "destination" ID referenced in a Monitor Action could either be a Notification config or a Destination config
     * depending on whether the background migration process has already migrated it from a Destination to a Notification config.
     *
     * To cover both of these cases, the Notification config will take precedence and if it is not found, the Destination will be retrieved.
     */
-    private suspend fun getConfigForNotificationAction(action: Action): NotificationActionConfigs {
+    private suspend fun getConfigForNotificationAction(
+        action: Action,
+        monitorCtx: MonitorRunnerExecutionContext
+    ): NotificationActionConfigs {
         var destination: Destination? = null
-        val channel: NotificationConfigInfo? = getNotificationConfigInfo(client as NodeClient, action.destinationId)
+        val channel: NotificationConfigInfo? = getNotificationConfigInfo(monitorCtx.client as NodeClient, action.destinationId)
 
         // If the channel was not found, try to retrieve the Destination
         if (channel == null) {
            destination = try {
-                AlertingConfigAccessor.getDestinationInfo(client, xContentRegistry, action.destinationId)
+                AlertingConfigAccessor.getDestinationInfo(monitorCtx.client!!, monitorCtx.xContentRegistry!!, action.destinationId)
            } catch (e: IllegalStateException) {
                // Catching the exception thrown when the Destination was not found so the NotificationActionConfigs object can be returned
                null
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerExecutionContext.kt b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerExecutionContext.kt
new file mode 100644
index 000000000..65dcdcef1
--- /dev/null
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerExecutionContext.kt
@@ -0,0 +1,44 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting
+
+import org.opensearch.action.bulk.BackoffPolicy
+import org.opensearch.alerting.alerts.AlertIndices
+import org.opensearch.alerting.model.destination.DestinationContextFactory
+import org.opensearch.alerting.settings.AlertingSettings
+import org.opensearch.alerting.settings.DestinationSettings
+import org.opensearch.alerting.settings.LegacyOpenDistroDestinationSettings
+import org.opensearch.client.Client
+import org.opensearch.cluster.service.ClusterService
+import org.opensearch.common.settings.Settings
+import org.opensearch.common.xcontent.NamedXContentRegistry
+import org.opensearch.script.ScriptService
+import org.opensearch.threadpool.ThreadPool
+
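+/**
+ * Mutable holder for the services and dynamic settings that previously lived on the MonitorRunner
+ * singleton. The register* calls in MonitorRunnerService populate it during plugin startup, which
+ * is why the fields are nullable.
+ */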
+data class MonitorRunnerExecutionContext(
+
+    var clusterService: ClusterService? = null,
+    var client: Client? = null,
+    var xContentRegistry: NamedXContentRegistry? = null,
+    var scriptService: ScriptService? = null,
+    var settings: Settings? = null,
+    var threadPool: ThreadPool? = null,
+    var alertIndices: AlertIndices? = null,
+    var inputService: InputService? = null,
+    var triggerService: TriggerService? = null,
+    var alertService: AlertService? = null,
+
+    @Volatile var retryPolicy: BackoffPolicy? = null,
+    @Volatile var moveAlertsRetryPolicy: BackoffPolicy? = null,
+
+    @Volatile var allowList: List<String> = DestinationSettings.ALLOW_LIST_NONE,
+    @Volatile var hostDenyList: List<String> = LegacyOpenDistroDestinationSettings.HOST_DENY_LIST_NONE,
+
+    @Volatile var destinationSettings: Map<String, DestinationSettings.Companion.SecureDestinationSettings>? = null,
+    @Volatile var destinationContextFactory: DestinationContextFactory? = null,
+
+    @Volatile var maxActionableAlertCount: Long = AlertingSettings.DEFAULT_MAX_ACTIONABLE_ALERT_COUNT
+)
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerService.kt b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerService.kt
new file mode 100644
index 000000000..20550ba25
--- /dev/null
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerService.kt
@@ -0,0 +1,263 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting
+
+import kotlinx.coroutines.CoroutineScope
+import kotlinx.coroutines.Dispatchers
+import kotlinx.coroutines.Job
+import kotlinx.coroutines.SupervisorJob
+import kotlinx.coroutines.launch
+import org.apache.logging.log4j.LogManager
+import org.opensearch.action.bulk.BackoffPolicy
+import org.opensearch.alerting.alerts.AlertIndices
+import org.opensearch.alerting.alerts.moveAlerts
+import org.opensearch.alerting.core.JobRunner
+import org.opensearch.alerting.core.model.ScheduledJob
+import org.opensearch.alerting.model.Alert
+import org.opensearch.alerting.model.Monitor
+import org.opensearch.alerting.model.MonitorRunResult
+import org.opensearch.alerting.model.action.Action
+import org.opensearch.alerting.model.destination.DestinationContextFactory
+import org.opensearch.alerting.opensearchapi.retry
+import org.opensearch.alerting.script.TriggerExecutionContext
+import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_BACKOFF_COUNT
+import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_BACKOFF_MILLIS
+import org.opensearch.alerting.settings.AlertingSettings.Companion.MAX_ACTIONABLE_ALERT_COUNT
+import org.opensearch.alerting.settings.AlertingSettings.Companion.MOVE_ALERTS_BACKOFF_COUNT
+import org.opensearch.alerting.settings.AlertingSettings.Companion.MOVE_ALERTS_BACKOFF_MILLIS
+import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST
+import org.opensearch.alerting.settings.DestinationSettings.Companion.HOST_DENY_LIST
+import org.opensearch.alerting.settings.DestinationSettings.Companion.loadDestinationSettings
+import org.opensearch.alerting.util.isBucketLevelMonitor
+import org.opensearch.alerting.util.isDocLevelMonitor
+import org.opensearch.client.Client
+import org.opensearch.cluster.service.ClusterService
+import org.opensearch.common.component.AbstractLifecycleComponent
+import org.opensearch.common.settings.Settings
+import org.opensearch.common.xcontent.NamedXContentRegistry
+import org.opensearch.script.Script
+import org.opensearch.script.ScriptService
+import org.opensearch.script.TemplateScript
+import org.opensearch.threadpool.ThreadPool
+import java.time.Instant
+import kotlin.coroutines.CoroutineContext
+
+object MonitorRunnerService : JobRunner, CoroutineScope, AbstractLifecycleComponent() {
+
+    private val logger = LogManager.getLogger(javaClass)
+
+    var monitorCtx: MonitorRunnerExecutionContext = MonitorRunnerExecutionContext()
+
+    private lateinit var runnerSupervisor: Job
+    override val coroutineContext: CoroutineContext
+        get() = Dispatchers.Default + runnerSupervisor
+
+    fun registerClusterService(clusterService: ClusterService): MonitorRunnerService {
+        this.monitorCtx.clusterService = clusterService
+        return this
+    }
+
+    fun registerClient(client: Client): MonitorRunnerService {
+        this.monitorCtx.client = client
+        return this
+    }
+
+    fun registerNamedXContentRegistry(xContentRegistry: NamedXContentRegistry): MonitorRunnerService {
+        this.monitorCtx.xContentRegistry = xContentRegistry
+        return this
+    }
+
+    fun registerScriptService(scriptService: ScriptService): MonitorRunnerService {
+        this.monitorCtx.scriptService = scriptService
+        return this
+    }
+
+    fun registerSettings(settings: Settings): MonitorRunnerService {
+        this.monitorCtx.settings = settings
+        return this
+    }
+
+    fun registerThreadPool(threadPool: ThreadPool): MonitorRunnerService {
+        this.monitorCtx.threadPool = threadPool
+        return this
+    }
+
+    fun registerAlertIndices(alertIndices: AlertIndices): MonitorRunnerService {
+        this.monitorCtx.alertIndices = alertIndices
+        return this
+    }
+
+    fun registerInputService(inputService: InputService): MonitorRunnerService {
+        this.monitorCtx.inputService = inputService
+        return this
+    }
+
+    fun registerTriggerService(triggerService: TriggerService): MonitorRunnerService {
+        this.monitorCtx.triggerService = triggerService
+        return this
+    }
+
+    fun registerAlertService(alertService: AlertService): MonitorRunnerService {
+        this.monitorCtx.alertService = alertService
+        return this
+    }
+
+    // Must be called after registerClusterService and registerSettings in AlertingPlugin
+    fun registerConsumers(): MonitorRunnerService {
+        monitorCtx.retryPolicy = BackoffPolicy.constantBackoff(
+            ALERT_BACKOFF_MILLIS.get(monitorCtx.settings),
+            ALERT_BACKOFF_COUNT.get(monitorCtx.settings)
+        )
+        monitorCtx.clusterService!!.clusterSettings.addSettingsUpdateConsumer(ALERT_BACKOFF_MILLIS, ALERT_BACKOFF_COUNT) { millis, count ->
+            monitorCtx.retryPolicy = BackoffPolicy.constantBackoff(millis, count)
+        }
+
+        monitorCtx.moveAlertsRetryPolicy =
+            BackoffPolicy.exponentialBackoff(
+                MOVE_ALERTS_BACKOFF_MILLIS.get(monitorCtx.settings),
+                MOVE_ALERTS_BACKOFF_COUNT.get(monitorCtx.settings)
+            )
+        monitorCtx.clusterService!!.clusterSettings.addSettingsUpdateConsumer(MOVE_ALERTS_BACKOFF_MILLIS, MOVE_ALERTS_BACKOFF_COUNT) {
+            millis, count ->
+            monitorCtx.moveAlertsRetryPolicy = BackoffPolicy.exponentialBackoff(millis, count)
+        }
+
+        monitorCtx.allowList = ALLOW_LIST.get(monitorCtx.settings)
+        monitorCtx.clusterService!!.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) {
+            monitorCtx.allowList = it
+        }
+
+        // Host deny list is not a dynamic setting so no consumer is registered but the variable is set here
+        monitorCtx.hostDenyList = HOST_DENY_LIST.get(monitorCtx.settings)
+
+        monitorCtx.maxActionableAlertCount = MAX_ACTIONABLE_ALERT_COUNT.get(monitorCtx.settings)
+        monitorCtx.clusterService!!.clusterSettings.addSettingsUpdateConsumer(MAX_ACTIONABLE_ALERT_COUNT) {
+            monitorCtx.maxActionableAlertCount = it
+        }
+
+        return this
+    }
+
+    // To be safe, call this last as it depends on a number of other components being registered beforehand (client, settings, etc.)
+    fun registerDestinationSettings(): MonitorRunnerService {
+        monitorCtx.destinationSettings = loadDestinationSettings(monitorCtx.settings!!)
+        monitorCtx.destinationContextFactory =
+            DestinationContextFactory(monitorCtx.client!!, monitorCtx.xContentRegistry!!, monitorCtx.destinationSettings!!)
+        return this
+    }
+
+    // Updates destination settings when the reload API is called so that new keystore values are visible
+    fun reloadDestinationSettings(settings: Settings) {
+        monitorCtx.destinationSettings = loadDestinationSettings(settings)
+
+        // Update destinationContextFactory as well since destinationSettings has been updated
+        monitorCtx.destinationContextFactory!!.updateDestinationSettings(monitorCtx.destinationSettings!!)
+    }
+
+    override fun doStart() {
+        runnerSupervisor = SupervisorJob()
+    }
+
+    override fun doStop() {
+        runnerSupervisor.cancel()
+    }
+
+    override fun doClose() { }
+
+    override fun postIndex(job: ScheduledJob) {
+        if (job !is Monitor) {
+            throw IllegalArgumentException("Invalid job type")
+        }
+
+        launch {
+            try {
+                monitorCtx.moveAlertsRetryPolicy!!.retry(logger) {
+                    if (monitorCtx.alertIndices!!.isAlertInitialized()) {
+                        moveAlerts(monitorCtx.client!!, job.id, job)
+                    }
+                }
+            } catch (e: Exception) {
+                logger.error("Failed to move active alerts for monitor [${job.id}].", e)
+            }
+        }
+    }
+
+    override fun postDelete(jobId: String) {
+        launch {
+            try {
+                monitorCtx.moveAlertsRetryPolicy!!.retry(logger) {
+                    if (monitorCtx.alertIndices!!.isAlertInitialized()) {
+                        moveAlerts(monitorCtx.client!!, jobId, null)
+                    }
+                }
+            } catch (e: Exception) {
+                logger.error("Failed to move active alerts for monitor [$jobId].", e)
+            }
+        }
+    }
+
+    override fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant) {
+        if (job !is Monitor) {
+            throw IllegalArgumentException("Invalid job type")
+        }
+        launch {
+            runJob(job, periodStart, periodEnd, false)
+        }
+    }
+
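+    // Dispatches to the runner matching the monitor type; query-level acts as the fallback for
+    // monitors that are neither bucket-level nor document-level.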
+    suspend fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant, dryrun: Boolean): MonitorRunResult<*> {
+        val monitor = job as Monitor
+        return if (monitor.isBucketLevelMonitor()) {
+            BucketLevelMonitorRunner.runMonitor(monitor, monitorCtx, periodStart, periodEnd, dryrun)
+        } else if (monitor.isDocLevelMonitor()) {
+            DocumentReturningMonitorRunner.runMonitor(monitor, monitorCtx, periodStart, periodEnd, dryrun)
+        } else {
+            QueryLevelMonitorRunner.runMonitor(monitor, monitorCtx, periodStart, periodEnd, dryrun)
+        }
+    }
+
+    // TODO: See if we can move below methods (or few of these) to a common utils
+    internal fun getRolesForMonitor(monitor: Monitor): List<String> {
+        /*
+         * We need to handle 3 cases:
+         * 1. Monitors created by older versions and never updated. These monitors won't have User details in the
+         * monitor object. `monitor.user` will be null. Insert `all_access, AmazonES_all_access` role.
+         * 2. Monitors are created when security plugin is disabled, these will have empty User object.
+         * (`monitor.user.name`, `monitor.user.roles` are empty )
+         * 3. Monitors are created when security plugin is enabled, these will have a User object.
+         */
+        return if (monitor.user == null) {
+            // fixme: discuss and remove hardcoded to settings?
+            // TODO: Remove "AmazonES_all_access" role?
+            monitorCtx.settings!!.getAsList("", listOf("all_access", "AmazonES_all_access"))
+        } else {
+            monitor.user.roles
+        }
+    }
+
+    // TODO: Can this be updated to just use 'Instant.now()'?
+    //  'threadPool.absoluteTimeInMillis()' is referring to a cached value of System.currentTimeMillis() that by default updates every 200ms
+    internal fun currentTime() = Instant.ofEpochMilli(monitorCtx.threadPool!!.absoluteTimeInMillis())
+
+    internal fun isActionActionable(action: Action, alert: Alert?): Boolean {
+        if (alert == null || action.throttle == null) {
+            return true
+        }
+        if (action.throttleEnabled) {
+            val result = alert.actionExecutionResults.firstOrNull { r -> r.actionId == action.id }
+            val lastExecutionTime: Instant? = result?.lastExecutionTime
+            val throttledTimeBound = currentTime().minus(action.throttle.value.toLong(), action.throttle.unit)
+            return (lastExecutionTime == null || lastExecutionTime.isBefore(throttledTimeBound))
+        }
+        return true
+    }
+
+    internal fun compileTemplate(template: Script, ctx: TriggerExecutionContext): String {
+        return monitorCtx.scriptService!!.compile(template, TemplateScript.CONTEXT)
+            .newInstance(template.params + mapOf("ctx" to ctx.asTemplateArg()))
+            .execute()
+    }
+}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/QueryLevelMonitorRunner.kt b/alerting/src/main/kotlin/org/opensearch/alerting/QueryLevelMonitorRunner.kt
new file mode 100644
index 000000000..cf9f2d5fc
--- /dev/null
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/QueryLevelMonitorRunner.kt
@@ -0,0 +1,88 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting
+
+import kotlinx.coroutines.runBlocking
+import org.apache.logging.log4j.LogManager
+import org.opensearch.alerting.model.Alert
+import org.opensearch.alerting.model.Monitor
+import org.opensearch.alerting.model.MonitorRunResult
+import org.opensearch.alerting.model.QueryLevelTrigger
+import org.opensearch.alerting.model.QueryLevelTriggerRunResult
+import org.opensearch.alerting.opensearchapi.InjectorContextElement
+import org.opensearch.alerting.script.QueryLevelTriggerExecutionContext
+import org.opensearch.alerting.util.isADMonitor
+import java.time.Instant
+
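+/**
+ * The query-level monitor logic formerly in MonitorRunner.runQueryLevelMonitor, now a concrete
+ * runner that pulls its collaborators (alert indices, input/trigger/alert services) from the
+ * execution context.
+ */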
+object QueryLevelMonitorRunner : MonitorRunner() {
+    private val logger = LogManager.getLogger(javaClass)
+
+    override suspend fun runMonitor(
+        monitor: Monitor,
+        monitorCtx: MonitorRunnerExecutionContext,
+        periodStart: Instant,
+        periodEnd: Instant,
+        dryrun: Boolean
+    ): MonitorRunResult<QueryLevelTriggerRunResult> {
+        val roles = MonitorRunnerService.getRolesForMonitor(monitor)
+        logger.debug("Running monitor: ${monitor.name} with roles: $roles Thread: ${Thread.currentThread().name}")
+
+        if (periodStart == periodEnd) {
+            logger.warn("Start and end time are the same: $periodStart. This monitor will probably only run once.")
+        }
+
+        var monitorResult = MonitorRunResult<QueryLevelTriggerRunResult>(monitor.name, periodStart, periodEnd)
+        val currentAlerts = try {
+            monitorCtx.alertIndices!!.createOrUpdateAlertIndex()
+            monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex()
+            monitorCtx.alertService!!.loadCurrentAlertsForQueryLevelMonitor(monitor)
+        } catch (e: Exception) {
+            // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts
+            val id = if (monitor.id.trim().isEmpty()) "_na_" else monitor.id
+            logger.error("Error loading alerts for monitor: $id", e)
+            return monitorResult.copy(error = e)
+        }
+        if (!isADMonitor(monitor)) {
+            runBlocking(InjectorContextElement(monitor.id, monitorCtx.settings!!, monitorCtx.threadPool!!.threadContext, roles)) {
+                monitorResult = monitorResult.copy(
+                    inputResults = monitorCtx.inputService!!.collectInputResults(monitor, periodStart, periodEnd)
+                )
+            }
+        } else {
+            monitorResult = monitorResult.copy(
+                inputResults = monitorCtx.inputService!!.collectInputResultsForADMonitor(monitor, periodStart, periodEnd)
+            )
+        }
+
+        val updatedAlerts = mutableListOf<Alert>()
+        val triggerResults = mutableMapOf<String, QueryLevelTriggerRunResult>()
+        for (trigger in monitor.triggers) {
+            val currentAlert = currentAlerts[trigger]
+            val triggerCtx = QueryLevelTriggerExecutionContext(monitor, trigger as QueryLevelTrigger, monitorResult, currentAlert)
+            val triggerResult = monitorCtx.triggerService!!.runQueryLevelTrigger(monitor, trigger, triggerCtx)
+            triggerResults[trigger.id] = triggerResult
+
+            if (monitorCtx.triggerService!!.isQueryLevelTriggerActionable(triggerCtx, triggerResult)) {
+                val actionCtx = triggerCtx.copy(error = monitorResult.error ?: triggerResult.error)
+                for (action in trigger.actions) {
+                    triggerResult.actionResults[action.id] = this.runAction(action, actionCtx, monitorCtx, dryrun)
+                }
+            }
+
+            val updatedAlert = monitorCtx.alertService!!.composeQueryLevelAlert(
+                triggerCtx, triggerResult,
+                monitorResult.alertError() ?: triggerResult.alertError()
+            )
+            if (updatedAlert != null) updatedAlerts += updatedAlert
+        }
+
+        // Don't save alerts if this is a test monitor
+        if (!dryrun && monitor.id != Monitor.NO_ID) {
+            monitorCtx.retryPolicy?.let { monitorCtx.alertService!!.saveAlerts(updatedAlerts, it) }
+        }
+        return monitorResult.copy(triggerResults = triggerResults)
+    }
+}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/TriggerService.kt b/alerting/src/main/kotlin/org/opensearch/alerting/TriggerService.kt
index c94f0419f..77ea886a8 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/TriggerService.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/TriggerService.kt
@@ -8,27 +8,33 @@ package org.opensearch.alerting
 
 import org.apache.logging.log4j.LogManager
 import org.opensearch.alerting.aggregation.bucketselectorext.BucketSelectorIndices.Fields.BUCKET_INDICES
 import org.opensearch.alerting.aggregation.bucketselectorext.BucketSelectorIndices.Fields.PARENT_BUCKET_PATH
+import org.opensearch.alerting.core.model.DocLevelQuery
 import org.opensearch.alerting.model.AggregationResultBucket
 import org.opensearch.alerting.model.Alert
 import org.opensearch.alerting.model.BucketLevelTrigger
 import org.opensearch.alerting.model.BucketLevelTriggerRunResult
+import org.opensearch.alerting.model.DocumentLevelTrigger
+import org.opensearch.alerting.model.DocumentLevelTriggerRunResult
 import org.opensearch.alerting.model.Monitor
 import org.opensearch.alerting.model.QueryLevelTrigger
 import org.opensearch.alerting.model.QueryLevelTriggerRunResult
 import org.opensearch.alerting.script.BucketLevelTriggerExecutionContext
 import org.opensearch.alerting.script.QueryLevelTriggerExecutionContext
 import org.opensearch.alerting.script.TriggerScript
+import org.opensearch.alerting.triggercondition.parsers.TriggerExpressionParser
 import org.opensearch.alerting.util.getBucketKeysHash
+import org.opensearch.script.Script
 import org.opensearch.script.ScriptService
 import org.opensearch.search.aggregations.Aggregation
 import org.opensearch.search.aggregations.Aggregations
 import org.opensearch.search.aggregations.support.AggregationPath
-import java.lang.IllegalArgumentException
 
 /** Service that handles executing Triggers */
 class TriggerService(val scriptService: ScriptService) {
 
     private val logger = LogManager.getLogger(TriggerService::class.java)
+    private val ALWAYS_RUN = Script("return true")
+    private val NEVER_RUN = Script("return false")
 
     fun isQueryLevelTriggerActionable(ctx: QueryLevelTriggerExecutionContext, result: QueryLevelTriggerRunResult): Boolean {
         // Suppress actions if the current alert is acknowledged and there are no errors.
@@ -53,6 +59,32 @@ class TriggerService(val scriptService: ScriptService) {
         }
     }
 
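+    // Evaluates a document-level trigger against the per-query document matches. ALWAYS_RUN and
+    // NEVER_RUN are the two sentinel conditions; anything else is parsed as a trigger expression
+    // over query references (for example `query[name=query1] && query[name=query2]`) and
+    // evaluated against the doc id sets in queryToDocIds.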
+    // TODO: improve performance and support match all and match any
+    fun runDocLevelTrigger(
+        monitor: Monitor,
+        trigger: DocumentLevelTrigger,
+        queryToDocIds: Map<DocLevelQuery, Set<String>>
+    ): DocumentLevelTriggerRunResult {
+        return try {
+            var triggeredDocs = mutableListOf<String>()
+
+            if (trigger.condition.idOrCode.equals(ALWAYS_RUN.idOrCode)) {
+                for (value in queryToDocIds.values) {
+                    triggeredDocs.addAll(value)
+                }
+            } else if (!trigger.condition.idOrCode.equals(NEVER_RUN.idOrCode)) {
+                triggeredDocs = TriggerExpressionParser(trigger.condition.idOrCode).parse()
+                    .evaluate(queryToDocIds).toMutableList()
+            }
+
+            DocumentLevelTriggerRunResult(trigger.name, triggeredDocs, null)
+        } catch (e: Exception) {
+            logger.info("Error running script for monitor ${monitor.id}, trigger: ${trigger.id}", e)
+            // if the condition fails to evaluate, return the error in the run result so an error alert can be raised
+            DocumentLevelTriggerRunResult(trigger.name, emptyList(), e)
+        }
+    }
+
     @Suppress("UNCHECKED_CAST")
     fun runBucketLevelTrigger(
         monitor: Monitor,
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsAction.kt
new file mode 100644
index 000000000..b25bd12c1
--- /dev/null
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsAction.kt
@@ -0,0 +1,15 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.action
+
+import org.opensearch.action.ActionType
+
+class GetFindingsAction private constructor() : ActionType<GetFindingsResponse>(NAME, ::GetFindingsResponse) {
+    companion object {
+        val INSTANCE = GetFindingsAction()
+        const val NAME = "cluster:admin/opendistro/alerting/findings/get"
+    }
+}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsRequest.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsRequest.kt
new file mode 100644
index 000000000..15f9a0d41
--- /dev/null
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsRequest.kt
@@ -0,0 +1,42 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.action
+
+import org.opensearch.action.ActionRequest
+import org.opensearch.action.ActionRequestValidationException
+import org.opensearch.alerting.model.Table
+import org.opensearch.common.io.stream.StreamInput
+import org.opensearch.common.io.stream.StreamOutput
+import java.io.IOException
+
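+/**
+ * Request for the Get Findings API: [findingId] optionally narrows the results to a single
+ * finding, while [table] carries the pagination and sorting parameters.
+ */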
+class GetFindingsRequest : ActionRequest {
+    val findingId: String?
+    val table: Table
+
+    constructor(
+        findingId: String?,
+        table: Table
+    ) : super() {
+        this.findingId = findingId
+        this.table = table
+    }
+
+    @Throws(IOException::class)
+    constructor(sin: StreamInput) : this(
+        findingId = sin.readOptionalString(),
+        table = Table.readFrom(sin)
+    )
+
+    override fun validate(): ActionRequestValidationException? {
+        return null
+    }
+
+    @Throws(IOException::class)
+    override fun writeTo(out: StreamOutput) {
+        out.writeOptionalString(findingId)
+        table.writeTo(out)
+    }
+}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsResponse.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsResponse.kt
new file mode 100644
index 000000000..66943e318
--- /dev/null
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsResponse.kt
@@ -0,0 +1,63 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.action
+
+import org.opensearch.action.ActionResponse
+import org.opensearch.alerting.model.FindingWithDocs
+import org.opensearch.common.io.stream.StreamInput
+import org.opensearch.common.io.stream.StreamOutput
+import org.opensearch.common.xcontent.ToXContent
+import org.opensearch.common.xcontent.ToXContentObject
+import org.opensearch.common.xcontent.XContentBuilder
+import org.opensearch.rest.RestStatus
+import java.io.IOException
+
+class GetFindingsResponse : ActionResponse, ToXContentObject {
+    var status: RestStatus
+    var totalFindings: Int?
+    var findings: List<FindingWithDocs>
+
+    constructor(
+        status: RestStatus,
+        totalFindings: Int?,
+        findings: List<FindingWithDocs>
+    ) : super() {
+        this.status = status
+        this.totalFindings = totalFindings
+        this.findings = findings
+    }
+
+    @Throws(IOException::class)
+    constructor(sin: StreamInput) {
+        this.status = sin.readEnum(RestStatus::class.java)
+        val findings = mutableListOf<FindingWithDocs>()
+        this.totalFindings = sin.readOptionalInt()
+        var currentSize = sin.readInt()
+        for (i in 0 until currentSize) {
+            findings.add(FindingWithDocs.readFrom(sin))
+        }
+        this.findings = findings
+    }
+
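+    // The write order below must mirror the read order in the StreamInput constructor above:
+    // status, totalFindings, list size, then each finding.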
+    @Throws(IOException::class)
+    override fun writeTo(out: StreamOutput) {
+        out.writeEnum(status)
+        out.writeOptionalInt(totalFindings)
+        out.writeInt(findings.size)
+        for (finding in findings) {
+            finding.writeTo(out)
+        }
+    }
+
+    @Throws(IOException::class)
+    override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
+        builder.startObject()
+            .field("total_findings", totalFindings)
+            .field("findings", findings)
+
+        return builder.endObject()
+    }
+}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt
index 4b13a3085..b847a701f 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt
@@ -21,19 +21,26 @@ import org.opensearch.action.admin.indices.rollover.RolloverRequest
 import org.opensearch.action.admin.indices.rollover.RolloverResponse
 import org.opensearch.action.support.IndicesOptions
 import org.opensearch.action.support.master.AcknowledgedResponse
+import org.opensearch.alerting.alerts.AlertIndices.Companion.ALERT_HISTORY_WRITE_INDEX
 import org.opensearch.alerting.alerts.AlertIndices.Companion.ALERT_INDEX
-import org.opensearch.alerting.alerts.AlertIndices.Companion.HISTORY_WRITE_INDEX
 import org.opensearch.alerting.opensearchapi.suspendUntil
 import org.opensearch.alerting.settings.AlertingSettings
 import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_ENABLED
 import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_INDEX_MAX_AGE
 import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_MAX_DOCS
+import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_RETENTION_PERIOD
 import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_ROLLOVER_PERIOD
+import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_ENABLED
+import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_INDEX_MAX_AGE
+import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_MAX_DOCS
+import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_RETENTION_PERIOD
+import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_ROLLOVER_PERIOD
 import org.opensearch.alerting.settings.AlertingSettings.Companion.REQUEST_TIMEOUT
 import org.opensearch.alerting.util.IndexUtils
 import org.opensearch.client.Client
 import org.opensearch.cluster.ClusterChangedEvent
 import org.opensearch.cluster.ClusterStateListener
+import org.opensearch.cluster.metadata.IndexMetadata
 import org.opensearch.cluster.service.ClusterService
 import org.opensearch.common.settings.Settings
 import org.opensearch.common.unit.TimeValue
@@ -44,14 +51,15 @@ import java.time.Instant
 
 /**
  * Class to manage the creation and rollover of alert indices and alert history indices. In progress alerts are stored
- * in [ALERT_INDEX]. Completed alerts are written to [HISTORY_WRITE_INDEX] which is an alias that points at the
- * current index to which completed alerts are written. [HISTORY_WRITE_INDEX] is periodically rolled over to a new
+ * in [ALERT_INDEX]. Completed alerts are written to [ALERT_HISTORY_WRITE_INDEX] which is an alias that points at the
+ * current index to which completed alerts are written. [ALERT_HISTORY_WRITE_INDEX] is periodically rolled over to a new
  * date based index. The frequency of rolling over indices is controlled by the `opendistro.alerting.alert_rollover_period` setting.
 *
 * These indexes are created when first used and are then rolled over every `alert_rollover_period`. The rollover is
 * initiated on the master node to ensure only a single node tries to roll it over. Once we have a curator functionality
 * in Scheduled Jobs we can migrate to using that to rollover the index.
 */
+// TODO: refactor to make a generic version of this class for finding and alerts
 class AlertIndices(
     settings: Settings,
     private val client: Client,
@@ -61,17 +69,28 @@ class AlertIndices(
 
     init {
         clusterService.addListener(this)
-        clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_ENABLED) { historyEnabled = it }
-        clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_MAX_DOCS) { historyMaxDocs = it }
-        clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_INDEX_MAX_AGE) { historyMaxAge = it }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_ENABLED) { alertHistoryEnabled = it }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_MAX_DOCS) { alertHistoryMaxDocs = it }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_INDEX_MAX_AGE) { alertHistoryMaxAge = it }
         clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_ROLLOVER_PERIOD) {
-            historyRolloverPeriod = it
-            rescheduleRollover()
+            alertHistoryRolloverPeriod = it
+            rescheduleAlertRollover()
         }
-        clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD) {
-            historyRetentionPeriod = it
+        clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_RETENTION_PERIOD) {
+            alertHistoryRetentionPeriod = it
         }
         clusterService.clusterSettings.addSettingsUpdateConsumer(REQUEST_TIMEOUT) { requestTimeout = it }
+
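+        // The finding history indices get the same set of dynamic setting consumers as the alert
+        // history indices above.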
*/ +// TODO: refactor to make a generic version of this class for finding and alerts class AlertIndices( settings: Settings, private val client: Client, @@ -61,17 +69,28 @@ class AlertIndices( init { clusterService.addListener(this) - clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_ENABLED) { historyEnabled = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_MAX_DOCS) { historyMaxDocs = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_INDEX_MAX_AGE) { historyMaxAge = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_ENABLED) { alertHistoryEnabled = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_MAX_DOCS) { alertHistoryMaxDocs = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_INDEX_MAX_AGE) { alertHistoryMaxAge = it } clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_ROLLOVER_PERIOD) { - historyRolloverPeriod = it - rescheduleRollover() + alertHistoryRolloverPeriod = it + rescheduleAlertRollover() } - clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD) { - historyRetentionPeriod = it + clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_RETENTION_PERIOD) { + alertHistoryRetentionPeriod = it } clusterService.clusterSettings.addSettingsUpdateConsumer(REQUEST_TIMEOUT) { requestTimeout = it } + + clusterService.clusterSettings.addSettingsUpdateConsumer(FINDING_HISTORY_ENABLED) { findingHistoryEnabled = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(FINDING_HISTORY_MAX_DOCS) { findingHistoryMaxDocs = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(FINDING_HISTORY_INDEX_MAX_AGE) { findingHistoryMaxAge = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(FINDING_HISTORY_ROLLOVER_PERIOD) { + findingHistoryRolloverPeriod = it + rescheduleFindingRollover() + } + clusterService.clusterSettings.addSettingsUpdateConsumer(FINDING_HISTORY_RETENTION_PERIOD) { + findingHistoryRetentionPeriod = it + } } companion object { @@ -80,33 +99,54 @@ class AlertIndices( const val ALERT_INDEX = ".opendistro-alerting-alerts" /** The alias of the index in which to write alert history */ - const val HISTORY_WRITE_INDEX = ".opendistro-alerting-alert-history-write" + const val ALERT_HISTORY_WRITE_INDEX = ".opendistro-alerting-alert-history-write" + + /** The alias of the index in which to write alert findings */ + const val FINDING_HISTORY_WRITE_INDEX = ".opensearch-alerting-finding-history-write" + + /** The index name pattern referring to all alert history indices */ + const val ALERT_HISTORY_ALL = ".opendistro-alerting-alert-history*" /** The index name pattern referring to all finding history indices */ - const val HISTORY_ALL = ".opendistro-alerting-alert-history*" + const val FINDING_HISTORY_ALL = ".opensearch-alerting-finding-history*" /** The index name pattern to create alert history indices */ - const val HISTORY_INDEX_PATTERN = "<.opendistro-alerting-alert-history-{now/d}-1>" + const val ALERT_HISTORY_INDEX_PATTERN = "<.opendistro-alerting-alert-history-{now/d}-1>" + + /** The index name pattern to create finding history indices */ + const val FINDING_HISTORY_INDEX_PATTERN = "<.opensearch-alerting-finding-history-{now/d}-1>" /** The index name pattern to query all alerts, history and current alerts.
*/ - const val ALL_INDEX_PATTERN = ".opendistro-alerting-alert*" + const val ALL_ALERT_INDEX_PATTERN = ".opendistro-alerting-alert*" + + /** The index name pattern to query all findings, history and current findings. */ + const val ALL_FINDING_INDEX_PATTERN = ".opensearch-alerting-finding*" @JvmStatic fun alertMapping() = AlertIndices::class.java.getResource("alert_mapping.json").readText() + @JvmStatic + fun findingMapping() = + AlertIndices::class.java.getResource("finding_mapping.json").readText() + private val logger = LogManager.getLogger(AlertIndices::class.java) } - @Volatile private var historyEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings) + @Volatile private var alertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings) + @Volatile private var findingHistoryEnabled = AlertingSettings.FINDING_HISTORY_ENABLED.get(settings) - @Volatile private var historyMaxDocs = AlertingSettings.ALERT_HISTORY_MAX_DOCS.get(settings) + @Volatile private var alertHistoryMaxDocs = AlertingSettings.ALERT_HISTORY_MAX_DOCS.get(settings) + @Volatile private var findingHistoryMaxDocs = AlertingSettings.FINDING_HISTORY_MAX_DOCS.get(settings) - @Volatile private var historyMaxAge = AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE.get(settings) + @Volatile private var alertHistoryMaxAge = AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE.get(settings) + @Volatile private var findingHistoryMaxAge = AlertingSettings.FINDING_HISTORY_INDEX_MAX_AGE.get(settings) - @Volatile private var historyRolloverPeriod = AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.get(settings) + @Volatile private var alertHistoryRolloverPeriod = AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.get(settings) + @Volatile private var findingHistoryRolloverPeriod = AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD.get(settings) - @Volatile private var historyRetentionPeriod = AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.get(settings) + @Volatile private var alertHistoryRetentionPeriod = AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.get(settings) + @Volatile private var findingHistoryRetentionPeriod = AlertingSettings.FINDING_HISTORY_RETENTION_PERIOD.get(settings) @Volatile private var requestTimeout = AlertingSettings.REQUEST_TIMEOUT.get(settings) @@ -115,7 +155,9 @@ class AlertIndices( // for JobsMonitor to report var lastRolloverTime: TimeValue? = null - private var historyIndexInitialized: Boolean = false + private var alertHistoryIndexInitialized: Boolean = false + + private var findingHistoryIndexInitialized: Boolean = false private var alertIndexInitialized: Boolean = false @@ -124,15 +166,18 @@ class AlertIndices( fun onMaster() { try { // try to rollover immediately as we might be restarting the cluster - rolloverHistoryIndex() + rolloverAlertHistoryIndex() + rolloverFindingHistoryIndex() // schedule the next rollover for approx MAX_AGE later scheduledRollover = threadPool - .scheduleWithFixedDelay({ rolloverAndDeleteHistoryIndices() }, historyRolloverPeriod, executorName()) + .scheduleWithFixedDelay({ rolloverAndDeleteAlertHistoryIndices() }, alertHistoryRolloverPeriod, executorName()) + scheduledRollover = threadPool + .scheduleWithFixedDelay({ rolloverAndDeleteFindingHistoryIndices() }, findingHistoryRolloverPeriod, executorName()) } catch (e: Exception) { // This should be run on cluster startup logger.error( - "Error creating alert indices. " + - "Alerts can't be recorded until master node is restarted.", + "Error creating alert/finding indices. 
" + + "Alerts/Findings can't be recorded until master node is restarted.", e ) } @@ -161,45 +206,74 @@ class AlertIndices( // if the indexes have been deleted they need to be reinitialized alertIndexInitialized = event.state().routingTable().hasIndex(ALERT_INDEX) - historyIndexInitialized = event.state().metadata().hasAlias(HISTORY_WRITE_INDEX) + alertHistoryIndexInitialized = event.state().metadata().hasAlias(ALERT_HISTORY_WRITE_INDEX) + findingHistoryIndexInitialized = event.state().metadata().hasAlias(FINDING_HISTORY_WRITE_INDEX) + } + + private fun rescheduleAlertRollover() { + if (clusterService.state().nodes.isLocalNodeElectedMaster) { + scheduledRollover?.cancel() + scheduledRollover = threadPool + .scheduleWithFixedDelay({ rolloverAndDeleteAlertHistoryIndices() }, alertHistoryRolloverPeriod, executorName()) + } } - private fun rescheduleRollover() { + private fun rescheduleFindingRollover() { if (clusterService.state().nodes.isLocalNodeElectedMaster) { scheduledRollover?.cancel() scheduledRollover = threadPool - .scheduleWithFixedDelay({ rolloverAndDeleteHistoryIndices() }, historyRolloverPeriod, executorName()) + .scheduleWithFixedDelay({ rolloverAndDeleteFindingHistoryIndices() }, findingHistoryRolloverPeriod, executorName()) } } - fun isInitialized(): Boolean { - return alertIndexInitialized && historyIndexInitialized + fun isAlertInitialized(): Boolean { + return alertIndexInitialized && alertHistoryIndexInitialized } - fun isHistoryEnabled(): Boolean = historyEnabled + fun isAlertHistoryEnabled(): Boolean = alertHistoryEnabled + + fun isFindingHistoryEnabled(): Boolean = findingHistoryEnabled suspend fun createOrUpdateAlertIndex() { if (!alertIndexInitialized) { - alertIndexInitialized = createIndex(ALERT_INDEX) + alertIndexInitialized = createIndex(ALERT_INDEX, alertMapping()) if (alertIndexInitialized) IndexUtils.alertIndexUpdated() } else { - if (!IndexUtils.alertIndexUpdated) updateIndexMapping(ALERT_INDEX) + if (!IndexUtils.alertIndexUpdated) updateIndexMapping(ALERT_INDEX, alertMapping()) } alertIndexInitialized } - suspend fun createOrUpdateInitialHistoryIndex() { - if (!historyIndexInitialized) { - historyIndexInitialized = createIndex(HISTORY_INDEX_PATTERN, HISTORY_WRITE_INDEX) - if (historyIndexInitialized) - IndexUtils.lastUpdatedHistoryIndex = IndexUtils.getIndexNameWithAlias(clusterService.state(), HISTORY_WRITE_INDEX) + suspend fun createOrUpdateInitialAlertHistoryIndex() { + if (!alertHistoryIndexInitialized) { + alertHistoryIndexInitialized = createIndex(ALERT_HISTORY_INDEX_PATTERN, alertMapping(), ALERT_HISTORY_WRITE_INDEX) + if (alertHistoryIndexInitialized) + IndexUtils.lastUpdatedAlertHistoryIndex = IndexUtils.getIndexNameWithAlias( + clusterService.state(), + ALERT_HISTORY_WRITE_INDEX + ) } else { - updateIndexMapping(HISTORY_WRITE_INDEX, true) + updateIndexMapping(ALERT_HISTORY_WRITE_INDEX, alertMapping(), true) } - historyIndexInitialized + alertHistoryIndexInitialized } - private suspend fun createIndex(index: String, alias: String? 
= null): Boolean { + suspend fun createOrUpdateInitialFindingHistoryIndex() { + if (!findingHistoryIndexInitialized) { + findingHistoryIndexInitialized = createIndex(FINDING_HISTORY_INDEX_PATTERN, findingMapping(), FINDING_HISTORY_WRITE_INDEX) + if (findingHistoryIndexInitialized) { + IndexUtils.lastUpdatedFindingHistoryIndex = IndexUtils.getIndexNameWithAlias( + clusterService.state(), + FINDING_HISTORY_WRITE_INDEX + ) + } + } else { + updateIndexMapping(FINDING_HISTORY_WRITE_INDEX, findingMapping(), true) + } + findingHistoryIndexInitialized + } + + private suspend fun createIndex(index: String, schemaMapping: String, alias: String? = null): Boolean { // This should be a fast check of local cluster state. Should be exceedingly rare that the local cluster // state does not contain the index and multiple nodes concurrently try to create the index. // If it does happen that error is handled we catch the ResourceAlreadyExistsException @@ -209,7 +283,7 @@ class AlertIndices( if (existsResponse.isExists) return true val request = CreateIndexRequest(index) - .mapping(alertMapping()) + .mapping(schemaMapping) .settings(Settings.builder().put("index.hidden", true).build()) if (alias != null) request.alias(Alias(alias)) @@ -221,15 +295,14 @@ class AlertIndices( } } - private suspend fun updateIndexMapping(index: String, alias: Boolean = false) { + private suspend fun updateIndexMapping(index: String, mapping: String, alias: Boolean = false) { val clusterState = clusterService.state() - val mapping = alertMapping() var targetIndex = index if (alias) { targetIndex = IndexUtils.getIndexNameWithAlias(clusterState, index) } - if (targetIndex == IndexUtils.lastUpdatedHistoryIndex) { + if (targetIndex == IndexUtils.lastUpdatedAlertHistoryIndex || targetIndex == IndexUtils.lastUpdatedFindingHistoryIndex) { return } @@ -247,63 +320,92 @@ class AlertIndices( private fun setIndexUpdateFlag(index: String, targetIndex: String) { when (index) { ALERT_INDEX -> IndexUtils.alertIndexUpdated() - HISTORY_WRITE_INDEX -> IndexUtils.lastUpdatedHistoryIndex = targetIndex + ALERT_HISTORY_WRITE_INDEX -> IndexUtils.lastUpdatedAlertHistoryIndex = targetIndex + FINDING_HISTORY_WRITE_INDEX -> IndexUtils.lastUpdatedFindingHistoryIndex = targetIndex } } - private fun rolloverAndDeleteHistoryIndices() { - if (historyEnabled) rolloverHistoryIndex() - deleteOldHistoryIndices() + private fun rolloverAndDeleteAlertHistoryIndices() { + if (alertHistoryEnabled) rolloverAlertHistoryIndex() + deleteOldIndices("History", ALERT_HISTORY_ALL) + } + + private fun rolloverAndDeleteFindingHistoryIndices() { + if (findingHistoryEnabled) rolloverFindingHistoryIndex() + deleteOldIndices("Finding", FINDING_HISTORY_ALL) } - private fun rolloverHistoryIndex() { - if (!historyIndexInitialized) { + private fun rolloverIndex( + initialized: Boolean, + index: String, + pattern: String, + map: String, + docsCondition: Long, + ageCondition: TimeValue, + writeIndex: String + ) { + if (!initialized) { return } // We have to pass null for newIndexName in order to get Elastic to increment the index count. 
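// Clarifying note for the request built below: rollover proceeds when either condition is met,
// i.e. the write index holds at least `docsCondition` documents or is older than `ageCondition`;
// callers wire these to the corresponding alert/finding history max-docs and max-age settings.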
- val request = RolloverRequest(HISTORY_WRITE_INDEX, null) - request.createIndexRequest.index(HISTORY_INDEX_PATTERN) - .mapping(alertMapping()) + val request = RolloverRequest(index, null) + request.createIndexRequest.index(pattern) + .mapping(map) .settings(Settings.builder().put("index.hidden", true).build()) - request.addMaxIndexDocsCondition(historyMaxDocs) - request.addMaxIndexAgeCondition(historyMaxAge) + request.addMaxIndexDocsCondition(docsCondition) + request.addMaxIndexAgeCondition(ageCondition) client.admin().indices().rolloverIndex( request, object : ActionListener<RolloverResponse> { override fun onResponse(response: RolloverResponse) { if (!response.isRolledOver) { - logger.info("$HISTORY_WRITE_INDEX not rolled over. Conditions were: ${response.conditionStatus}") + logger.info("$writeIndex not rolled over. Conditions were: ${response.conditionStatus}") } else { lastRolloverTime = TimeValue.timeValueMillis(threadPool.absoluteTimeInMillis()) } } override fun onFailure(e: Exception) { - logger.error("$HISTORY_WRITE_INDEX not roll over failed.") + logger.error("$writeIndex roll over failed.") } } ) } - private fun deleteOldHistoryIndices() { + private fun rolloverAlertHistoryIndex() { + rolloverIndex( + alertHistoryIndexInitialized, ALERT_HISTORY_WRITE_INDEX, + ALERT_HISTORY_INDEX_PATTERN, alertMapping(), + alertHistoryMaxDocs, alertHistoryMaxAge, ALERT_HISTORY_WRITE_INDEX + ) + } + + private fun rolloverFindingHistoryIndex() { + rolloverIndex( + findingHistoryIndexInitialized, FINDING_HISTORY_WRITE_INDEX, + FINDING_HISTORY_INDEX_PATTERN, findingMapping(), + findingHistoryMaxDocs, findingHistoryMaxAge, FINDING_HISTORY_WRITE_INDEX + ) + } + private fun deleteOldIndices(tag: String, indices: String) { val clusterStateRequest = ClusterStateRequest() .clear() - .indices(HISTORY_ALL) + .indices(indices) .metadata(true) .local(true) .indicesOptions(IndicesOptions.strictExpand()) - client.admin().cluster().state( clusterStateRequest, object : ActionListener<ClusterStateResponse> { override fun onResponse(clusterStateResponse: ClusterStateResponse) { if (!clusterStateResponse.state.metadata.indices.isEmpty) { val indicesToDelete = getIndicesToDelete(clusterStateResponse) - logger.info("Deleting old history indices viz $indicesToDelete") + logger.info("Deleting old $tag indices viz $indicesToDelete") deleteAllOldHistoryIndices(indicesToDelete) } else { - logger.info("No Old History Indices to delete") + logger.info("No Old $tag Indices to delete") } } override fun onFailure(e: Exception) { @@ -317,24 +419,39 @@ class AlertIndices( val indicesToDelete = mutableListOf<String>() for (entry in clusterStateResponse.state.metadata.indices) { val indexMetaData = entry.value - val creationTime = indexMetaData.creationDate - - if ((Instant.now().toEpochMilli() - creationTime) > historyRetentionPeriod.millis) { - val alias = indexMetaData.aliases.firstOrNull { HISTORY_WRITE_INDEX == it.value.alias } - if (alias != null) { - if (historyEnabled) { - // If the index has the write alias and history is enabled, don't delete the index - continue - } else { - // Otherwise reset historyIndexInitialized since index will be deleted - historyIndexInitialized = false - } - } + getHistoryIndexToDelete(indexMetaData, alertHistoryRetentionPeriod.millis, ALERT_HISTORY_WRITE_INDEX, alertHistoryEnabled) + ?.let { indicesToDelete.add(it) } + getHistoryIndexToDelete(indexMetaData, findingHistoryRetentionPeriod.millis, FINDING_HISTORY_WRITE_INDEX, findingHistoryEnabled) + ?.let { indicesToDelete.add(it) } + } + return
indicesToDelete + } - indicesToDelete.add(indexMetaData.index.name) + private fun getHistoryIndexToDelete( + indexMetadata: IndexMetadata, + retentionPeriodMillis: Long, + writeIndex: String, + historyEnabled: Boolean + ): String? { + val creationTime = indexMetadata.creationDate + if ((Instant.now().toEpochMilli() - creationTime) > retentionPeriodMillis) { + val alias = indexMetadata.aliases.firstOrNull { writeIndex == it.value.alias } + if (alias != null) { + if (historyEnabled) { + // If the index has the write alias and history is enabled, don't delete the index + return null + } else if (writeIndex == ALERT_HISTORY_WRITE_INDEX) { + // Otherwise reset alertHistoryIndexInitialized since index will be deleted + alertHistoryIndexInitialized = false + } else if (writeIndex == FINDING_HISTORY_WRITE_INDEX) { + // Otherwise reset findingHistoryIndexInitialized since index will be deleted + findingHistoryIndexInitialized = false + } } + + return indexMetadata.index.name } - return indicesToDelete + return null } private fun deleteAllOldHistoryIndices(indicesToDelete: List) { @@ -345,12 +462,14 @@ class AlertIndices( object : ActionListener { override fun onResponse(deleteIndicesResponse: AcknowledgedResponse) { if (!deleteIndicesResponse.isAcknowledged) { - logger.error("Could not delete one or more Alerting history indices: $indicesToDelete. Retrying one by one.") + logger.error( + "Could not delete one or more Alerting/Finding history indices: $indicesToDelete. Retrying one by one." + ) deleteOldHistoryIndex(indicesToDelete) } } override fun onFailure(e: Exception) { - logger.error("Delete for Alerting History Indices $indicesToDelete Failed. Retrying one By one.") + logger.error("Delete for Alerting/Finding History Indices $indicesToDelete Failed. Retrying one By one.") deleteOldHistoryIndex(indicesToDelete) } } @@ -367,7 +486,7 @@ class AlertIndices( override fun onResponse(acknowledgedResponse: AcknowledgedResponse?) { if (acknowledgedResponse != null) { if (!acknowledgedResponse.isAcknowledged) { - logger.error("Could not delete one or more Alerting history indices: $index") + logger.error("Could not delete one or more Alerting/Finding history indices: $index") } } } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertMover.kt b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertMover.kt index 06b9bea48..a9c704958 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertMover.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertMover.kt @@ -11,8 +11,8 @@ import org.opensearch.action.delete.DeleteRequest import org.opensearch.action.index.IndexRequest import org.opensearch.action.search.SearchRequest import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.alerts.AlertIndices.Companion.ALERT_HISTORY_WRITE_INDEX import org.opensearch.alerting.alerts.AlertIndices.Companion.ALERT_INDEX -import org.opensearch.alerting.alerts.AlertIndices.Companion.HISTORY_WRITE_INDEX import org.opensearch.alerting.model.Alert import org.opensearch.alerting.model.Monitor import org.opensearch.alerting.opensearchapi.suspendUntil @@ -37,7 +37,7 @@ import org.opensearch.search.builder.SearchSourceBuilder * 1. Find active alerts: * a. matching monitorId if no monitor is provided (postDelete) * b. matching monitorId and no triggerIds if monitor is provided (postIndex) - * 2. Move alerts over to [HISTORY_WRITE_INDEX] as DELETED + * 2. Move alerts over to [ALERT_HISTORY_WRITE_INDEX] as DELETED * 3. 
Delete alerts from [ALERT_INDEX] * 4. Schedule a retry if there were any failures */ @@ -61,7 +61,7 @@ suspend fun moveAlerts(client: Client, monitorId: String, monitor: Monitor? = nu // If no alerts are found, simply return if (response.hits.totalHits?.value == 0L) return val indexRequests = response.hits.map { hit -> - IndexRequest(AlertIndices.HISTORY_WRITE_INDEX) + IndexRequest(AlertIndices.ALERT_HISTORY_WRITE_INDEX) .routing(monitorId) .source( Alert.parse(alertContentParser(hit.sourceRef), hit.id, hit.version) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/Alert.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/Alert.kt index e892bf560..d65741a51 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/Alert.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/Alert.kt @@ -32,6 +32,8 @@ data class Alert( val monitorUser: User?, val triggerId: String, val triggerName: String, + val findingIds: List, + val relatedDocIds: List, val state: State, val startTime: Instant, val endTime: Instant? = null, @@ -65,7 +67,7 @@ data class Alert( triggerId = trigger.id, triggerName = trigger.name, state = state, startTime = startTime, lastNotificationTime = lastNotificationTime, errorMessage = errorMessage, errorHistory = errorHistory, severity = trigger.severity, actionExecutionResults = actionExecutionResults, schemaVersion = schemaVersion, - aggregationResultBucket = null + aggregationResultBucket = null, findingIds = emptyList(), relatedDocIds = emptyList() ) constructor( @@ -83,7 +85,7 @@ data class Alert( triggerId = trigger.id, triggerName = trigger.name, state = state, startTime = startTime, lastNotificationTime = lastNotificationTime, errorMessage = errorMessage, errorHistory = errorHistory, severity = trigger.severity, actionExecutionResults = actionExecutionResults, schemaVersion = schemaVersion, - aggregationResultBucket = null + aggregationResultBucket = null, findingIds = emptyList(), relatedDocIds = emptyList() ) constructor( @@ -102,7 +104,27 @@ data class Alert( triggerId = trigger.id, triggerName = trigger.name, state = state, startTime = startTime, lastNotificationTime = lastNotificationTime, errorMessage = errorMessage, errorHistory = errorHistory, severity = trigger.severity, actionExecutionResults = actionExecutionResults, schemaVersion = schemaVersion, - aggregationResultBucket = aggregationResultBucket + aggregationResultBucket = aggregationResultBucket, findingIds = emptyList(), relatedDocIds = emptyList() + ) + + constructor( + monitor: Monitor, + trigger: DocumentLevelTrigger, + findingIds: List, + relatedDocIds: List, + startTime: Instant, + lastNotificationTime: Instant?, + state: State = State.ACTIVE, + errorMessage: String? 
= null, + errorHistory: List = mutableListOf(), + actionExecutionResults: List = mutableListOf(), + schemaVersion: Int = NO_SCHEMA_VERSION + ) : this( + monitorId = monitor.id, monitorName = monitor.name, monitorVersion = monitor.version, monitorUser = monitor.user, + triggerId = trigger.id, triggerName = trigger.name, state = state, startTime = startTime, + lastNotificationTime = lastNotificationTime, errorMessage = errorMessage, errorHistory = errorHistory, + severity = trigger.severity, actionExecutionResults = actionExecutionResults, schemaVersion = schemaVersion, + aggregationResultBucket = null, findingIds = findingIds, relatedDocIds = relatedDocIds ) enum class State { @@ -122,6 +144,8 @@ data class Alert( } else null, triggerId = sin.readString(), triggerName = sin.readString(), + findingIds = sin.readStringList(), + relatedDocIds = sin.readStringList(), state = sin.readEnum(State::class.java), startTime = sin.readInstant(), endTime = sin.readOptionalInstant(), @@ -148,6 +172,8 @@ data class Alert( monitorUser?.writeTo(out) out.writeString(triggerId) out.writeString(triggerName) + out.writeStringCollection(findingIds) + out.writeStringCollection(relatedDocIds) out.writeEnum(state) out.writeInstant(startTime) out.writeOptionalInstant(endTime) @@ -176,6 +202,8 @@ data class Alert( const val MONITOR_USER_FIELD = "monitor_user" const val TRIGGER_ID_FIELD = "trigger_id" const val TRIGGER_NAME_FIELD = "trigger_name" + const val FINDING_IDS = "finding_ids" + const val RELATED_DOC_IDS = "related_doc_ids" const val STATE_FIELD = "state" const val START_TIME_FIELD = "start_time" const val LAST_NOTIFICATION_TIME_FIELD = "last_notification_time" @@ -201,6 +229,8 @@ data class Alert( var monitorUser: User? = null lateinit var triggerId: String lateinit var triggerName: String + val findingIds = mutableListOf() + val relatedDocIds = mutableListOf() lateinit var state: State lateinit var startTime: Instant lateinit var severity: String @@ -223,6 +253,18 @@ data class Alert( MONITOR_VERSION_FIELD -> monitorVersion = xcp.longValue() MONITOR_USER_FIELD -> monitorUser = if (xcp.currentToken() == XContentParser.Token.VALUE_NULL) null else User.parse(xcp) TRIGGER_ID_FIELD -> triggerId = xcp.text() + FINDING_IDS -> { + ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { + findingIds.add(xcp.text()) + } + } + RELATED_DOC_IDS -> { + ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { + relatedDocIds.add(xcp.text()) + } + } STATE_FIELD -> state = State.valueOf(xcp.text()) TRIGGER_NAME_FIELD -> triggerName = xcp.text() START_TIME_FIELD -> startTime = requireNotNull(xcp.instant()) @@ -264,7 +306,8 @@ data class Alert( state = requireNotNull(state), startTime = requireNotNull(startTime), endTime = endTime, lastNotificationTime = lastNotificationTime, acknowledgedTime = acknowledgedTime, errorMessage = errorMessage, errorHistory = errorHistory, severity = severity, - actionExecutionResults = actionExecutionResults, aggregationResultBucket = aggAlertBucket + actionExecutionResults = actionExecutionResults, aggregationResultBucket = aggAlertBucket, findingIds = findingIds, + relatedDocIds = relatedDocIds ) } @@ -297,6 +340,8 @@ data class Alert( builder.field(TRIGGER_ID_FIELD, triggerId) .field(TRIGGER_NAME_FIELD, triggerName) + .field(FINDING_IDS, findingIds.toTypedArray()) + .field(RELATED_DOC_IDS, relatedDocIds.toTypedArray()) 
.field(STATE_FIELD, state) .field(ERROR_MESSAGE_FIELD, errorMessage) .field(ALERT_HISTORY_FIELD, errorHistory.toTypedArray()) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/AlertingConfigAccessor.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/AlertingConfigAccessor.kt index 7e982dd41..9ad8bd5cb 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/AlertingConfigAccessor.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/AlertingConfigAccessor.kt @@ -26,6 +26,18 @@ import org.opensearch.common.xcontent.XContentType */ class AlertingConfigAccessor { companion object { + suspend fun getMonitorInfo(client: Client, xContentRegistry: NamedXContentRegistry, monitorId: String): Monitor { + val jobSource = getAlertingConfigDocumentSource(client, "Monitor", monitorId) + return withContext(Dispatchers.IO) { + val xcp = XContentHelper.createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, + jobSource, XContentType.JSON + ) + val monitor = Monitor.parse(xcp) + monitor + } + } + suspend fun getDestinationInfo(client: Client, xContentRegistry: NamedXContentRegistry, destinationId: String): Destination { val jobSource = getAlertingConfigDocumentSource(client, "Destination", destinationId) return withContext(Dispatchers.IO) { diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentExecutionContext.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentExecutionContext.kt new file mode 100644 index 000000000..a6acd027a --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentExecutionContext.kt @@ -0,0 +1,14 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.alerting.core.model.DocLevelQuery + +data class DocumentExecutionContext( + val queries: List, + val lastRunContext: Map, + val updatedLastRunContext: Map +) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentLevelTrigger.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentLevelTrigger.kt new file mode 100644 index 000000000..1495be418 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentLevelTrigger.kt @@ -0,0 +1,160 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.alerting.model.Trigger.Companion.ACTIONS_FIELD +import org.opensearch.alerting.model.Trigger.Companion.ID_FIELD +import org.opensearch.alerting.model.Trigger.Companion.NAME_FIELD +import org.opensearch.alerting.model.Trigger.Companion.SEVERITY_FIELD +import org.opensearch.alerting.model.action.Action +import org.opensearch.common.CheckedFunction +import org.opensearch.common.ParseField +import org.opensearch.common.UUIDs +import org.opensearch.common.io.stream.StreamInput +import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.common.xcontent.NamedXContentRegistry +import org.opensearch.common.xcontent.ToXContent +import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.common.xcontent.XContentParser +import org.opensearch.common.xcontent.XContentParser.Token +import org.opensearch.common.xcontent.XContentParserUtils +import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.opensearch.script.Script +import java.io.IOException + +/** + * A single-alert Trigger that uses Painless scripts which execute on the 
response of the Monitor input query to define + * alerting conditions. + */ +data class DocumentLevelTrigger( + override val id: String = UUIDs.base64UUID(), + override val name: String, + override val severity: String, + override val actions: List, + val condition: Script +) : Trigger { + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + sin.readString(), // id + sin.readString(), // name + sin.readString(), // severity + sin.readList(::Action), // actions + Script(sin) + ) + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + .startObject(DOCUMENT_LEVEL_TRIGGER_FIELD) + .field(ID_FIELD, id) + .field(NAME_FIELD, name) + .field(SEVERITY_FIELD, severity) + .startObject(CONDITION_FIELD) + .field(SCRIPT_FIELD, condition) + .endObject() + .field(ACTIONS_FIELD, actions.toTypedArray()) + .endObject() + .endObject() + return builder + } + + override fun name(): String { + return DOCUMENT_LEVEL_TRIGGER_FIELD + } + + /** Returns a representation of the trigger suitable for passing into painless and mustache scripts. */ + fun asTemplateArg(): Map { + return mapOf( + ID_FIELD to id, NAME_FIELD to name, SEVERITY_FIELD to severity, + ACTIONS_FIELD to actions.map { it.asTemplateArg() } + ) + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeString(id) + out.writeString(name) + out.writeString(severity) + out.writeCollection(actions) + condition.writeTo(out) + } + + companion object { + const val DOCUMENT_LEVEL_TRIGGER_FIELD = "document_level_trigger" + const val CONDITION_FIELD = "condition" + const val SCRIPT_FIELD = "script" + const val QUERY_IDS_FIELD = "query_ids" + + val XCONTENT_REGISTRY = NamedXContentRegistry.Entry( + Trigger::class.java, ParseField(DOCUMENT_LEVEL_TRIGGER_FIELD), + CheckedFunction { parseInner(it) } + ) + + @JvmStatic @Throws(IOException::class) + fun parseInner(xcp: XContentParser): DocumentLevelTrigger { + var id = UUIDs.base64UUID() // assign a default triggerId if one is not specified + lateinit var name: String + lateinit var severity: String + lateinit var condition: Script + val queryIds: MutableList = mutableListOf() + val actions: MutableList = mutableListOf() + + if (xcp.currentToken() != Token.START_OBJECT && xcp.currentToken() != Token.FIELD_NAME) { + XContentParserUtils.throwUnknownToken(xcp.currentToken(), xcp.tokenLocation) + } + + // If the parser began on START_OBJECT, move to the next token so that the while loop enters on + // the fieldName (or END_OBJECT if it's empty). + if (xcp.currentToken() == Token.START_OBJECT) xcp.nextToken() + + while (xcp.currentToken() != Token.END_OBJECT) { + val fieldName = xcp.currentName() + + xcp.nextToken() + when (fieldName) { + ID_FIELD -> id = xcp.text() + NAME_FIELD -> name = xcp.text() + SEVERITY_FIELD -> severity = xcp.text() + CONDITION_FIELD -> { + xcp.nextToken() + condition = Script.parse(xcp) + require(condition.lang == Script.DEFAULT_SCRIPT_LANG) { + "Invalid script language. 
Allowed languages are [${Script.DEFAULT_SCRIPT_LANG}]" + } + xcp.nextToken() + } + QUERY_IDS_FIELD -> { + ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp) + while (xcp.nextToken() != Token.END_ARRAY) { + queryIds.add(xcp.text()) + } + } + ACTIONS_FIELD -> { + ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp) + while (xcp.nextToken() != Token.END_ARRAY) { + actions.add(Action.parse(xcp)) + } + } + } + xcp.nextToken() + } + + return DocumentLevelTrigger( + name = requireNotNull(name) { "Trigger name is null" }, + severity = requireNotNull(severity) { "Trigger severity is null" }, + condition = requireNotNull(condition) { "Trigger condition is null" }, + actions = requireNotNull(actions) { "Trigger actions are null" }, + id = requireNotNull(id) { "Trigger id is null." } + ) + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): DocumentLevelTrigger { + return DocumentLevelTrigger(sin) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt new file mode 100644 index 000000000..c5556630b --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt @@ -0,0 +1,66 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.alerting.alerts.AlertError +import org.opensearch.common.io.stream.StreamInput +import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.common.xcontent.ToXContent +import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.script.ScriptException +import java.io.IOException +import java.time.Instant + +data class DocumentLevelTriggerRunResult( + override var triggerName: String, + var triggeredDocs: List<String>, + override var error: Exception?, + var actionResults: MutableMap<String, ActionRunResult> = mutableMapOf() +) : TriggerRunResult(triggerName, error) { + + @Throws(IOException::class) + @Suppress("UNCHECKED_CAST") + constructor(sin: StreamInput) : this( + triggerName = sin.readString(), + error = sin.readException(), + triggeredDocs = sin.readStringList(), + actionResults = sin.readMap() as MutableMap<String, ActionRunResult> + ) + + override fun alertError(): AlertError? { + if (error != null) { + return AlertError(Instant.now(), "Failed evaluating trigger:\n${error!!.userErrorMessage()}") + } + for (actionResult in actionResults.values) { + if (actionResult.error != null) { + return AlertError(Instant.now(), "Failed running action:\n${actionResult.error.userErrorMessage()}") + } + } + return null + } + + override fun internalXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + if (error is ScriptException) error = Exception((error as ScriptException).toJsonString(), error) + return builder + .field("triggeredDocs", triggeredDocs as List<String>) + .field("action_results", actionResults as Map<String, ActionRunResult>) + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + super.writeTo(out) + out.writeStringCollection(triggeredDocs) + out.writeMap(actionResults as Map<String, Any>) + } + + companion object { + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): TriggerRunResult { + return DocumentLevelTriggerRunResult(sin) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/Finding.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/Finding.kt new file mode 100644 index 000000000..1e8a186ff --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/Finding.kt @@ -0,0 +1,146 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.alerting.core.model.DocLevelQuery +import org.opensearch.alerting.opensearchapi.instant +import org.opensearch.common.io.stream.StreamInput +import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.common.io.stream.Writeable +import org.opensearch.common.xcontent.ToXContent +import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.common.xcontent.XContentParser +import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import java.io.IOException +import java.time.Instant + +/** + * A wrapper of the log event that enriches the event by also including information about the monitor it triggered.
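For orientation, a minimal construction sketch (illustrative only; the field values are hypothetical and the query list is left empty for brevity):

import java.time.Instant
import org.opensearch.alerting.model.Finding

// Ties the documents that matched back to the monitor that produced the match.
// The id parameter defaults to NO_ID here; a real one would be supplied when persisting.
fun exampleFinding(): Finding = Finding(
    relatedDocIds = listOf("doc-1", "doc-2"),
    monitorId = "monitor-1",
    monitorName = "example-monitor",
    index = "logs-000001",
    docLevelQueries = emptyList(),
    timestamp = Instant.now()
)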
+ */ +class Finding( + val id: String = NO_ID, + val relatedDocIds: List, + val monitorId: String, + val monitorName: String, + val index: String, + val docLevelQueries: List, + val timestamp: Instant +) : Writeable, ToXContent { + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + id = sin.readString(), + relatedDocIds = sin.readStringList(), + monitorId = sin.readString(), + monitorName = sin.readString(), + index = sin.readString(), + docLevelQueries = sin.readList((DocLevelQuery)::readFrom), + timestamp = sin.readInstant() + ) + + fun asTemplateArg(): Map { + return mapOf( + FINDING_ID_FIELD to id, + RELATED_DOC_IDS_FIELD to relatedDocIds, + MONITOR_ID_FIELD to monitorId, + MONITOR_NAME_FIELD to monitorName, + INDEX_FIELD to index, + QUERIES_FIELD to docLevelQueries, + TIMESTAMP_FIELD to timestamp.toEpochMilli() + ) + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + .field(FINDING_ID_FIELD, id) + .field(RELATED_DOC_IDS_FIELD, relatedDocIds) + .field(MONITOR_ID_FIELD, monitorId) + .field(MONITOR_NAME_FIELD, monitorName) + .field(INDEX_FIELD, index) + .field(QUERIES_FIELD, docLevelQueries.toTypedArray()) + .field(TIMESTAMP_FIELD, timestamp.toEpochMilli()) + builder.endObject() + return builder + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeString(id) + out.writeStringCollection(relatedDocIds) + out.writeString(monitorId) + out.writeString(monitorName) + out.writeString(index) + out.writeCollection(docLevelQueries) + out.writeInstant(timestamp) + } + + companion object { + const val FINDING_ID_FIELD = "id" + const val RELATED_DOC_IDS_FIELD = "related_doc_ids" + const val MONITOR_ID_FIELD = "monitor_id" + const val MONITOR_NAME_FIELD = "monitor_name" + const val INDEX_FIELD = "index" + const val QUERIES_FIELD = "queries" + const val TIMESTAMP_FIELD = "timestamp" + const val NO_ID = "" + + @JvmStatic @JvmOverloads + @Throws(IOException::class) + fun parse(xcp: XContentParser): Finding { + var id: String = NO_ID + val relatedDocIds: MutableList = mutableListOf() + lateinit var monitorId: String + lateinit var monitorName: String + lateinit var index: String + val queries: MutableList = mutableListOf() + lateinit var timestamp: Instant + + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + FINDING_ID_FIELD -> id = xcp.text() + RELATED_DOC_IDS_FIELD -> { + ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { + relatedDocIds.add(xcp.text()) + } + } + MONITOR_ID_FIELD -> monitorId = xcp.text() + MONITOR_NAME_FIELD -> monitorName = xcp.text() + INDEX_FIELD -> index = xcp.text() + QUERIES_FIELD -> { + ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { + queries.add(DocLevelQuery.parse(xcp)) + } + } + TIMESTAMP_FIELD -> { + timestamp = requireNotNull(xcp.instant()) + } + } + } + + return Finding( + id = id, + relatedDocIds = relatedDocIds, + monitorId = monitorId, + monitorName = monitorName, + index = index, + docLevelQueries = queries, + timestamp = timestamp + ) + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): Finding { + return Finding(sin) + } + } +} diff --git 
a/alerting/src/main/kotlin/org/opensearch/alerting/model/FindingDocument.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/FindingDocument.kt new file mode 100644 index 000000000..bb6728b35 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/FindingDocument.kt @@ -0,0 +1,91 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.apache.logging.log4j.LogManager +import org.opensearch.common.io.stream.StreamInput +import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.common.io.stream.Writeable +import org.opensearch.common.xcontent.ToXContent +import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.common.xcontent.XContentParser +import org.opensearch.common.xcontent.XContentParserUtils +import java.io.IOException + +private val log = LogManager.getLogger(FindingDocument::class.java) + +class FindingDocument( + val index: String, + val id: String, + val found: Boolean, + val document: String +) : Writeable, ToXContent { + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + index = sin.readString(), + id = sin.readString(), + found = sin.readBoolean(), + document = sin.readString() + ) + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject() + .field(INDEX_FIELD, index) + .field(FINDING_DOCUMENT_ID_FIELD, id) + .field(FOUND_FIELD, found) + .field(DOCUMENT_FIELD, document) + .endObject() + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeString(index) + out.writeString(id) + out.writeBoolean(found) + out.writeString(document) + } + + companion object { + const val INDEX_FIELD = "index" + const val FINDING_DOCUMENT_ID_FIELD = "id" + const val FOUND_FIELD = "found" + const val DOCUMENT_FIELD = "document" + const val NO_ID = "" + const val NO_INDEX = "" + + @JvmStatic @JvmOverloads + @Throws(IOException::class) + fun parse(xcp: XContentParser, id: String = NO_ID, index: String = NO_INDEX): FindingDocument { + var found = false + var document: String = "" + + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + FOUND_FIELD -> found = xcp.booleanValue() + DOCUMENT_FIELD -> document = xcp.text() + } + } + + return FindingDocument( + index = index, + id = id, + found = found, + document = document + ) + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): FindingDocument { + return FindingDocument(sin) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/FindingWithDocs.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/FindingWithDocs.kt new file mode 100644 index 000000000..5fbcb98ff --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/FindingWithDocs.kt @@ -0,0 +1,85 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.apache.logging.log4j.LogManager +import org.opensearch.common.io.stream.StreamInput +import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.common.io.stream.Writeable +import org.opensearch.common.xcontent.ToXContent +import org.opensearch.common.xcontent.XContentBuilder +import 
org.opensearch.common.xcontent.XContentParser +import org.opensearch.common.xcontent.XContentParserUtils +import java.io.IOException + +private val log = LogManager.getLogger(Finding::class.java) + +class FindingWithDocs( + val finding: Finding, + val documents: List +) : Writeable, ToXContent { + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + finding = Finding.readFrom(sin), + documents = sin.readList((FindingDocument)::readFrom) + ) + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + finding.writeTo(out) + documents.forEach { + it.writeTo(out) + } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + .field(FINDING_FIELD, finding) + .field(DOCUMENTS_FIELD, documents) + builder.endObject() + return builder + } + + companion object { + const val FINDING_FIELD = "finding" + const val DOCUMENTS_FIELD = "document_list" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser): FindingWithDocs { + lateinit var finding: Finding + val documents: MutableList = mutableListOf() + + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + FINDING_FIELD -> finding = Finding.parse(xcp) + DOCUMENTS_FIELD -> { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { + documents.add(FindingDocument.parse(xcp)) + } + } + } + } + + return FindingWithDocs( + finding = finding, + documents = documents + ) + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): FindingWithDocs { + return FindingWithDocs(sin) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/Monitor.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/Monitor.kt index 8141ebb42..787e5d778 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/Monitor.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/Monitor.kt @@ -55,6 +55,7 @@ data class Monitor( val schemaVersion: Int = NO_SCHEMA_VERSION, val inputs: List, val triggers: List, + val lastRunContext: Map, val uiMetadata: Map ) : ScheduledJob { @@ -68,11 +69,13 @@ data class Monitor( // Verify Trigger type based on Monitor type when (monitorType) { MonitorType.QUERY_LEVEL_MONITOR -> - require(trigger is QueryLevelTrigger) { "Incompatible trigger [$trigger.id] for monitor type [$monitorType]" } + require(trigger is QueryLevelTrigger) { "Incompatible trigger [${trigger.id}] for monitor type [$monitorType]" } MonitorType.BUCKET_LEVEL_MONITOR -> - require(trigger is BucketLevelTrigger) { "Incompatible trigger [$trigger.id] for monitor type [$monitorType]" } + require(trigger is BucketLevelTrigger) { "Incompatible trigger [${trigger.id}] for monitor type [$monitorType]" } MonitorType.CLUSTER_METRICS_MONITOR -> - require(trigger is QueryLevelTrigger) { "Incompatible trigger [$trigger.id] for monitor type [$monitorType]" } + require(trigger is QueryLevelTrigger) { "Incompatible trigger [${trigger.id}] for monitor type [$monitorType]" } + MonitorType.DOC_LEVEL_MONITOR -> + require(trigger is DocumentLevelTrigger) { "Incompatible trigger [${trigger.id}] for monitor type [$monitorType]" } } } if (enabled) { @@ -108,8 +111,9 @@ data class Monitor( User(sin) } else null, schemaVersion = 
sin.readInt(), - inputs = sin.readList(::SearchInput), + inputs = sin.readList((Input)::readFrom), triggers = sin.readList((Trigger)::readFrom), + lastRunContext = suppressWarning(sin.readMap()), uiMetadata = suppressWarning(sin.readMap()) ) @@ -118,7 +122,8 @@ data class Monitor( enum class MonitorType(val value: String) { QUERY_LEVEL_MONITOR("query_level_monitor"), BUCKET_LEVEL_MONITOR("bucket_level_monitor"), - CLUSTER_METRICS_MONITOR("cluster_metrics_monitor"); + CLUSTER_METRICS_MONITOR("cluster_metrics_monitor"), + DOC_LEVEL_MONITOR("doc_level_monitor"); override fun toString(): String { return value @@ -156,6 +161,7 @@ data class Monitor( .field(INPUTS_FIELD, inputs.toTypedArray()) .field(TRIGGERS_FIELD, triggers.toTypedArray()) .optionalTimeField(LAST_UPDATE_TIME_FIELD, lastUpdateTime) + if (lastRunContext.isNotEmpty()) builder.field(LAST_RUN_CONTEXT_FIELD, lastRunContext) if (uiMetadata.isNotEmpty()) builder.field(UI_METADATA_FIELD, uiMetadata) if (params.paramAsBoolean("with_type", false)) builder.endObject() return builder.endObject() @@ -181,14 +187,22 @@ data class Monitor( out.writeBoolean(user != null) user?.writeTo(out) out.writeInt(schemaVersion) - out.writeCollection(inputs) + // Outputting type with each Input so that the generic Input.readFrom() can read it + out.writeVInt(inputs.size) + inputs.forEach { + if (it is SearchInput) out.writeEnum(Input.Type.SEARCH_INPUT) + else out.writeEnum(Input.Type.DOCUMENT_LEVEL_INPUT) + it.writeTo(out) + } // Outputting type with each Trigger so that the generic Trigger.readFrom() can read it out.writeVInt(triggers.size) triggers.forEach { if (it is QueryLevelTrigger) out.writeEnum(Trigger.Type.QUERY_LEVEL_TRIGGER) + else if (it is DocumentLevelTrigger) out.writeEnum(Trigger.Type.DOCUMENT_LEVEL_TRIGGER) else out.writeEnum(Trigger.Type.BUCKET_LEVEL_TRIGGER) it.writeTo(out) } + out.writeMap(lastRunContext) out.writeMap(uiMetadata) } @@ -206,6 +220,7 @@ data class Monitor( const val NO_VERSION = 1L const val INPUTS_FIELD = "inputs" const val LAST_UPDATE_TIME_FIELD = "last_update_time" + const val LAST_RUN_CONTEXT_FIELD = "last_run_context" const val UI_METADATA_FIELD = "ui_metadata" const val ENABLED_TIME_FIELD = "enabled_time" @@ -228,6 +243,7 @@ data class Monitor( var schedule: Schedule? = null var lastUpdateTime: Instant? = null var enabledTime: Instant? 
= null + var lastRunContext: Map<String, Any> = mapOf() var uiMetadata: Map<String, Any> = mapOf() var enabled = true var schemaVersion = NO_SCHEMA_VERSION @@ -269,6 +285,7 @@ data class Monitor( } ENABLED_TIME_FIELD -> enabledTime = xcp.instant() LAST_UPDATE_TIME_FIELD -> lastUpdateTime = xcp.instant() + LAST_RUN_CONTEXT_FIELD -> lastRunContext = xcp.map() UI_METADATA_FIELD -> uiMetadata = xcp.map() else -> { xcp.skipChildren() @@ -294,6 +311,7 @@ data class Monitor( schemaVersion, inputs.toList(), triggers.toList(), + lastRunContext, uiMetadata ) } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/Trigger.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/Trigger.kt index e6b30415b..e3a9b12ab 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/Trigger.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/Trigger.kt @@ -18,6 +18,7 @@ import java.io.IOException interface Trigger : Writeable, ToXContentObject { enum class Type(val value: String) { + DOCUMENT_LEVEL_TRIGGER(DocumentLevelTrigger.DOCUMENT_LEVEL_TRIGGER_FIELD), QUERY_LEVEL_TRIGGER(QueryLevelTrigger.QUERY_LEVEL_TRIGGER_FIELD), BUCKET_LEVEL_TRIGGER(BucketLevelTrigger.BUCKET_LEVEL_TRIGGER_FIELD); @@ -58,6 +59,7 @@ interface Trigger : Writeable, ToXContentObject { return when (val type = sin.readEnum(Trigger.Type::class.java)) { Type.QUERY_LEVEL_TRIGGER -> QueryLevelTrigger(sin) Type.BUCKET_LEVEL_TRIGGER -> BucketLevelTrigger(sin) + Type.DOCUMENT_LEVEL_TRIGGER -> DocumentLevelTrigger(sin) // This shouldn't be reachable but ensuring exhaustiveness as Kotlin warns // enum can be null in Java else -> throw IllegalStateException("Unexpected input [$type] when reading Trigger") diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt new file mode 100644 index 000000000..e71412a2b --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt @@ -0,0 +1,67 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.action.GetFindingsAction +import org.opensearch.alerting.action.GetFindingsRequest +import org.opensearch.alerting.model.Table +import org.opensearch.client.node.NodeClient +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BaseRestHandler.RestChannelConsumer +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.RestRequest.Method.GET +import org.opensearch.rest.action.RestToXContentListener + +/** + * This class consists of the REST handler to search findings. + */ +class RestGetFindingsAction : BaseRestHandler() { + + private val log = LogManager.getLogger(RestGetFindingsAction::class.java) + + override fun getName(): String { + return "get_findings_action" + } + + override fun routes(): List<Route> { + return listOf( + Route(GET, "${AlertingPlugin.FINDING_BASE_URI}/_search") + ) + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.info("${request.method()} ${request.path()}") + + val findingID: String? = request.param("findingId") + val sortString = request.param("sortString", "id.keyword") + val sortOrder = request.param("sortOrder", "asc") + val missing: String?
= request.param("missing") + val size = request.paramAsInt("size", 20) + val startIndex = request.paramAsInt("startIndex", 0) + val searchString = request.param("searchString", "") + + val table = Table( + sortOrder, + sortString, + missing, + size, + startIndex, + searchString + ) + + val getFindingsSearchRequest = GetFindingsRequest( + findingID, + table + ) + return RestChannelConsumer { + channel -> + client.execute(GetFindingsAction.INSTANCE, getFindingsSearchRequest, RestToXContentListener(channel)) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt index f4c6d5742..65283826c 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt @@ -10,7 +10,10 @@ import org.opensearch.alerting.AlertingPlugin import org.opensearch.alerting.action.IndexMonitorAction import org.opensearch.alerting.action.IndexMonitorRequest import org.opensearch.alerting.action.IndexMonitorResponse +import org.opensearch.alerting.model.BucketLevelTrigger +import org.opensearch.alerting.model.DocumentLevelTrigger import org.opensearch.alerting.model.Monitor +import org.opensearch.alerting.model.QueryLevelTrigger import org.opensearch.alerting.util.IF_PRIMARY_TERM import org.opensearch.alerting.util.IF_SEQ_NO import org.opensearch.alerting.util.REFRESH @@ -79,6 +82,31 @@ class RestIndexMonitorAction : BaseRestHandler() { val xcp = request.contentParser() ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) val monitor = Monitor.parse(xcp, id).copy(lastUpdateTime = Instant.now()) + val monitorType = monitor.monitorType + val triggers = monitor.triggers + when (monitorType) { + Monitor.MonitorType.QUERY_LEVEL_MONITOR -> { + triggers.forEach { + if (it !is QueryLevelTrigger) { + throw IllegalArgumentException("Illegal trigger type, ${it.javaClass.name}, for query level monitor") + } + } + } + Monitor.MonitorType.BUCKET_LEVEL_MONITOR -> { + triggers.forEach { + if (it !is BucketLevelTrigger) { + throw IllegalArgumentException("Illegal trigger type, ${it.javaClass.name}, for bucket level monitor") + } + } + } + Monitor.MonitorType.DOC_LEVEL_MONITOR -> { + triggers.forEach { + if (it !is DocumentLevelTrigger) { + throw IllegalArgumentException("Illegal trigger type, ${it.javaClass.name}, for document level monitor") + } + } + } + } val seqNo = request.paramAsLong(IF_SEQ_NO, SequenceNumbers.UNASSIGNED_SEQ_NO) val primaryTerm = request.paramAsLong(IF_PRIMARY_TERM, SequenceNumbers.UNASSIGNED_PRIMARY_TERM) val refreshPolicy = if (request.hasParam(REFRESH)) { diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt index 430b143c4..44446bb78 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt @@ -11,7 +11,7 @@ import org.opensearch.action.search.SearchResponse import org.opensearch.alerting.AlertingPlugin import org.opensearch.alerting.action.SearchMonitorAction import org.opensearch.alerting.action.SearchMonitorRequest -import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_INDEX_PATTERN +import 
org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_ALERT_INDEX_PATTERN import org.opensearch.alerting.core.model.ScheduledJob import org.opensearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX import org.opensearch.alerting.model.Monitor @@ -89,7 +89,7 @@ class RestSearchMonitorAction( log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}/_search") val index = request.param("index", SCHEDULED_JOBS_INDEX) - if (index != SCHEDULED_JOBS_INDEX && index != ALL_INDEX_PATTERN) { + if (index != SCHEDULED_JOBS_INDEX && index != ALL_ALERT_INDEX_PATTERN) { throw IllegalArgumentException("Invalid index name.") } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt b/alerting/src/main/kotlin/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt new file mode 100644 index 000000000..e131c047a --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt @@ -0,0 +1,44 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.script + +import org.opensearch.alerting.model.Alert +import org.opensearch.alerting.model.DocumentLevelTrigger +import org.opensearch.alerting.model.Monitor +import java.time.Instant + +data class DocumentLevelTriggerExecutionContext( + override val monitor: Monitor, + val trigger: DocumentLevelTrigger, + override val results: List<Map<String, Any>>, + override val periodStart: Instant, + override val periodEnd: Instant, + val alert: Alert? = null, + val triggeredDocs: List<String>, + val relatedFindings: List<String>, + override val error: Exception? = null ) : TriggerExecutionContext(monitor, results, periodStart, periodEnd, error) { + + constructor( + monitor: Monitor, + trigger: DocumentLevelTrigger, + alert: Alert? = null + ) : this( + monitor, trigger, emptyList(), Instant.now(), Instant.now(), + alert, emptyList(), emptyList(), null + ) + + /** + * Mustache templates need special permissions to reflectively introspect field names. To avoid doing this we + * translate the context to a Map of Strings to primitive types, which can be accessed without reflection. + */ + override fun asTemplateArg(): Map<String, Any> { + val tempArg = super.asTemplateArg().toMutableMap() + tempArg["trigger"] = trigger.asTemplateArg() + tempArg["alert"] = alert?.asTemplateArg() + return tempArg + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/settings/AlertingSettings.kt b/alerting/src/main/kotlin/org/opensearch/alerting/settings/AlertingSettings.kt index 71f1bae0c..1268703c9 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/settings/AlertingSettings.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/settings/AlertingSettings.kt @@ -7,6 +7,8 @@ package org.opensearch.alerting.settings import org.opensearch.alerting.AlertingPlugin import org.opensearch.common.settings.Setting +import org.opensearch.common.unit.TimeValue +import java.util.concurrent.TimeUnit /** * settings specific to [AlertingPlugin]. These settings include things like history index max age, request timeout, etc... @@ -73,30 +75,63 @@ class AlertingSettings { Setting.Property.NodeScope, Setting.Property.Dynamic ) + // TODO: Do we want to let users disable this?
If so, we need to fix the rollover logic + // such that the main index is findings and rolls over to the finding history index + val FINDING_HISTORY_ENABLED = Setting.boolSetting( + "plugins.alerting.alert_finding_enabled", + true, + Setting.Property.NodeScope, Setting.Property.Dynamic + ) + val ALERT_HISTORY_ROLLOVER_PERIOD = Setting.positiveTimeSetting( "plugins.alerting.alert_history_rollover_period", LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD, Setting.Property.NodeScope, Setting.Property.Dynamic ) + val FINDING_HISTORY_ROLLOVER_PERIOD = Setting.positiveTimeSetting( + "plugins.alerting.alert_finding_rollover_period", + TimeValue.timeValueHours(12), + Setting.Property.NodeScope, Setting.Property.Dynamic + ) + val ALERT_HISTORY_INDEX_MAX_AGE = Setting.positiveTimeSetting( "plugins.alerting.alert_history_max_age", LegacyOpenDistroAlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE, Setting.Property.NodeScope, Setting.Property.Dynamic ) + val FINDING_HISTORY_INDEX_MAX_AGE = Setting.positiveTimeSetting( + "plugins.alerting.finding_history_max_age", + TimeValue(30, TimeUnit.DAYS), + Setting.Property.NodeScope, Setting.Property.Dynamic + ) + val ALERT_HISTORY_MAX_DOCS = Setting.longSetting( "plugins.alerting.alert_history_max_docs", LegacyOpenDistroAlertingSettings.ALERT_HISTORY_MAX_DOCS, Setting.Property.NodeScope, Setting.Property.Dynamic ) + val FINDING_HISTORY_MAX_DOCS = Setting.longSetting( + "plugins.alerting.alert_finding_max_docs", + 1000L, + 0L, + Setting.Property.NodeScope, Setting.Property.Dynamic, Setting.Property.Deprecated + ) + val ALERT_HISTORY_RETENTION_PERIOD = Setting.positiveTimeSetting( "plugins.alerting.alert_history_retention_period", LegacyOpenDistroAlertingSettings.ALERT_HISTORY_RETENTION_PERIOD, Setting.Property.NodeScope, Setting.Property.Dynamic ) + val FINDING_HISTORY_RETENTION_PERIOD = Setting.positiveTimeSetting( + "plugins.alerting.finding_history_retention_period", + TimeValue(60, TimeUnit.DAYS), + Setting.Property.NodeScope, Setting.Property.Dynamic + ) + val REQUEST_TIMEOUT = Setting.positiveTimeSetting( "plugins.alerting.request_timeout", LegacyOpenDistroAlertingSettings.REQUEST_TIMEOUT, diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt b/alerting/src/main/kotlin/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt index be8a7d437..387b6cec9 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt @@ -99,6 +99,12 @@ class LegacyOpenDistroAlertingSettings { Setting.Property.NodeScope, Setting.Property.Dynamic, Setting.Property.Deprecated ) + val ALERT_FINDING_RETENTION_PERIOD = Setting.positiveTimeSetting( + "opendistro.alerting.alert_finding_retention_period", + TimeValue(60, TimeUnit.DAYS), + Setting.Property.NodeScope, Setting.Property.Dynamic, Setting.Property.Deprecated + ) + val REQUEST_TIMEOUT = Setting.positiveTimeSetting( "opendistro.alerting.request_timeout", TimeValue.timeValueSeconds(10), diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt index 1dbce69fd..520257537 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt +++ 
b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt @@ -9,6 +9,8 @@ import org.apache.logging.log4j.LogManager import org.opensearch.action.ActionListener import org.opensearch.action.bulk.BulkRequest import org.opensearch.action.bulk.BulkResponse +import org.opensearch.action.delete.DeleteRequest +import org.opensearch.action.index.IndexRequest import org.opensearch.action.search.SearchRequest import org.opensearch.action.search.SearchResponse import org.opensearch.action.support.ActionFilters @@ -20,9 +22,12 @@ import org.opensearch.alerting.action.AcknowledgeAlertResponse import org.opensearch.alerting.alerts.AlertIndices import org.opensearch.alerting.model.Alert import org.opensearch.alerting.opensearchapi.optionalTimeField +import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.util.AlertingException import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.LoggingDeprecationHandler import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentFactory @@ -41,12 +46,20 @@ private val log = LogManager.getLogger(TransportAcknowledgeAlertAction::class.ja class TransportAcknowledgeAlertAction @Inject constructor( transportService: TransportService, val client: Client, + clusterService: ClusterService, actionFilters: ActionFilters, + val settings: Settings, val xContentRegistry: NamedXContentRegistry ) : HandledTransportAction( AcknowledgeAlertAction.NAME, transportService, actionFilters, ::AcknowledgeAlertRequest ) { + @Volatile private var isAlertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings) + + init { + clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.ALERT_HISTORY_ENABLED) { isAlertHistoryEnabled = it } + } + override fun doExecute(task: Task, request: AcknowledgeAlertRequest, actionListener: ActionListener) { client.threadPool().threadContext.stashContext().use { AcknowledgeHandler(client, actionListener, request).start() @@ -92,7 +105,9 @@ class TransportAcknowledgeAlertAction @Inject constructor( } private fun onSearchResponse(response: SearchResponse) { - val updateRequests = response.hits.flatMap { hit -> + val updateRequests = mutableListOf() + val copyRequests = mutableListOf() + response.hits.forEach { hit -> val xcp = XContentHelper.createParser( xContentRegistry, LoggingDeprecationHandler.INSTANCE, hit.sourceRef, XContentType.JSON @@ -100,9 +115,10 @@ class TransportAcknowledgeAlertAction @Inject constructor( XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) val alert = Alert.parse(xcp, hit.id, hit.version) alerts[alert.id] = alert + if (alert.state == Alert.State.ACTIVE) { - listOf( - UpdateRequest(AlertIndices.ALERT_INDEX, hit.id) + if (alert.findingIds.isEmpty() || !isAlertHistoryEnabled) { + val updateRequest = UpdateRequest(AlertIndices.ALERT_INDEX, alert.id) .routing(request.monitorId) .setIfSeqNo(hit.seqNo) .setIfPrimaryTerm(hit.primaryTerm) @@ -112,41 +128,48 @@ class TransportAcknowledgeAlertAction @Inject constructor( .optionalTimeField(Alert.ACKNOWLEDGED_TIME_FIELD, Instant.now()) .endObject() ) - ) - } else { - emptyList() + updateRequests.add(updateRequest) + } else { + val copyRequest = IndexRequest(AlertIndices.ALERT_HISTORY_WRITE_INDEX) + .routing(request.monitorId) + .id(alert.id) + 
.source( + alert.copy(state = Alert.State.ACKNOWLEDGED, acknowledgedTime = Instant.now()) + .toXContentWithUser(XContentFactory.jsonBuilder()) + ) + copyRequests.add(copyRequest) + } } } - log.info("Acknowledging monitor: $request.monitorId, alerts: ${updateRequests.map { it.id() }}") - val bulkRequest = BulkRequest().add(updateRequests).setRefreshPolicy(request.refreshPolicy) - client.bulk( - bulkRequest, - object : ActionListener { - override fun onResponse(response: BulkResponse) { - onBulkResponse(response) - } - - override fun onFailure(t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - ) + try { + val updateResponse = if (updateRequests.isNotEmpty()) + client.bulk(BulkRequest().add(updateRequests).setRefreshPolicy(request.refreshPolicy)).actionGet() + else null + val copyResponse = if (copyRequests.isNotEmpty()) + client.bulk(BulkRequest().add(copyRequests).setRefreshPolicy(request.refreshPolicy)).actionGet() + else null + onBulkResponse(updateResponse, copyResponse) + } catch (t: Exception) { + log.error("ack error: ${t.message}") + actionListener.onFailure(AlertingException.wrap(t)) + } } - private fun onBulkResponse(response: BulkResponse) { + private fun onBulkResponse(updateResponse: BulkResponse?, copyResponse: BulkResponse?) { + val deleteRequests = mutableListOf() val missing = request.alertIds.toMutableSet() val acknowledged = mutableListOf() val failed = mutableListOf() - // First handle all alerts that aren't currently ACTIVE. These can't be acknowledged. + alerts.values.forEach { if (it.state != Alert.State.ACTIVE) { missing.remove(it.id) failed.add(it) } } - // Now handle all alerts we tried to acknowledge... - response.items.forEach { item -> + + updateResponse?.items?.forEach { item -> missing.remove(item.id) if (item.isFailed) { failed.add(alerts[item.id]!!) @@ -154,6 +177,36 @@ class TransportAcknowledgeAlertAction @Inject constructor( acknowledged.add(alerts[item.id]!!) } } + + copyResponse?.items?.forEach { item -> + log.info("got a copyResponse: $item") + missing.remove(item.id) + if (item.isFailed) { + log.info("got a failureResponse: ${item.failureMessage}") + failed.add(alerts[item.id]!!) + } else { + val deleteRequest = DeleteRequest(AlertIndices.ALERT_INDEX, item.id) + .routing(request.monitorId) + deleteRequests.add(deleteRequest) + } + } + + if (deleteRequests.isNotEmpty()) { + try { + val deleteResponse = client.bulk(BulkRequest().add(deleteRequests).setRefreshPolicy(request.refreshPolicy)).actionGet() + deleteResponse.items.forEach { item -> + missing.remove(item.id) + if (item.isFailed) { + failed.add(alerts[item.id]!!) + } else { + acknowledged.add(alerts[item.id]!!) 
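// Note: only alerts updated in place are counted as acknowledged at this point. Alerts that
// carried findings were copied to the history index instead, and are acknowledged further below,
// once the copy and the follow-up delete from the active alert index have both succeeded.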
+ } + } + } catch (t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + return + } + } actionListener.onResponse(AcknowledgeAlertResponse(acknowledged.toList(), failed.toList(), missing.toList())) } } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt index 606795f22..b52d5817a 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt @@ -29,6 +29,10 @@ import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentHelper import org.opensearch.common.xcontent.XContentType import org.opensearch.commons.authuser.User +import org.opensearch.index.query.QueryBuilders +import org.opensearch.index.reindex.BulkByScrollResponse +import org.opensearch.index.reindex.DeleteByQueryAction +import org.opensearch.index.reindex.DeleteByQueryRequestBuilder import org.opensearch.rest.RestStatus import org.opensearch.tasks.Task import org.opensearch.transport.TransportService @@ -132,6 +136,10 @@ class TransportDeleteMonitorAction @Inject constructor( deleteRequest, object : ActionListener { override fun onResponse(response: DeleteResponse) { + val clusterState = clusterService.state() + if (clusterState.routingTable.hasIndex(ScheduledJob.DOC_LEVEL_QUERIES_INDEX)) { + deleteDocLevelMonitorQueries() + } actionListener.onResponse(response) } @@ -141,5 +149,20 @@ class TransportDeleteMonitorAction @Inject constructor( } ) } + + private fun deleteDocLevelMonitorQueries() { + DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) + .source(ScheduledJob.DOC_LEVEL_QUERIES_INDEX) + .filter(QueryBuilders.matchQuery("monitor_id", monitorId)) + .execute( + object : ActionListener { + override fun onResponse(response: BulkByScrollResponse) { + } + + override fun onFailure(t: Exception) { + } + } + ) + } } } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt index 9bf093d32..d5a52495f 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt @@ -11,20 +11,26 @@ import kotlinx.coroutines.withContext import org.apache.logging.log4j.LogManager import org.opensearch.OpenSearchStatusException import org.opensearch.action.ActionListener +import org.opensearch.action.admin.indices.create.CreateIndexResponse +import org.opensearch.action.bulk.BulkResponse import org.opensearch.action.get.GetRequest import org.opensearch.action.get.GetResponse import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.MonitorRunner +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.MonitorRunnerService import org.opensearch.alerting.action.ExecuteMonitorAction import org.opensearch.alerting.action.ExecuteMonitorRequest import org.opensearch.alerting.action.ExecuteMonitorResponse import org.opensearch.alerting.core.model.ScheduledJob import org.opensearch.alerting.model.Monitor +import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.util.AlertingException 
-import org.opensearch.alerting.util.isBucketLevelMonitor +import org.opensearch.alerting.util.DocLevelMonitorQueries import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.LoggingDeprecationHandler import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentHelper @@ -41,12 +47,16 @@ private val log = LogManager.getLogger(TransportGetMonitorAction::class.java) class TransportExecuteMonitorAction @Inject constructor( transportService: TransportService, private val client: Client, - private val runner: MonitorRunner, + private val clusterService: ClusterService, + private val runner: MonitorRunnerService, actionFilters: ActionFilters, - val xContentRegistry: NamedXContentRegistry + val xContentRegistry: NamedXContentRegistry, + private val docLevelMonitorQueries: DocLevelMonitorQueries, + private val settings: Settings ) : HandledTransportAction ( ExecuteMonitorAction.NAME, transportService, actionFilters, ::ExecuteMonitorRequest ) { + @Volatile private var indexTimeout = AlertingSettings.INDEX_TIMEOUT.get(settings) override fun doExecute(task: Task, execMonitorRequest: ExecuteMonitorRequest, actionListener: ActionListener) { @@ -63,11 +73,7 @@ class TransportExecuteMonitorAction @Inject constructor( val (periodStart, periodEnd) = monitor.schedule.getPeriodEndingAt(Instant.ofEpochMilli(execMonitorRequest.requestEnd.millis)) try { - val monitorRunResult = if (monitor.isBucketLevelMonitor()) { - runner.runBucketLevelMonitor(monitor, periodStart, periodEnd, execMonitorRequest.dryrun) - } else { - runner.runQueryLevelMonitor(monitor, periodStart, periodEnd, execMonitorRequest.dryrun) - } + val monitorRunResult = runner.runJob(monitor, periodStart, periodEnd, execMonitorRequest.dryrun) withContext(Dispatchers.IO) { actionListener.onResponse(ExecuteMonitorResponse(monitorRunResult)) } @@ -115,7 +121,61 @@ class TransportExecuteMonitorAction @Inject constructor( true -> execMonitorRequest.monitor as Monitor false -> (execMonitorRequest.monitor as Monitor).copy(user = user) } - executeMonitor(monitor) + + if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { + if (!docLevelMonitorQueries.docLevelQueryIndexExists()) { + docLevelMonitorQueries.initDocLevelQueryIndex(object : ActionListener { + override fun onResponse(response: CreateIndexResponse) { + log.info("Central Percolation index ${ScheduledJob.DOC_LEVEL_QUERIES_INDEX} created") + docLevelMonitorQueries.indexDocLevelQueries( + client, + monitor, + monitor.id, + WriteRequest.RefreshPolicy.IMMEDIATE, + indexTimeout, + null, + actionListener, + object : ActionListener { + override fun onResponse(response: BulkResponse) { + log.info("Queries inserted into Percolate index ${ScheduledJob.DOC_LEVEL_QUERIES_INDEX}") + executeMonitor(monitor) + } + + override fun onFailure(t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } + + override fun onFailure(t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + }) + } else { + docLevelMonitorQueries.indexDocLevelQueries( + client, + monitor, + monitor.id, + WriteRequest.RefreshPolicy.IMMEDIATE, + indexTimeout, + null, + actionListener, + object : ActionListener { + override fun onResponse(response: BulkResponse) { + log.info("Queries inserted into Percolate index ${ScheduledJob.DOC_LEVEL_QUERIES_INDEX}") + executeMonitor(monitor) + } + 
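// Gate: executeMonitor(monitor) above runs only after the doc-level queries are searchable in
// the percolation index; the onFailure path below aborts the execution and surfaces the error.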
+ override fun onFailure(t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } + } else { + executeMonitor(monitor) + } } } } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetAlertsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetAlertsAction.kt index d39fd1100..9c25cb1aa 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetAlertsAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetAlertsAction.kt @@ -135,7 +135,7 @@ class TransportGetAlertsAction @Inject constructor( fun search(searchSourceBuilder: SearchSourceBuilder, actionListener: ActionListener) { val searchRequest = SearchRequest() - .indices(AlertIndices.ALL_INDEX_PATTERN) + .indices(AlertIndices.ALL_ALERT_INDEX_PATTERN) .source(searchSourceBuilder) client.search( diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetFindingsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetFindingsAction.kt new file mode 100644 index 000000000..f91d0efd4 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetFindingsAction.kt @@ -0,0 +1,169 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import org.apache.logging.log4j.LogManager +import org.opensearch.action.ActionListener +import org.opensearch.action.get.MultiGetRequest +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.action.GetFindingsAction +import org.opensearch.alerting.action.GetFindingsRequest +import org.opensearch.alerting.action.GetFindingsResponse +import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_FINDING_INDEX_PATTERN +import org.opensearch.alerting.model.Finding +import org.opensearch.alerting.model.FindingDocument +import org.opensearch.alerting.model.FindingWithDocs +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.util.AlertingException +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.Strings +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.NamedXContentRegistry +import org.opensearch.common.xcontent.XContentFactory +import org.opensearch.common.xcontent.XContentParser +import org.opensearch.common.xcontent.XContentParserUtils +import org.opensearch.common.xcontent.XContentType +import org.opensearch.index.query.Operator +import org.opensearch.index.query.QueryBuilders +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.search.fetch.subphase.FetchSourceContext +import org.opensearch.search.sort.SortBuilders +import org.opensearch.search.sort.SortOrder +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService + +private val log = LogManager.getLogger(TransportGetFindingsSearchAction::class.java) + +class TransportGetFindingsSearchAction @Inject constructor( + transportService: TransportService, + val client: Client, + clusterService: ClusterService, + actionFilters: ActionFilters, + val settings: Settings, + val 
xContentRegistry: NamedXContentRegistry +) : HandledTransportAction<GetFindingsRequest, GetFindingsResponse> ( + GetFindingsAction.NAME, transportService, actionFilters, ::GetFindingsRequest +), + SecureTransportAction { + + @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) + + init { + listenFilterBySettingChange(clusterService) + } + + override fun doExecute( + task: Task, + getFindingsRequest: GetFindingsRequest, + actionListener: ActionListener<GetFindingsResponse> + ) { + val tableProp = getFindingsRequest.table + + val sortBuilder = SortBuilders + .fieldSort(tableProp.sortString) + .order(SortOrder.fromString(tableProp.sortOrder)) + if (!tableProp.missing.isNullOrBlank()) { + sortBuilder.missing(tableProp.missing) + } + + val searchSourceBuilder = SearchSourceBuilder() + .sort(sortBuilder) + .size(tableProp.size) + .from(tableProp.startIndex) + .fetchSource(FetchSourceContext(true, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY)) + .seqNoAndPrimaryTerm(true) + .version(true) + + val queryBuilder = QueryBuilders.boolQuery() + + if (!getFindingsRequest.findingId.isNullOrBlank()) + queryBuilder.filter(QueryBuilders.termQuery("_id", getFindingsRequest.findingId)) + + if (!tableProp.searchString.isNullOrBlank()) { + queryBuilder + .must( + QueryBuilders + .queryStringQuery(tableProp.searchString) + .defaultOperator(Operator.AND) + .field("queries.tags") + .field("queries.name") + ) + } + + searchSourceBuilder.query(queryBuilder) + + client.threadPool().threadContext.stashContext().use { + search(searchSourceBuilder, actionListener) + } + } + + fun search(searchSourceBuilder: SearchSourceBuilder, actionListener: ActionListener<GetFindingsResponse>) { + val searchRequest = SearchRequest() + .source(searchSourceBuilder) + .indices(ALL_FINDING_INDEX_PATTERN) + client.search( + searchRequest, + object : ActionListener<SearchResponse> { + override fun onResponse(response: SearchResponse) { + val totalFindingCount = response.hits.totalHits?.value?.toInt() + val mgetRequest = MultiGetRequest() + val findingsWithDocs = mutableListOf<FindingWithDocs>() + val findings = mutableListOf<Finding>() + for (hit in response.hits) { + val xcp = XContentFactory.xContent(XContentType.JSON) + .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, hit.sourceAsString) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + val finding = Finding.parse(xcp) + findings.add(finding) + val documentIds = finding.relatedDocIds + // Add getRequests to mget request + documentIds.forEach { + docId -> + mgetRequest.add(MultiGetRequest.Item(finding.index, docId)) + } + } + val documents = searchDocument(mgetRequest) + findings.forEach { + val documentIds = it.relatedDocIds + val relatedDocs = mutableListOf<FindingDocument>() + for (docId in documentIds) { + val key = "${it.index}|$docId" + documents[key]?.let { document -> relatedDocs.add(document) } + } + findingsWithDocs.add(FindingWithDocs(it, relatedDocs)) + } + actionListener.onResponse(GetFindingsResponse(response.status(), totalFindingCount, findingsWithDocs)) + } + + override fun onFailure(t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } + + // TODO: Verify what happens if indices are closed/deleted + fun searchDocument( + mgetRequest: MultiGetRequest + ): Map<String, FindingDocument> { + val response = client.multiGet(mgetRequest).actionGet() + val documents: MutableMap<String, FindingDocument> = mutableMapOf() + response.responses.forEach { + val key = "${it.index}|${it.id}" + val docData = if (it.isFailed) "" else it.response.sourceAsString + val findingDocument = FindingDocument(it.index, it.id, !it.isFailed, docData)
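// Failed mget items still yield a placeholder FindingDocument (found = false, empty payload),
// so a finding's related_doc_ids stay enumerable even when a source document has been deleted.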
+ documents[key] = findingDocument + } + + return documents + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt index 50fde1514..246ef3439 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt @@ -10,6 +10,7 @@ import org.opensearch.OpenSearchSecurityException import org.opensearch.OpenSearchStatusException import org.opensearch.action.ActionListener import org.opensearch.action.admin.indices.create.CreateIndexResponse +import org.opensearch.action.bulk.BulkResponse import org.opensearch.action.get.GetRequest import org.opensearch.action.get.GetResponse import org.opensearch.action.index.IndexRequest @@ -18,11 +19,14 @@ import org.opensearch.action.search.SearchRequest import org.opensearch.action.search.SearchResponse import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction +import org.opensearch.action.support.WriteRequest.RefreshPolicy import org.opensearch.action.support.master.AcknowledgedResponse +import org.opensearch.alerting.DocumentReturningMonitorRunner import org.opensearch.alerting.action.IndexMonitorAction import org.opensearch.alerting.action.IndexMonitorRequest import org.opensearch.alerting.action.IndexMonitorResponse import org.opensearch.alerting.core.ScheduledJobIndices +import org.opensearch.alerting.core.model.DocLevelMonitorInput import org.opensearch.alerting.core.model.ScheduledJob import org.opensearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX import org.opensearch.alerting.core.model.SearchInput @@ -34,6 +38,7 @@ import org.opensearch.alerting.settings.AlertingSettings.Companion.MAX_ACTION_TH import org.opensearch.alerting.settings.AlertingSettings.Companion.REQUEST_TIMEOUT import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.DocLevelMonitorQueries import org.opensearch.alerting.util.IndexUtils import org.opensearch.alerting.util.addUserBackendRolesFilter import org.opensearch.alerting.util.isADMonitor @@ -50,6 +55,9 @@ import org.opensearch.common.xcontent.XContentHelper import org.opensearch.common.xcontent.XContentType import org.opensearch.commons.authuser.User import org.opensearch.index.query.QueryBuilders +import org.opensearch.index.reindex.BulkByScrollResponse +import org.opensearch.index.reindex.DeleteByQueryAction +import org.opensearch.index.reindex.DeleteByQueryRequestBuilder import org.opensearch.rest.RestRequest import org.opensearch.rest.RestStatus import org.opensearch.search.builder.SearchSourceBuilder @@ -65,6 +73,7 @@ class TransportIndexMonitorAction @Inject constructor( val client: Client, actionFilters: ActionFilters, val scheduledJobIndices: ScheduledJobIndices, + val docLevelMonitorQueries: DocLevelMonitorQueries, val clusterService: ClusterService, val settings: Settings, val xContentRegistry: NamedXContentRegistry @@ -115,6 +124,7 @@ class TransportIndexMonitorAction @Inject constructor( user: User? ) { val indices = mutableListOf() + // todo: for doc level alerting: check if index is present before monitor is created. 
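A minimal sketch of the check that TODO describes, for illustration only (it is not part of this patch; it assumes request, clusterService, actionListener and AlertingException are in scope here, as they are elsewhere in this class):

val docLevelIndices = request.monitor.inputs
    .filterIsInstance<DocLevelMonitorInput>()
    .flatMap { it.indices }
// Fail fast when any source index for a doc-level monitor does not exist yet.
val absent = docLevelIndices.filter { !clusterService.state().routingTable.hasIndex(it) }
if (absent.isNotEmpty()) {
    actionListener.onFailure(
        AlertingException.wrap(
            OpenSearchStatusException("Indices not found: $absent", RestStatus.NOT_FOUND)
        )
    )
    return
}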
val searchInputs = request.monitor.inputs.filter { it.name() == SearchInput.SEARCH_FIELD } searchInputs.forEach { val searchInput = it as SearchInput @@ -369,6 +379,11 @@ class TransportIndexMonitorAction @Inject constructor( } private fun indexMonitor() { + if (request.monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { + val monitorIndex = (request.monitor.inputs[0] as DocLevelMonitorInput).indices[0] + val lastRunContext = DocumentReturningMonitorRunner.createRunContext(clusterService, client, monitorIndex).toMutableMap() + request.monitor = request.monitor.copy(lastRunContext = lastRunContext) + } request.monitor = request.monitor.copy(schemaVersion = IndexUtils.scheduledJobIndexSchemaVersion) val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX) .setRefreshPolicy(request.refreshPolicy) @@ -387,6 +402,11 @@ class TransportIndexMonitorAction @Inject constructor( ) return } + + if (request.monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { + indexDocLevelMonitorQueries(request.monitor, response.id, request.refreshPolicy) + } + actionListener.onResponse( IndexMonitorResponse( response.id, response.version, response.seqNo, @@ -401,6 +421,58 @@ class TransportIndexMonitorAction @Inject constructor( ) } + @Suppress("UNCHECKED_CAST") + private fun indexDocLevelMonitorQueries(monitor: Monitor, monitorId: String, refreshPolicy: RefreshPolicy) { + if (!docLevelMonitorQueries.docLevelQueryIndexExists()) { + docLevelMonitorQueries.initDocLevelQueryIndex(object : ActionListener { + override fun onResponse(response: CreateIndexResponse) { + log.info("Central Percolation index ${ScheduledJob.DOC_LEVEL_QUERIES_INDEX} created") + docLevelMonitorQueries.indexDocLevelQueries( + client, + monitor, + monitorId, + refreshPolicy, + indexTimeout, + actionListener, + null, + object : ActionListener { + override fun onResponse(response: BulkResponse) { + log.info("Queries inserted into Percolate index ${ScheduledJob.DOC_LEVEL_QUERIES_INDEX}") + } + + override fun onFailure(t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } + + override fun onFailure(t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + }) + } else { + docLevelMonitorQueries.indexDocLevelQueries( + client, + monitor, + monitorId, + refreshPolicy, + indexTimeout, + actionListener, + null, + object : ActionListener { + override fun onResponse(response: BulkResponse) { + log.info("Queries inserted into Percolate index ${ScheduledJob.DOC_LEVEL_QUERIES_INDEX}") + } + + override fun onFailure(t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } + } + private fun updateMonitor() { val getRequest = GetRequest(SCHEDULED_JOBS_INDEX, request.monitorId) client.get( @@ -434,6 +506,15 @@ class TransportIndexMonitorAction @Inject constructor( return } + if ( + request.monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR && + request.monitor.lastRunContext.toMutableMap().isNullOrEmpty() + ) { + val monitorIndex = (request.monitor.inputs[0] as DocLevelMonitorInput).indices[0] + val lastRunContext = DocumentReturningMonitorRunner.createRunContext(clusterService, client, monitorIndex).toMutableMap() + request.monitor = request.monitor.copy(lastRunContext = lastRunContext) + } + // If both are enabled, use the current existing monitor enabled time, otherwise the next execution will be // incorrect. 
if (request.monitor.enabled && currentMonitor.enabled) @@ -459,6 +540,22 @@ class TransportIndexMonitorAction @Inject constructor( ) return } + + if (currentMonitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { + DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) + .source(ScheduledJob.DOC_LEVEL_QUERIES_INDEX) + .filter(QueryBuilders.matchQuery("monitor_id", currentMonitor.id)) + .execute( + object : ActionListener { + override fun onResponse(response: BulkByScrollResponse) { + indexDocLevelMonitorQueries(request.monitor, currentMonitor.id, request.refreshPolicy) + } + + override fun onFailure(t: Exception) { + } + } + ) + } actionListener.onResponse( IndexMonitorResponse( response.id, response.version, response.seqNo, diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/parsers/ExpressionParser.kt b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/parsers/ExpressionParser.kt new file mode 100644 index 000000000..c0e215000 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/parsers/ExpressionParser.kt @@ -0,0 +1,12 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggercondition.parsers + +import org.opensearch.alerting.triggercondition.resolvers.TriggerExpressionResolver + +interface ExpressionParser { + fun parse(): TriggerExpressionResolver +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionParser.kt b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionParser.kt new file mode 100644 index 000000000..835e9b383 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionParser.kt @@ -0,0 +1,53 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggercondition.parsers + +import org.opensearch.alerting.triggercondition.resolvers.TriggerExpressionRPNResolver +import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionOperator + +/** + * The postfix (Reverse Polish Notation) parser. 
* Uses the Shunting-yard algorithm to parse a mathematical expression. + * @param triggerExpression String containing the trigger expression for the monitor + */ +class TriggerExpressionParser( + triggerExpression: String +) : TriggerExpressionRPNBaseParser(triggerExpression) { + + override fun parse(): TriggerExpressionRPNResolver { + val expression = expressionToParse.replace(" ", "") + + val splitters = ArrayList<String>() + TriggerExpressionOperator.values().forEach { splitters.add(it.value) } + + val breaks = ArrayList<String>().apply { add(expression) } + for (s in splitters) { + val a = ArrayList<String>() + for (ind in 0 until breaks.size) { + breaks[ind].let { + if (it.length > 1) { + a.addAll(breakString(breaks[ind], s)) + } else a.add(it) + } + } + breaks.clear() + breaks.addAll(a) + } + + return TriggerExpressionRPNResolver(convertInfixToPostfix(breaks)) + } + + private fun breakString(input: String, delimiter: String): ArrayList<String> { + val tokens = input.split(delimiter) + val array = ArrayList<String>() + for (t in tokens) { + array.add(t) + array.add(delimiter) + } + array.removeAt(array.size - 1) + return array + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionRPNBaseParser.kt b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionRPNBaseParser.kt new file mode 100644 index 000000000..6dd6bfc36 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionRPNBaseParser.kt @@ -0,0 +1,114 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggercondition.parsers + +import org.opensearch.alerting.triggercondition.tokens.ExpressionToken +import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionConstant +import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionOperator +import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionToken +import java.util.Stack + +/** + * This is the abstract base class which holds the trigger expression parsing logic, + * converting the expression from infix to postfix, a.k.a. Reverse Polish Notation (RPN), form. + * It uses the Shunting-Yard algorithm to parse the given trigger expression.
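* For example, the infix expression "query[name=sev1] && query[tag=security]" is tokenized and
* converted to the postfix sequence [query[name=sev1], query[tag=security], &&] before evaluation.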
* + * @param expressionToParse Complete string containing the trigger expression + */ +abstract class TriggerExpressionRPNBaseParser( + protected val expressionToParse: String +) : ExpressionParser { + /** + * To perform the Infix-to-postfix conversion of the trigger expression + */ + protected fun convertInfixToPostfix(expTokens: List<String>): ArrayList<ExpressionToken> { + val expTokenStack = Stack<ExpressionToken>() + val outputExpTokens = ArrayList<ExpressionToken>() + + for (tokenString in expTokens) { + if (tokenString.isEmpty()) continue + when (val expToken = assignToken(tokenString)) { + is TriggerExpressionToken -> outputExpTokens.add(expToken) + is TriggerExpressionOperator -> { + when (expToken) { + TriggerExpressionOperator.PAR_LEFT -> expTokenStack.push(expToken) + TriggerExpressionOperator.PAR_RIGHT -> { + var topExpToken = expTokenStack.popExpTokenOrNull<TriggerExpressionOperator>() + while (topExpToken != null && topExpToken != TriggerExpressionOperator.PAR_LEFT) { + outputExpTokens.add(topExpToken) + topExpToken = expTokenStack.popExpTokenOrNull<TriggerExpressionOperator>() + } + if (topExpToken != TriggerExpressionOperator.PAR_LEFT) + throw java.lang.IllegalArgumentException("No matching left parenthesis.") + } + else -> { + var op2 = expTokenStack.peekExpTokenOrNull<TriggerExpressionOperator>() + while (op2 != null) { + val c = expToken.precedence.compareTo(op2.precedence) + if (c < 0 || !expToken.rightAssociative && c <= 0) { + outputExpTokens.add(expTokenStack.pop()) + } else { + break + } + op2 = expTokenStack.peekExpTokenOrNull<TriggerExpressionOperator>() + } + expTokenStack.push(expToken) + } + } + } + } + } + + while (!expTokenStack.isEmpty()) { + expTokenStack.peekExpTokenOrNull<TriggerExpressionOperator>()?.let { + if (it == TriggerExpressionOperator.PAR_LEFT) + throw java.lang.IllegalArgumentException("No matching right parenthesis.") + } + val top = expTokenStack.pop() + outputExpTokens.add(top) + } + + return outputExpTokens + } + + /** + * Looks up and maps the expression token that matches the string version of that expression unit + */ + private fun assignToken(tokenString: String): ExpressionToken { + + // Check "query" string in trigger expression such as in 'query[name="abc"]' + if (tokenString.startsWith(TriggerExpressionConstant.ConstantType.QUERY.ident)) + return TriggerExpressionToken(tokenString) + + // Check operators in trigger expression such as in [&&, ||, !] + for (op in TriggerExpressionOperator.values()) { + if (op.value == tokenString) return op + } + + // Check any constants in trigger expression such as in ["name", "id", "tag", "[", "]", "="] + for (con in TriggerExpressionConstant.ConstantType.values()) { + if (tokenString == con.ident) return TriggerExpressionConstant(con) + } + + throw IllegalArgumentException("Error while processing the trigger expression '$tokenString'") + } + + private inline fun <reified T> Stack<ExpressionToken>.popExpTokenOrNull(): T? { + return try { + pop() as T + } catch (e: java.lang.Exception) { + null + } + } + + private inline fun <reified T> Stack<ExpressionToken>.peekExpTokenOrNull(): T?
{ + return try { + peek() as T + } catch (e: java.lang.Exception) { + null + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpression.kt b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpression.kt new file mode 100644 index 000000000..2a3e6c1ff --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpression.kt @@ -0,0 +1,32 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggercondition.resolvers + +sealed class TriggerExpression { + + fun resolve(): Set<String> = when (this) { + is And -> resolveAnd(docSet1, docSet2) + is Or -> resolveOr(docSet1, docSet2) + is Not -> resolveNot(allDocs, docSet2) + } + + private fun resolveAnd(documentSet1: Set<String>, documentSet2: Set<String>): Set<String> { + return documentSet1.intersect(documentSet2) + } + + private fun resolveOr(documentSet1: Set<String>, documentSet2: Set<String>): Set<String> { + return documentSet1.union(documentSet2) + } + + private fun resolveNot(allDocs: Set<String>, documentSet2: Set<String>): Set<String> { + return allDocs.subtract(documentSet2) + } + + // The supported operators, modeled as concrete expression types over document id sets + class And(val docSet1: Set<String>, val docSet2: Set<String>) : TriggerExpression() + class Or(val docSet1: Set<String>, val docSet2: Set<String>) : TriggerExpression() + class Not(val allDocs: Set<String>, val docSet2: Set<String>) : TriggerExpression() +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt new file mode 100644 index 000000000..749214048 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt @@ -0,0 +1,103 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggercondition.resolvers + +import org.opensearch.alerting.core.model.DocLevelQuery +import org.opensearch.alerting.triggercondition.tokens.ExpressionToken +import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionConstant +import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionOperator +import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionToken +import java.util.Optional +import java.util.Stack + +/** + * Solves the Trigger Expression using the Reverse Polish Notation (RPN) based solver + * @param polishNotation an array of expression tokens organized in the RPN order + */ +class TriggerExpressionRPNResolver( + private val polishNotation: ArrayList<ExpressionToken> +) : TriggerExpressionResolver { + + private val eqString by lazy { + val stringBuilder = StringBuilder() + for (expToken in polishNotation) { + when (expToken) { + is TriggerExpressionToken -> stringBuilder.append(expToken.value) + is TriggerExpressionOperator -> stringBuilder.append(expToken.value) + is TriggerExpressionConstant -> stringBuilder.append(expToken.type.ident) + else -> throw Exception() + } + stringBuilder.append(" ") + } + stringBuilder.toString() + } + + override fun toString(): String = eqString + + /** + * Evaluates the trigger expression provided in the form of the RPN token array.
* @param queryToDocIds Map holding the resultant document ids per query + * @return the final evaluated set of document ids + */ + override fun evaluate(queryToDocIds: Map<DocLevelQuery, Set<String>>): Set<String> { + val tokenStack = Stack<Set<String>>() + + val allDocIds = mutableSetOf<String>() + for (value in queryToDocIds.values) { + allDocIds.addAll(value) + } + + for (expToken in polishNotation) { + when (expToken) { + is TriggerExpressionToken -> tokenStack.push(resolveQueryExpression(expToken.value, queryToDocIds)) + is TriggerExpressionOperator -> { + val right = tokenStack.pop() + val expr = when (expToken) { + TriggerExpressionOperator.AND -> TriggerExpression.And(tokenStack.pop(), right) + TriggerExpressionOperator.OR -> TriggerExpression.Or(tokenStack.pop(), right) + TriggerExpressionOperator.NOT -> TriggerExpression.Not(allDocIds, right) + else -> throw IllegalArgumentException("No matching operator.") + } + tokenStack.push(expr.resolve()) + } + } + } + return tokenStack.pop() + } + + private fun resolveQueryExpression(queryExpString: String, queryToDocIds: Map<DocLevelQuery, Set<String>>): Set<String> { + if (!queryExpString.startsWith(TriggerExpressionConstant.ConstantType.QUERY.ident)) return emptySet() + val token = queryExpString.substringAfter(TriggerExpressionConstant.ConstantType.BRACKET_LEFT.ident) + .substringBefore(TriggerExpressionConstant.ConstantType.BRACKET_RIGHT.ident) + if (token.isEmpty()) return emptySet() + + val tokens = token.split(TriggerExpressionConstant.ConstantType.EQUALS.ident) + if (tokens.isEmpty() || tokens.size != 2) return emptySet() + + val identifier = tokens[0] + val value = tokens[1] + val documents = mutableSetOf<String>() + when (identifier) { + TriggerExpressionConstant.ConstantType.NAME.ident -> { + val key: Optional<DocLevelQuery> = queryToDocIds.keys.stream().filter { it.name == value }.findFirst() + if (key.isPresent) queryToDocIds[key.get()]?.let { doc -> documents.addAll(doc) } + } + + TriggerExpressionConstant.ConstantType.ID.ident -> { + val key: Optional<DocLevelQuery> = queryToDocIds.keys.stream().filter { it.id == value }.findFirst() + if (key.isPresent) queryToDocIds[key.get()]?.let { doc -> documents.addAll(doc) } + } + + // Iterate through all the queries with the same Tag + TriggerExpressionConstant.ConstantType.TAG.ident -> { + queryToDocIds.keys.stream().forEach { + if (it.tags.contains(value)) queryToDocIds[it]?.let { it1 -> documents.addAll(it1) } + } + } + } + return documents + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt new file mode 100644 index 000000000..faeabad08 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt @@ -0,0 +1,12 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggercondition.resolvers + +import org.opensearch.alerting.core.model.DocLevelQuery + +interface TriggerExpressionResolver { + fun evaluate(queryToDocIds: Map<DocLevelQuery, Set<String>>): Set<String> +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/ExpressionToken.kt b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/ExpressionToken.kt new file mode 100644 index 000000000..2085bf2d3 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/ExpressionToken.kt @@ -0,0 +1,8 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +
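Taken together, the parser and resolver above turn a trigger condition string into the set of document ids a trigger fires on. A worked sketch for illustration (hypothetical query and document ids; the DocLevelQuery property names id, name, query and tags come from their use elsewhere in this patch, while its constructor order is an assumption, hence the named arguments):

val resolver = TriggerExpressionParser("query[name=sev1] && query[tag=security]").parse()
val queryToDocIds = mapOf(
    DocLevelQuery(id = "q1", name = "sev1", query = "severity:1", tags = listOf("security")) to setOf("doc1", "doc2"),
    DocLevelQuery(id = "q2", name = "sev2", query = "severity:2", tags = listOf("security")) to setOf("doc2", "doc3")
)
// name=sev1 resolves to {doc1, doc2}; tag=security unions both queries into {doc1, doc2, doc3};
// "&&" intersects the two operand sets, so the trigger fires on {doc1, doc2}.
val triggeredDocIds: Set<String> = resolver.evaluate(queryToDocIds)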
+package org.opensearch.alerting.triggercondition.tokens + +interface ExpressionToken diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionConstant.kt b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionConstant.kt new file mode 100644 index 000000000..80e662a21 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionConstant.kt @@ -0,0 +1,26 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggercondition.tokens + +/** + * To define all the tokens which could be part of an expression constant such as query[id=new_id], query[name=new_name], + * query[tag=new_tag] + */ +class TriggerExpressionConstant(val type: ConstantType) : ExpressionToken { + + enum class ConstantType(val ident: String) { + QUERY("query"), + + TAG("tag"), + NAME("name"), + ID("id"), + + BRACKET_LEFT("["), + BRACKET_RIGHT("]"), + + EQUALS("=") + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionOperator.kt b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionOperator.kt new file mode 100644 index 000000000..de3c4a0df --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionOperator.kt @@ -0,0 +1,20 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggercondition.tokens + +/** + * To define all the operators used in the trigger expression + */ +enum class TriggerExpressionOperator(val value: String, val precedence: Int, val rightAssociative: Boolean) : ExpressionToken { + + AND("&&", 2, false), + OR("||", 2, false), + + NOT("!", 3, true), + + PAR_LEFT("(", 1, false), + PAR_RIGHT(")", 1, false) +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionToken.kt b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionToken.kt new file mode 100644 index 000000000..808f7737d --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionToken.kt @@ -0,0 +1,11 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggercondition.tokens + +/** + * To define the tokens in a Trigger expression such as query[tag="sev1"] or query[name="sev1"] or query[id="sev1"] + */ +internal data class TriggerExpressionToken(val value: String) : ExpressionToken diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/AlertingUtils.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/AlertingUtils.kt index 1e88d5ba8..8772ce704 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/util/AlertingUtils.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/AlertingUtils.kt @@ -6,6 +6,10 @@ package org.opensearch.alerting.util import inet.ipaddr.IPAddressString +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.index.IndexResponse +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.core.model.ScheduledJob import org.opensearch.alerting.destination.message.BaseMessage import org.opensearch.alerting.model.AggregationResultBucket import org.opensearch.alerting.model.BucketLevelTriggerRunResult @@ -13,7 +17,14 @@ import
org.opensearch.alerting.model.Monitor import org.opensearch.alerting.model.action.Action import org.opensearch.alerting.model.action.ActionExecutionPolicy import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.settings.DestinationSettings +import org.opensearch.client.Client +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.NamedXContentRegistry +import org.opensearch.common.xcontent.ToXContent +import org.opensearch.common.xcontent.XContentFactory /** * RFC 5322 compliant pattern matching: https://www.ietf.org/rfc/rfc5322.txt @@ -54,6 +65,8 @@ fun BaseMessage.isHostInDenylist(networks: List): Boolean { fun Monitor.isBucketLevelMonitor(): Boolean = this.monitorType == Monitor.MonitorType.BUCKET_LEVEL_MONITOR +fun Monitor.isDocLevelMonitor(): Boolean = this.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR + /** * Since buckets can have multi-value keys, this converts the bucket key values to a string that can be used * as the key for a HashMap to easily retrieve [AggregationResultBucket] based on the bucket key values. @@ -87,3 +100,23 @@ fun BucketLevelTriggerRunResult.getCombinedTriggerRunResult( return this.copy(aggregationResultBuckets = mergedAggregationResultBuckets, actionResultsMap = mergedActionResultsMap, error = error) } + +// TODO: Check if this can be more generic such that TransportIndexMonitorAction class can use this. Also see if this should be refactored +// to another class. Include tests for this as well. +suspend fun updateMonitor(client: Client, xContentRegistry: NamedXContentRegistry, settings: Settings, monitor: Monitor): IndexResponse { + /*val currentMonitor = AlertingConfigAccessor.getMonitorInfo(client, xContentRegistry, monitor.id) + + var updateMonitor = monitor + // If both are enabled, use the current existing monitor enabled time, otherwise the next execution will be + // incorrect. 
+ if (monitor.enabled && currentMonitor.enabled) + updateMonitor = monitor.copy(enabledTime = currentMonitor.enabledTime)*/ + + val indexRequest = IndexRequest(ScheduledJob.SCHEDULED_JOBS_INDEX) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .source(monitor.toXContentWithUser(XContentFactory.jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true")))) .id(monitor.id) .timeout(AlertingSettings.INDEX_TIMEOUT.get(settings)) + + return client.suspendUntil { client.index(indexRequest, it) } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/DocLevelMonitorQueries.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/DocLevelMonitorQueries.kt new file mode 100644 index 000000000..badb76370 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/DocLevelMonitorQueries.kt @@ -0,0 +1,115 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util + +import org.apache.logging.log4j.LogManager +import org.opensearch.action.ActionListener +import org.opensearch.action.admin.indices.create.CreateIndexRequest +import org.opensearch.action.admin.indices.create.CreateIndexResponse +import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest +import org.opensearch.action.bulk.BulkRequest +import org.opensearch.action.bulk.BulkResponse +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.support.WriteRequest.RefreshPolicy +import org.opensearch.action.support.master.AcknowledgedResponse +import org.opensearch.alerting.action.ExecuteMonitorResponse +import org.opensearch.alerting.action.IndexMonitorResponse +import org.opensearch.alerting.core.model.DocLevelMonitorInput +import org.opensearch.alerting.core.model.DocLevelQuery +import org.opensearch.alerting.core.model.ScheduledJob +import org.opensearch.alerting.model.Monitor +import org.opensearch.client.AdminClient +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.settings.Settings +import org.opensearch.common.unit.TimeValue + +private val log = LogManager.getLogger(DocLevelMonitorQueries::class.java) + +class DocLevelMonitorQueries(private val client: AdminClient, private val clusterService: ClusterService) { + companion object { + @JvmStatic + fun docLevelQueriesMappings(): String { + return DocLevelMonitorQueries::class.java.classLoader.getResource("mappings/doc-level-queries.json").readText() + } + } + + fun initDocLevelQueryIndex(actionListener: ActionListener<CreateIndexResponse>) { + if (!docLevelQueryIndexExists()) { + var indexRequest = CreateIndexRequest(ScheduledJob.DOC_LEVEL_QUERIES_INDEX) + .mapping(docLevelQueriesMappings()) + .settings( + Settings.builder().put("index.hidden", true) + .build() + ) + client.indices().create(indexRequest, actionListener) + } + } + + fun docLevelQueryIndexExists(): Boolean { + val clusterState = clusterService.state() + return clusterState.routingTable.hasIndex(ScheduledJob.DOC_LEVEL_QUERIES_INDEX) + } + + fun indexDocLevelQueries( + queryClient: Client, + monitor: Monitor, + monitorId: String, + refreshPolicy: RefreshPolicy, + indexTimeout: TimeValue, + indexMonitorActionListener: ActionListener<IndexMonitorResponse>?, + executeMonitorActionListener: ActionListener<ExecuteMonitorResponse>?, + docLevelQueryIndexListener: ActionListener<BulkResponse> + ) { + val docLevelMonitorInput = monitor.inputs[0] as DocLevelMonitorInput + val index = docLevelMonitorInput.indices[0] + val queries: List<DocLevelQuery> = docLevelMonitorInput.queries + + val clusterState =
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/IndexUtils.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/IndexUtils.kt
index e0ce289a8..9f299e8c5 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/util/IndexUtils.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/IndexUtils.kt
@@ -29,16 +29,22 @@ class IndexUtils {
             private set
         var alertIndexSchemaVersion: Int
             private set
+        var findingIndexSchemaVersion: Int
+            private set
         var scheduledJobIndexUpdated: Boolean = false
             private set
         var alertIndexUpdated: Boolean = false
             private set
-        var lastUpdatedHistoryIndex: String? = null
+        var findingIndexUpdated: Boolean = false
+            private set
+        var lastUpdatedAlertHistoryIndex: String? = null
+        var lastUpdatedFindingHistoryIndex: String?
= null init { scheduledJobIndexSchemaVersion = getSchemaVersion(ScheduledJobIndices.scheduledJobMappings()) alertIndexSchemaVersion = getSchemaVersion(AlertIndices.alertMapping()) + findingIndexSchemaVersion = getSchemaVersion(AlertIndices.findingMapping()) } @JvmStatic @@ -51,6 +57,11 @@ class IndexUtils { alertIndexUpdated = true } + @JvmStatic + fun findingIndexUpdated() { + findingIndexUpdated = true + } + @JvmStatic fun getSchemaVersion(mapping: String): Int { val xcp = XContentType.JSON.xContent().createParser( diff --git a/alerting/src/main/resources/org/opensearch/alerting/alerts/alert_mapping.json b/alerting/src/main/resources/org/opensearch/alerting/alerts/alert_mapping.json index 4ebba38fa..abb377b6c 100644 --- a/alerting/src/main/resources/org/opensearch/alerting/alerts/alert_mapping.json +++ b/alerting/src/main/resources/org/opensearch/alerting/alerts/alert_mapping.json @@ -83,6 +83,22 @@ } } }, + "finding_ids": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "related_doc_ids": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, "state": { "type": "keyword" }, diff --git a/alerting/src/main/resources/org/opensearch/alerting/alerts/finding_mapping.json b/alerting/src/main/resources/org/opensearch/alerting/alerts/finding_mapping.json new file mode 100644 index 000000000..c9386b2ef --- /dev/null +++ b/alerting/src/main/resources/org/opensearch/alerting/alerts/finding_mapping.json @@ -0,0 +1,56 @@ +{ + "dynamic": "strict", + "_meta" : { + "schema_version": 1 + }, + "properties": { + "schema_version": { + "type": "integer" + }, + "related_doc_ids": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "monitor_id": { + "type": "keyword" + }, + "monitor_name": { + "type": "keyword" + }, + "id": { + "type": "keyword" + }, + "index": { + "type": "keyword" + }, + "queries" : { + "type": "nested", + "properties": { + "id": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "query": { + "type": "text" + }, + "tags": { + "type": "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + } + } + }, + "timestamp": { + "type": "long" + } + } +} \ No newline at end of file diff --git a/alerting/src/main/resources/org/opensearch/alerting/org.opensearch.alerting.txt b/alerting/src/main/resources/org/opensearch/alerting/org.opensearch.alerting.txt index 78d53e839..7d9d8c095 100644 --- a/alerting/src/main/resources/org/opensearch/alerting/org.opensearch.alerting.txt +++ b/alerting/src/main/resources/org/opensearch/alerting/org.opensearch.alerting.txt @@ -31,6 +31,16 @@ class org.opensearch.alerting.script.QueryLevelTriggerExecutionContext { Exception getError() } +class org.opensearch.alerting.script.DocumentLevelTriggerExecutionContext { + Monitor getMonitor() + DocumentLevelTrigger getTrigger() + List getResults() + java.time.Instant getPeriodStart() + java.time.Instant getPeriodEnd() + Alert getAlert() + Exception getError() +} + class org.opensearch.alerting.model.Monitor { String getId() long getVersion() @@ -45,6 +55,13 @@ class org.opensearch.alerting.model.QueryLevelTrigger { List getActions() } +class org.opensearch.alerting.model.DocumentLevelTrigger { + String getId() + String getName() + String getSeverity() + List getActions() +} + class org.opensearch.alerting.model.Alert { String getId() long getVersion() diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/ADTestHelpers.kt b/alerting/src/test/kotlin/org/opensearch/alerting/ADTestHelpers.kt 
index a82999bfe..dcbc13da7 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/ADTestHelpers.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/ADTestHelpers.kt @@ -496,7 +496,7 @@ fun randomADMonitor( return Monitor( name = name, monitorType = Monitor.MonitorType.QUERY_LEVEL_MONITOR, enabled = enabled, inputs = inputs, schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, - user = user, uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() + user = user, lastRunContext = mapOf(), uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() ) } diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/AccessRoles.kt b/alerting/src/test/kotlin/org/opensearch/alerting/AccessRoles.kt index 54feafbc8..f13085357 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/AccessRoles.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/AccessRoles.kt @@ -1,3 +1,8 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + package org.opensearch.alerting val ALL_ACCESS_ROLE = "all_access" diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt index f4e0d6a7c..1cdc5ac1a 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt @@ -57,6 +57,11 @@ class AlertServiceTests : OpenSearchTestCase() { settingSet.add(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD) settingSet.add(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD) settingSet.add(AlertingSettings.REQUEST_TIMEOUT) + settingSet.add(AlertingSettings.FINDING_HISTORY_ENABLED) + settingSet.add(AlertingSettings.FINDING_HISTORY_MAX_DOCS) + settingSet.add(AlertingSettings.FINDING_HISTORY_INDEX_MAX_AGE) + settingSet.add(AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD) + settingSet.add(AlertingSettings.FINDING_HISTORY_RETENTION_PERIOD) val discoveryNode = DiscoveryNode("node", buildNewFakeTransportAddress(), Version.CURRENT) val clusterSettings = ClusterSettings(settings, settingSet) val testClusterService = ClusterServiceUtils.createClusterService(threadPool, discoveryNode, clusterSettings) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt b/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt index 1dbf6e8fd..a5cd3738a 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt @@ -16,12 +16,17 @@ import org.junit.rules.DisableOnDebug import org.opensearch.action.search.SearchResponse import org.opensearch.alerting.AlertingPlugin.Companion.EMAIL_ACCOUNT_BASE_URI import org.opensearch.alerting.AlertingPlugin.Companion.EMAIL_GROUP_BASE_URI +import org.opensearch.alerting.action.GetFindingsResponse import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.core.model.DocLevelQuery import org.opensearch.alerting.core.model.ScheduledJob import org.opensearch.alerting.core.model.SearchInput import org.opensearch.alerting.core.settings.ScheduledJobSettings import org.opensearch.alerting.model.Alert import org.opensearch.alerting.model.BucketLevelTrigger +import org.opensearch.alerting.model.DocumentLevelTrigger +import org.opensearch.alerting.model.Finding +import org.opensearch.alerting.model.FindingWithDocs import org.opensearch.alerting.model.Monitor 
import org.opensearch.alerting.model.QueryLevelTrigger
 import org.opensearch.alerting.model.destination.Chime
@@ -44,6 +49,7 @@ import org.opensearch.common.unit.TimeValue
 import org.opensearch.common.xcontent.LoggingDeprecationHandler
 import org.opensearch.common.xcontent.NamedXContentRegistry
 import org.opensearch.common.xcontent.ToXContent
+import org.opensearch.common.xcontent.XContentBuilder
 import org.opensearch.common.xcontent.XContentFactory
 import org.opensearch.common.xcontent.XContentFactory.jsonBuilder
 import org.opensearch.common.xcontent.XContentParser
@@ -60,10 +66,12 @@ import java.time.ZonedDateTime
 import java.time.format.DateTimeFormatter
 import java.time.temporal.ChronoUnit
 import java.util.Locale
+import java.util.UUID
 import javax.management.MBeanServerInvocationHandler
 import javax.management.ObjectName
 import javax.management.remote.JMXConnectorFactory
 import javax.management.remote.JMXServiceURL
+import kotlin.collections.HashMap

 abstract class AlertingRestTestCase : ODFERestTestCase() {

@@ -81,7 +89,8 @@ abstract class AlertingRestTestCase : ODFERestTestCase() {
                 Monitor.XCONTENT_REGISTRY,
                 SearchInput.XCONTENT_REGISTRY,
                 QueryLevelTrigger.XCONTENT_REGISTRY,
-                BucketLevelTrigger.XCONTENT_REGISTRY
+                BucketLevelTrigger.XCONTENT_REGISTRY,
+                DocumentLevelTrigger.XCONTENT_REGISTRY
             ) + SearchModule(Settings.EMPTY, emptyList()).namedXContents
         )
     }
@@ -102,7 +111,8 @@ abstract class AlertingRestTestCase : ODFERestTestCase() {
             response.entity.content
         ).map()
         assertUserNull(monitorJson as HashMap<String, Any>)
-        return monitor.copy(id = monitorJson["_id"] as String, version = (monitorJson["_version"] as Int).toLong())
+
+        return getMonitor(monitorId = monitorJson["_id"] as String)
     }

     protected fun createMonitor(monitor: Monitor, refresh: Boolean = true): Monitor {
@@ -475,6 +485,15 @@ abstract class AlertingRestTestCase : ODFERestTestCase() {
         return getMonitor(monitorId = monitorId)
     }

+    protected fun createRandomDocumentMonitor(refresh: Boolean = false, withMetadata: Boolean = false): Monitor {
+        val monitor = randomDocumentLevelMonitor(withMetadata = withMetadata)
+        val monitorId = createMonitor(monitor, refresh).id
+        if (withMetadata) {
+            return getMonitor(monitorId = monitorId, header = BasicHeader(HttpHeaders.USER_AGENT, "OpenSearch-Dashboards"))
+        }
+        return getMonitor(monitorId = monitorId)
+    }
+
     @Suppress("UNCHECKED_CAST")
     protected fun updateMonitor(monitor: Monitor, refresh: Boolean = false): Monitor {
         val response = client().makeRequest(
@@ -511,9 +530,82 @@ abstract class AlertingRestTestCase : ODFERestTestCase() {
         return monitor.copy(id = id, version = version)
     }

-    protected fun searchAlerts(monitor: Monitor, indices: String = AlertIndices.ALERT_INDEX, refresh: Boolean = true): List<Alert> {
+    // TODO: understand why doc alerts won't work with the normal searchAlerts function
+    protected fun searchAlertsWithFilter(
+        monitor: Monitor,
+        indices: String = AlertIndices.ALERT_INDEX,
+        refresh: Boolean = true
+    ): List<Alert> {
         if (refresh) refreshIndex(indices)
+
+        val request = """
+            { "version" : true,
+              "query": { "match_all": {} }
+            }
+        """.trimIndent()
+        val httpResponse = adminClient().makeRequest("GET", "/$indices/_search", StringEntity(request, APPLICATION_JSON))
+        assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus())
+
+        val searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content))
+        return searchResponse.hits.hits.map {
+            val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() }
+            Alert.parse(xcp, it.id, it.version)
+        }.filter { alert -> alert.monitorId == monitor.id }
+    }
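+    // Illustrative usage (not part of the original change): a doc-level test can seed a
+    // finding with createFinding below and then read it back with searchFindings, e.g.
+    //
+    //     val findingId = createFinding(monitorId = monitor.id, matchingDocIds = listOf("1"))
+    //     val findings = searchFindings(monitor)
+    //     assertTrue("Finding was not indexed", findings.any { it.id == findingId })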
+
+    protected fun createFinding(
+        monitorId: String = "NO_ID",
+        monitorName: String = "NO_NAME",
+        index: String = "testIndex",
+        docLevelQueries: List<DocLevelQuery> = listOf(DocLevelQuery(query = "test_field:\"us-west-2\"", name = "testQuery")),
+        matchingDocIds: List<String>
+    ): String {
+        val finding = Finding(
+            id = UUID.randomUUID().toString(),
+            relatedDocIds = matchingDocIds,
+            monitorId = monitorId,
+            monitorName = monitorName,
+            index = index,
+            docLevelQueries = docLevelQueries,
+            timestamp = Instant.now()
+        )
+
+        val findingStr = finding.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS).string()
+
+        indexDoc(".opensearch-alerting-findings", finding.id, findingStr)
+        return finding.id
+    }
+
+    protected fun searchFindings(
+        monitor: Monitor,
+        indices: String = AlertIndices.ALL_FINDING_INDEX_PATTERN,
+        refresh: Boolean = true
+    ): List<Finding> {
+        if (refresh) refreshIndex(indices)
+
+        val request = """
+            { "version" : true,
+              "query": { "match_all": {} }
+            }
+        """.trimIndent()
+        val httpResponse = adminClient().makeRequest("GET", "/$indices/_search", StringEntity(request, APPLICATION_JSON))
+        assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus())
+
+        val searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content))
+        return searchResponse.hits.hits.map {
+            val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() }
+            Finding.parse(xcp)
+        }.filter { finding -> finding.monitorId == monitor.id }
+    }
+
+    protected fun searchAlerts(monitor: Monitor, indices: String = AlertIndices.ALERT_INDEX, refresh: Boolean = true): List<Alert> {
+        try {
+            if (refresh) refreshIndex(indices)
+        } catch (e: Exception) {
+            logger.warn("Could not refresh index $indices because: ${e.message}")
+            return emptyList()
+        }
+        // If this is a test monitor (it doesn't have an ID), no alerts will be saved for it.
         val searchParams = if (monitor.id != Monitor.NO_ID) mapOf("routing" to monitor.id) else mapOf()
         val request = """
@@ -595,6 +687,40 @@ abstract class AlertingRestTestCase : ODFERestTestCase() {
     protected fun executeMonitor(client: RestClient, monitor: Monitor, params: Map<String, String> = mapOf()): Response =
         client.makeRequest("POST", "$ALERTING_BASE_URI/_execute", params, monitor.toHttpEntityWithUser())

+    protected fun searchFindings(params: Map<String, String> = mutableMapOf()): GetFindingsResponse {
+
+        var baseEndpoint = "${AlertingPlugin.FINDING_BASE_URI}/_search?"
+ for (entry in params.entries) { + baseEndpoint += "${entry.key}=${entry.value}&" + } + + val response = client().makeRequest("GET", baseEndpoint) + + assertEquals("Unable to retrieve findings", RestStatus.OK, response.restStatus()) + + val parser = createParser(XContentType.JSON.xContent(), response.entity.content) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser) + + var totalFindings = 0 + val findings = mutableListOf() + + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + parser.nextToken() + + when (parser.currentName()) { + "total_findings" -> totalFindings = parser.intValue() + "findings" -> { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.currentToken(), parser) + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + findings.add(FindingWithDocs.parse(parser)) + } + } + } + } + + return GetFindingsResponse(response.restStatus(), totalFindings, findings) + } + protected fun indexDoc(index: String, id: String, doc: String, refresh: Boolean = true): Response { return indexDoc(client(), index, id, doc, refresh) } @@ -679,10 +805,18 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { fun putAlertMappings(mapping: String? = null) { val mappingHack = if (mapping != null) mapping else AlertIndices.alertMapping().trimStart('{').trimEnd('}') - val encodedHistoryIndex = URLEncoder.encode(AlertIndices.HISTORY_INDEX_PATTERN, Charsets.UTF_8.toString()) + val encodedHistoryIndex = URLEncoder.encode(AlertIndices.ALERT_HISTORY_INDEX_PATTERN, Charsets.UTF_8.toString()) val settings = Settings.builder().put("index.hidden", true).build() createIndex(AlertIndices.ALERT_INDEX, settings, mappingHack) - createIndex(encodedHistoryIndex, settings, mappingHack, "\"${AlertIndices.HISTORY_WRITE_INDEX}\" : {}") + createIndex(encodedHistoryIndex, settings, mappingHack, "\"${AlertIndices.ALERT_HISTORY_WRITE_INDEX}\" : {}") + } + + fun putFindingMappings(mapping: String? 
= null) { + val mappingHack = if (mapping != null) mapping else AlertIndices.findingMapping().trimStart('{').trimEnd('}') + val encodedHistoryIndex = URLEncoder.encode(AlertIndices.FINDING_HISTORY_INDEX_PATTERN, Charsets.UTF_8.toString()) + val settings = Settings.builder().put("index.hidden", true).build() +// createIndex(AlertIndices.FINDING_HISTORY_WRITE_INDEX, settings, mappingHack) + createIndex(encodedHistoryIndex, settings, mappingHack, "\"${AlertIndices.FINDING_HISTORY_WRITE_INDEX}\" : {}") } fun scheduledJobMappings(): String { diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt new file mode 100644 index 000000000..bb401dd5d --- /dev/null +++ b/alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt @@ -0,0 +1,137 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import org.opensearch.alerting.core.model.DocLevelMonitorInput +import org.opensearch.alerting.core.model.DocLevelQuery +import java.time.ZonedDateTime +import java.time.format.DateTimeFormatter +import java.time.temporal.ChronoUnit.MILLIS + +class DocumentMonitorRunnerIT : AlertingRestTestCase() { + + fun `test execute monitor with dryrun`() { + + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val index = createTestIndex() + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docReturningInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + + val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docReturningInput), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))) + ) + + indexDoc(index, "1", testDoc) + + val response = executeMonitor(monitor, params = DRYRUN_MONITOR) + + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + + assertEquals(1, output.objectMap("trigger_results").values.size) + + for (triggerResult in output.objectMap("trigger_results").values) { + assertEquals(1, triggerResult.objectMap("action_results").values.size) + for (actionResult in triggerResult.objectMap("action_results").values) { + @Suppress("UNCHECKED_CAST") val actionOutput = actionResult["output"] as Map + assertEquals("Hello ${monitor.name}", actionOutput["subject"]) + assertEquals("Hello ${monitor.name}", actionOutput["message"]) + } + } + + val alerts = searchAlerts(monitor) + assertEquals("Alert saved for test monitor", 0, alerts.size) + } + + fun `test execute monitor returns search result with dryrun`() { + val testIndex = createTestIndex() + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docReturningInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = 
randomDocumentLevelMonitor(inputs = listOf(docReturningInput), triggers = listOf(trigger)) + + indexDoc(testIndex, "1", testDoc) + indexDoc(testIndex, "5", testDoc) + + val response = executeMonitor(monitor, params = DRYRUN_MONITOR) + + val output = entityAsMap(response) + + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") + val searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + val matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) + assertTrue("Incorrect search result", matchingDocsToQuery.contains("5")) + } + + fun `test execute monitor generates alerts and findings`() { + putFindingMappings() + val testIndex = createTestIndex() + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docReturningInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docReturningInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + Thread.sleep(2000) + indexDoc(testIndex, "1", testDoc) + indexDoc(testIndex, "5", testDoc) + + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") + val searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + val matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) + assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1", "5"))) + + val alerts = searchAlertsWithFilter(monitor) + assertEquals("Alert saved for test monitor", 2, alerts.size) + + // TODO: modify findings such that there is a finding per document, so this test will need to be modified + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 2, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("5")) + } + + @Suppress("UNCHECKED_CAST") + /** helper that returns a field in a json map whose values are all json objects */ + private fun Map.objectMap(key: String): Map> { + return this[key] as Map> + } +} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerServiceIT.kt similarity index 99% rename from alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerIT.kt rename to alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerServiceIT.kt index 34fb65871..540fd166c 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerServiceIT.kt @@ -49,7 +49,7 @@ import java.time.temporal.ChronoUnit.MILLIS import java.time.temporal.ChronoUnit.MINUTES import kotlin.collections.HashMap -class MonitorRunnerIT : AlertingRestTestCase() { +class MonitorRunnerServiceIT : AlertingRestTestCase() 
{ fun `test execute monitor with dryrun`() { val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) @@ -255,7 +255,7 @@ class MonitorRunnerIT : AlertingRestTestCase() { executeMonitor(monitor.id) assertTrue("There's still an active alert", searchAlerts(monitor, AlertIndices.ALERT_INDEX).isEmpty()) - val completedAlert = searchAlerts(monitor, AlertIndices.ALL_INDEX_PATTERN).single() + val completedAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single() verifyAlert(completedAlert, monitor, COMPLETED) } @@ -490,7 +490,7 @@ class MonitorRunnerIT : AlertingRestTestCase() { val errorAlert = searchAlerts(monitor).single() verifyAlert(errorAlert, monitor, ERROR) executeMonitor(monitor.id) - val completedAlert = searchAlerts(monitor, AlertIndices.ALL_INDEX_PATTERN).single() + val completedAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single() verifyAlert(completedAlert, monitor, COMPLETED) assertNull("Completed alert still has error message.", completedAlert.errorMessage) @@ -736,7 +736,7 @@ class MonitorRunnerIT : AlertingRestTestCase() { Thread.sleep(200) updateMonitor(monitor.copy(triggers = listOf(trigger.copy(condition = NEVER_RUN)), id = monitor.id)) executeMonitor(monitor.id) - val completedAlert = searchAlerts(monitor, AlertIndices.ALL_INDEX_PATTERN).single() + val completedAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single() verifyAlert(completedAlert, monitor, COMPLETED) updateMonitor(monitor.copy(triggers = listOf(trigger.copy(condition = ALWAYS_RUN)), id = monitor.id)) @@ -1210,7 +1210,7 @@ class MonitorRunnerIT : AlertingRestTestCase() { executeMonitor(monitor.id) // Verify expected alert was completed - alerts = searchAlerts(monitor, AlertIndices.ALL_INDEX_PATTERN) + alerts = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN) val activeAlerts = alerts.filter { it.state == ACTIVE } val completedAlerts = alerts.filter { it.state == COMPLETED } assertEquals("Incorrect number of active alerts", 1, activeAlerts.size) @@ -1305,7 +1305,7 @@ class MonitorRunnerIT : AlertingRestTestCase() { // Execute Monitor and check that both Alerts were updated Thread.sleep(200) executeMonitor(monitor.id) - currentAlerts = searchAlerts(monitor, AlertIndices.ALL_INDEX_PATTERN) + currentAlerts = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN) val completedAlerts = currentAlerts.filter { it.state == COMPLETED } assertEquals("Incorrect number of completed alerts", 2, completedAlerts.size) val previouslyAcknowledgedAlert = completedAlerts.single { it.aggregationResultBucket?.getBucketKeysHash().equals("test_value_1") } @@ -1531,7 +1531,7 @@ class MonitorRunnerIT : AlertingRestTestCase() { // Execute Monitor and check that both Alerts were moved to COMPLETED executeMonitor(monitor.id) - currentAlerts = searchAlerts(monitor, AlertIndices.ALL_INDEX_PATTERN) + currentAlerts = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN) val completedAlerts = currentAlerts.filter { it.state == COMPLETED } assertEquals("Incorrect number of completed alerts", 2, completedAlerts.size) } diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt b/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt index 8955d0766..9f4e8717e 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt @@ -11,6 +11,8 @@ import org.apache.http.HttpEntity import 
org.opensearch.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder import org.opensearch.alerting.aggregation.bucketselectorext.BucketSelectorExtFilter import org.opensearch.alerting.core.model.ClusterMetricsInput +import org.opensearch.alerting.core.model.DocLevelMonitorInput +import org.opensearch.alerting.core.model.DocLevelQuery import org.opensearch.alerting.core.model.Input import org.opensearch.alerting.core.model.IntervalSchedule import org.opensearch.alerting.core.model.Schedule @@ -21,6 +23,9 @@ import org.opensearch.alerting.model.AggregationResultBucket import org.opensearch.alerting.model.Alert import org.opensearch.alerting.model.BucketLevelTrigger import org.opensearch.alerting.model.BucketLevelTriggerRunResult +import org.opensearch.alerting.model.DocumentLevelTrigger +import org.opensearch.alerting.model.DocumentLevelTriggerRunResult +import org.opensearch.alerting.model.Finding import org.opensearch.alerting.model.InputRunResults import org.opensearch.alerting.model.Monitor import org.opensearch.alerting.model.MonitorRunResult @@ -83,7 +88,7 @@ fun randomQueryLevelMonitor( return Monitor( name = name, monitorType = Monitor.MonitorType.QUERY_LEVEL_MONITOR, enabled = enabled, inputs = inputs, schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, - uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() + lastRunContext = mapOf(), uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() ) } @@ -101,7 +106,7 @@ fun randomQueryLevelMonitorWithoutUser( return Monitor( name = name, monitorType = Monitor.MonitorType.QUERY_LEVEL_MONITOR, enabled = enabled, inputs = inputs, schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = null, - uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() + lastRunContext = mapOf(), uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() ) } @@ -125,7 +130,7 @@ fun randomBucketLevelMonitor( return Monitor( name = name, monitorType = Monitor.MonitorType.BUCKET_LEVEL_MONITOR, enabled = enabled, inputs = inputs, schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, - uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() + lastRunContext = mapOf(), uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() ) } @@ -143,7 +148,25 @@ fun randomClusterMetricsMonitor( return Monitor( name = name, monitorType = Monitor.MonitorType.CLUSTER_METRICS_MONITOR, enabled = enabled, inputs = inputs, schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, - uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() + lastRunContext = mapOf(), uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() + ) +} + +fun randomDocumentLevelMonitor( + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + user: User? = randomUser(), + inputs: List = listOf(DocLevelMonitorInput("description", listOf("index"), emptyList())), + schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + enabled: Boolean = randomBoolean(), + triggers: List = (1..randomInt(10)).map { randomQueryLevelTrigger() }, + enabledTime: Instant? 
= if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, + lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + withMetadata: Boolean = false +): Monitor { + return Monitor( + name = name, monitorType = Monitor.MonitorType.DOC_LEVEL_MONITOR, enabled = enabled, inputs = inputs, + schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, + lastRunContext = mapOf(), uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() ) } @@ -184,6 +207,25 @@ fun randomBucketLevelTrigger( fun randomActionsForBucketLevelTrigger(min: Int = 0, max: Int = 10, destinationId: String = ""): List = (min..randomInt(max)).map { randomActionWithPolicy(destinationId = destinationId) } +fun randomDocumentLevelTrigger( + id: String = UUIDs.base64UUID(), + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + severity: String = "1", + condition: Script = randomScript(), + actions: List = mutableListOf(), + destinationId: String = "" +): DocumentLevelTrigger { + return DocumentLevelTrigger( + id = id, + name = name, + severity = severity, + condition = condition, + actions = if (actions.isEmpty() && destinationId.isNotBlank()) + (0..randomInt(10)).map { randomAction(destinationId = destinationId) } + else actions + ) +} + fun randomBucketSelectorExtAggregationBuilder( name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), bucketsPathsMap: MutableMap = mutableMapOf("avg" to "10"), @@ -201,6 +243,23 @@ fun randomBucketSelectorScript( return Script(Script.DEFAULT_SCRIPT_TYPE, Script.DEFAULT_SCRIPT_LANG, idOrCode, emptyMap(), params) } +fun randomDocLevelTrigger( + id: String = UUIDs.base64UUID(), + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + severity: String = "1", + condition: Script = randomScript(), + actions: List = mutableListOf(), + destinationId: String = "" +): DocumentLevelTrigger { + return DocumentLevelTrigger( + id = id, + name = name, + severity = severity, + condition = condition, + actions = if (actions.isEmpty()) (0..randomInt(10)).map { randomAction(destinationId = destinationId) } else actions + ) +} + fun randomEmailAccount( salt: String = "", name: String = salt + OpenSearchRestTestCase.randomAlphaOfLength(10), @@ -307,6 +366,43 @@ fun randomAlert(monitor: Monitor = randomQueryLevelMonitor()): Alert { ) } +fun randomDocLevelQuery( + id: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + query: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + name: String = "${randomInt(5)}", + tags: List = mutableListOf(0..randomInt(10)).map { OpenSearchRestTestCase.randomAlphaOfLength(10) } +): DocLevelQuery { + return DocLevelQuery(id = id, query = query, name = name, tags = tags) +} + +fun randomDocLevelMonitorInput( + description: String = OpenSearchRestTestCase.randomAlphaOfLength(randomInt(10)), + indices: List = listOf(1..randomInt(10)).map { OpenSearchRestTestCase.randomAlphaOfLength(10) }, + queries: List = listOf(1..randomInt(10)).map { randomDocLevelQuery() } +): DocLevelMonitorInput { + return DocLevelMonitorInput(description = description, indices = indices, queries = queries) +} + +fun randomFinding( + id: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + relatedDocIds: List = listOf(OpenSearchRestTestCase.randomAlphaOfLength(10)), + monitorId: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + monitorName: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + index: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + 
docLevelQueries: List = listOf(randomDocLevelQuery()), + timestamp: Instant = Instant.now() +): Finding { + return Finding( + id = id, + relatedDocIds = relatedDocIds, + monitorId = monitorId, + monitorName = monitorName, + index = index, + docLevelQueries = docLevelQueries, + timestamp = timestamp + ) +} + fun randomAlertWithAggregationResultBucket(monitor: Monitor = randomBucketLevelMonitor()): Alert { val trigger = randomBucketLevelTrigger() val actionExecutionResults = mutableListOf(randomActionExecutionResult(), randomActionExecutionResult()) @@ -363,6 +459,21 @@ fun randomBucketLevelMonitorRunResult(): MonitorRunResult { + val triggerResults = mutableMapOf() + val triggerRunResult = randomDocumentLevelTriggerRunResult() + triggerResults.plus(Pair("test", triggerRunResult)) + + return MonitorRunResult( + "test-monitor", + Instant.now(), + Instant.now(), + null, + randomInputRunResults(), + triggerResults + ) +} + fun randomInputRunResults(): InputRunResults { return InputRunResults(listOf(), null) } @@ -412,6 +523,13 @@ fun randomBucketLevelTriggerRunResult(): BucketLevelTriggerRunResult { ) } +fun randomDocumentLevelTriggerRunResult(): DocumentLevelTriggerRunResult { + val map = mutableMapOf() + map.plus(Pair("key1", randomActionRunResult())) + map.plus(Pair("key2", randomActionRunResult())) + return DocumentLevelTriggerRunResult("trigger-name", mutableListOf(UUIDs.randomBase64UUID().toString()), null, map) +} + fun randomActionRunResult(): ActionRunResult { val map = mutableMapOf() map.plus(Pair("key1", "val1")) @@ -526,7 +644,10 @@ fun parser(xc: String): XContentParser { fun xContentRegistry(): NamedXContentRegistry { return NamedXContentRegistry( listOf( - SearchInput.XCONTENT_REGISTRY, QueryLevelTrigger.XCONTENT_REGISTRY, BucketLevelTrigger.XCONTENT_REGISTRY + SearchInput.XCONTENT_REGISTRY, + QueryLevelTrigger.XCONTENT_REGISTRY, + BucketLevelTrigger.XCONTENT_REGISTRY, + DocumentLevelTrigger.XCONTENT_REGISTRY ) + SearchModule(Settings.EMPTY, emptyList()).namedXContents ) } diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/AcknowledgeAlertResponseTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/AcknowledgeAlertResponseTests.kt index 72c8198a6..13d98f599 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/AcknowledgeAlertResponseTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/action/AcknowledgeAlertResponseTests.kt @@ -22,7 +22,7 @@ class AcknowledgeAlertResponseTests : OpenSearchTestCase() { val acknowledged = mutableListOf( Alert( "1234", 0L, 1, "monitor-1234", "test-monitor", 0L, randomUser(), - "trigger-14", "test-trigger", Alert.State.ACKNOWLEDGED, + "trigger-14", "test-trigger", ArrayList(), ArrayList(), Alert.State.ACKNOWLEDGED, Instant.now(), Instant.now(), Instant.now(), Instant.now(), null, ArrayList(), "sev-2", ArrayList(), null ) @@ -30,7 +30,7 @@ class AcknowledgeAlertResponseTests : OpenSearchTestCase() { val failed = mutableListOf( Alert( "1234", 0L, 1, "monitor-1234", "test-monitor", 0L, randomUser(), - "trigger-14", "test-trigger", Alert.State.ERROR, Instant.now(), Instant.now(), + "trigger-14", "test-trigger", ArrayList(), ArrayList(), Alert.State.ERROR, Instant.now(), Instant.now(), Instant.now(), Instant.now(), null, mutableListOf(AlertError(Instant.now(), "Error msg")), "sev-2", mutableListOf(ActionExecutionResult("7890", null, 0)), null ) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetAlertsResponseTests.kt 
b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetAlertsResponseTests.kt index 5d29f1107..277e8e9a2 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetAlertsResponseTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetAlertsResponseTests.kt @@ -42,6 +42,8 @@ class GetAlertsResponseTests : OpenSearchTestCase() { randomUser(), "triggerId", "triggerName", + Collections.emptyList(), + Collections.emptyList(), Alert.State.ACKNOWLEDGED, Instant.MIN, null, @@ -78,6 +80,8 @@ class GetAlertsResponseTests : OpenSearchTestCase() { null, "triggerId", "triggerName", + Collections.emptyList(), + Collections.emptyList(), Alert.State.ACKNOWLEDGED, now, null, @@ -93,8 +97,8 @@ class GetAlertsResponseTests : OpenSearchTestCase() { var actualXContentString = req.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() val expectedXContentString = "{\"alerts\":[{\"id\":\"id\",\"version\":0,\"monitor_id\":\"monitorId\"," + "\"schema_version\":0,\"monitor_version\":0,\"monitor_name\":\"monitorName\"," + - "\"trigger_id\":\"triggerId\"," + - "\"trigger_name\":\"triggerName\",\"state\":\"ACKNOWLEDGED\",\"error_message\":null,\"alert_history\":[]," + + "\"trigger_id\":\"triggerId\",\"trigger_name\":\"triggerName\"," + + "\"finding_ids\":[],\"related_doc_ids\":[],\"state\":\"ACKNOWLEDGED\",\"error_message\":null,\"alert_history\":[]," + "\"severity\":\"severity\",\"action_execution_results\":[],\"start_time\":" + now.toEpochMilli() + ",\"last_notification_time\":null,\"end_time\":null,\"acknowledged_time\":null}],\"totalAlerts\":1}" assertEquals(expectedXContentString, actualXContentString) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetMonitorResponseTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetMonitorResponseTests.kt index 2bd14a45f..5634623de 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetMonitorResponseTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetMonitorResponseTests.kt @@ -49,6 +49,7 @@ class GetMonitorResponseTests : OpenSearchTestCase() { schemaVersion = 0, inputs = mutableListOf(), triggers = mutableListOf(), + lastRunContext = mutableMapOf(), uiMetadata = mutableMapOf() ) val req = GetMonitorResponse("1234", 1L, 2L, 0L, RestStatus.OK, monitor) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/IndexMonitorResponseTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/IndexMonitorResponseTests.kt index 00210dce6..c7d6d5686 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/IndexMonitorResponseTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/action/IndexMonitorResponseTests.kt @@ -35,6 +35,7 @@ class IndexMonitorResponseTests : OpenSearchTestCase() { schemaVersion = 0, inputs = mutableListOf(), triggers = mutableListOf(), + lastRunContext = mutableMapOf(), uiMetadata = mutableMapOf() ) val req = IndexMonitorResponse("1234", 1L, 2L, 0L, RestStatus.OK, monitor) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/alerts/AlertIndicesIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/alerts/AlertIndicesIT.kt index 597f6a2cb..dcf229fe4 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/alerts/AlertIndicesIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/alerts/AlertIndicesIT.kt @@ -11,8 +11,12 @@ import org.opensearch.action.search.SearchResponse import org.opensearch.alerting.ALWAYS_RUN import org.opensearch.alerting.AlertingRestTestCase import 
org.opensearch.alerting.NEVER_RUN +import org.opensearch.alerting.core.model.DocLevelMonitorInput +import org.opensearch.alerting.core.model.DocLevelQuery import org.opensearch.alerting.core.model.ScheduledJob import org.opensearch.alerting.makeRequest +import org.opensearch.alerting.randomDocumentLevelMonitor +import org.opensearch.alerting.randomDocumentLevelTrigger import org.opensearch.alerting.randomQueryLevelMonitor import org.opensearch.alerting.randomQueryLevelTrigger import org.opensearch.alerting.settings.AlertingSettings @@ -26,29 +30,64 @@ class AlertIndicesIT : AlertingRestTestCase() { executeMonitor(randomQueryLevelMonitor(triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN)))) assertIndexExists(AlertIndices.ALERT_INDEX) - assertIndexExists(AlertIndices.HISTORY_WRITE_INDEX) + assertIndexExists(AlertIndices.ALERT_HISTORY_WRITE_INDEX) + } + + fun `test create finding index`() { + val testIndex = createTestIndex() + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docReturningInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docReturningInput), triggers = listOf(trigger))) + + executeMonitor(monitor.id) + + assertIndexExists(AlertIndices.FINDING_HISTORY_WRITE_INDEX) } fun `test update alert index mapping with new schema version`() { wipeAllODFEIndices() assertIndexDoesNotExist(AlertIndices.ALERT_INDEX) - assertIndexDoesNotExist(AlertIndices.HISTORY_WRITE_INDEX) + assertIndexDoesNotExist(AlertIndices.ALERT_HISTORY_WRITE_INDEX) putAlertMappings( AlertIndices.alertMapping().trimStart('{').trimEnd('}') .replace("\"schema_version\": 3", "\"schema_version\": 0") ) assertIndexExists(AlertIndices.ALERT_INDEX) - assertIndexExists(AlertIndices.HISTORY_WRITE_INDEX) + assertIndexExists(AlertIndices.ALERT_HISTORY_WRITE_INDEX) verifyIndexSchemaVersion(AlertIndices.ALERT_INDEX, 0) - verifyIndexSchemaVersion(AlertIndices.HISTORY_WRITE_INDEX, 0) + verifyIndexSchemaVersion(AlertIndices.ALERT_HISTORY_WRITE_INDEX, 0) wipeAllODFEIndices() executeMonitor(createRandomMonitor()) assertIndexExists(AlertIndices.ALERT_INDEX) - assertIndexExists(AlertIndices.HISTORY_WRITE_INDEX) - verifyIndexSchemaVersion(ScheduledJob.SCHEDULED_JOBS_INDEX, 4) + assertIndexExists(AlertIndices.ALERT_HISTORY_WRITE_INDEX) + verifyIndexSchemaVersion(ScheduledJob.SCHEDULED_JOBS_INDEX, 5) verifyIndexSchemaVersion(AlertIndices.ALERT_INDEX, 3) - verifyIndexSchemaVersion(AlertIndices.HISTORY_WRITE_INDEX, 3) + verifyIndexSchemaVersion(AlertIndices.ALERT_HISTORY_WRITE_INDEX, 3) + } + + fun `test update finding index mapping with new schema version`() { + wipeAllODFEIndices() + assertIndexDoesNotExist(AlertIndices.FINDING_HISTORY_WRITE_INDEX) + + putFindingMappings( + AlertIndices.findingMapping().trimStart('{').trimEnd('}') + .replace("\"schema_version\": 1", "\"schema_version\": 0") + ) + assertIndexExists(AlertIndices.FINDING_HISTORY_WRITE_INDEX) + verifyIndexSchemaVersion(AlertIndices.FINDING_HISTORY_WRITE_INDEX, 0) + wipeAllODFEIndices() + + val testIndex = createTestIndex() + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docReturningInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docReturningInput), 
triggers = listOf(trigger))) + executeMonitor(trueMonitor.id) + assertIndexExists(AlertIndices.FINDING_HISTORY_WRITE_INDEX) + verifyIndexSchemaVersion(ScheduledJob.SCHEDULED_JOBS_INDEX, 5) + verifyIndexSchemaVersion(AlertIndices.FINDING_HISTORY_WRITE_INDEX, 1) } fun `test alert index gets recreated automatically if deleted`() { @@ -58,10 +97,10 @@ class AlertIndicesIT : AlertingRestTestCase() { executeMonitor(trueMonitor) assertIndexExists(AlertIndices.ALERT_INDEX) - assertIndexExists(AlertIndices.HISTORY_WRITE_INDEX) + assertIndexExists(AlertIndices.ALERT_HISTORY_WRITE_INDEX) wipeAllODFEIndices() assertIndexDoesNotExist(AlertIndices.ALERT_INDEX) - assertIndexDoesNotExist(AlertIndices.HISTORY_WRITE_INDEX) + assertIndexDoesNotExist(AlertIndices.ALERT_HISTORY_WRITE_INDEX) val executeResponse = executeMonitor(trueMonitor) val xcp = createParser(XContentType.JSON.xContent(), executeResponse.entity.content) @@ -69,7 +108,28 @@ class AlertIndicesIT : AlertingRestTestCase() { assertNull("Error running a monitor after wiping alert indices", output["error"]) } - fun `test rollover history index`() { + fun `test finding index gets recreated automatically if deleted`() { + wipeAllODFEIndices() + assertIndexDoesNotExist(AlertIndices.FINDING_HISTORY_WRITE_INDEX) + val testIndex = createTestIndex() + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docReturningInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docReturningInput), triggers = listOf(trigger))) + + executeMonitor(trueMonitor.id) + assertIndexExists(AlertIndices.FINDING_HISTORY_WRITE_INDEX) + wipeAllODFEIndices() + assertIndexDoesNotExist(AlertIndices.FINDING_HISTORY_WRITE_INDEX) + + createTestIndex(testIndex) + val executeResponse = executeMonitor(trueMonitor) + val xcp = createParser(XContentType.JSON.xContent(), executeResponse.entity.content) + val output = xcp.map() + assertNull("Error running a monitor after wiping finding indices", output["error"]) + } + + fun `test rollover alert history index`() { // Update the rollover check to be every 1 second and the index max age to be 1 second client().updateSettings(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.key, "1s") client().updateSettings(AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE.key, "1s") @@ -82,7 +142,24 @@ class AlertIndicesIT : AlertingRestTestCase() { assertTrue("Did not find 3 alert indices", getAlertIndices().size >= 3) } - fun `test history disabled`() { + fun `test rollover finding history index`() { + // Update the rollover check to be every 1 second and the index max age to be 1 second + client().updateSettings(AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD.key, "1s") + client().updateSettings(AlertingSettings.FINDING_HISTORY_INDEX_MAX_AGE.key, "1s") + + val testIndex = createTestIndex() + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docReturningInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docReturningInput), triggers = listOf(trigger))) + executeMonitor(trueMonitor.id) + + // Allow for a rollover index. 
+ Thread.sleep(2000) + assertTrue("Did not find 2 alert indices", getFindingIndices().size >= 2) + } + + fun `test alert history disabled`() { resetHistorySettings() val trigger1 = randomQueryLevelTrigger(condition = ALWAYS_RUN) @@ -97,10 +174,10 @@ class AlertIndicesIT : AlertingRestTestCase() { updateMonitor(monitor1.copy(triggers = listOf(trigger1.copy(condition = NEVER_RUN)), id = monitor1.id), true) executeMonitor(monitor1.id) - val completedAlert1 = searchAlerts(monitor1, AlertIndices.ALL_INDEX_PATTERN).single() + val completedAlert1 = searchAlerts(monitor1, AlertIndices.ALL_ALERT_INDEX_PATTERN).single() assertNotNull("Alert is not completed", completedAlert1.endTime) - assertEquals(1, getHistoryDocCount()) + assertEquals(1, getAlertHistoryDocCount()) // Disable alert history client().updateSettings(AlertingSettings.ALERT_HISTORY_ENABLED.key, "false") @@ -119,11 +196,11 @@ class AlertIndicesIT : AlertingRestTestCase() { // For the second alert, since history is now disabled, searching for the completed alert should return an empty List // since a COMPLETED alert will be removed from the alert index and not added to the history index - val completedAlert2 = searchAlerts(monitor2, AlertIndices.ALL_INDEX_PATTERN) + val completedAlert2 = searchAlerts(monitor2, AlertIndices.ALL_ALERT_INDEX_PATTERN) assertTrue("Alert is not completed", completedAlert2.isEmpty()) // Get history entry count again and ensure the new alert was not added - assertEquals(1, getHistoryDocCount()) + assertEquals(1, getAlertHistoryDocCount()) } fun `test short retention period`() { @@ -139,18 +216,18 @@ class AlertIndicesIT : AlertingRestTestCase() { assertEquals("1 alert should be active", 1, activeAlert.size) assertEquals("Did not find 2 alert indices", 2, getAlertIndices().size) // History index is created but is empty - assertEquals(0, getHistoryDocCount()) + assertEquals(0, getAlertHistoryDocCount()) // Mark alert as COMPLETED updateMonitor(monitor.copy(triggers = listOf(trigger.copy(condition = NEVER_RUN)), id = monitor.id), true) executeMonitor(monitor.id) // Verify alert is completed - val completedAlert = searchAlerts(monitor, AlertIndices.ALL_INDEX_PATTERN).single() + val completedAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single() assertNotNull("Alert is not completed", completedAlert.endTime) // The completed alert should be removed from the active alert index and added to the history index - assertEquals(1, getHistoryDocCount()) + assertEquals(1, getAlertHistoryDocCount()) // Update rollover check and max docs as well as decreasing the retention period client().updateSettings(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.key, "1s") @@ -163,7 +240,56 @@ class AlertIndicesIT : AlertingRestTestCase() { // Given the max_docs and retention settings above, the history index will rollover and the non-write index will be deleted. 
// This leaves two indices: alert index and an empty history write index assertEquals("Did not find 2 alert indices", 2, getAlertIndices().size) - assertEquals(0, getHistoryDocCount()) + assertEquals(0, getAlertHistoryDocCount()) + } + + fun `test short finding retention period`() { + resetHistorySettings() + + // Create monitor and execute + val testIndex = createTestIndex() + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docReturningInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docReturningInput), triggers = listOf(trigger))) + + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_field" : "us-west-2" + }""" + indexDoc(testIndex, "1", testDoc) + + executeMonitor(monitor.id) + + // Check if alert is active and alert index is created + val activeAlert = searchAlerts(monitor) + assertEquals("1 alert should be active", 1, activeAlert.size) + assertEquals("Did not find 2 alert indices", 2, getAlertIndices().size) + // History index is created but is empty + assertEquals(0, getAlertHistoryDocCount()) + + // Mark doc level alert as Acknowledged + acknowledgeAlerts(monitor, activeAlert[0]) + + // Verify alert is completed + val ackAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single() + assertNotNull("Alert is not acknowledged", ackAlert.acknowledgedTime) + + // The completed alert should be removed from the active alert index and added to the history index + assertEquals(1, getAlertHistoryDocCount()) + + // Update rollover check and max docs as well as decreasing the retention period + client().updateSettings(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.key, "1s") + client().updateSettings(AlertingSettings.ALERT_HISTORY_MAX_DOCS.key, 1) + client().updateSettings(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.key, "1s") + + // Give some time for history to be rolled over and cleared + Thread.sleep(5000) + + // Given the max_docs and retention settings above, the history index will rollover and the non-write index will be deleted. 
+ // This leaves two indices: alert index and an empty history write index + assertEquals("Did not find 2 alert indices", 2, getAlertIndices().size) + assertEquals(0, getAlertHistoryDocCount()) } private fun assertIndexExists(index: String) { @@ -180,10 +306,23 @@ class AlertIndicesIT : AlertingRestTestCase() { client().updateSettings(AlertingSettings.ALERT_HISTORY_ENABLED.key, "true") client().updateSettings(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.key, "60s") client().updateSettings(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.key, "60s") + client().updateSettings(AlertingSettings.FINDING_HISTORY_ENABLED.key, "true") + client().updateSettings(AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD.key, "60s") + client().updateSettings(AlertingSettings.FINDING_HISTORY_RETENTION_PERIOD.key, "60s") } private fun getAlertIndices(): List { - val response = client().makeRequest("GET", "/_cat/indices/${AlertIndices.ALL_INDEX_PATTERN}?format=json") + val response = client().makeRequest("GET", "/_cat/indices/${AlertIndices.ALL_ALERT_INDEX_PATTERN}?format=json") + val xcp = createParser(XContentType.JSON.xContent(), response.entity.content) + val responseList = xcp.list() + val indices = mutableListOf() + responseList.filterIsInstance>().forEach { indices.add(it["index"] as String) } + + return indices + } + + private fun getFindingIndices(): List { + val response = client().makeRequest("GET", "/_cat/indices/${AlertIndices.ALL_FINDING_INDEX_PATTERN}?format=json") val xcp = createParser(XContentType.JSON.xContent(), response.entity.content) val responseList = xcp.list() val indices = mutableListOf() @@ -192,7 +331,7 @@ class AlertIndicesIT : AlertingRestTestCase() { return indices } - private fun getHistoryDocCount(): Long { + private fun getAlertHistoryDocCount(): Long { val request = """ { "query": { @@ -201,10 +340,10 @@ class AlertIndicesIT : AlertingRestTestCase() { } """.trimIndent() val response = adminClient().makeRequest( - "POST", "${AlertIndices.HISTORY_ALL}/_search", emptyMap(), + "POST", "${AlertIndices.ALERT_HISTORY_ALL}/_search", emptyMap(), StringEntity(request, APPLICATION_JSON) ) - assertEquals("Request to get history failed", RestStatus.OK, response.restStatus()) + assertEquals("Request to get alert history failed", RestStatus.OK, response.restStatus()) return SearchResponse.fromXContent(createParser(jsonXContent, response.entity.content)).hits.totalHits!!.value } } diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/model/DocLevelMonitorInputTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/model/DocLevelMonitorInputTests.kt new file mode 100644 index 000000000..8a29ef03d --- /dev/null +++ b/alerting/src/test/kotlin/org/opensearch/alerting/model/DocLevelMonitorInputTests.kt @@ -0,0 +1,67 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.alerting.core.model.DocLevelMonitorInput +import org.opensearch.alerting.core.model.DocLevelQuery +import org.opensearch.alerting.opensearchapi.string +import org.opensearch.alerting.randomDocLevelMonitorInput +import org.opensearch.alerting.randomDocLevelQuery +import org.opensearch.common.xcontent.ToXContent +import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.common.xcontent.XContentType +import org.opensearch.test.OpenSearchTestCase + +class DocLevelMonitorInputTests : OpenSearchTestCase() { + fun `testing DocLevelQuery asTemplateArgs`() { + // GIVEN + val query = randomDocLevelQuery() + + // 
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/model/DocLevelMonitorInputTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/model/DocLevelMonitorInputTests.kt
new file mode 100644
index 000000000..8a29ef03d
--- /dev/null
+++ b/alerting/src/test/kotlin/org/opensearch/alerting/model/DocLevelMonitorInputTests.kt
@@ -0,0 +1,67 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.model
+
+import org.opensearch.alerting.core.model.DocLevelMonitorInput
+import org.opensearch.alerting.core.model.DocLevelQuery
+import org.opensearch.alerting.randomDocLevelMonitorInput
+import org.opensearch.alerting.randomDocLevelQuery
+import org.opensearch.test.OpenSearchTestCase
+
+class DocLevelMonitorInputTests : OpenSearchTestCase() {
+    fun `test DocLevelQuery asTemplateArgs`() {
+        // GIVEN
+        val query = randomDocLevelQuery()
+
+        // WHEN
+        val templateArgs = query.asTemplateArg()
+
+        // THEN
+        assertEquals("Template args 'id' field does not match:", templateArgs[DocLevelQuery.QUERY_ID_FIELD], query.id)
+        assertEquals("Template args 'query' field does not match:", templateArgs[DocLevelQuery.QUERY_FIELD], query.query)
+        assertEquals("Template args 'name' field does not match:", templateArgs[DocLevelQuery.NAME_FIELD], query.name)
+        assertEquals("Template args 'tags' field does not match:", templateArgs[DocLevelQuery.TAGS_FIELD], query.tags)
+    }
+
+    fun `test DocLevelMonitorInput asTemplateArgs`() {
+        // GIVEN
+        val input = randomDocLevelMonitorInput()
+
+        // WHEN
+        val templateArgs = input.asTemplateArg()
+
+        // THEN
+        assertEquals(
+            "Template args 'description' field does not match:",
+            templateArgs[DocLevelMonitorInput.DESCRIPTION_FIELD],
+            input.description
+        )
+        assertEquals(
+            "Template args 'indices' field does not match:",
+            templateArgs[DocLevelMonitorInput.INDICES_FIELD],
+            input.indices
+        )
+        assertEquals(
+            "Template args 'queries' field does not contain the expected number of queries:",
+            input.queries.size,
+            (templateArgs[DocLevelMonitorInput.QUERIES_FIELD] as List<*>).size
+        )
+        input.queries.forEach {
+            assertTrue(
+                "Template args 'queries' field does not match:",
+                (templateArgs[DocLevelMonitorInput.QUERIES_FIELD] as List<*>).contains(it.asTemplateArg())
+            )
+        }
+    }
+}
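For reference, `DocLevelMonitorInput.toXContent` (added later in this patch) wraps the input in a `doc_level_input` object, so the JSON form of the inputs exercised above has this shape (field values illustrative):

    {
      "doc_level_input": {
        "description": "description",
        "indices": ["test-index"],
        "queries": [
          { "id": "q1", "name": "errors", "query": "test_field:\"us-west-2\"", "tags": ["sigma"] }
        ]
      }
    }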
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/model/FindingTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/model/FindingTests.kt
new file mode 100644
index 000000000..5078beb2d
--- /dev/null
+++ b/alerting/src/test/kotlin/org/opensearch/alerting/model/FindingTests.kt
@@ -0,0 +1,39 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.model
+
+import org.opensearch.alerting.randomFinding
+import org.opensearch.test.OpenSearchTestCase
+
+class FindingTests : OpenSearchTestCase() {
+    fun `test finding asTemplateArgs`() {
+        // GIVEN
+        val finding = randomFinding()
+
+        // WHEN
+        val templateArgs = finding.asTemplateArg()
+
+        // THEN
+        assertEquals("Template args 'id' field does not match:", templateArgs[Finding.FINDING_ID_FIELD], finding.id)
+        assertEquals(
+            "Template args 'relatedDocIds' field does not match:",
+            templateArgs[Finding.RELATED_DOC_IDS_FIELD],
+            finding.relatedDocIds
+        )
+        assertEquals("Template args 'monitorId' field does not match:", templateArgs[Finding.MONITOR_ID_FIELD], finding.monitorId)
+        assertEquals(
+            "Template args 'monitorName' field does not match:",
+            templateArgs[Finding.MONITOR_NAME_FIELD],
+            finding.monitorName
+        )
+        assertEquals("Template args 'queries' field does not match:", templateArgs[Finding.QUERIES_FIELD], finding.docLevelQueries)
+        assertEquals(
+            "Template args 'timestamp' field does not match:",
+            templateArgs[Finding.TIMESTAMP_FIELD],
+            finding.timestamp.toEpochMilli()
+        )
+    }
+}
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/model/WriteableTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/model/WriteableTests.kt
index 64b8c8d62..c0b1d7eb0 100644
--- a/alerting/src/test/kotlin/org/opensearch/alerting/model/WriteableTests.kt
+++ b/alerting/src/test/kotlin/org/opensearch/alerting/model/WriteableTests.kt
@@ -17,6 +17,9 @@ import org.opensearch.alerting.randomActionRunResult
 import org.opensearch.alerting.randomBucketLevelMonitorRunResult
 import org.opensearch.alerting.randomBucketLevelTrigger
 import org.opensearch.alerting.randomBucketLevelTriggerRunResult
+import org.opensearch.alerting.randomDocLevelTrigger
+import org.opensearch.alerting.randomDocumentLevelMonitorRunResult
+import org.opensearch.alerting.randomDocumentLevelTriggerRunResult
 import org.opensearch.alerting.randomEmailAccount
 import org.opensearch.alerting.randomEmailGroup
 import org.opensearch.alerting.randomInputRunResults
@@ -107,6 +110,15 @@ class WriteableTests : OpenSearchTestCase() {
         assertEquals("Round tripping BucketLevelTrigger doesn't work", trigger, newTrigger)
     }
 
+    fun `test doc-level trigger as stream`() {
+        val trigger = randomDocLevelTrigger()
+        val out = BytesStreamOutput()
+        trigger.writeTo(out)
+        val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes)
+        val newTrigger = DocumentLevelTrigger.readFrom(sin)
+        assertEquals("Round tripping DocumentLevelTrigger doesn't work", trigger, newTrigger)
+    }
+
     fun `test actionrunresult as stream`() {
         val actionRunResult = randomActionRunResult()
         val out = BytesStreamOutput()
@@ -134,6 +146,15 @@ class WriteableTests : OpenSearchTestCase() {
         assertEquals("Round tripping ActionRunResult doesn't work", runResult, newRunResult)
     }
 
+    fun `test doc-level triggerrunresult as stream`() {
+        val runResult = randomDocumentLevelTriggerRunResult()
+        val out = BytesStreamOutput()
+        runResult.writeTo(out)
+        val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes)
+        val newRunResult = DocumentLevelTriggerRunResult(sin)
+        assertEquals("Round tripping DocumentLevelTriggerRunResult doesn't work", runResult, newRunResult)
+    }
+
     fun `test inputrunresult as stream`() {
         val runResult = randomInputRunResults()
         val out = BytesStreamOutput()
@@ -161,6 +182,15 @@ class WriteableTests : OpenSearchTestCase() {
         assertEquals("Round tripping MonitorRunResult doesn't work", runResult, newRunResult)
     }
 
+    fun `test doc-level monitorrunresult as stream`() {
+        val runResult = randomDocumentLevelMonitorRunResult()
+        val out = BytesStreamOutput()
+        runResult.writeTo(out)
+        val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes)
+        val newRunResult = MonitorRunResult<DocumentLevelTriggerRunResult>(sin)
+        assertEquals("Round tripping MonitorRunResult doesn't work", runResult, newRunResult)
+    }
+
     fun `test searchinput as stream`() {
         val input = SearchInput(emptyList(), SearchSourceBuilder())
         val out = BytesStreamOutput()
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt
new file mode 100644
index 000000000..b48235330
--- /dev/null
+++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt
@@ -0,0 +1,143 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.resthandler
+
+import org.opensearch.alerting.AlertingRestTestCase
+import org.opensearch.alerting.core.model.DocLevelQuery
+import org.opensearch.test.junit.annotations.TestLogging
+
+@TestLogging("level:DEBUG", reason = "Debug for tests.")
+@Suppress("UNCHECKED_CAST")
+class FindingsRestApiIT : AlertingRestTestCase() {
+
+    fun `test find Finding where doc is not retrieved`() {
+        createFinding(matchingDocIds = listOf("someId"))
+        val response = searchFindings()
+        assertEquals(1, response.totalFindings)
+        assertEquals(1, response.findings[0].documents.size)
+        assertFalse(response.findings[0].documents[0].found)
+    }
+
+    fun `test find Finding where doc is retrieved`() {
+        val testIndex = createTestIndex()
+        val testDoc = """{
+            "message" : "This is an error from IAD region",
+            "test_field" : "us-west-2"
+        }"""
+        indexDoc(testIndex, "someId", testDoc)
+        val testDoc2 = """{
+            "message" : "This is an error2 from IAD region",
+            "test_field" : "us-west-3"
+        }"""
+        indexDoc(testIndex, "someId2", testDoc2)
+
+        val findingWith1 = createFinding(matchingDocIds = listOf("someId"), index = testIndex)
+        val findingWith2 = createFinding(matchingDocIds = listOf("someId", "someId2"), index = testIndex)
+        val response = searchFindings()
+        assertEquals(2, response.totalFindings)
+        for (findingWithDoc in response.findings) {
+            if (findingWithDoc.finding.id == findingWith1) {
+                assertEquals(1, findingWithDoc.documents.size)
+                assertTrue(findingWithDoc.documents[0].found)
+                assertEquals(testDoc, findingWithDoc.documents[0].document)
+            } else if (findingWithDoc.finding.id == findingWith2) {
+                assertEquals(2, findingWithDoc.documents.size)
+                assertTrue(findingWithDoc.documents[0].found)
+                assertTrue(findingWithDoc.documents[1].found)
+                assertEquals(testDoc, findingWithDoc.documents[0].document)
+                assertEquals(testDoc2, findingWithDoc.documents[1].document)
+            } else {
+                fail("Found a finding that should not have been retrieved")
+            }
+        }
+    }
+
+    fun `test find Finding for specific finding by id`() {
+        val testIndex = createTestIndex()
+        val testDoc = """{
+            "message" : "This is an error from IAD region",
+            "test_field" : "us-west-2"
+        }"""
+        indexDoc(testIndex, "someId", testDoc)
+        val testDoc2 = """{
+            "message" : "This is an error2 from IAD region",
+            "test_field" : "us-west-3"
+        }"""
+        indexDoc(testIndex, "someId2", testDoc2)
+
+        createFinding(matchingDocIds = listOf("someId"), index = testIndex)
+        val findingId = createFinding(matchingDocIds = listOf("someId", "someId2"), index = testIndex)
+        val response = searchFindings(mapOf(Pair("findingId", findingId)))
+        assertEquals(1, response.totalFindings)
+        assertEquals(findingId, response.findings[0].finding.id)
+        assertEquals(2, response.findings[0].documents.size)
+        assertTrue(response.findings[0].documents[0].found)
+        assertTrue(response.findings[0].documents[1].found)
+        assertEquals(testDoc, response.findings[0].documents[0].document)
+        assertEquals(testDoc2, response.findings[0].documents[1].document)
+    }
+
+    fun `test find Finding by tag`() {
+        val testIndex = createTestIndex()
+        val testDoc = """{
+            "message" : "This is an error from IAD region",
+            "test_field" : "us-west-2"
+        }"""
+        indexDoc(testIndex, "someId", testDoc)
+        val testDoc2 = """{
+            "message" : "This is an error2 from IAD region",
+            "test_field" : "us-west-3"
+        }"""
+        indexDoc(testIndex, "someId2", testDoc2)
+
+        val docLevelQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "realQuery", tags = listOf("sigma"))
+        createFinding(matchingDocIds = listOf("someId"), index = testIndex)
+        val findingId = createFinding(
+            matchingDocIds = listOf("someId", "someId2"),
+            index = testIndex,
+            docLevelQueries = listOf(docLevelQuery)
+        )
+        val response = searchFindings(mapOf(Pair("searchString", "sigma")))
+        assertEquals(1, response.totalFindings)
+        assertEquals(findingId, response.findings[0].finding.id)
+        assertEquals(2, response.findings[0].documents.size)
+        assertTrue(response.findings[0].documents[0].found)
+        assertTrue(response.findings[0].documents[1].found)
+        assertEquals(testDoc, response.findings[0].documents[0].document)
+        assertEquals(testDoc2, response.findings[0].documents[1].document)
+    }
+
+    fun `test find Finding by name`() {
+        val testIndex = createTestIndex()
+        val testDoc = """{
+            "message" : "This is an error from IAD region",
+            "test_field" : "us-west-2"
+        }"""
+        indexDoc(testIndex, "someId", testDoc)
+        val testDoc2 = """{
+            "message" : "This is an error2 from IAD region",
+            "test_field" : "us-west-3"
+        }"""
+        indexDoc(testIndex, "someId2", testDoc2)
+
+        val docLevelQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "realQuery", tags = listOf("sigma"))
+        createFinding(matchingDocIds = listOf("someId"), index = testIndex)
+        val findingId = createFinding(
+            matchingDocIds = listOf("someId", "someId2"),
+            index = testIndex,
+            docLevelQueries = listOf(docLevelQuery)
+        )
+        val response = searchFindings(mapOf(Pair("searchString", "realQuery")))
+        assertEquals(1, response.totalFindings)
+        assertEquals(findingId, response.findings[0].finding.id)
+        assertEquals(2, response.findings[0].documents.size)
+        assertTrue(response.findings[0].documents[0].found)
+        assertTrue(response.findings[0].documents[1].found)
+        assertEquals(testDoc, response.findings[0].documents[0].document)
+        assertEquals(testDoc2, response.findings[0].documents[1].document)
+    }
+}
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt
index bb92c15bc..6e00302c2 100644
--- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt
+++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt
@@ -9,6 +9,7 @@ import org.apache.http.entity.ContentType
 import org.apache.http.message.BasicHeader
 import org.apache.http.nio.entity.NStringEntity
 import org.opensearch.alerting.ALERTING_BASE_URI
+import org.opensearch.alerting.ALWAYS_RUN
 import org.opensearch.alerting.ANOMALY_DETECTOR_INDEX
 import org.opensearch.alerting.AlertingRestTestCase
 import org.opensearch.alerting.DESTINATION_BASE_URI
@@ -16,11 +17,14 @@ import org.opensearch.alerting.LEGACY_OPENDISTRO_ALERTING_BASE_URI
 import org.opensearch.alerting.alerts.AlertIndices
 import org.opensearch.alerting.anomalyDetectorIndexMapping
 import org.opensearch.alerting.core.model.CronSchedule
+import org.opensearch.alerting.core.model.DocLevelMonitorInput
+import org.opensearch.alerting.core.model.DocLevelQuery
 import org.opensearch.alerting.core.model.ScheduledJob
 import org.opensearch.alerting.core.model.SearchInput
 import org.opensearch.alerting.core.settings.ScheduledJobSettings
 import org.opensearch.alerting.makeRequest
 import org.opensearch.alerting.model.Alert
+import org.opensearch.alerting.model.DocumentLevelTrigger
 import org.opensearch.alerting.model.Monitor
 import org.opensearch.alerting.model.QueryLevelTrigger
 import org.opensearch.alerting.model.destination.Chime
@@ -30,6 +34,9 @@ import org.opensearch.alerting.randomAction
 import org.opensearch.alerting.randomAlert
 import org.opensearch.alerting.randomAnomalyDetector
 import org.opensearch.alerting.randomAnomalyDetectorWithUser
+import org.opensearch.alerting.randomBucketLevelTrigger
+import org.opensearch.alerting.randomDocumentLevelMonitor
+import org.opensearch.alerting.randomDocumentLevelTrigger
 import org.opensearch.alerting.randomQueryLevelMonitor
 import org.opensearch.alerting.randomQueryLevelTrigger
 import org.opensearch.alerting.randomThrottle
@@ -791,7 +798,7 @@ class MonitorRestApiIT : AlertingRestTestCase() {
         val alerts = searchAlerts(monitor)
         assertEquals("Active alert was not deleted", 0, alerts.size)
 
-        val historyAlerts = searchAlerts(monitor, AlertIndices.HISTORY_WRITE_INDEX)
+        val historyAlerts = searchAlerts(monitor, AlertIndices.ALERT_HISTORY_WRITE_INDEX)
         assertEquals("Alert was not moved to history", 1, historyAlerts.size)
         assertEquals(
             "Alert data incorrect",
@@ -820,7 +827,7 @@ class MonitorRestApiIT : AlertingRestTestCase() {
         val alerts = searchAlerts(monitor)
         assertEquals("Active alert was not deleted", 0, alerts.size)
 
-        val historyAlerts = searchAlerts(monitor, AlertIndices.HISTORY_WRITE_INDEX)
+        val historyAlerts = searchAlerts(monitor, AlertIndices.ALERT_HISTORY_WRITE_INDEX)
         assertEquals("Alert was not moved to history", 1, historyAlerts.size)
         assertEquals(
             "Alert data incorrect",
@@ -853,7 +860,7 @@ class MonitorRestApiIT : AlertingRestTestCase() {
         assertEquals("One alert should be in active index", 1, alerts.size)
         assertEquals("Wrong alert in active index", alertKeep.toJsonString(), alerts.single().toJsonString())
 
-        val historyAlerts = searchAlerts(monitor, AlertIndices.HISTORY_WRITE_INDEX)
+        val historyAlerts = searchAlerts(monitor, AlertIndices.ALERT_HISTORY_WRITE_INDEX)
         // Only alertDelete should have been moved to history index
         assertEquals("One alert should be in history index", 1, historyAlerts.size)
         assertEquals(
@@ -1106,4 +1113,118 @@ class MonitorRestApiIT : AlertingRestTestCase() {
             alertingStatsResponse[statsResponseOpenSearchSweeperEnabledField]
         )
     }
+
+    @Throws(Exception::class)
+    fun `test creating a document monitor`() {
+        val testIndex = createTestIndex()
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3")
+        val docReturningInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
+
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docReturningInput), triggers = listOf(trigger)))
+
+        val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity())
+
+        assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus())
+        val responseBody = createResponse.asMap()
+        val createdId = responseBody["_id"] as String
+        val createdVersion = responseBody["_version"] as Int
+        assertNotEquals("response is missing Id", Monitor.NO_ID, createdId)
+        assertTrue("incorrect version", createdVersion > 0)
+        val actualLocation = createResponse.getHeader("Location")
+        assertEquals("Incorrect Location header", "$ALERTING_BASE_URI/$createdId", actualLocation)
+    }
+
+    @Throws(Exception::class)
+    fun `test getting a document level monitor`() {
+        val testIndex = createTestIndex()
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3")
+        val docReturningInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
+
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor = createMonitor(
+            randomDocumentLevelMonitor(inputs = listOf(docReturningInput), triggers = listOf(trigger), user = null)
+        )
+
+        val storedMonitor = getMonitor(monitor.id)
+
+        assertEquals("Indexed and retrieved monitor differ", monitor, storedMonitor)
+    }
+
+    @Throws(Exception::class)
+    fun `test updating conditions for a doc-level monitor`() {
+        val testIndex = createTestIndex()
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3")
+        val docReturningInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
+
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docReturningInput), triggers = listOf(trigger)))
+
+        val updatedTriggers = listOf(
+            DocumentLevelTrigger(
+                name = "foo",
+                severity = "1",
+                condition = Script("return true"),
+                actions = emptyList()
+            )
+        )
+        val updateResponse = client().makeRequest(
+            "PUT", monitor.relativeUrl(),
+            emptyMap(), monitor.copy(triggers = updatedTriggers).toHttpEntity()
+        )
+
+        assertEquals("Update monitor failed", RestStatus.OK, updateResponse.restStatus())
+        val responseBody = updateResponse.asMap()
+        assertEquals("Updated monitor id doesn't match", monitor.id, responseBody["_id"] as String)
+        assertEquals("Version not incremented", (monitor.version + 1).toInt(), responseBody["_version"] as Int)
+
+        val updatedMonitor = getMonitor(monitor.id)
+        assertEquals("Monitor trigger not updated", updatedTriggers, updatedMonitor.triggers)
+    }
+
+    @Throws(Exception::class)
+    fun `test deleting a document level monitor`() {
+        val testIndex = createTestIndex()
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3")
+        val docReturningInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
+
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docReturningInput), triggers = listOf(trigger)))
+
+        val deleteResponse = client().makeRequest("DELETE", monitor.relativeUrl())
+        assertEquals("Delete failed", RestStatus.OK, deleteResponse.restStatus())
+
+        val getResponse = client().makeRequest("HEAD", monitor.relativeUrl())
+        assertEquals("Deleted monitor still exists", RestStatus.NOT_FOUND, getResponse.restStatus())
+    }
+
+    fun `test creating a document monitor with error trigger`() {
+        val trigger = randomQueryLevelTrigger()
+        try {
+            val monitor = randomDocumentLevelMonitor(triggers = listOf(trigger))
+            client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity())
+            fail("Monitor with illegal trigger should be rejected.")
+        } catch (e: IllegalArgumentException) {
+            assertEquals(
+                "Unexpected exception message for a document monitor with an incompatible trigger",
+                "Incompatible trigger [${trigger.id}] for monitor type [${Monitor.MonitorType.DOC_LEVEL_MONITOR}]",
+                e.message
+            )
+        }
+    }
+
+    fun `test creating a query monitor with error trigger`() {
+        val trigger = randomBucketLevelTrigger()
+        try {
+            val monitor = randomQueryLevelMonitor(triggers = listOf(trigger))
+            client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity())
+            fail("Monitor with illegal trigger should be rejected.")
+        } catch (e: IllegalArgumentException) {
+            assertEquals(
+                "Unexpected exception message for a query monitor with an incompatible trigger",
+                "Incompatible trigger [${trigger.id}] for monitor type [${Monitor.MonitorType.QUERY_LEVEL_MONITOR}]",
+                e.message
+            )
+        }
+    }
 }
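For orientation, the monitor bodies these document-level tests POST to `ALERTING_BASE_URI` combine the usual monitor fields with the new input and trigger types. A sketch of such a request body, assembled from the `DocLevelMonitorInput`/`DocLevelQuery` serialization added later in this patch (the `DocumentLevelTrigger` JSON shape is outside this excerpt, so its field names here are assumptions):

    {
      "type": "monitor",
      "monitor_type": "doc_level_monitor",
      "name": "test-doc-monitor",
      "enabled": true,
      "schedule": { "period": { "interval": 1, "unit": "MINUTES" } },
      "inputs": [{
        "doc_level_input": {
          "description": "description",
          "indices": ["test-index"],
          "queries": [{ "id": "3", "name": "3", "query": "test_field:\"us-west-2\"", "tags": [] }]
        }
      }],
      "triggers": [{
        "document_level_trigger": {
          "name": "foo",
          "severity": "1",
          "condition": { "script": { "source": "return true", "lang": "painless" } },
          "actions": []
        }
      }]
    }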
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt
index 808413044..4e5dd52dc 100644
--- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt
+++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt
@@ -1,3 +1,8 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
 package org.opensearch.alerting.resthandler
 
 import org.apache.http.HttpHeaders
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt
index 53dd248b2..709642d91 100644
--- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt
+++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt
@@ -1,3 +1,8 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
 package org.opensearch.alerting.resthandler
 
 import org.apache.http.HttpHeaders
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/settings/AlertingSettingsTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/settings/AlertingSettingsTests.kt
index 713bd1345..6ee8c4997 100644
--- a/alerting/src/test/kotlin/org/opensearch/alerting/settings/AlertingSettingsTests.kt
+++ b/alerting/src/test/kotlin/org/opensearch/alerting/settings/AlertingSettingsTests.kt
@@ -1,3 +1,8 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
 package org.opensearch.alerting.settings
 
 import org.junit.Before
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/settings/DestinationSettingsTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/settings/DestinationSettingsTests.kt
index 34815f6d5..2e96c1fad 100644
--- a/alerting/src/test/kotlin/org/opensearch/alerting/settings/DestinationSettingsTests.kt
+++ b/alerting/src/test/kotlin/org/opensearch/alerting/settings/DestinationSettingsTests.kt
@@ -1,3 +1,8 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
 package org.opensearch.alerting.settings
 
 import org.junit.Before
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/triggeraction/TriggerExpressionParserTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/triggeraction/TriggerExpressionParserTests.kt
new file mode 100644
index 000000000..d3f4613fe
--- /dev/null
+++ b/alerting/src/test/kotlin/org/opensearch/alerting/triggeraction/TriggerExpressionParserTests.kt
@@ -0,0 +1,76 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.triggeraction
+
+import org.junit.Assert
+import org.opensearch.alerting.triggercondition.parsers.TriggerExpressionParser
+import org.opensearch.test.OpenSearchTestCase
+
+class TriggerExpressionParserTests : OpenSearchTestCase() {
+
+    fun `test trigger expression postfix parsing simple AND`() {
+        val eqString = "(query[name=sigma-123] && query[name=sigma-456])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] && ", equation.toString())
+    }
+
+    fun `test trigger expression postfix parsing multiple AND`() {
+        val eqString = "(query[name=sigma-123] && query[name=sigma-456]) && query[name=sigma-789]"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] && query[name=sigma-789] && ", equation.toString())
+    }
+
+    fun `test trigger expression postfix parsing multiple AND with parenthesis`() {
+        val eqString = "(query[name=sigma-123] && query[name=sigma-456]) && (query[name=sigma-789] && query[name=id-2aw34])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals(
+            "query[name=sigma-123] query[name=sigma-456] && query[name=sigma-789] query[name=id-2aw34] && && ",
+            equation.toString()
+        )
+    }
+
+    fun `test trigger expression postfix parsing simple OR`() {
+        val eqString = "(query[name=sigma-123] || query[name=sigma-456])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] || ", equation.toString())
+    }
+
+    fun `test trigger expression postfix parsing multiple OR`() {
+        val eqString = "(query[name=sigma-123] || query[name=sigma-456]) || query[name=sigma-789]"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] || query[name=sigma-789] || ", equation.toString())
+    }
+
+    fun `test trigger expression postfix parsing multiple OR with parenthesis`() {
+        val eqString = "(query[name=sigma-123] || query[name=sigma-456]) || (query[name=sigma-789] || query[name=id-2aw34])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals(
+            "query[name=sigma-123] query[name=sigma-456] || query[name=sigma-789] query[name=id-2aw34] || || ",
+            equation.toString()
+        )
+    }
+
+    fun `test trigger expression postfix parsing simple NOT`() {
+        val eqString = "(query[name=sigma-123] || !query[name=sigma-456])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] ! || ", equation.toString())
+    }
+
+    fun `test trigger expression postfix parsing multiple NOT`() {
+        val eqString = "(query[name=sigma-123] && !query[tag=tag-456]) && !(query[name=sigma-789])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals("query[name=sigma-123] query[tag=tag-456] ! && query[name=sigma-789] ! && ", equation.toString())
+    }
+
+    fun `test trigger expression postfix parsing multiple operators with parenthesis`() {
+        val eqString = "(query[name=sigma-123] && query[tag=sev1]) || !(!query[name=sigma-789] || query[name=id-2aw34])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals(
+            "query[name=sigma-123] query[tag=sev1] && query[name=sigma-789] ! query[name=id-2aw34] || ! || ",
+            equation.toString()
+        )
+    }
+}
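Read together with the resolver tests that follow, the flow is: parse the infix condition once into the postfix form asserted above, then evaluate it against the per-query doc-id sets produced by the percolate step. A minimal sketch using the same APIs these tests exercise (query names and doc ids illustrative):

    val equation = TriggerExpressionParser("(query[name=sigma-123] && !query[tag=sev2])").parse()
    val matchedDocIds = equation.evaluate(
        mapOf(
            DocLevelQuery("", "sigma-123", "", emptyList()) to setOf("1", "2"),
            DocLevelQuery("", "", "", listOf("sev2")) to setOf("2", "3")
        )
    )
    // AND intersects, OR unions, and NOT complements against all doc ids in the map,
    // so matchedDocIds == setOf("1") here.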
|| ", + equation.toString() + ) + } +} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt new file mode 100644 index 000000000..134073485 --- /dev/null +++ b/alerting/src/test/kotlin/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt @@ -0,0 +1,124 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggeraction + +import org.junit.Assert +import org.opensearch.alerting.core.model.DocLevelQuery +import org.opensearch.alerting.triggercondition.parsers.TriggerExpressionParser +import org.opensearch.test.OpenSearchTestCase + +class TriggerExpressionResolverTests : OpenSearchTestCase() { + + fun `test trigger expression evaluation simple AND`() { + val eqString = "(query[name=sigma-123] && query[name=sigma-456])" + val equation = TriggerExpressionParser(eqString).parse() + val queryToDocIds = mutableMapOf>() + queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("1", "2", "3") + queryToDocIds[DocLevelQuery("", "sigma-456", "", emptyList())] = mutableSetOf("1", "2", "3") + Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] && ", equation.toString()) + Assert.assertEquals(mutableSetOf("1", "2", "3"), equation.evaluate(queryToDocIds)) + } + + fun `test trigger expression evaluation simple AND scenario2`() { + val eqString = "(query[name=sigma-123] && query[id=id1456])" + val equation = TriggerExpressionParser(eqString).parse() + val queryToDocIds = mutableMapOf>() + queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("6", "3", "7") + queryToDocIds[DocLevelQuery("id1456", "", "", emptyList())] = mutableSetOf("1", "2", "3") + Assert.assertEquals("query[name=sigma-123] query[id=id1456] && ", equation.toString()) + Assert.assertEquals(mutableSetOf("3"), equation.evaluate(queryToDocIds)) + } + + fun `test trigger expression evaluation simple AND scenario3`() { + val eqString = "(query[name=sigma-123] && query[tag=sev2])" + val equation = TriggerExpressionParser(eqString).parse() + val queryToDocIds = mutableMapOf>() + queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("6", "8", "7") + queryToDocIds[DocLevelQuery("", "", "", mutableListOf("tag=sev2"))] = mutableSetOf("1", "2", "3") + Assert.assertEquals("query[name=sigma-123] query[tag=sev2] && ", equation.toString()) + Assert.assertEquals(emptySet(), equation.evaluate(queryToDocIds)) + } + + fun `test trigger expression evaluation simple OR`() { + val eqString = "(query[name=sigma-123] || query[name=sigma-456])" + val equation = TriggerExpressionParser(eqString).parse() + val queryToDocIds = mutableMapOf>() + queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("1", "2", "3") + queryToDocIds[DocLevelQuery("", "sigma-456", "", emptyList())] = mutableSetOf("1", "2", "3") + Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] || ", equation.toString()) + Assert.assertEquals(mutableSetOf("1", "2", "3"), equation.evaluate(queryToDocIds)) + } + + fun `test trigger expression evaluation simple OR scenario2`() { + val eqString = "(query[name=sigma-123] || query[id=id1456])" + val equation = TriggerExpressionParser(eqString).parse() + val queryToDocIds = mutableMapOf>() + queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("6", "3", "7") + 
queryToDocIds[DocLevelQuery("id1456", "", "", emptyList())] = mutableSetOf("1", "2", "3") + Assert.assertEquals("query[name=sigma-123] query[id=id1456] || ", equation.toString()) + Assert.assertEquals(mutableSetOf("6", "3", "7", "1", "2", "3"), equation.evaluate(queryToDocIds)) + } + + fun `test trigger expression evaluation simple OR scenario3`() { + val eqString = "(query[name=sigma-123] || query[tag=sev2])" + val equation = TriggerExpressionParser(eqString).parse() + val queryToDocIds = mutableMapOf>() + queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("6", "8", "7") + queryToDocIds[DocLevelQuery("", "", "", mutableListOf("tag=sev2"))] = emptySet() + Assert.assertEquals("query[name=sigma-123] query[tag=sev2] || ", equation.toString()) + Assert.assertEquals(mutableSetOf("6", "8", "7"), equation.evaluate(queryToDocIds)) + } + + fun `test trigger expression evaluation simple NOT`() { + val eqString = "!(query[name=sigma-456])" + val equation = TriggerExpressionParser(eqString).parse() + val queryToDocIds = mutableMapOf>() + queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("1", "2", "3") + queryToDocIds[DocLevelQuery("", "sigma-456", "", emptyList())] = mutableSetOf("4", "5", "6") + Assert.assertEquals("query[name=sigma-456] ! ", equation.toString()) + Assert.assertEquals(mutableSetOf("1", "2", "3"), equation.evaluate(queryToDocIds)) + } + + fun `test trigger expression evaluation AND with NOT`() { + val eqString = "(query[name=sigma-123] && !query[name=sigma-456])" + val equation = TriggerExpressionParser(eqString).parse() + val queryToDocIds = mutableMapOf>() + queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("1", "2", "3", "11") + queryToDocIds[DocLevelQuery("", "sigma-456", "", emptyList())] = mutableSetOf("3", "4", "5") + queryToDocIds[DocLevelQuery("id_new", "", "", emptyList())] = mutableSetOf("11", "12", "13") + Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] ! && ", equation.toString()) + Assert.assertEquals(mutableSetOf("1", "2", "11"), equation.evaluate(queryToDocIds)) + } + + fun `test trigger expression evaluation OR with NOT`() { + val eqString = "(query[name=sigma-123] || !query[id=id1456])" + val equation = TriggerExpressionParser(eqString).parse() + val queryToDocIds = mutableMapOf>() + queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("6", "3", "7") + queryToDocIds[DocLevelQuery("id1456", "", "", emptyList())] = mutableSetOf("11", "12", "15") + queryToDocIds[DocLevelQuery("id_new", "", "", emptyList())] = mutableSetOf("11", "12", "13") + Assert.assertEquals("query[name=sigma-123] query[id=id1456] ! 
|| ", equation.toString()) + Assert.assertEquals(mutableSetOf("6", "3", "7", "13"), equation.evaluate(queryToDocIds)) + } + + fun `test trigger expression evaluation with multiple operators with parenthesis`() { + val eqString = "(query[name=sigma-123] && query[tag=sev1]) || !(!query[name=sigma-789] || query[id=id-2aw34])" + val equation = TriggerExpressionParser(eqString).parse() + + val queryToDocIds = mutableMapOf>() + queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("1", "2", "3") + queryToDocIds[DocLevelQuery("id_random1", "", "", mutableListOf("sev1"))] = mutableSetOf("2", "3", "4") + queryToDocIds[DocLevelQuery("", "sigma-789", "", emptyList())] = mutableSetOf("11", "12", "13") + queryToDocIds[DocLevelQuery("id-2aw34", "", "", emptyList())] = mutableSetOf("13", "14", "15") + + Assert.assertEquals( + "query[name=sigma-123] query[tag=sev1] && query[name=sigma-789] ! query[id=id-2aw34] || ! || ", + equation.toString() + ) + + Assert.assertEquals(mutableSetOf("2", "3", "11", "12"), equation.evaluate(queryToDocIds)) + } +} diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/model/ClusterMetricsInput.kt b/core/src/main/kotlin/org/opensearch/alerting/core/model/ClusterMetricsInput.kt index 0ca4eeadb..abdb32337 100644 --- a/core/src/main/kotlin/org/opensearch/alerting/core/model/ClusterMetricsInput.kt +++ b/core/src/main/kotlin/org/opensearch/alerting/core/model/ClusterMetricsInput.kt @@ -9,6 +9,7 @@ import org.apache.commons.validator.routines.UrlValidator import org.apache.http.client.utils.URIBuilder import org.opensearch.common.CheckedFunction import org.opensearch.common.ParseField +import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.ToXContent @@ -63,6 +64,13 @@ data class ClusterMetricsInput( this.parseEmptyFields() } + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + sin.readString(), // path + sin.readString(), // path params + sin.readString() // url + ) + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { return builder.startObject() .startObject(URI_FIELD) @@ -96,13 +104,13 @@ data class ClusterMetricsInput( const val URL_FIELD = "url" const val URI_FIELD = "uri" - val XCONTENT_REGISTRY = NamedXContentRegistry.Entry(Input::class.java, ParseField("uri"), CheckedFunction { parseInner(it) }) + val XCONTENT_REGISTRY = NamedXContentRegistry.Entry(Input::class.java, ParseField(URI_FIELD), CheckedFunction { parseInner(it) }) /** * This parse function uses [XContentParser] to parse JSON input and store corresponding fields to create a [ClusterMetricsInput] object */ @JvmStatic @Throws(IOException::class) - private fun parseInner(xcp: XContentParser): ClusterMetricsInput { + fun parseInner(xcp: XContentParser): ClusterMetricsInput { var path = "" var pathParams = "" var url = "" diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/model/DocLevelMonitorInput.kt b/core/src/main/kotlin/org/opensearch/alerting/core/model/DocLevelMonitorInput.kt new file mode 100644 index 000000000..fbeba6007 --- /dev/null +++ b/core/src/main/kotlin/org/opensearch/alerting/core/model/DocLevelMonitorInput.kt @@ -0,0 +1,111 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.core.model + +import org.opensearch.common.CheckedFunction +import org.opensearch.common.ParseField 
+import org.opensearch.common.io.stream.StreamInput
+import org.opensearch.common.io.stream.StreamOutput
+import org.opensearch.common.xcontent.NamedXContentRegistry
+import org.opensearch.common.xcontent.ToXContent
+import org.opensearch.common.xcontent.XContentBuilder
+import org.opensearch.common.xcontent.XContentParser
+import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken
+import java.io.IOException
+
+data class DocLevelMonitorInput(
+    val description: String = NO_DESCRIPTION,
+    val indices: List<String>,
+    val queries: List<DocLevelQuery>
+) : Input {
+
+    @Throws(IOException::class)
+    constructor(sin: StreamInput) : this(
+        sin.readString(), // description
+        sin.readStringList(), // indices
+        sin.readList(::DocLevelQuery) // docLevelQueries
+    )
+
+    fun asTemplateArg(): Map<String, Any> {
+        return mapOf(
+            DESCRIPTION_FIELD to description,
+            INDICES_FIELD to indices,
+            QUERIES_FIELD to queries.map { it.asTemplateArg() }
+        )
+    }
+
+    override fun name(): String {
+        return DOC_LEVEL_INPUT_FIELD
+    }
+
+    @Throws(IOException::class)
+    override fun writeTo(out: StreamOutput) {
+        out.writeString(description)
+        out.writeStringCollection(indices)
+        out.writeCollection(queries)
+    }
+
+    override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
+        builder.startObject()
+            .startObject(DOC_LEVEL_INPUT_FIELD)
+            .field(DESCRIPTION_FIELD, description)
+            .field(INDICES_FIELD, indices.toTypedArray())
+            .field(QUERIES_FIELD, queries.toTypedArray())
+            .endObject()
+            .endObject()
+        return builder
+    }
+
+    companion object {
+        const val DESCRIPTION_FIELD = "description"
+        const val INDICES_FIELD = "indices"
+        const val DOC_LEVEL_INPUT_FIELD = "doc_level_input"
+        const val QUERIES_FIELD = "queries"
+
+        const val NO_DESCRIPTION = ""
+
+        val XCONTENT_REGISTRY = NamedXContentRegistry.Entry(
+            Input::class.java,
+            ParseField(DOC_LEVEL_INPUT_FIELD), CheckedFunction { parse(it) }
+        )
+
+        @JvmStatic @Throws(IOException::class)
+        fun parse(xcp: XContentParser): DocLevelMonitorInput {
+            var description: String = NO_DESCRIPTION
+            val indices: MutableList<String> = mutableListOf()
+            val docLevelQueries: MutableList<DocLevelQuery> = mutableListOf()
+
+            ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp)
+            while (xcp.nextToken() != XContentParser.Token.END_OBJECT) {
+                val fieldName = xcp.currentName()
+                xcp.nextToken()
+
+                when (fieldName) {
+                    DESCRIPTION_FIELD -> description = xcp.text()
+                    INDICES_FIELD -> {
+                        ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp)
+                        while (xcp.nextToken() != XContentParser.Token.END_ARRAY) {
+                            indices.add(xcp.text())
+                        }
+                    }
+                    QUERIES_FIELD -> {
+                        ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp)
+                        while (xcp.nextToken() != XContentParser.Token.END_ARRAY) {
+                            docLevelQueries.add(DocLevelQuery.parse(xcp))
+                        }
+                    }
+                }
+            }
+
+            return DocLevelMonitorInput(description = description, indices = indices, queries = docLevelQueries)
+        }
+
+        @JvmStatic @Throws(IOException::class)
+        fun readFrom(sin: StreamInput): DocLevelMonitorInput {
+            return DocLevelMonitorInput(sin)
+        }
+    }
+}
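As a quick usage sketch (names and values illustrative): a document-level input is just a description plus the indices to watch and the queries to percolate, and `asTemplateArg()` is what exposes those fields to action message templates:

    val input = DocLevelMonitorInput(
        description = "error docs",
        indices = listOf("logs-test"),
        queries = listOf(DocLevelQuery(id = "q1", name = "errors", query = "test_field:\"us-west-2\"", tags = listOf("sigma")))
    )
    val templateArgs = input.asTemplateArg() // keys: "description", "indices", "queries"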
diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/model/DocLevelQuery.kt b/core/src/main/kotlin/org/opensearch/alerting/core/model/DocLevelQuery.kt
new file mode 100644
index 000000000..2a4d32bca
--- /dev/null
+++ b/core/src/main/kotlin/org/opensearch/alerting/core/model/DocLevelQuery.kt
@@ -0,0 +1,106 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.core.model
+
+import org.opensearch.common.io.stream.StreamInput
+import org.opensearch.common.io.stream.StreamOutput
+import org.opensearch.common.io.stream.Writeable
+import org.opensearch.common.xcontent.ToXContent
+import org.opensearch.common.xcontent.ToXContentObject
+import org.opensearch.common.xcontent.XContentBuilder
+import org.opensearch.common.xcontent.XContentParser
+import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken
+import java.io.IOException
+
+data class DocLevelQuery(
+    val id: String = NO_ID,
+    val name: String,
+    val query: String,
+    val tags: List<String> = mutableListOf()
+) : Writeable, ToXContentObject {
+
+    @Throws(IOException::class)
+    constructor(sin: StreamInput) : this(
+        sin.readString(), // id
+        sin.readString(), // name
+        sin.readString(), // query
+        sin.readStringList() // tags
+    )
+
+    fun asTemplateArg(): Map<String, Any> {
+        return mapOf(
+            QUERY_ID_FIELD to id,
+            NAME_FIELD to name,
+            QUERY_FIELD to query,
+            TAGS_FIELD to tags
+        )
+    }
+
+    @Throws(IOException::class)
+    override fun writeTo(out: StreamOutput) {
+        out.writeString(id)
+        out.writeString(name)
+        out.writeString(query)
+        out.writeStringCollection(tags)
+    }
+
+    override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
+        builder.startObject()
+            .field(QUERY_ID_FIELD, id)
+            .field(NAME_FIELD, name)
+            .field(QUERY_FIELD, query)
+            .field(TAGS_FIELD, tags.toTypedArray())
+            .endObject()
+        return builder
+    }
+
+    companion object {
+        const val QUERY_ID_FIELD = "id"
+        const val NAME_FIELD = "name"
+        const val QUERY_FIELD = "query"
+        const val TAGS_FIELD = "tags"
+
+        const val NO_ID = ""
+
+        @JvmStatic @Throws(IOException::class)
+        fun parse(xcp: XContentParser): DocLevelQuery {
+            var id: String = NO_ID
+            lateinit var query: String
+            lateinit var name: String
+            val tags: MutableList<String> = mutableListOf()
+
+            ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp)
+            while (xcp.nextToken() != XContentParser.Token.END_OBJECT) {
+                val fieldName = xcp.currentName()
+                xcp.nextToken()
+
+                when (fieldName) {
+                    QUERY_ID_FIELD -> id = xcp.text()
+                    NAME_FIELD -> name = xcp.text()
+                    QUERY_FIELD -> query = xcp.text()
+                    TAGS_FIELD -> {
+                        ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp)
+                        while (xcp.nextToken() != XContentParser.Token.END_ARRAY) {
+                            tags.add(xcp.text())
+                        }
+                    }
+                }
+            }
+
+            return DocLevelQuery(
+                id = id,
+                name = name,
+                query = query,
+                tags = tags
+            )
+        }
+
+        @JvmStatic @Throws(IOException::class)
+        fun readFrom(sin: StreamInput): DocLevelQuery {
+            return DocLevelQuery(sin)
+        }
+    }
+}
diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/model/Input.kt b/core/src/main/kotlin/org/opensearch/alerting/core/model/Input.kt
index f7700e05f..06d351fb8 100644
--- a/core/src/main/kotlin/org/opensearch/alerting/core/model/Input.kt
+++ b/core/src/main/kotlin/org/opensearch/alerting/core/model/Input.kt
@@ -5,6 +5,10 @@
 package org.opensearch.alerting.core.model
 
+import org.opensearch.alerting.core.model.ClusterMetricsInput.Companion.URI_FIELD
+import org.opensearch.alerting.core.model.DocLevelMonitorInput.Companion.DOC_LEVEL_INPUT_FIELD
+import org.opensearch.alerting.core.model.SearchInput.Companion.SEARCH_FIELD
+import org.opensearch.common.io.stream.StreamInput
 import org.opensearch.common.io.stream.Writeable
 import org.opensearch.common.xcontent.ToXContentObject
 import org.opensearch.common.xcontent.XContentParser
@@ -13,6 +17,17 @@ import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken
 import java.io.IOException
 
 interface Input : Writeable, ToXContentObject {
+
+    enum class Type(val value: String) {
+        DOCUMENT_LEVEL_INPUT(DOC_LEVEL_INPUT_FIELD),
+        CLUSTER_METRICS_INPUT(URI_FIELD),
+        SEARCH_INPUT(SEARCH_FIELD);
+
+        override fun toString(): String {
+            return value
+        }
+    }
+
     companion object {
 
         @Throws(IOException::class)
@@ -20,10 +35,29 @@ interface Input : Writeable, ToXContentObject {
             ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp)
             ensureExpectedToken(Token.FIELD_NAME, xcp.nextToken(), xcp)
             ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp)
-            val input = xcp.namedObject(Input::class.java, xcp.currentName(), null)
+            val input = if (xcp.currentName() == Type.SEARCH_INPUT.value) {
+                SearchInput.parseInner(xcp)
+            } else if (xcp.currentName() == Type.CLUSTER_METRICS_INPUT.value) {
+                ClusterMetricsInput.parseInner(xcp)
+            } else {
+                DocLevelMonitorInput.parse(xcp)
+            }
             ensureExpectedToken(Token.END_OBJECT, xcp.nextToken(), xcp)
             return input
         }
+
+        @JvmStatic
+        @Throws(IOException::class)
+        fun readFrom(sin: StreamInput): Input {
+            return when (val type = sin.readEnum(Input.Type::class.java)) {
+                Type.DOCUMENT_LEVEL_INPUT -> DocLevelMonitorInput(sin)
+                Type.CLUSTER_METRICS_INPUT -> ClusterMetricsInput(sin)
+                Type.SEARCH_INPUT -> SearchInput(sin)
+                // This shouldn't be reachable but ensuring exhaustiveness as Kotlin warns
+                // enum can be null in Java
+                else -> throw IllegalStateException("Unexpected input [$type] when reading Input")
+            }
+        }
     }
 
     fun name(): String
diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/model/ScheduledJob.kt b/core/src/main/kotlin/org/opensearch/alerting/core/model/ScheduledJob.kt
index 6b132ced6..95e48d7e5 100644
--- a/core/src/main/kotlin/org/opensearch/alerting/core/model/ScheduledJob.kt
+++ b/core/src/main/kotlin/org/opensearch/alerting/core/model/ScheduledJob.kt
@@ -36,6 +36,7 @@ interface ScheduledJob : Writeable, ToXContentObject {
     companion object {
         /** The name of the ElasticSearch index in which we store jobs */
         const val SCHEDULED_JOBS_INDEX = ".opendistro-alerting-config"
+        const val DOC_LEVEL_QUERIES_INDEX = ".opendistro-alerting-queries"
 
         const val NO_ID = ""
 
diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/model/SearchInput.kt b/core/src/main/kotlin/org/opensearch/alerting/core/model/SearchInput.kt
index 1688cc540..6e2d075eb 100644
--- a/core/src/main/kotlin/org/opensearch/alerting/core/model/SearchInput.kt
+++ b/core/src/main/kotlin/org/opensearch/alerting/core/model/SearchInput.kt
@@ -53,7 +53,7 @@ data class SearchInput(val indices: List<String>, val query: SearchSourceBuilder
     val XCONTENT_REGISTRY = NamedXContentRegistry.Entry(Input::class.java, ParseField("search"), CheckedFunction { parseInner(it) })
 
     @JvmStatic @Throws(IOException::class)
-    private fun parseInner(xcp: XContentParser): SearchInput {
+    fun parseInner(xcp: XContentParser): SearchInput {
         val indices = mutableListOf<String>()
         lateinit var searchSourceBuilder: SearchSourceBuilder
 
diff --git a/core/src/main/resources/mappings/doc-level-queries.json b/core/src/main/resources/mappings/doc-level-queries.json
new file mode 100644
index 000000000..6f70349fa
--- /dev/null
+++ b/core/src/main/resources/mappings/doc-level-queries.json
@@ -0,0 +1,13 @@
+{
+  "_meta": {
+    "schema_version": 1
+  },
+  "properties": {
+    "query": {
+      "type": "percolator_ext"
+    },
+    "monitor_id": {
+      "type": "text"
+    }
+  }
+}
\ No newline at end of file
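This mapping backs the new `DOC_LEVEL_QUERIES_INDEX` (`.opendistro-alerting-queries`) declared in `ScheduledJob` above: each monitor's doc-level queries are indexed as percolator queries alongside the owning `monitor_id`, so incoming documents can be percolated against every monitor's queries at once. A stored entry would look roughly like this (a sketch; the exact query DSL generated from the query string is outside this excerpt, and the id is illustrative):

    {
      "query": { "match": { "test_field": "us-west-2" } },
      "monitor_id": "monitor-123"
    }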
diff --git a/core/src/main/resources/mappings/scheduled-jobs.json b/core/src/main/resources/mappings/scheduled-jobs.json
index af3d10086..90b502cfc 100644
--- a/core/src/main/resources/mappings/scheduled-jobs.json
+++ b/core/src/main/resources/mappings/scheduled-jobs.json
@@ -1,6 +1,6 @@
 {
   "_meta" : {
-    "schema_version": 4
+    "schema_version": 5
   },
   "properties": {
     "monitor": {
@@ -244,6 +244,10 @@
         }
       }
     },
+    "last_run_context": {
+      "type": "object",
+      "enabled": false
+    },
     "ui_metadata": {
       "type": "object",
       "enabled": false