From 5327cfce1f87046aa16dea45ea9a07b8f39ddc55 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 2 Nov 2022 00:13:37 -0700 Subject: [PATCH 01/18] Support fetching alerts by list of alert ids in Get Alerts Action (#608) (#609) (#632) Signed-off-by: Surya Sashank Nistala --- .../TransportAcknowledgeAlertAction.kt | 64 +++++++++++++++---- .../transport/TransportGetAlertsAction.kt | 4 ++ .../alerting/MonitorDataSourcesIT.kt | 30 ++++++++- 3 files changed, 84 insertions(+), 14 deletions(-) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt index eaa48f21e..152f114c2 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt @@ -9,6 +9,7 @@ import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager +import org.opensearch.ResourceNotFoundException import org.opensearch.action.ActionListener import org.opensearch.action.bulk.BulkRequest import org.opensearch.action.bulk.BulkResponse @@ -19,6 +20,9 @@ import org.opensearch.action.search.SearchResponse import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction import org.opensearch.action.update.UpdateRequest +import org.opensearch.alerting.action.GetMonitorAction +import org.opensearch.alerting.action.GetMonitorRequest +import org.opensearch.alerting.action.GetMonitorResponse import org.opensearch.alerting.alerts.AlertIndices import org.opensearch.alerting.opensearchapi.suspendUntil import org.opensearch.alerting.settings.AlertingSettings @@ -38,13 +42,17 @@ import 
org.opensearch.commons.alerting.action.AcknowledgeAlertRequest import org.opensearch.commons.alerting.action.AcknowledgeAlertResponse import org.opensearch.commons.alerting.action.AlertingActions import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.Monitor import org.opensearch.commons.alerting.util.optionalTimeField import org.opensearch.commons.utils.recreateObject import org.opensearch.index.query.QueryBuilders +import org.opensearch.rest.RestRequest import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.search.fetch.subphase.FetchSourceContext import org.opensearch.tasks.Task import org.opensearch.transport.TransportService import java.time.Instant +import java.util.* private val log = LogManager.getLogger(TransportAcknowledgeAlertAction::class.java) private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) @@ -55,12 +63,14 @@ class TransportAcknowledgeAlertAction @Inject constructor( clusterService: ClusterService, actionFilters: ActionFilters, val settings: Settings, - val xContentRegistry: NamedXContentRegistry + val xContentRegistry: NamedXContentRegistry, + val transportGetMonitorAction: TransportGetMonitorAction ) : HandledTransportAction( AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_NAME, transportService, actionFilters, ::AcknowledgeAlertRequest ) { - @Volatile private var isAlertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings) + @Volatile + private var isAlertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings) init { clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.ALERT_HISTORY_ENABLED) { isAlertHistoryEnabled = it } @@ -75,7 +85,31 @@ class TransportAcknowledgeAlertAction @Inject constructor( ?: recreateObject(acknowledgeAlertRequest) { AcknowledgeAlertRequest(it) } client.threadPool().threadContext.stashContext().use { scope.launch { - AcknowledgeHandler(client, actionListener, request).start() + val 
getMonitorResponse: GetMonitorResponse = + transportGetMonitorAction.client.suspendUntil { + val getMonitorRequest = GetMonitorRequest( + monitorId = request.monitorId, + -3L, + RestRequest.Method.GET, + FetchSourceContext.FETCH_SOURCE + ) + execute(GetMonitorAction.INSTANCE, getMonitorRequest, it) + } + if (getMonitorResponse.monitor == null) { + actionListener.onFailure( + AlertingException.wrap( + ResourceNotFoundException( + String.format( + Locale.ROOT, + "No monitor found with id [%s]", + request.monitorId + ) + ) + ) + ) + } else { + AcknowledgeHandler(client, actionListener, request).start(getMonitorResponse.monitor!!) + } } } } @@ -87,14 +121,14 @@ class TransportAcknowledgeAlertAction @Inject constructor( ) { val alerts = mutableMapOf() - suspend fun start() = findActiveAlerts() + suspend fun start(monitor: Monitor) = findActiveAlerts(monitor) - private suspend fun findActiveAlerts() { + private suspend fun findActiveAlerts(monitor: Monitor) { val queryBuilder = QueryBuilders.boolQuery() .filter(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, request.monitorId)) .filter(QueryBuilders.termsQuery("_id", request.alertIds)) val searchRequest = SearchRequest() - .indices(AlertIndices.ALERT_INDEX) + .indices(monitor.dataSources.alertsIndex) .routing(request.monitorId) .source( SearchSourceBuilder() @@ -105,13 +139,13 @@ class TransportAcknowledgeAlertAction @Inject constructor( ) try { val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } - onSearchResponse(searchResponse) + onSearchResponse(searchResponse, monitor) } catch (t: Exception) { actionListener.onFailure(AlertingException.wrap(t)) } } - private suspend fun onSearchResponse(response: SearchResponse) { + private suspend fun onSearchResponse(response: SearchResponse, monitor: Monitor) { val updateRequests = mutableListOf() val copyRequests = mutableListOf() response.hits.forEach { hit -> @@ -124,8 +158,12 @@ class TransportAcknowledgeAlertAction @Inject 
constructor( alerts[alert.id] = alert if (alert.state == Alert.State.ACTIVE) { - if (alert.findingIds.isEmpty() || !isAlertHistoryEnabled) { - val updateRequest = UpdateRequest(AlertIndices.ALERT_INDEX, alert.id) + if ( + monitor.dataSources.alertsIndex != AlertIndices.ALERT_INDEX || + alert.findingIds.isEmpty() || + !isAlertHistoryEnabled + ) { + val updateRequest = UpdateRequest(monitor.dataSources.alertsIndex, alert.id) .routing(request.monitorId) .setIfSeqNo(hit.seqNo) .setIfPrimaryTerm(hit.primaryTerm) @@ -160,14 +198,14 @@ class TransportAcknowledgeAlertAction @Inject constructor( client.bulk(BulkRequest().add(copyRequests).setRefreshPolicy(request.refreshPolicy), it) } else null - onBulkResponse(updateResponse, copyResponse) + onBulkResponse(updateResponse, copyResponse, monitor) } catch (t: Exception) { log.error("ack error: ${t.message}") actionListener.onFailure(AlertingException.wrap(t)) } } - private suspend fun onBulkResponse(updateResponse: BulkResponse?, copyResponse: BulkResponse?) { + private suspend fun onBulkResponse(updateResponse: BulkResponse?, copyResponse: BulkResponse?, monitor: Monitor) { val deleteRequests = mutableListOf() val missing = request.alertIds.toMutableSet() val acknowledged = mutableListOf() @@ -196,7 +234,7 @@ class TransportAcknowledgeAlertAction @Inject constructor( log.info("got a failureResponse: ${item.failureMessage}") failed.add(alerts[item.id]!!) 
} else { - val deleteRequest = DeleteRequest(AlertIndices.ALERT_INDEX, item.id) + val deleteRequest = DeleteRequest(monitor.dataSources.alertsIndex, item.id) .routing(request.monitorId) deleteRequests.add(deleteRequest) } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetAlertsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetAlertsAction.kt index 756caeeab..e7810337a 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetAlertsAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetAlertsAction.kt @@ -99,6 +99,10 @@ class TransportGetAlertsAction @Inject constructor( if (getAlertsRequest.alertState != "ALL") queryBuilder.filter(QueryBuilders.termQuery("state", getAlertsRequest.alertState)) + if (getAlertsRequest.alertIds.isNullOrEmpty() == false) { + queryBuilder.filter(QueryBuilders.termsQuery("id", getAlertsRequest.alertIds)) + } + if (getAlertsRequest.monitorId != null) { queryBuilder.filter(QueryBuilders.termQuery("monitor_id", getAlertsRequest.monitorId)) } else if (getAlertsRequest.monitorIds.isNullOrEmpty() == false) { diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt index 839c07f29..d602a449e 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt @@ -8,9 +8,11 @@ package org.opensearch.alerting import org.junit.Assert import org.opensearch.action.admin.cluster.state.ClusterStateRequest import org.opensearch.action.admin.indices.create.CreateIndexRequest +import org.opensearch.action.support.WriteRequest import org.opensearch.alerting.core.ScheduledJobIndices import org.opensearch.alerting.transport.AlertingSingleNodeTestCase import org.opensearch.common.settings.Settings +import 
org.opensearch.commons.alerting.action.AcknowledgeAlertRequest import org.opensearch.commons.alerting.action.AlertingActions import org.opensearch.commons.alerting.action.GetAlertsRequest import org.opensearch.commons.alerting.model.DataSources @@ -21,6 +23,7 @@ import org.opensearch.commons.alerting.model.Table import java.time.ZonedDateTime import java.time.format.DateTimeFormatter import java.time.temporal.ChronoUnit.MILLIS +import java.util.stream.Collectors class MonitorDataSourcesIT : AlertingSingleNodeTestCase() { @@ -105,6 +108,12 @@ class MonitorDataSourcesIT : AlertingSingleNodeTestCase() { .get() Assert.assertTrue(getAlertsResponse != null) Assert.assertTrue(getAlertsResponse.alerts.size == 1) + val alertId = getAlertsResponse.alerts.get(0).id + val acknowledgeAlertResponse = client().execute( + AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE, + AcknowledgeAlertRequest(id, listOf(alertId), WriteRequest.RefreshPolicy.IMMEDIATE) + ).get() + Assert.assertEquals(acknowledgeAlertResponse.acknowledged.size, 1) } fun `test execute monitor with custom query index`() { @@ -504,10 +513,29 @@ class MonitorDataSourcesIT : AlertingSingleNodeTestCase() { var alertsResponseForRequestWithoutCustomIndex = client() .execute( AlertingActions.GET_ALERTS_ACTION_TYPE, - GetAlertsRequest(table, "ALL", "ALL", null, null, listOf(id, id1, "1", "2")) + GetAlertsRequest(table, "ALL", "ALL", null, null, monitorIds = listOf(id, id1, "1", "2")) ) .get() Assert.assertTrue(alertsResponseForRequestWithoutCustomIndex != null) Assert.assertTrue(alertsResponseForRequestWithoutCustomIndex.alerts.size == 2) + val alertIds = getAlertsResponse.alerts.stream().map { alert -> alert.id }.collect(Collectors.toList()) + var getAlertsByAlertIds = client() + .execute( + AlertingActions.GET_ALERTS_ACTION_TYPE, + GetAlertsRequest(table, "ALL", "ALL", null, null, alertIds = alertIds) + ) + .get() + Assert.assertTrue(getAlertsByAlertIds != null) + Assert.assertTrue(getAlertsByAlertIds.alerts.size == 2) + + 
var getAlertsByWrongAlertIds = client() + .execute( + AlertingActions.GET_ALERTS_ACTION_TYPE, + GetAlertsRequest(table, "ALL", "ALL", null, null, alertIds = listOf("1", "2")) + ) + .get() + + Assert.assertTrue(getAlertsByWrongAlertIds != null) + Assert.assertEquals(getAlertsByWrongAlertIds.alerts.size, 0) } } From 567304bb86300c0cf9d40e785cb6edf6e0b23b7d Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 19 Oct 2022 12:03:56 -0700 Subject: [PATCH 02/18] searchAlert fix (#613) (#614) Signed-off-by: Petar Dzepina (cherry picked from commit 996190ea19baeff075e10ca7ea91c0cf01993d43) Co-authored-by: Petar Dzepina --- .../org/opensearch/alerting/AlertService.kt | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt b/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt index 2bc433dd4..cc0bfbd97 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt @@ -64,7 +64,7 @@ class AlertService( suspend fun loadCurrentAlertsForQueryLevelMonitor(monitor: Monitor): Map { val searchAlertsResponse: SearchResponse = searchAlerts( - monitorId = monitor.id, + monitor = monitor, size = monitor.triggers.size * 2 // We expect there to be only a single in-progress alert so fetch 2 to check ) @@ -83,7 +83,7 @@ class AlertService( suspend fun loadCurrentAlertsForBucketLevelMonitor(monitor: Monitor): Map> { val searchAlertsResponse: SearchResponse = searchAlerts( - monitorId = monitor.id, + monitor = monitor, // TODO: This should be limited based on a circuit breaker that limits Alerts size = MAX_BUCKET_LEVEL_MONITOR_ALERT_SEARCH_COUNT ) @@ -351,7 +351,7 @@ class AlertService( /** * This is a separate method created specifically for saving new Alerts during the Bucket-Level Monitor run. 
* Alerts are saved in two batches during the execution of an Bucket-Level Monitor, once before the Actions are executed - * and once afterwards. This method saves Alerts to the [AlertIndices.ALERT_INDEX] but returns the same Alerts with their document IDs. + * and once afterwards. This method saves Alerts to the monitor's alertIndex but returns the same Alerts with their document IDs. * * The Alerts are required with their indexed ID so that when the new Alerts are updated after the Action execution, * the ID is available for the index request so that the existing Alert can be updated, instead of creating a duplicate Alert document. @@ -420,12 +420,15 @@ class AlertService( } /** - * Searches for Alerts in the [AlertIndices.ALERT_INDEX]. + * Searches for Alerts in the monitor's alertIndex. * * @param monitorId The Monitor to get Alerts for * @param size The number of search hits (Alerts) to return */ - private suspend fun searchAlerts(monitorId: String, size: Int): SearchResponse { + private suspend fun searchAlerts(monitor: Monitor, size: Int): SearchResponse { + val monitorId = monitor.id + val alertIndex = monitor.dataSources.alertsIndex + val queryBuilder = QueryBuilders.boolQuery() .filter(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitorId)) @@ -433,7 +436,7 @@ class AlertService( .size(size) .query(queryBuilder) - val searchRequest = SearchRequest(AlertIndices.ALERT_INDEX) + val searchRequest = SearchRequest(alertIndex) .routing(monitorId) .source(searchSourceBuilder) val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } From f66a2b850c901e45be5ebcd0c5f8a1475f37b884 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 20 Oct 2022 13:23:09 -0700 Subject: [PATCH 03/18] change transport ack alerts action to accept Action Request and recreate as AckAlertRequest (#618) (#619) Signed-off-by: Surya Sashank Nistala Signed-off-by: Surya 
Sashank Nistala (cherry picked from commit 2005185fcbbd36e41dc8344ab5a8c717c3043a1a) Co-authored-by: Surya Sashank Nistala --- .../alerting/transport/TransportAcknowledgeAlertAction.kt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt index 152f114c2..4e6ee8bbf 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt @@ -11,6 +11,7 @@ import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager import org.opensearch.ResourceNotFoundException import org.opensearch.action.ActionListener +import org.opensearch.action.ActionRequest import org.opensearch.action.bulk.BulkRequest import org.opensearch.action.bulk.BulkResponse import org.opensearch.action.delete.DeleteRequest @@ -65,7 +66,7 @@ class TransportAcknowledgeAlertAction @Inject constructor( val settings: Settings, val xContentRegistry: NamedXContentRegistry, val transportGetMonitorAction: TransportGetMonitorAction -) : HandledTransportAction( +) : HandledTransportAction( AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_NAME, transportService, actionFilters, ::AcknowledgeAlertRequest ) { @@ -78,7 +79,7 @@ class TransportAcknowledgeAlertAction @Inject constructor( override fun doExecute( task: Task, - acknowledgeAlertRequest: AcknowledgeAlertRequest, + acknowledgeAlertRequest: ActionRequest, actionListener: ActionListener ) { val request = acknowledgeAlertRequest as? 
AcknowledgeAlertRequest From dd3c8018b0d9f93be829d24b62b8ee5a9d438626 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 21 Oct 2022 10:19:11 -0700 Subject: [PATCH 04/18] Custom history indicies (#616) (#621) Signed-off-by: Petar Dzepina (cherry picked from commit 7546b007c70f5ec47f77f0832d84f53f86e060ab) Co-authored-by: Petar Dzepina --- .../org/opensearch/alerting/AlertService.kt | 14 ++--- .../alerting/MonitorRunnerService.kt | 3 +- .../alerting/alerts/AlertIndices.kt | 40 ++++++++++++--- .../opensearch/alerting/alerts/AlertMover.kt | 15 +++--- .../TransportAcknowledgeAlertAction.kt | 3 +- .../alerting/MonitorDataSourcesIT.kt | 51 +++++++++++++++++++ 6 files changed, 105 insertions(+), 21 deletions(-) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt b/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt index cc0bfbd97..7326c7c01 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt @@ -284,7 +284,9 @@ class AlertService( retryPolicy: BackoffPolicy, allowUpdatingAcknowledgedAlert: Boolean = false ) { - val alertIndex = dataSources.alertsIndex + val alertsIndex = dataSources.alertsIndex + val alertsHistoryIndex = dataSources.alertsHistoryIndex + var requestsToRetry = alerts.flatMap { alert -> // We don't want to set the version when saving alerts because the MonitorRunner has first priority when writing alerts. 
// In the rare event that a user acknowledges an alert between when it's read and when it's written @@ -293,7 +295,7 @@ class AlertService( when (alert.state) { Alert.State.ACTIVE, Alert.State.ERROR -> { listOf>( - IndexRequest(alertIndex) + IndexRequest(alertsIndex) .routing(alert.monitorId) .source(alert.toXContentWithUser(XContentFactory.jsonBuilder())) .id(if (alert.id != Alert.NO_ID) alert.id else null) @@ -304,7 +306,7 @@ class AlertService( // and updated by the MonitorRunner if (allowUpdatingAcknowledgedAlert) { listOf>( - IndexRequest(alertIndex) + IndexRequest(alertsIndex) .routing(alert.monitorId) .source(alert.toXContentWithUser(XContentFactory.jsonBuilder())) .id(if (alert.id != Alert.NO_ID) alert.id else null) @@ -318,11 +320,11 @@ class AlertService( } Alert.State.COMPLETED -> { listOfNotNull>( - DeleteRequest(alertIndex, alert.id) + DeleteRequest(alertsIndex, alert.id) .routing(alert.monitorId), // Only add completed alert to history index if history is enabled - if (alertIndices.isAlertHistoryEnabled(dataSources)) { - IndexRequest(AlertIndices.ALERT_HISTORY_WRITE_INDEX) + if (alertIndices.isAlertHistoryEnabled()) { + IndexRequest(alertsHistoryIndex) .routing(alert.monitorId) .source(alert.toXContentWithUser(XContentFactory.jsonBuilder())) .id(alert.id) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerService.kt b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerService.kt index bbeb7e99f..471fc1c83 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerService.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerService.kt @@ -52,7 +52,6 @@ object MonitorRunnerService : JobRunner, CoroutineScope, AbstractLifecycleCompon private val logger = LogManager.getLogger(javaClass) var monitorCtx: MonitorRunnerExecutionContext = MonitorRunnerExecutionContext() - private lateinit var runnerSupervisor: Job override val coroutineContext: CoroutineContext get() = Dispatchers.Default + 
runnerSupervisor @@ -184,7 +183,7 @@ object MonitorRunnerService : JobRunner, CoroutineScope, AbstractLifecycleCompon launch { try { monitorCtx.moveAlertsRetryPolicy!!.retry(logger) { - if (monitorCtx.alertIndices!!.isAlertInitialized()) { + if (monitorCtx.alertIndices!!.isAlertInitialized(job.dataSources)) { moveAlerts(monitorCtx.client!!, job.id, job) } } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt index e813e8d0d..c56621745 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt @@ -231,13 +231,25 @@ class AlertIndices( return alertIndexInitialized && alertHistoryIndexInitialized } - fun isAlertHistoryEnabled(dataSources: DataSources): Boolean { - if (dataSources.alertsIndex == ALERT_INDEX) { - return alertHistoryEnabled + fun isAlertInitialized(dataSources: DataSources): Boolean { + val alertsIndex = dataSources.alertsIndex + val alertsHistoryIndex = dataSources.alertsHistoryIndex + if (alertsIndex == ALERT_INDEX && alertsHistoryIndex == ALERT_HISTORY_WRITE_INDEX) { + return alertIndexInitialized && alertHistoryIndexInitialized + } + if ( + clusterService.state().metadata.indices.containsKey(alertsIndex) && + clusterService.state().metadata.hasAlias(alertsHistoryIndex) + ) { + return true } return false } + fun isAlertHistoryEnabled(): Boolean { + return alertHistoryEnabled + } + fun isFindingHistoryEnabled(): Boolean = findingHistoryEnabled suspend fun createOrUpdateAlertIndex() { @@ -265,6 +277,19 @@ class AlertIndices( if (dataSources.alertsIndex == ALERT_INDEX) { return createOrUpdateInitialAlertHistoryIndex() } + if (!clusterService.state().metadata.hasAlias(dataSources.alertsHistoryIndex)) { + createIndex( + dataSources.alertsHistoryIndexPattern ?: ALERT_HISTORY_INDEX_PATTERN, + alertMapping(), + dataSources.alertsHistoryIndex + ) + } 
else { + updateIndexMapping( + dataSources.alertsHistoryIndex ?: ALERT_HISTORY_WRITE_INDEX, + alertMapping(), + true + ) + } } suspend fun createOrUpdateInitialAlertHistoryIndex() { if (!alertHistoryIndexInitialized) { @@ -300,13 +325,15 @@ class AlertIndices( return createOrUpdateInitialFindingHistoryIndex() } val findingsIndex = dataSources.findingsIndex + val findingsIndexPattern = dataSources.findingsIndexPattern ?: FINDING_HISTORY_INDEX_PATTERN if (!clusterService.state().routingTable().hasIndex(findingsIndex)) { createIndex( - findingsIndex, - findingMapping() + findingsIndexPattern, + findingMapping(), + findingsIndex ) } else { - updateIndexMapping(findingsIndex, findingMapping(), false) + updateIndexMapping(findingsIndex, findingMapping(), true) } } @@ -339,6 +366,7 @@ class AlertIndices( targetIndex = IndexUtils.getIndexNameWithAlias(clusterState, index) } + // TODO call getMapping and compare actual mappings here instead of this if (targetIndex == IndexUtils.lastUpdatedAlertHistoryIndex || targetIndex == IndexUtils.lastUpdatedFindingHistoryIndex) { return } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertMover.kt b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertMover.kt index 100281a87..058e02c22 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertMover.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertMover.kt @@ -37,11 +37,14 @@ import org.opensearch.search.builder.SearchSourceBuilder * 1. Find active alerts: * a. matching monitorId if no monitor is provided (postDelete) * b. matching monitorId and no triggerIds if monitor is provided (postIndex) - * 2. Move alerts over to [ALERT_HISTORY_WRITE_INDEX] as DELETED - * 3. Delete alerts from [ALERT_INDEX] + * 2. Move alerts over to DataSources.alertsHistoryIndex as DELETED + * 3. Delete alerts from monitor's DataSources.alertsIndex * 4. 
Schedule a retry if there were any failures */ -suspend fun moveAlerts(client: Client, monitorId: String, monitor: Monitor? = null) { +suspend fun moveAlerts(client: Client, monitorId: String, monitor: Monitor?) { + var alertIndex = monitor?.dataSources?.alertsIndex ?: ALERT_INDEX + var alertHistoryIndex = monitor?.dataSources?.alertsHistoryIndex ?: ALERT_HISTORY_WRITE_INDEX + val boolQuery = QueryBuilders.boolQuery() .filter(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitorId)) @@ -53,7 +56,7 @@ suspend fun moveAlerts(client: Client, monitorId: String, monitor: Monitor? = nu .query(boolQuery) .version(true) - val activeAlertsRequest = SearchRequest(AlertIndices.ALERT_INDEX) + val activeAlertsRequest = SearchRequest(alertIndex) .routing(monitorId) .source(activeAlertsQuery) val response: SearchResponse = client.suspendUntil { search(activeAlertsRequest, it) } @@ -61,7 +64,7 @@ suspend fun moveAlerts(client: Client, monitorId: String, monitor: Monitor? = nu // If no alerts are found, simply return if (response.hits.totalHits?.value == 0L) return val indexRequests = response.hits.map { hit -> - IndexRequest(AlertIndices.ALERT_HISTORY_WRITE_INDEX) + IndexRequest(alertHistoryIndex) .routing(monitorId) .source( Alert.parse(alertContentParser(hit.sourceRef), hit.id, hit.version) @@ -76,7 +79,7 @@ suspend fun moveAlerts(client: Client, monitorId: String, monitor: Monitor? 
= nu val copyResponse: BulkResponse = client.suspendUntil { bulk(copyRequest, it) } val deleteRequests = copyResponse.items.filterNot { it.isFailed }.map { - DeleteRequest(AlertIndices.ALERT_INDEX, it.id) + DeleteRequest(alertIndex, it.id) .routing(monitorId) .version(it.version) .versionType(VersionType.EXTERNAL_GTE) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt index 4e6ee8bbf..f95520f55 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt @@ -147,6 +147,7 @@ class TransportAcknowledgeAlertAction @Inject constructor( } private suspend fun onSearchResponse(response: SearchResponse, monitor: Monitor) { + val alertsHistoryIndex = monitor.dataSources.alertsHistoryIndex val updateRequests = mutableListOf() val copyRequests = mutableListOf() response.hits.forEach { hit -> @@ -176,7 +177,7 @@ class TransportAcknowledgeAlertAction @Inject constructor( ) updateRequests.add(updateRequest) } else { - val copyRequest = IndexRequest(AlertIndices.ALERT_HISTORY_WRITE_INDEX) + val copyRequest = IndexRequest(alertsHistoryIndex) .routing(request.monitorId) .id(alert.id) .source( diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt index d602a449e..767da7f22 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt @@ -15,14 +15,17 @@ import org.opensearch.common.settings.Settings import org.opensearch.commons.alerting.action.AcknowledgeAlertRequest import org.opensearch.commons.alerting.action.AlertingActions import 
org.opensearch.commons.alerting.action.GetAlertsRequest +import org.opensearch.commons.alerting.model.Alert import org.opensearch.commons.alerting.model.DataSources import org.opensearch.commons.alerting.model.DocLevelMonitorInput import org.opensearch.commons.alerting.model.DocLevelQuery import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX import org.opensearch.commons.alerting.model.Table +import org.opensearch.test.OpenSearchTestCase import java.time.ZonedDateTime import java.time.format.DateTimeFormatter import java.time.temporal.ChronoUnit.MILLIS +import java.util.concurrent.TimeUnit import java.util.stream.Collectors class MonitorDataSourcesIT : AlertingSingleNodeTestCase() { @@ -465,6 +468,54 @@ class MonitorDataSourcesIT : AlertingSingleNodeTestCase() { } } + fun `test search custom alerts history index`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex = "custom_alerts_index" + val customAlertsHistoryIndex = "custom_alerts_history_index" + val customAlertsHistoryIndexPattern = "" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger1, trigger2), + dataSources = DataSources( + alertsIndex = customAlertsIndex, + alertsHistoryIndex = customAlertsHistoryIndex, + alertsHistoryIndexPattern = customAlertsHistoryIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = 
monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val monitorId = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, monitorId, false) + var alertsBefore = searchAlerts(monitorId, customAlertsIndex) + Assert.assertEquals(2, alertsBefore.size) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 2) + // Remove 1 trigger from monitor to force moveAlerts call to move alerts to history index + monitor = monitor.copy(triggers = listOf(trigger1)) + updateMonitor(monitor, monitorId) + + var alerts = listOf() + OpenSearchTestCase.waitUntil({ + alerts = searchAlerts(monitorId, customAlertsHistoryIndex) + if (alerts.size == 1) { + return@waitUntil true + } + return@waitUntil false + }, 30, TimeUnit.SECONDS) + assertEquals("Alerts from custom history index", 1, alerts.size) + } + fun `test get alerts by list of monitors containing both existent and non-existent ids`() { val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) From c8c83e820c8e6b4871ccff9f0b95c29d46abd816 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 25 Oct 2022 00:32:08 -0700 Subject: [PATCH 05/18] fix alias exists check in findings index creation (#622) (#623) Signed-off-by: Surya Sashank Nistala --- .../main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt index c56621745..c1f14ea83 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt @@ 
-326,7 +326,7 @@ class AlertIndices( } val findingsIndex = dataSources.findingsIndex val findingsIndexPattern = dataSources.findingsIndexPattern ?: FINDING_HISTORY_INDEX_PATTERN - if (!clusterService.state().routingTable().hasIndex(findingsIndex)) { + if (!clusterService.state().metadata().hasAlias(findingsIndexPattern)) { createIndex( findingsIndexPattern, findingMapping(), From e15142dd147da9d679c3ec97469f375169223f61 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 26 Oct 2022 15:14:37 -0700 Subject: [PATCH 06/18] fix for windows ktlint issue (#585) (#624) Signed-off-by: Subhobrata Dey --- .github/workflows/test-workflow.yml | 28 ++- .../alerting/TriggerServiceTests.kt | 220 +++++++++++++++--- scripts/build.sh | 4 +- 3 files changed, 216 insertions(+), 36 deletions(-) diff --git a/.github/workflows/test-workflow.yml b/.github/workflows/test-workflow.yml index c83a25fd5..22a79a69d 100644 --- a/.github/workflows/test-workflow.yml +++ b/.github/workflows/test-workflow.yml @@ -10,30 +10,50 @@ on: jobs: build: + env: + BUILD_ARGS: ${{ matrix.os_build_args }} + WORKING_DIR: ${{ matrix.working_directory }}. 
strategy: matrix: java: [11, 17] + os: [ ubuntu-latest, windows-latest, macos-latest ] + include: + - os: windows-latest + os_build_args: -x integTest -x jacocoTestReport + working_directory: X:\ + os_java_options: -Xmx4096M + - os: macos-latest + os_build_args: -x integTest -x jacocoTestReport # Job name - name: Build Alerting with JDK ${{ matrix.java }} + name: Build Alerting with JDK ${{ matrix.java }} on ${{ matrix.os }} # This job runs on Linux - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} steps: # This step uses the checkout Github action: https://github.com/actions/checkout - name: Checkout Branch uses: actions/checkout@v2 + # This is a hack, but this step creates a link to the X: mounted drive, which makes the path + # short enough to work on Windows + - name: Shorten Path + if: ${{ matrix.os == 'windows-latest' }} + run: subst 'X:' . # This step uses the setup-java Github action: https://github.com/actions/setup-java - name: Set Up JDK ${{ matrix.java }} uses: actions/setup-java@v1 with: java-version: ${{ matrix.java }} - name: Build and run with Gradle - run: ./gradlew build + working-directory: ${{ env.WORKING_DIR }} + run: ./gradlew build ${{ env.BUILD_ARGS }} + env: + _JAVA_OPTIONS: ${{ matrix.os_java_options }} - name: Create Artifact Path run: | mkdir -p alerting-artifacts cp ./alerting/build/distributions/*.zip alerting-artifacts # This step uses the codecov-action Github action: https://github.com/codecov/codecov-action - name: Upload Coverage Report + if: ${{ matrix.os == 'ubuntu-latest' }} uses: codecov/codecov-action@v1 with: token: ${{ secrets.CODECOV_TOKEN }} @@ -41,5 +61,5 @@ jobs: - name: Upload Artifacts uses: actions/upload-artifact@v1 with: - name: alerting-plugin + name: alerting-plugin-${{ matrix.os }} path: alerting-artifacts diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/TriggerServiceTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/TriggerServiceTests.kt index 
c99eb2b9b..f2f321112 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/TriggerServiceTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/TriggerServiceTests.kt @@ -37,19 +37,75 @@ class TriggerServiceTests : OpenSearchTestCase() { val trigger = randomBucketLevelTrigger(bucketSelector = bucketSelectorExtAggregationBuilder) val monitor = randomBucketLevelMonitor(triggers = listOf(trigger)) - val inputResultsStr = "{\"_shards\":" + - "{\"total\":1,\"failed\":0,\"successful\":1,\"skipped\":0},\"hits\":{\"hits\":" + - "[{\"_index\":\"sample-http-responses\",\"_type\":\"http\",\"_source\":" + - "{\"status_code\":100,\"http_4xx\":0,\"http_3xx\":0,\"http_5xx\":0,\"http_2xx\":0," + - "\"timestamp\":100000,\"http_1xx\":1},\"_id\":1,\"_score\":1}],\"total\":{\"value\":4,\"relation\":\"eq\"}," + - "\"max_score\":1},\"took\":37,\"timed_out\":false,\"aggregations\":{\"status_code\":" + - "{\"doc_count_error_upper_bound\":0,\"sum_other_doc_count\":0,\"buckets\":[{\"doc_count\":2,\"key\":100}," + - "{\"doc_count\":1,\"key\":102},{\"doc_count\":1,\"key\":201}]},\"${trigger.id}\":{\"parent_bucket_path\":" + - "\"status_code\",\"bucket_indices\":[0,1,2]}}}" - - val parser = XContentType.JSON.xContent().createParser( - NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, inputResultsStr - ) + val inputResultsStr = "{\n" + + " \"_shards\": {\n" + + " \"total\": 1,\n" + + " \"failed\": 0,\n" + + " \"successful\": 1,\n" + + " \"skipped\": 0\n" + + " },\n" + + " \"hits\": {\n" + + " \"hits\": [\n" + + " {\n" + + " \"_index\": \"sample-http-responses\",\n" + + " \"_type\": \"http\",\n" + + " \"_source\": {\n" + + " \"status_code\": 100,\n" + + " \"http_4xx\": 0,\n" + + " \"http_3xx\": 0,\n" + + " \"http_5xx\": 0,\n" + + " \"http_2xx\": 0,\n" + + " \"timestamp\": 100000,\n" + + " \"http_1xx\": 1\n" + + " },\n" + + " \"_id\": 1,\n" + + " \"_score\": 1\n" + + " }\n" + + " ],\n" + + " \"total\": {\n" + + " \"value\": 4,\n" + + " \"relation\": 
\"eq\"\n" + + " },\n" + + " \"max_score\": 1\n" + + " },\n" + + " \"took\": 37,\n" + + " \"timed_out\": false,\n" + + " \"aggregations\": {\n" + + " \"status_code\": {\n" + + " \"doc_count_error_upper_bound\": 0,\n" + + " \"sum_other_doc_count\": 0,\n" + + " \"buckets\": [\n" + + " {\n" + + " \"doc_count\": 2,\n" + + " \"key\": 100\n" + + " },\n" + + " {\n" + + " \"doc_count\": 1,\n" + + " \"key\": 102\n" + + " },\n" + + " {\n" + + " \"doc_count\": 1,\n" + + " \"key\": 201\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"${trigger.id}\": {\n" + + " \"parent_bucket_path\": \"status_code\",\n" + + " \"bucket_indices\": [\n" + + " 0,\n" + + " 1,\n" + + " 2\n" + + " ]\n" + + " }\n" + + " }\n" + + "}" + + val parser = XContentType.JSON.xContent() + .createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + inputResultsStr + ) val inputResults = parser.map() @@ -70,23 +126,127 @@ class TriggerServiceTests : OpenSearchTestCase() { val trigger = randomBucketLevelTrigger(bucketSelector = bucketSelectorExtAggregationBuilder) val monitor = randomBucketLevelMonitor(triggers = listOf(trigger)) - val inputResultsStr = "{\"_shards\":{\"total\":1, \"failed\":0, \"successful\":1, \"skipped\":0}, \"hits\":{\"hits\":" + - "[{\"_index\":\"sample-http-responses\", \"_type\":\"http\", \"_source\":{\"status_code\":100, \"http_4xx\":0," + - " \"http_3xx\":0, \"http_5xx\":0, \"http_2xx\":0, \"timestamp\":100000, \"http_1xx\":1}, \"_id\":1, \"_score\":1.0}, " + - "{\"_index\":\"sample-http-responses\", \"_type\":\"http\", \"_source\":{\"status_code\":102, \"http_4xx\":0, " + - "\"http_3xx\":0, \"http_5xx\":0, \"http_2xx\":0, \"timestamp\":160000, \"http_1xx\":1}, \"_id\":2, \"_score\":1.0}, " + - "{\"_index\":\"sample-http-responses\", \"_type\":\"http\", \"_source\":{\"status_code\":100, \"http_4xx\":0, " + - "\"http_3xx\":0, \"http_5xx\":0, \"http_2xx\":0, \"timestamp\":220000, \"http_1xx\":1}, \"_id\":4, \"_score\":1.0}, " + - 
"{\"_index\":\"sample-http-responses\", \"_type\":\"http\", \"_source\":{\"status_code\":201, \"http_4xx\":0, " + - "\"http_3xx\":0, \"http_5xx\":0, \"http_2xx\":1, \"timestamp\":280000, \"http_1xx\":0}, \"_id\":5, \"_score\":1.0}]," + - " \"total\":{\"value\":4, \"relation\":\"eq\"}, \"max_score\":1.0}, \"took\":15, \"timed_out\":false, \"aggregations\":" + - "{\"${trigger.id}\":{\"parent_bucket_path\":\"status_code\", \"bucket_indices\":[0, 1, 2]}, \"status_code\":{\"buckets\":" + - "[{\"doc_count\":2, \"key\":{\"status_code\":100}}, {\"doc_count\":1, \"key\":{\"status_code\":102}}, {\"doc_count\":1," + - " \"key\":{\"status_code\":201}}], \"after_key\":{\"status_code\":201}}}}" - - val parser = XContentType.JSON.xContent().createParser( - NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, inputResultsStr - ) + val inputResultsStr = "{\n" + + " \"_shards\": {\n" + + " \"total\": 1,\n" + + " \"failed\": 0,\n" + + " \"successful\": 1,\n" + + " \"skipped\": 0\n" + + " },\n" + + " \"hits\": {\n" + + " \"hits\": [\n" + + " {\n" + + " \"_index\": \"sample-http-responses\",\n" + + " \"_type\": \"http\",\n" + + " \"_source\": {\n" + + " \"status_code\": 100,\n" + + " \"http_4xx\": 0,\n" + + " \"http_3xx\": 0,\n" + + " \"http_5xx\": 0,\n" + + " \"http_2xx\": 0,\n" + + " \"timestamp\": 100000,\n" + + " \"http_1xx\": 1\n" + + " },\n" + + " \"_id\": 1,\n" + + " \"_score\": 1\n" + + " },\n" + + " {\n" + + " \"_index\": \"sample-http-responses\",\n" + + " \"_type\": \"http\",\n" + + " \"_source\": {\n" + + " \"status_code\": 102,\n" + + " \"http_4xx\": 0,\n" + + " \"http_3xx\": 0,\n" + + " \"http_5xx\": 0,\n" + + " \"http_2xx\": 0,\n" + + " \"timestamp\": 160000,\n" + + " \"http_1xx\": 1\n" + + " },\n" + + " \"_id\": 2,\n" + + " \"_score\": 1\n" + + " },\n" + + " {\n" + + " \"_index\": \"sample-http-responses\",\n" + + " \"_type\": \"http\",\n" + + " \"_source\": {\n" + + " \"status_code\": 100,\n" + + " \"http_4xx\": 0,\n" + + " \"http_3xx\": 0,\n" + 
+ " \"http_5xx\": 0,\n" + + " \"http_2xx\": 0,\n" + + " \"timestamp\": 220000,\n" + + " \"http_1xx\": 1\n" + + " },\n" + + " \"_id\": 4,\n" + + " \"_score\": 1\n" + + " },\n" + + " {\n" + + " \"_index\": \"sample-http-responses\",\n" + + " \"_type\": \"http\",\n" + + " \"_source\": {\n" + + " \"status_code\": 201,\n" + + " \"http_4xx\": 0,\n" + + " \"http_3xx\": 0,\n" + + " \"http_5xx\": 0,\n" + + " \"http_2xx\": 1,\n" + + " \"timestamp\": 280000,\n" + + " \"http_1xx\": 0\n" + + " },\n" + + " \"_id\": 5,\n" + + " \"_score\": 1\n" + + " }\n" + + " ],\n" + + " \"total\": {\n" + + " \"value\": 4,\n" + + " \"relation\": \"eq\"\n" + + " },\n" + + " \"max_score\": 1\n" + + " },\n" + + " \"took\": 15,\n" + + " \"timed_out\": false,\n" + + " \"aggregations\": {\n" + + " \"${trigger.id}\": {\n" + + " \"parent_bucket_path\": \"status_code\",\n" + + " \"bucket_indices\": [\n" + + " 0,\n" + + " 1,\n" + + " 2\n" + + " ]\n" + + " },\n" + + " \"status_code\": {\n" + + " \"buckets\": [\n" + + " {\n" + + " \"doc_count\": 2,\n" + + " \"key\": {\n" + + " \"status_code\": 100\n" + + " }\n" + + " },\n" + + " {\n" + + " \"doc_count\": 1,\n" + + " \"key\": {\n" + + " \"status_code\": 102\n" + + " }\n" + + " },\n" + + " {\n" + + " \"doc_count\": 1,\n" + + " \"key\": {\n" + + " \"status_code\": 201\n" + + " }\n" + + " }\n" + + " ],\n" + + " \"after_key\": {\n" + + " \"status_code\": 201\n" + + " }\n" + + " }\n" + + " }\n" + + "}" + + val parser = XContentType.JSON.xContent() + .createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + inputResultsStr + ) val inputResults = parser.map() diff --git a/scripts/build.sh b/scripts/build.sh index 2e621271c..5a173adfb 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -66,7 +66,7 @@ fi mkdir -p $OUTPUT/plugins -./gradlew assemble --no-daemon --refresh-dependencies -DskipTests=true -Dopensearch.version=$VERSION -Dbuild.version_qualifier=$QUALIFIER -Dbuild.snapshot=$SNAPSHOT -x ktlint +./gradlew assemble 
--no-daemon --refresh-dependencies -DskipTests=true -Dopensearch.version=$VERSION -Dbuild.version_qualifier=$QUALIFIER -Dbuild.snapshot=$SNAPSHOT zipPath=$(find . -path \*build/distributions/*.zip) distributions="$(dirname "${zipPath}")" @@ -74,6 +74,6 @@ distributions="$(dirname "${zipPath}")" echo "COPY ${distributions}/*.zip" cp ${distributions}/*.zip ./$OUTPUT/plugins -./gradlew publishPluginZipPublicationToZipStagingRepository -Dopensearch.version=$VERSION -Dbuild.snapshot=$SNAPSHOT -Dbuild.version_qualifier=$QUALIFIER -x ktlint +./gradlew publishPluginZipPublicationToZipStagingRepository -Dopensearch.version=$VERSION -Dbuild.snapshot=$SNAPSHOT -Dbuild.version_qualifier=$QUALIFIER mkdir -p $OUTPUT/maven/org/opensearch cp -r ./build/local-staging-repo/org/opensearch/. $OUTPUT/maven/org/opensearch From ec6c76891a5e3457df6154c4126fe442773004fd Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 28 Oct 2022 15:42:10 -0700 Subject: [PATCH 07/18] Ack alerts - allow moving alerts to history index with custom datasources (#626) (#627) * in case of custom indices, allow moving alerts to history index Signed-off-by: Petar Dzepina * empty commit Signed-off-by: Petar Dzepina * added IT for custom datasources alert ack Signed-off-by: Petar Dzepina Signed-off-by: Petar Dzepina (cherry picked from commit 0740d9be59113a8e4c31a5370baabe9b31761f2c) Co-authored-by: Petar Dzepina --- .../TransportAcknowledgeAlertAction.kt | 2 - .../alerting/MonitorDataSourcesIT.kt | 70 +++++++++++++++++++ 2 files changed, 70 insertions(+), 2 deletions(-) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt index f95520f55..b6c14e56e 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt +++ 
b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt @@ -24,7 +24,6 @@ import org.opensearch.action.update.UpdateRequest import org.opensearch.alerting.action.GetMonitorAction import org.opensearch.alerting.action.GetMonitorRequest import org.opensearch.alerting.action.GetMonitorResponse -import org.opensearch.alerting.alerts.AlertIndices import org.opensearch.alerting.opensearchapi.suspendUntil import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.util.AlertingException @@ -161,7 +160,6 @@ class TransportAcknowledgeAlertAction @Inject constructor( if (alert.state == Alert.State.ACTIVE) { if ( - monitor.dataSources.alertsIndex != AlertIndices.ALERT_INDEX || alert.findingIds.isEmpty() || !isAlertHistoryEnabled ) { diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt index 767da7f22..6bed045fb 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt @@ -516,6 +516,76 @@ class MonitorDataSourcesIT : AlertingSingleNodeTestCase() { assertEquals("Alerts from custom history index", 1, alerts.size) } + fun `test search custom alerts history index after alert ack`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex = "custom_alerts_index" + val customAlertsHistoryIndex = "custom_alerts_history_index" + val customAlertsHistoryIndexPattern = "" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger1, trigger2), + dataSources = DataSources( + alertsIndex = 
customAlertsIndex, + alertsHistoryIndex = customAlertsHistoryIndex, + alertsHistoryIndexPattern = customAlertsHistoryIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val monitorId = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, monitorId, false) + var alertsBefore = searchAlerts(monitorId, customAlertsIndex) + Assert.assertEquals(2, alertsBefore.size) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 2) + + var alerts = listOf() + OpenSearchTestCase.waitUntil({ + alerts = searchAlerts(monitorId, customAlertsIndex) + if (alerts.size == 1) { + return@waitUntil true + } + return@waitUntil false + }, 30, TimeUnit.SECONDS) + assertEquals("Alerts from custom index", 2, alerts.size) + + val ackReq = AcknowledgeAlertRequest(monitorId, alerts.map { it.id }.toMutableList(), WriteRequest.RefreshPolicy.IMMEDIATE) + client().execute(AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE, ackReq).get() + + // verify alerts moved from alert index to alert history index + alerts = listOf() + OpenSearchTestCase.waitUntil({ + alerts = searchAlerts(monitorId, customAlertsHistoryIndex) + if (alerts.size == 1) { + return@waitUntil true + } + return@waitUntil false + }, 30, TimeUnit.SECONDS) + assertEquals("Alerts from custom history index", 2, alerts.size) + + // verify alerts deleted from alert index + alerts = listOf() + OpenSearchTestCase.waitUntil({ + alerts = searchAlerts(monitorId, customAlertsIndex) + if (alerts.size == 1) { + return@waitUntil true + 
} + return@waitUntil false + }, 30, TimeUnit.SECONDS) + assertEquals("Alerts from custom history index", 0, alerts.size) + } + fun `test get alerts by list of monitors containing both existent and non-existent ids`() { val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) From 57f6aa3ed08efad62cd33bf5a7fb26029ca35dcc Mon Sep 17 00:00:00 2001 From: Surya Sashank Nistala Date: Tue, 1 Nov 2022 07:21:35 -0700 Subject: [PATCH 08/18] refactored DeleteMonitor Action to be synchronious (#628) (#630) * refactored DeleteMonitor Action to be synchronious Signed-off-by: Petar Dzepina Signed-off-by: Petar Dzepina Co-authored-by: Petar Dzepina --- .../transport/TransportDeleteMonitorAction.kt | 165 +++++++----------- 1 file changed, 63 insertions(+), 102 deletions(-) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt index 755073b92..ab57a0d45 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt @@ -5,6 +5,10 @@ package org.opensearch.alerting.transport +import kotlinx.coroutines.CoroutineName +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.GlobalScope +import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager import org.opensearch.OpenSearchStatusException import org.opensearch.action.ActionListener @@ -15,6 +19,7 @@ import org.opensearch.action.get.GetRequest import org.opensearch.action.get.GetResponse import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.opensearchapi.suspendUntil import org.opensearch.alerting.settings.AlertingSettings import 
org.opensearch.alerting.util.AlertingException import org.opensearch.client.Client @@ -39,7 +44,9 @@ import org.opensearch.index.reindex.DeleteByQueryRequestBuilder import org.opensearch.rest.RestStatus import org.opensearch.tasks.Task import org.opensearch.transport.TransportService -import java.io.IOException +import kotlin.coroutines.resume +import kotlin.coroutines.resumeWithException +import kotlin.coroutines.suspendCoroutine private val log = LogManager.getLogger(TransportDeleteMonitorAction::class.java) @@ -71,7 +78,8 @@ class TransportDeleteMonitorAction @Inject constructor( if (!validateUserBackendRoles(user, actionListener)) { return } - client.threadPool().threadContext.stashContext().use { + + GlobalScope.launch(Dispatchers.IO + CoroutineName("DeleteMonitorAction")) { DeleteMonitorHandler(client, actionListener, deleteRequest, user, transformedRequest.monitorId).resolveUserAndStart() } } @@ -83,120 +91,73 @@ class TransportDeleteMonitorAction @Inject constructor( private val user: User?, private val monitorId: String ) { - - fun resolveUserAndStart() { - if (user == null) { - // Security is disabled, so we can delete the destination without issues - deleteMonitor() - } else if (!doFilterForUser(user)) { - // security is enabled and filterby is disabled. 
- deleteMonitor() - } else { - try { - start() - } catch (ex: IOException) { - actionListener.onFailure(AlertingException.wrap(ex)) + suspend fun resolveUserAndStart() { + try { + val monitor = getMonitor() + + val canDelete = user == null || + !doFilterForUser(user) || + checkUserPermissionsWithResource(user, monitor.user, actionListener, "monitor", monitorId) + + if (canDelete) { + val deleteResponse = deleteMonitor(monitor) + deleteMetadata(monitor) + deleteDocLevelMonitorQueries(monitor) + actionListener.onResponse(DeleteMonitorResponse(deleteResponse.id, deleteResponse.version)) + } else { + actionListener.onFailure( + AlertingException("Not allowed to delete this monitor!", RestStatus.FORBIDDEN, IllegalStateException()) + ) } + } catch (t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) } } - fun start() { + private suspend fun getMonitor(): Monitor { val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, monitorId) - client.get( - getRequest, - object : ActionListener { - override fun onResponse(response: GetResponse) { - if (!response.isExists) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException("Monitor with $monitorId is not found", RestStatus.NOT_FOUND) - ) - ) - return - } - val xcp = XContentHelper.createParser( - xContentRegistry, LoggingDeprecationHandler.INSTANCE, - response.sourceAsBytesRef, XContentType.JSON - ) - val monitor = ScheduledJob.parse(xcp, response.id, response.version) as Monitor - onGetResponse(monitor) - } - override fun onFailure(t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - ) - } - private fun onGetResponse(monitor: Monitor) { - if (!checkUserPermissionsWithResource(user, monitor.user, actionListener, "monitor", monitorId)) { - return - } else { - deleteMonitor() + val getResponse: GetResponse = client.suspendUntil { get(getRequest, it) } + if (getResponse.isExists == false) { + actionListener.onFailure( + AlertingException.wrap( + 
OpenSearchStatusException("Monitor with $monitorId is not found", RestStatus.NOT_FOUND) + ) + ) } - } - - private fun deleteMonitor() { - client.delete( - deleteRequest, - object : ActionListener { - override fun onResponse(response: DeleteResponse) { - val clusterState = clusterService.state() - if (clusterState.routingTable.hasIndex(ScheduledJob.DOC_LEVEL_QUERIES_INDEX)) { - deleteDocLevelMonitorQueries() - } - deleteMetadata() - - actionListener.onResponse(DeleteMonitorResponse(response.id, response.version)) - } - - override fun onFailure(t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } + val xcp = XContentHelper.createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, + getResponse.sourceAsBytesRef, XContentType.JSON ) + return ScheduledJob.parse(xcp, getResponse.id, getResponse.version) as Monitor } - private fun deleteMetadata() { - val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, monitorId) - client.get( - getRequest, - object : ActionListener { - override fun onResponse(response: GetResponse) { - if (response.isExists) { - val deleteMetadataRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, "$monitorId") - .setRefreshPolicy(deleteRequest.refreshPolicy) - client.delete( - deleteMetadataRequest, - object : ActionListener { - override fun onResponse(response: DeleteResponse) { - } - - override fun onFailure(t: Exception) { - } - } - ) - } - } - override fun onFailure(t: Exception) { - } - } - ) + private suspend fun deleteMonitor(monitor: Monitor): DeleteResponse { + return client.suspendUntil { delete(deleteRequest, it) } } - private fun deleteDocLevelMonitorQueries() { - DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) - .source(ScheduledJob.DOC_LEVEL_QUERIES_INDEX) - .filter(QueryBuilders.matchQuery("monitor_id", monitorId)) - .execute( - object : ActionListener { - override fun onResponse(response: BulkByScrollResponse) { - } + private suspend fun deleteMetadata(monitor: 
Monitor) { + val deleteRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, "${monitor.id}-metadata") + val deleteResponse: DeleteResponse = client.suspendUntil { delete(deleteRequest, it) } + } - override fun onFailure(t: Exception) { + private suspend fun deleteDocLevelMonitorQueries(monitor: Monitor) { + val clusterState = clusterService.state() + if (!clusterState.routingTable.hasIndex(monitor.dataSources.queryIndex)) { + return + } + val response: BulkByScrollResponse = suspendCoroutine { cont -> + DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) + .source(monitor.dataSources.queryIndex) + .filter(QueryBuilders.matchQuery("monitor_id", monitorId)) + .refresh(true) + .execute( + object : ActionListener { + override fun onResponse(response: BulkByScrollResponse) = cont.resume(response) + override fun onFailure(t: Exception) = cont.resumeWithException(t) } - } - ) + ) + } } } } From 1400add65122b414aba99c240da56004f3f43a8d Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 3 Nov 2022 17:26:32 -0700 Subject: [PATCH 09/18] Enabled parsing of bucket level monitors when executing the transport action on the same node (#631) (#638) Signed-off-by: Stevan Buzejic --- .../transport/TransportIndexMonitorAction.kt | 9 +++++++-- .../alerting/resthandler/MonitorRestApiIT.kt | 16 ++++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt index 06c6b0e26..a00cde540 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt @@ -45,6 +45,7 @@ import org.opensearch.alerting.util.isADMonitor import org.opensearch.client.Client import 
org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject +import org.opensearch.common.io.stream.NamedWriteableRegistry import org.opensearch.common.settings.Settings import org.opensearch.common.unit.TimeValue import org.opensearch.common.xcontent.LoggingDeprecationHandler @@ -87,7 +88,8 @@ class TransportIndexMonitorAction @Inject constructor( val docLevelMonitorQueries: DocLevelMonitorQueries, val clusterService: ClusterService, val settings: Settings, - val xContentRegistry: NamedXContentRegistry + val xContentRegistry: NamedXContentRegistry, + val namedWriteableRegistry: NamedWriteableRegistry, ) : HandledTransportAction( AlertingActions.INDEX_MONITOR_ACTION_NAME, transportService, actionFilters, ::IndexMonitorRequest ), @@ -111,7 +113,10 @@ class TransportIndexMonitorAction @Inject constructor( override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener) { val transformedRequest = request as? IndexMonitorRequest - ?: recreateObject(request) { IndexMonitorRequest(it) } + ?: recreateObject(request, namedWriteableRegistry) { + IndexMonitorRequest(it) + } + val user = readUserFromThreadContext(client) if (!validateUserBackendRoles(user, actionListener)) { diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt index 02fceaf3a..32451e9e5 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt @@ -24,6 +24,7 @@ import org.opensearch.alerting.randomAction import org.opensearch.alerting.randomAlert import org.opensearch.alerting.randomAnomalyDetector import org.opensearch.alerting.randomAnomalyDetectorWithUser +import org.opensearch.alerting.randomBucketLevelMonitor import org.opensearch.alerting.randomBucketLevelTrigger import 
org.opensearch.alerting.randomDocumentLevelMonitor import org.opensearch.alerting.randomDocumentLevelTrigger @@ -107,6 +108,21 @@ class MonitorRestApiIT : AlertingRestTestCase() { assertEquals("Incorrect Location header", "$ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) } + @Throws(Exception::class) + fun `test creating a bucket monitor`() { + val monitor = randomBucketLevelMonitor() + + val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + + assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) + val responseBody = createResponse.asMap() + val createdId = responseBody["_id"] as String + val createdVersion = responseBody["_version"] as Int + assertNotEquals("response is missing Id", Monitor.NO_ID, createdId) + assertTrue("incorrect version", createdVersion > 0) + assertEquals("Incorrect Location header", "$ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) + } + fun `test creating a monitor with legacy ODFE`() { val monitor = randomQueryLevelMonitor() val createResponse = client().makeRequest("POST", LEGACY_OPENDISTRO_ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) From f80b3e08bd8aac7fa048b18c8434d1230168b0c2 Mon Sep 17 00:00:00 2001 From: Surya Sashank Nistala Date: Fri, 4 Nov 2022 01:12:56 -0700 Subject: [PATCH 10/18] adds filtering on owner field in search monitor action (#641) Signed-off-by: Surya Sashank Nistala Signed-off-by: Surya Sashank Nistala --- .../resthandler/RestIndexMonitorAction.kt | 7 +++ .../resthandler/RestSearchMonitorAction.kt | 10 ---- .../transport/TransportSearchMonitorAction.kt | 25 ++++++++ .../alerting/DocumentMonitorRunnerIT.kt | 41 +++++++++++++ .../alerting/MonitorDataSourcesIT.kt | 57 +++++++++++++++++++ .../org/opensearch/alerting/TestHelpers.kt | 10 ++-- .../resources/mappings/scheduled-jobs.json | 9 +++ 7 files changed, 145 insertions(+), 14 deletions(-) diff --git 
a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt index 7925dff0a..c94afa8ae 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt @@ -85,6 +85,7 @@ class RestIndexMonitorAction : BaseRestHandler() { ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) val monitor = Monitor.parse(xcp, id).copy(lastUpdateTime = Instant.now()) validateDataSources(monitor) + validateOwner(monitor.owner) val monitorType = monitor.monitorType val triggers = monitor.triggers when (monitorType) { @@ -136,6 +137,12 @@ class RestIndexMonitorAction : BaseRestHandler() { } } + private fun validateOwner(owner: String?) { + if (owner != "alerting") { + throw IllegalArgumentException("Invalid owner field") + } + } + private fun indexMonitorResponse(channel: RestChannel, restMethod: RestRequest.Method): RestResponseListener { return object : RestResponseListener(channel) { diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt index 064748455..18a49de04 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt @@ -22,10 +22,8 @@ import org.opensearch.common.xcontent.LoggingDeprecationHandler import org.opensearch.common.xcontent.ToXContent.EMPTY_PARAMS import org.opensearch.common.xcontent.XContentFactory.jsonBuilder import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.Monitor import org.opensearch.commons.alerting.model.ScheduledJob import 
org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX -import org.opensearch.index.query.QueryBuilders import org.opensearch.rest.BaseRestHandler import org.opensearch.rest.BaseRestHandler.RestChannelConsumer import org.opensearch.rest.BytesRestResponse @@ -97,14 +95,6 @@ class RestSearchMonitorAction( searchSourceBuilder.parseXContent(request.contentOrSourceParamParser()) searchSourceBuilder.fetchSource(context(request)) - val queryBuilder = QueryBuilders.boolQuery().must(searchSourceBuilder.query()) - if (index == SCHEDULED_JOBS_INDEX) { - queryBuilder.filter(QueryBuilders.existsQuery(Monitor.MONITOR_TYPE)) - } - - searchSourceBuilder.query(queryBuilder) - .seqNoAndPrimaryTerm(true) - .version(true) val searchRequest = SearchRequest() .source(searchSourceBuilder) .indices(index) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt index f97e382ff..13ed9c9cb 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt @@ -20,7 +20,12 @@ import org.opensearch.client.Client import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject import org.opensearch.common.settings.Settings +import org.opensearch.commons.alerting.model.Monitor import org.opensearch.commons.authuser.User +import org.opensearch.index.query.BoolQueryBuilder +import org.opensearch.index.query.ExistsQueryBuilder +import org.opensearch.index.query.MatchQueryBuilder +import org.opensearch.index.query.QueryBuilders import org.opensearch.tasks.Task import org.opensearch.transport.TransportService @@ -43,6 +48,14 @@ class TransportSearchMonitorAction @Inject constructor( } override fun doExecute(task: Task, searchMonitorRequest: SearchMonitorRequest, 
actionListener: ActionListener) { + val searchSourceBuilder = searchMonitorRequest.searchRequest.source() + val queryBuilder = if (searchSourceBuilder.query() == null) BoolQueryBuilder() + else QueryBuilders.boolQuery().must(searchSourceBuilder.query()) + queryBuilder.filter(QueryBuilders.existsQuery(Monitor.MONITOR_TYPE)) + searchSourceBuilder.query(queryBuilder) + .seqNoAndPrimaryTerm(true) + .version(true) + addOwnerFieldIfNotExists(searchMonitorRequest.searchRequest) val user = readUserFromThreadContext(client) client.threadPool().threadContext.stashContext().use { resolve(searchMonitorRequest, actionListener, user) @@ -78,4 +91,16 @@ class TransportSearchMonitorAction @Inject constructor( } ) } + + private fun addOwnerFieldIfNotExists(searchRequest: SearchRequest) { + if (searchRequest.source().query() == null || searchRequest.source().query().toString().contains("monitor.owner") == false) { + var boolQueryBuilder: BoolQueryBuilder = if (searchRequest.source().query() == null) BoolQueryBuilder() + else QueryBuilders.boolQuery().must(searchRequest.source().query()) + val bqb = BoolQueryBuilder() + bqb.should().add(BoolQueryBuilder().mustNot(ExistsQueryBuilder("monitor.owner"))) + bqb.should().add(BoolQueryBuilder().must(MatchQueryBuilder("monitor.owner", "alerting"))) + boolQueryBuilder.filter(bqb) + searchRequest.source().query(boolQueryBuilder) + } + } } diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt index 4b2b52367..9ffc42b3a 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt @@ -669,4 +669,45 @@ class DocumentMonitorRunnerIT : AlertingRestTestCase() { private fun Map.objectMap(key: String): Map> { return this[key] as Map> } + + fun `test execute monitor with non-null owner`() { + + val testIndex = createTestIndex() + val 
testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + + val alertCategories = AlertCategory.values() + val actionExecutionScope = PerAlertActionScope( + actionableAlerts = (1..randomInt(alertCategories.size)).map { alertCategories[it - 1] }.toSet() + ) + val actionExecutionPolicy = ActionExecutionPolicy(actionExecutionScope) + val actions = (0..randomInt(10)).map { + randomActionWithPolicy( + template = randomTemplateScript("Hello {{ctx.monitor.name}}"), + destinationId = createDestination().id, + actionExecutionPolicy = actionExecutionPolicy + ) + } + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = actions) + try { + createMonitor( + randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + owner = "owner" + ) + ) + fail("Expected create monitor to fail") + } catch (e: ResponseException) { + assertTrue(e.message!!.contains("illegal_argument_exception")) + } + } } diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt index 6bed045fb..43c9dff3e 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt @@ -8,7 +8,10 @@ package org.opensearch.alerting import org.junit.Assert import org.opensearch.action.admin.cluster.state.ClusterStateRequest import org.opensearch.action.admin.indices.create.CreateIndexRequest +import org.opensearch.action.search.SearchRequest import org.opensearch.action.support.WriteRequest +import 
org.opensearch.alerting.action.SearchMonitorAction +import org.opensearch.alerting.action.SearchMonitorRequest import org.opensearch.alerting.core.ScheduledJobIndices import org.opensearch.alerting.transport.AlertingSingleNodeTestCase import org.opensearch.common.settings.Settings @@ -21,6 +24,7 @@ import org.opensearch.commons.alerting.model.DocLevelMonitorInput import org.opensearch.commons.alerting.model.DocLevelQuery import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX import org.opensearch.commons.alerting.model.Table +import org.opensearch.index.query.MatchQueryBuilder import org.opensearch.test.OpenSearchTestCase import java.time.ZonedDateTime import java.time.format.DateTimeFormatter @@ -319,6 +323,7 @@ class MonitorDataSourcesIT : AlertingSingleNodeTestCase() { val updateMonitorResponse = updateMonitor( monitor.copy( id = monitorId, + owner = "security_analytics_plugin", dataSources = DataSources( alertsIndex = customAlertsIndex, queryIndex = customQueryIndex, @@ -328,6 +333,7 @@ class MonitorDataSourcesIT : AlertingSingleNodeTestCase() { monitorId ) Assert.assertNotNull(updateMonitorResponse) + Assert.assertEquals(updateMonitorResponse!!.monitor.owner, "security_analytics_plugin") indexDoc(index, "2", testDoc) executeMonitorResponse = executeMonitor(updateMonitorResponse!!.monitor, monitorId, false) Assert.assertTrue(client().admin().cluster().state(ClusterStateRequest()).get().state.routingTable.hasIndex(customQueryIndex)) @@ -347,6 +353,16 @@ class MonitorDataSourcesIT : AlertingSingleNodeTestCase() { .get() Assert.assertTrue(getAlertsResponse != null) Assert.assertTrue(getAlertsResponse.alerts.size == 1) + val searchRequest = SearchRequest(SCHEDULED_JOBS_INDEX) + var searchMonitorResponse = + client().execute(SearchMonitorAction.INSTANCE, SearchMonitorRequest(searchRequest)) + .get() + Assert.assertEquals(searchMonitorResponse.hits.hits.size, 0) + searchRequest.source().query(MatchQueryBuilder("monitor.owner", 
"security_analytics_plugin")) + searchMonitorResponse = + client().execute(SearchMonitorAction.INSTANCE, SearchMonitorRequest(searchRequest)) + .get() + Assert.assertEquals(searchMonitorResponse.hits.hits.size, 1) } fun `test execute GetFindingsAction with monitorId param`() { @@ -427,6 +443,47 @@ class MonitorDataSourcesIT : AlertingSingleNodeTestCase() { } } + fun `test execute monitor with owner field`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(alertsIndex = customAlertsIndex), + owner = "owner" + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + Assert.assertEquals(monitor.owner, "owner") + indexDoc(index, "1", testDoc) + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + val alerts = searchAlerts(id, customAlertsIndex) + assertEquals("Alert saved for test monitor", 1, alerts.size) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, customAlertsIndex)) + .get() + Assert.assertTrue(getAlertsResponse != null) + 
Assert.assertTrue(getAlertsResponse.alerts.size == 1) + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + } + fun `test execute GetFindingsAction with unknown findingIndex param`() { val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt b/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt index 551e6d2c7..2b99b514a 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt @@ -162,12 +162,13 @@ fun randomDocumentLevelMonitor( triggers: List = (1..randomInt(10)).map { randomQueryLevelTrigger() }, enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), - withMetadata: Boolean = false + withMetadata: Boolean = false, + owner: String? = null ): Monitor { return Monitor( name = name, monitorType = Monitor.MonitorType.DOC_LEVEL_MONITOR, enabled = enabled, inputs = inputs, schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, - uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() + uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf(), owner = owner ) } @@ -181,12 +182,13 @@ fun randomDocumentLevelMonitor( enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), withMetadata: Boolean = false, - dataSources: DataSources + dataSources: DataSources, + owner: String? 
= null ): Monitor { return Monitor( name = name, monitorType = Monitor.MonitorType.DOC_LEVEL_MONITOR, enabled = enabled, inputs = inputs, schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, - uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf(), dataSources = dataSources + uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf(), dataSources = dataSources, owner = owner ) } diff --git a/core/src/main/resources/mappings/scheduled-jobs.json b/core/src/main/resources/mappings/scheduled-jobs.json index 8254e1ebf..5ee414c0e 100644 --- a/core/src/main/resources/mappings/scheduled-jobs.json +++ b/core/src/main/resources/mappings/scheduled-jobs.json @@ -18,6 +18,15 @@ } } }, + "owner": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, "monitor_type": { "type": "keyword" }, From 0821a7833b5aabb3e039082630f70477ac1ad681 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 4 Nov 2022 09:01:41 -0700 Subject: [PATCH 11/18] Support to specify backend roles for monitors (#635) (#644) * Support specify backend roles for monitors Signed-off-by: Ashish Agrawal (cherry picked from commit f986238c477132611f15859c2e68da9cd0acfdf1) Co-authored-by: Ashish Agrawal --- .github/workflows/security-test-workflow.yml | 7 +- .../resthandler/RestIndexMonitorAction.kt | 4 +- .../transport/SecureTransportAction.kt | 2 +- .../transport/TransportIndexMonitorAction.kt | 83 +++ .../org/opensearch/alerting/AccessRoles.kt | 1 + .../alerting/AlertingRestTestCase.kt | 85 ++- .../resthandler/SecureDestinationRestApiIT.kt | 2 +- .../SecureEmailAccountRestApiIT.kt | 8 +- .../resthandler/SecureEmailGroupsRestApiIT.kt | 4 +- .../resthandler/SecureMonitorRestApiIT.kt | 541 +++++++++++++++++- 10 files changed, 708 insertions(+), 29 deletions(-) diff --git 
a/.github/workflows/security-test-workflow.yml b/.github/workflows/security-test-workflow.yml index 7cb66242c..127962210 100644 --- a/.github/workflows/security-test-workflow.yml +++ b/.github/workflows/security-test-workflow.yml @@ -43,7 +43,12 @@ jobs: plugin_version=`echo $plugin|awk -F- '{print $3}'| cut -d. -f 1-4` qualifier=`echo $plugin|awk -F- '{print $4}'| cut -d. -f 1-1` candidate_version=`echo $plugin|awk -F- '{print $5}'| cut -d. -f 1-1` - docker_version=$version-$qualifier + if qualifier + then + docker_version=$version-$qualifier + else + docker_version=$version + fi [[ -z $candidate_version ]] && candidate_version=$qualifier && qualifier="" diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt index c94afa8ae..5aafba1be 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt @@ -84,6 +84,8 @@ class RestIndexMonitorAction : BaseRestHandler() { val xcp = request.contentParser() ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) val monitor = Monitor.parse(xcp, id).copy(lastUpdateTime = Instant.now()) + val rbacRoles = request.contentParser().map()["rbac_roles"] as List? 
+ validateDataSources(monitor) validateOwner(monitor.owner) val monitorType = monitor.monitorType @@ -118,7 +120,7 @@ class RestIndexMonitorAction : BaseRestHandler() { } else { WriteRequest.RefreshPolicy.IMMEDIATE } - val indexMonitorRequest = IndexMonitorRequest(id, seqNo, primaryTerm, refreshPolicy, request.method(), monitor) + val indexMonitorRequest = IndexMonitorRequest(id, seqNo, primaryTerm, refreshPolicy, request.method(), monitor, rbacRoles) return RestChannelConsumer { channel -> client.execute(AlertingActions.INDEX_MONITOR_ACTION_TYPE, indexMonitorRequest, indexMonitorResponse(channel, request.method())) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/SecureTransportAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/SecureTransportAction.kt index d5303fc18..4b3a403c7 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/SecureTransportAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/SecureTransportAction.kt @@ -61,7 +61,7 @@ interface SecureTransportAction { /** * 'all_access' role users are treated as admins. 
*/ - private fun isAdmin(user: User?): Boolean { + fun isAdmin(user: User?): Boolean { return when { user == null -> { false diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt index a00cde540..0546d0733 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt @@ -123,6 +123,40 @@ class TransportIndexMonitorAction @Inject constructor( return } + if ( + user != null && + !isAdmin(user) && + transformedRequest.rbacRoles != null + ) { + if (transformedRequest.rbacRoles?.stream()?.anyMatch { !user.backendRoles.contains(it) } == true) { + log.debug( + "User specified backend roles, ${transformedRequest.rbacRoles}, " + + "that they don' have access to. User backend roles: ${user.backendRoles}" + ) + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "User specified backend roles that they don't have access to. Contact administrator", RestStatus.FORBIDDEN + ) + ) + ) + return + } else if (transformedRequest.rbacRoles?.isEmpty() == true) { + log.debug( + "Non-admin user are not allowed to specify an empty set of backend roles. " + + "Please don't pass in the parameter or pass in at least one backend role." 
+ ) + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Non-admin user are not allowed to specify an empty set of backend roles.", RestStatus.FORBIDDEN + ) + ) + ) + return + } + } + if (!isADMonitor(transformedRequest.monitor)) { checkIndicesAndExecute(client, actionListener, transformedRequest, user) } else { @@ -405,6 +439,19 @@ class TransportIndexMonitorAction @Inject constructor( private suspend fun indexMonitor() { var metadata = createMetadata() + if (user != null) { + // Use the backend roles which is an intersection of the requested backend roles and the user's backend roles. + // Admins can pass in any backend role. Also if no backend role is passed in, all the user's backend roles are used. + val rbacRoles = if (request.rbacRoles == null) user.backendRoles.toSet() + else if (!isAdmin(user)) request.rbacRoles?.intersect(user.backendRoles)?.toSet() + else request.rbacRoles + + request.monitor = request.monitor.copy( + user = User(user.name, rbacRoles.orEmpty().toList(), user.roles, user.customAttNames) + ) + log.debug("Created monitor's backend roles: $rbacRoles") + } + val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX) .setRefreshPolicy(request.refreshPolicy) .source(request.monitor.toXContentWithUser(jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true")))) @@ -499,6 +546,42 @@ class TransportIndexMonitorAction @Inject constructor( if (request.monitor.enabled && currentMonitor.enabled) request.monitor = request.monitor.copy(enabledTime = currentMonitor.enabledTime) + /** + * On update monitor check which backend roles to associate to the monitor. + * Below are 2 examples of how the logic works + * + * Example 1, say we have a Monitor with backend roles [a, b, c, d] associated with it. + * If I'm User A (non-admin user) and I have backend roles [a, b, c] associated with me and I make a request to update + * the Monitor's backend roles to [a, b]. 
This would mean that the roles to remove are [c] and the roles to add are [a, b]. + * The Monitor's backend roles would then be [a, b, d]. + * + * Example 2, say we have a Monitor with backend roles [a, b, c, d] associated with it. + * If I'm User A (admin user) and I have backend roles [a, b, c] associated with me and I make a request to update + * the Monitor's backend roles to [a, b]. This would mean that the roles to remove are [c, d] and the roles to add are [a, b]. + * The Monitor's backend roles would then be [a, b]. + */ + if (user != null) { + if (request.rbacRoles != null) { + if (isAdmin(user)) { + request.monitor = request.monitor.copy( + user = User(user.name, request.rbacRoles, user.roles, user.customAttNames) + ) + } else { + // rolesToRemove: these are the backend roles to remove from the monitor + val rolesToRemove = user.backendRoles - request.rbacRoles.orEmpty() + // remove the monitor's roles with rolesToRemove and add any roles passed into the request.rbacRoles + val updatedRbac = currentMonitor.user?.backendRoles.orEmpty() - rolesToRemove + request.rbacRoles.orEmpty() + request.monitor = request.monitor.copy( + user = User(user.name, updatedRbac, user.roles, user.customAttNames) + ) + } + } else { + request.monitor = request.monitor + .copy(user = User(user.name, currentMonitor.user!!.backendRoles, user.roles, user.customAttNames)) + } + log.debug("Update monitor backend roles to: ${request.monitor.user?.backendRoles}") + } + request.monitor = request.monitor.copy(schemaVersion = IndexUtils.scheduledJobIndexSchemaVersion) val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX) .setRefreshPolicy(request.refreshPolicy) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/AccessRoles.kt b/alerting/src/test/kotlin/org/opensearch/alerting/AccessRoles.kt index 918a1f1c3..d3b73779c 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/AccessRoles.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/AccessRoles.kt @@ -6,6 +6,7 @@ 
package org.opensearch.alerting val ALL_ACCESS_ROLE = "all_access" +val READALL_AND_MONITOR_ROLE = "readall_and_monitor" val ALERTING_FULL_ACCESS_ROLE = "alerting_full_access" val ALERTING_READ_ONLY_ACCESS = "alerting_read_access" val ALERTING_NO_ACCESS_ROLE = "no_access" diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt b/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt index 647132a1a..bacdd2c40 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt @@ -106,10 +106,26 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { return entityAsMap(this) } - protected fun createMonitorWithClient(client: RestClient, monitor: Monitor, refresh: Boolean = true): Monitor { + private fun createMonitorEntityWithBackendRoles(monitor: Monitor, rbacRoles: List?): HttpEntity { + if (rbacRoles == null) { + return monitor.toHttpEntity() + } + val temp = monitor.toJsonString() + val toReplace = temp.lastIndexOf("}") + val rbacString = rbacRoles.joinToString { "\"$it\"" } + val jsonString = temp.substring(0, toReplace) + ", \"rbac_roles\": [$rbacString] }" + return StringEntity(jsonString, APPLICATION_JSON) + } + + protected fun createMonitorWithClient( + client: RestClient, + monitor: Monitor, + rbacRoles: List? 
= null, + refresh: Boolean = true + ): Monitor { val response = client.makeRequest( "POST", "$ALERTING_BASE_URI?refresh=$refresh", emptyMap(), - monitor.toHttpEntity() + createMonitorEntityWithBackendRoles(monitor, rbacRoles) ) assertEquals("Unable to create a new monitor", RestStatus.CREATED, response.restStatus()) @@ -123,7 +139,7 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { } protected fun createMonitor(monitor: Monitor, refresh: Boolean = true): Monitor { - return createMonitorWithClient(client(), monitor, refresh) + return createMonitorWithClient(client(), monitor, emptyList(), refresh) } protected fun deleteMonitor(monitor: Monitor, refresh: Boolean = true): Response { @@ -499,6 +515,21 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { return getMonitor(monitorId = monitor.id) } + protected fun updateMonitorWithClient( + client: RestClient, + monitor: Monitor, + rbacRoles: List = emptyList(), + refresh: Boolean = true + ): Monitor { + val response = client.makeRequest( + "PUT", "${monitor.relativeUrl()}?refresh=$refresh", + emptyMap(), createMonitorEntityWithBackendRoles(monitor, rbacRoles) + ) + assertEquals("Unable to update a monitor", RestStatus.OK, response.restStatus()) + assertUserNull(response.asMap()["monitor"] as Map) + return getMonitor(monitorId = monitor.id) + } + protected fun getMonitor(monitorId: String, header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json")): Monitor { val response = client().makeRequest("GET", "$ALERTING_BASE_URI/$monitorId", null, header) assertEquals("Unable to get monitor $monitorId", RestStatus.OK, response.restStatus()) @@ -1089,6 +1120,18 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { client().performRequest(request) } + fun patchUserBackendRoles(name: String, backendRoles: Array) { + val request = Request("PATCH", "/_plugins/_security/api/internalusers/$name") + val broles = backendRoles.joinToString { "\"$it\"" } + var entity = " [{\n" + + "\"op\": 
\"replace\",\n" + + "\"path\": \"/backend_roles\",\n" + + "\"value\": [$broles]\n" + + "}]" + request.setJsonEntity(entity) + client().performRequest(request) + } + fun createIndexRole(name: String, index: String) { val request = Request("PUT", "/_plugins/_security/api/roles/$name") var entity = "{\n" + @@ -1174,6 +1217,22 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { client().performRequest(request) } + fun updateRoleMapping(role: String, users: List, addUser: Boolean) { + val request = Request("PATCH", "/_plugins/_security/api/rolesmapping/$role") + val usersStr = users.joinToString { it -> "\"$it\"" } + + val op = if (addUser) "add" else "remove" + + val entity = "[{\n" + + " \"op\" : \"$op\",\n" + + " \"path\" : \"/users\",\n" + + " \"value\" : [$usersStr]\n" + + "}]" + + request.setJsonEntity(entity) + client().performRequest(request) + } + fun deleteUser(name: String) { client().makeRequest("DELETE", "/_plugins/_security/api/internalusers/$name") } @@ -1202,15 +1261,31 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { user: String, index: String, role: String, - backendRole: String, + backendRoles: List, clusterPermissions: String? 
) { - createUser(user, user, arrayOf(backendRole)) + createUser(user, user, backendRoles.toTypedArray()) createTestIndex(index) createCustomIndexRole(role, index, clusterPermissions) createUserRolesMapping(role, arrayOf(user)) } + fun createUserWithRoles( + user: String, + roles: List, + backendRoles: List, + isExistingRole: Boolean + ) { + createUser(user, user, backendRoles.toTypedArray()) + for (role in roles) { + if (isExistingRole) { + updateRoleMapping(role, listOf(user), true) + } else { + createUserRolesMapping(role, arrayOf(user)) + } + } + } + fun createUserWithDocLevelSecurityTestData( user: String, index: String, diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt index 598b4f98c..35b9cad43 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt @@ -139,7 +139,7 @@ class SecureDestinationRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_GET_DESTINATION_ACCESS) ) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt index 74bb75ff7..b27b6285a 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt @@ -76,7 +76,7 @@ class SecureEmailAccountRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_GET_EMAIL_ACCOUNT_ACCESS) ) @@ -105,7 +105,7 @@ class 
SecureEmailAccountRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_SEARCH_EMAIL_ACCOUNT_ACCESS) ) @@ -132,7 +132,7 @@ class SecureEmailAccountRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) ) @@ -162,7 +162,7 @@ class SecureEmailAccountRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) ) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt index 72fb317e1..13e51e62d 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt @@ -78,7 +78,7 @@ class SecureEmailGroupsRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_GET_EMAIL_GROUP_ACCESS) ) @@ -105,7 +105,7 @@ class SecureEmailGroupsRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_SEARCH_EMAIL_GROUP_ACCESS) ) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt index 9888578d0..4b6f18f50 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt +++ 
b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt @@ -25,6 +25,7 @@ import org.opensearch.alerting.ALL_ACCESS_ROLE import org.opensearch.alerting.ALWAYS_RUN import org.opensearch.alerting.AlertingRestTestCase import org.opensearch.alerting.DRYRUN_MONITOR +import org.opensearch.alerting.READALL_AND_MONITOR_ROLE import org.opensearch.alerting.TEST_HR_BACKEND_ROLE import org.opensearch.alerting.TEST_HR_INDEX import org.opensearch.alerting.TEST_HR_ROLE @@ -116,7 +117,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) ) try { @@ -142,7 +143,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_READ_ONLY_ACCESS) ) try { @@ -169,7 +170,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_SEARCH_MONITOR_ONLY_ACCESS) ) val monitor = createRandomMonitor(true) @@ -196,7 +197,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) ) try { @@ -223,7 +224,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) ) try { @@ -257,7 +258,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) ) @@ -283,7 +284,7 @@ class 
SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) ) @@ -349,6 +350,518 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { assertFalse("The monitor was not disabled", updatedMonitor.enabled) } + fun `test create monitor with enable filter by with a user have access and without role has no access`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, getUser) + .setSocketTimeout(60000).build() + + val getMonitorResponse = getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + // Remove backend role and ensure no access is granted after + patchUserBackendRoles(getUser, arrayOf("role1")) + try { + getUserClient?.makeRequest( + "GET", + 
"$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + } + } + + fun `test create monitor with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + try { + createMonitorWithClient(userClient!!, monitor = monitor, listOf()) + fail("Expected exception since a non-admin user is trying to create a monitor with no backend roles") + } catch (e: ResponseException) { + assertEquals("Create monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test create monitor as admin with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(client(), monitor = monitor, listOf()) + assertNotNull("The monitor was not created", createdMonitor) + + try { + 
userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test create monitor with enable filter by with roles user has no access and throw exception`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + try { + createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role1", "role2")) + fail("Expected create monitor to fail as user does not have role1 backend role") + } catch (e: ResponseException) { + assertEquals("Create monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test create monitor as admin with enable filter by with a user have access and without role has no access`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + val createdMonitor = createMonitorWithClient(client(), monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role1", "role2")) + assertNotNull("The monitor was not created", 
createdMonitor) + + // user should have access to the admin monitor + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + + val getMonitorResponse = userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + // Remove good backend role and ensure no access is granted after + patchUserBackendRoles(user, arrayOf("role5")) + try { + userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test update monitor with enable filter by with removing a permission`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + val getUserClient = 
SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, getUser) + .setSocketTimeout(60000).build() + + val getMonitorResponse = getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + // Remove backend role from monitor + val updatedMonitor = updateMonitorWithClient(userClient!!, createdMonitor, listOf(TEST_HR_BACKEND_ROLE)) + + // getUser should no longer have access + try { + getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${updatedMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update monitor with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf("role2")) + assertNotNull("The monitor was not created", createdMonitor) + + try { + updateMonitorWithClient(userClient!!, createdMonitor, listOf()) + } catch (e: ResponseException) { + assertEquals("Update monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } 
finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update monitor as admin with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(client(), monitor = monitor, listOf(TEST_HR_BACKEND_ROLE)) + assertNotNull("The monitor was not created", createdMonitor) + + val getMonitorResponse = userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + val updatedMonitor = updateMonitorWithClient(client(), createdMonitor, listOf()) + + try { + userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${updatedMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update monitor with enable filter by with updating with a permission user has no access to and throw exception`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = 
true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, getUser) + .setSocketTimeout(60000).build() + + val getMonitorResponse = getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + try { + updateMonitorWithClient(userClient!!, createdMonitor, listOf(TEST_HR_BACKEND_ROLE, "role1")) + fail("Expected update monitor to fail as user doesn't have access to role1") + } catch (e: ResponseException) { + assertEquals("Update monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update monitor as another user with enable filter by with removing a permission and adding permission`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, 
READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE)) + assertNotNull("The monitor was not created", createdMonitor) + + // Remove backend role from monitor with new user and add role5 + val updateUser = "updateUser" + createUserWithRoles( + updateUser, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role5"), + false + ) + + val updateUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), updateUser, updateUser) + .setSocketTimeout(60000).build() + val updatedMonitor = updateMonitorWithClient(updateUserClient, createdMonitor, listOf("role5")) + + // old user should no longer have access + try { + userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${updatedMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteUser(updateUser) + updateUserClient?.close() + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update monitor as admin with enable filter by with removing a permission`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", 
createdMonitor) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role1", "role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, getUser) + .setSocketTimeout(60000).build() + + val getMonitorResponse = getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + // Remove backend role from monitor + val updatedMonitor = updateMonitorWithClient(client(), createdMonitor, listOf("role4")) + + // original user should no longer have access + try { + userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${updatedMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + + // get user should no longer have access + try { + getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${updatedMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + } + } + fun `test delete monitor with disable filter by`() { disableFilterBy() val monitor = randomQueryLevelMonitor(enabled = true) @@ -513,7 +1026,7 @@ class 
SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_EXECUTE_MONITOR_ACCESS) ) @@ -538,7 +1051,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) ) @@ -565,7 +1078,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_DELETE_MONITOR_ACCESS) ) @@ -592,7 +1105,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) ) @@ -712,7 +1225,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_GET_ALERTS_ACCESS) ) try { @@ -820,7 +1333,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), TERM_DLS_QUERY, getClusterPermissionsFromCustomRole(ALERTING_FULL_ACCESS_ROLE) ) @@ -874,7 +1387,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), TERM_DLS_QUERY, getClusterPermissionsFromCustomRole(ALERTING_FULL_ACCESS_ROLE) ) From ab2a612b6a2d71c67e06729b3c994e7d6a801bc7 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 4 Nov 2022 12:00:11 -0700 Subject: [PATCH 12/18] searchAlert fix (#613) (#614) (#633) Signed-off-by: Petar Dzepina (cherry picked from commit 
996190ea19baeff075e10ca7ea91c0cf01993d43) Co-authored-by: Petar Dzepina (cherry picked from commit a3e6a1c8577977fccbda3eaa150773bdfb0e7974) From 07a029deda1320fbb1c74cd2573990f3feb0634d Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 4 Nov 2022 19:08:01 -0700 Subject: [PATCH 13/18] Add 2.4 release notes (#647) (#649) Signed-off-by: Ashish Agrawal Signed-off-by: Ashish Agrawal (cherry picked from commit bb84683fad196b41a5bf3ac768398150c2430f66) Co-authored-by: Ashish Agrawal --- ...ensearch-alerting.release-notes-2.4.0.0.md | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 release-notes/opensearch-alerting.release-notes-2.4.0.0.md diff --git a/release-notes/opensearch-alerting.release-notes-2.4.0.0.md b/release-notes/opensearch-alerting.release-notes-2.4.0.0.md new file mode 100644 index 000000000..97546edbe --- /dev/null +++ b/release-notes/opensearch-alerting.release-notes-2.4.0.0.md @@ -0,0 +1,38 @@ +## Version 2.4.0.0 2022-11-04 + +Compatible with OpenSearch 2.4.0 + +### Enhancements +* Support multiple data sources ([#558](https://github.com/opensearch-project/alerting/pull/558])) +* Use custom query index in update monitor flow ([#591](https://github.com/opensearch-project/alerting/pull/591])) +* Enhance Get Alerts and Get Findings for list of monitors in bulk ([#590](https://github.com/opensearch-project/alerting/pull/590])) +* Support fetching alerts by list of alert ids in Get Alerts Action ([#608](https://github.com/opensearch-project/alerting/pull/608])) +* Ack alerts - allow moving alerts to history index with custom datasources ([#626](https://github.com/opensearch-project/alerting/pull/626])) +* Enabled parsing of bucket level monitors ([#631](https://github.com/opensearch-project/alerting/pull/631])) +* Custom history indices 
([#616](https://github.com/opensearch-project/alerting/pull/616)) +* adds filtering on owner field in search monitor action ([#641](https://github.com/opensearch-project/alerting/pull/641)) +* Support to specify backend roles for monitors ([#635](https://github.com/opensearch-project/alerting/pull/635)) + +### Refactoring +* moving over data models from alerting to common-utils ([#556](https://github.com/opensearch-project/alerting/pull/556)) +* expose delete monitor api from alerting ([#568](https://github.com/opensearch-project/alerting/pull/568)) +* Use findings and alerts models, dtos from common utils ([#569](https://github.com/opensearch-project/alerting/pull/569)) +* Recreate request object from writeable for Get alerts and get findings ([#577](https://github.com/opensearch-project/alerting/pull/577)) +* Use acknowledge alert request,response, actions from common-utils dependencies ([#606](https://github.com/opensearch-project/alerting/pull/606)) +* expose delete monitor api from alerting ([#568](https://github.com/opensearch-project/alerting/pull/568)) +* refactored DeleteMonitor Action to be synchronous ([#628](https://github.com/opensearch-project/alerting/pull/628)) + +### Bug Fixes +* add tags to trigger condition of doc-level monitor ([#598](https://github.com/opensearch-project/alerting/pull/598)) +* searchAlert fix ([#613](https://github.com/opensearch-project/alerting/pull/613)) +* Fix Acknowledge Alert Request class loader issue ([#618](https://github.com/opensearch-project/alerting/pull/618)) +* fix alias exists check in findings index creation ([#622](https://github.com/opensearch-project/alerting/pull/622)) +* add tags to trigger condition of doc-level monitor ([#598](https://github.com/opensearch-project/alerting/pull/598)) +* fix for windows ktlint issue 
([#585](https://github.com/opensearch-project/alerting/pull/585])) + +### Infrastructure +* Disable ktlint for alerting as it has errors on Windows ([#570](https://github.com/opensearch-project/alerting/pull/570])) +* Remove plugin to OS min race condition ([#601](https://github.com/opensearch-project/alerting/pull/601])) + +### Documentation +* Add 2.4 release notes ([#646](https://github.com/opensearch-project/alerting/pull/646)) \ No newline at end of file From fa3a13928bc2468a864dbccd3943157ce6ae0cd4 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Sat, 5 Nov 2022 22:33:36 -0700 Subject: [PATCH 14/18] Adds findings in bucket level monitor (#636) (#652) * bucket level monitor findings Signed-off-by: Surya Sashank Nistala * add test to verify bucket level monitor findings Signed-off-by: Surya Sashank Nistala * added tests. fixed document ids in bucket level monitor findings Signed-off-by: Surya Sashank Nistala Signed-off-by: Surya Sashank Nistala (cherry picked from commit 5b451b988b7cad0b5a1076daa8908c2fd68db154) Co-authored-by: Surya Sashank Nistala --- .../org/opensearch/alerting/AlertService.kt | 9 +- .../alerting/BucketLevelMonitorRunner.kt | 149 +++++++++++++++++- .../opensearch/alerting/AlertServiceTests.kt | 20 ++- .../alerting/AlertingRestTestCase.kt | 6 +- .../alerting/MonitorRunnerServiceIT.kt | 138 ++++++++++++++++ .../org/opensearch/alerting/TestHelpers.kt | 26 +++ 6 files changed, 336 insertions(+), 12 deletions(-) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt b/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt index 7326c7c01..2c913c135 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt @@ -15,6 +15,7 @@ import org.opensearch.action.delete.DeleteRequest import 
org.opensearch.action.index.IndexRequest import org.opensearch.action.search.SearchRequest import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.WriteRequest import org.opensearch.alerting.alerts.AlertIndices import org.opensearch.alerting.model.ActionRunResult import org.opensearch.alerting.model.QueryLevelTriggerRunResult @@ -236,7 +237,8 @@ class AlertService( monitor: Monitor, trigger: BucketLevelTrigger, currentAlerts: MutableMap, - aggResultBuckets: List + aggResultBuckets: List, + findings: List ): Map> { val dedupedAlerts = mutableListOf() val newAlerts = mutableListOf() @@ -256,7 +258,8 @@ class AlertService( monitor = monitor, trigger = trigger, startTime = currentTime, lastNotificationTime = null, state = Alert.State.ACTIVE, errorMessage = null, errorHistory = mutableListOf(), actionExecutionResults = mutableListOf(), - schemaVersion = IndexUtils.alertIndexSchemaVersion, aggregationResultBucket = aggAlertBucket + schemaVersion = IndexUtils.alertIndexSchemaVersion, aggregationResultBucket = aggAlertBucket, + findingIds = findings ) newAlerts.add(newAlert) } @@ -381,7 +384,7 @@ class AlertService( // If the index request is to be retried, the Alert is saved separately as well so that its relative ordering is maintained in // relation to index request in the retried bulk request for when it eventually succeeds. 
retryPolicy.retry(logger, listOf(RestStatus.TOO_MANY_REQUESTS)) { - val bulkRequest = BulkRequest().add(requestsToRetry) + val bulkRequest = BulkRequest().add(requestsToRetry).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) val bulkResponse: BulkResponse = client.suspendUntil { client.bulk(bulkRequest, it) } // TODO: This is only used to retrieve the retryCause, could instead fetch it from the bulkResponse iteration below val failedResponses = (bulkResponse.items ?: arrayOf()).filter { it.isFailed } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt b/alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt index c96f4ed57..d3f534528 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt @@ -6,24 +6,49 @@ package org.opensearch.alerting import org.apache.logging.log4j.LogManager +import org.opensearch.action.bulk.BulkRequest +import org.opensearch.action.bulk.BulkResponse +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.WriteRequest import org.opensearch.alerting.model.ActionRunResult import org.opensearch.alerting.model.BucketLevelTriggerRunResult import org.opensearch.alerting.model.InputRunResults import org.opensearch.alerting.model.MonitorRunResult import org.opensearch.alerting.opensearchapi.InjectorContextElement +import org.opensearch.alerting.opensearchapi.retry +import org.opensearch.alerting.opensearchapi.suspendUntil import org.opensearch.alerting.opensearchapi.withClosableContext import org.opensearch.alerting.script.BucketLevelTriggerExecutionContext import org.opensearch.alerting.util.defaultToPerExecutionAction import org.opensearch.alerting.util.getActionExecutionPolicy import org.opensearch.alerting.util.getBucketKeysHash import 
org.opensearch.alerting.util.getCombinedTriggerRunResult +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.ToXContent +import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.common.xcontent.XContentType import org.opensearch.commons.alerting.model.Alert import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.Finding import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.SearchInput import org.opensearch.commons.alerting.model.action.AlertCategory import org.opensearch.commons.alerting.model.action.PerAlertActionScope import org.opensearch.commons.alerting.model.action.PerExecutionActionScope +import org.opensearch.commons.alerting.util.string +import org.opensearch.index.query.BoolQueryBuilder +import org.opensearch.index.query.QueryBuilders +import org.opensearch.rest.RestStatus +import org.opensearch.script.Script +import org.opensearch.script.ScriptType +import org.opensearch.script.TemplateScript +import org.opensearch.search.aggregations.AggregatorFactories +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder +import org.opensearch.search.builder.SearchSourceBuilder import java.time.Instant +import java.util.UUID object BucketLevelMonitorRunner : MonitorRunner() { private val logger = LogManager.getLogger(javaClass) @@ -116,11 +141,20 @@ object BucketLevelMonitorRunner : MonitorRunner() { * existing Alerts in a way the user can easily view them since they will have all been moved to the history index. 
*/ if (triggerResults[trigger.id]?.error != null) continue - + val findings = + if (monitor.triggers.size == 1 && monitor.dataSources.findingsEnabled == true) createFindings( + triggerResult, + monitor, + monitorCtx, + periodStart, + periodEnd, + !dryrun && monitor.id != Monitor.NO_ID + ) + else emptyList() // TODO: Should triggerResult's aggregationResultBucket be a list? If not, getCategorizedAlertsForBucketLevelMonitor can // be refactored to use a map instead val categorizedAlerts = monitorCtx.alertService!!.getCategorizedAlertsForBucketLevelMonitor( - monitor, trigger, currentAlertsForTrigger, triggerResult.aggregationResultBuckets.values.toList() + monitor, trigger, currentAlertsForTrigger, triggerResult.aggregationResultBuckets.values.toList(), findings ).toMutableMap() val dedupedAlerts = categorizedAlerts.getOrDefault(AlertCategory.DEDUPED, emptyList()) var newAlerts = categorizedAlerts.getOrDefault(AlertCategory.NEW, emptyList()) @@ -287,6 +321,117 @@ object BucketLevelMonitorRunner : MonitorRunner() { return monitorResult.copy(inputResults = firstPageOfInputResults, triggerResults = triggerResults) } + private suspend fun createFindings( + triggerResult: BucketLevelTriggerRunResult, + monitor: Monitor, + monitorCtx: MonitorRunnerExecutionContext, + periodStart: Instant, + periodEnd: Instant, + shouldCreateFinding: Boolean + ): List { + monitor.inputs.forEach { input -> + if (input is SearchInput) { + val bucketValues: Set = triggerResult.aggregationResultBuckets.keys + val query = input.query + var fieldName = "" + var grouByFields = 0 // if number of fields used to group by > 1 we won't calculate findings + for (aggFactory in (query.aggregations() as AggregatorFactories.Builder).aggregatorFactories) { + val sources = (aggFactory as CompositeAggregationBuilder).sources() + for (source in sources) { + if (grouByFields > 0) { + return listOf() + } + grouByFields++ + fieldName = source.field() + } + } + if (fieldName != "") { + val searchParams = mapOf( + 
"period_start" to periodStart.toEpochMilli(), + "period_end" to periodEnd.toEpochMilli() + ) + val searchSource = monitorCtx.scriptService!!.compile( + Script( + ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG, + query.toString(), searchParams + ), + TemplateScript.CONTEXT + ) + .newInstance(searchParams) + .execute() + val sr = SearchRequest(*input.indices.toTypedArray()) + XContentType.JSON.xContent().createParser(monitorCtx.xContentRegistry, LoggingDeprecationHandler.INSTANCE, searchSource) + .use { + val source = SearchSourceBuilder.fromXContent(it) + val queryBuilder = if (input.query.query() == null) BoolQueryBuilder() + else QueryBuilders.boolQuery().must(source.query()) + queryBuilder.filter(QueryBuilders.termsQuery(fieldName, bucketValues)) + sr.source().query(queryBuilder) + } + val searchResponse: SearchResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.search(sr, it) } + return createFindingPerIndex(searchResponse, monitor, monitorCtx, shouldCreateFinding) + } + } + } + return listOf() + } + + private suspend fun createFindingPerIndex( + searchResponse: SearchResponse, + monitor: Monitor, + monitorCtx: MonitorRunnerExecutionContext, + shouldCreateFinding: Boolean + ): List { + val docIdsByIndexName: MutableMap> = mutableMapOf() + for (hit in searchResponse.hits.hits) { + val ids = docIdsByIndexName.getOrDefault(hit.index, mutableListOf()) + ids.add(hit.id) + docIdsByIndexName[hit.index] = ids + } + val findings = mutableListOf() + var requestsToRetry: MutableList = mutableListOf() + docIdsByIndexName.entries.forEach { it -> + run { + val finding = Finding( + id = UUID.randomUUID().toString(), + relatedDocIds = it.value, + monitorId = monitor.id, + monitorName = monitor.name, + index = it.key, + timestamp = Instant.now(), + docLevelQueries = listOf() + ) + + val findingStr = finding.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS).string() + logger.debug("Findings: $findingStr") + if 
(shouldCreateFinding) { + val indexRequest = IndexRequest(monitor.dataSources.findingsIndex) + .source(findingStr, XContentType.JSON) + .id(finding.id) + .routing(finding.id) + requestsToRetry.add(indexRequest) + } + findings.add(finding.id) + } + } + if (requestsToRetry.isEmpty()) return listOf() + monitorCtx.retryPolicy!!.retry(logger, listOf(RestStatus.TOO_MANY_REQUESTS)) { + val bulkRequest = BulkRequest().add(requestsToRetry).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + val bulkResponse: BulkResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.bulk(bulkRequest, it) } + requestsToRetry = mutableListOf() + val findingsBeingRetried = mutableListOf() + bulkResponse.items.forEach { item -> + if (item.isFailed) { + if (item.status() == RestStatus.TOO_MANY_REQUESTS) { + requestsToRetry.add(bulkRequest.requests()[item.itemId] as IndexRequest) + findingsBeingRetried.add(findingsBeingRetried[item.itemId]) + } + } + } + } + return findings + } + private fun getActionContextForAlertCategory( alertCategory: AlertCategory, alert: Alert, diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt index 6fc2055dd..982a3281e 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt @@ -83,7 +83,9 @@ class AlertServiceTests : OpenSearchTestCase() { ) ) - val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(monitor, trigger, currentAlerts, aggResultBuckets) + val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor( + monitor, trigger, currentAlerts, aggResultBuckets, emptyList() + ) // Completed Alerts are what remains in currentAlerts after categorization val completedAlerts = currentAlerts.values.toList() assertEquals(listOf(), categorizedAlerts[AlertCategory.DEDUPED]) @@ -115,7 +117,9 @@ class AlertServiceTests 
: OpenSearchTestCase() { ) ) - val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(monitor, trigger, currentAlerts, aggResultBuckets) + val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor( + monitor, trigger, currentAlerts, aggResultBuckets, emptyList() + ) // Completed Alerts are what remains in currentAlerts after categorization val completedAlerts = currentAlerts.values.toList() assertAlertsExistForBucketKeys( @@ -142,7 +146,9 @@ class AlertServiceTests : OpenSearchTestCase() { ) val aggResultBuckets = listOf() - val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(monitor, trigger, currentAlerts, aggResultBuckets) + val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor( + monitor, trigger, currentAlerts, aggResultBuckets, emptyList() + ) // Completed Alerts are what remains in currentAlerts after categorization val completedAlerts = currentAlerts.values.toList() assertEquals(listOf(), categorizedAlerts[AlertCategory.DEDUPED]) @@ -174,7 +180,9 @@ class AlertServiceTests : OpenSearchTestCase() { ) ) - val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(monitor, trigger, currentAlerts, aggResultBuckets) + val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor( + monitor, trigger, currentAlerts, aggResultBuckets, emptyList() + ) // Completed Alerts are what remains in currentAlerts after categorization val completedAlerts = currentAlerts.values.toList() assertAlertsExistForBucketKeys(listOf(listOf("b")), categorizedAlerts[AlertCategory.DEDUPED] ?: error("Deduped alerts not found")) @@ -198,7 +206,9 @@ class AlertServiceTests : OpenSearchTestCase() { ) ) - val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(monitor, trigger, currentAlerts, aggResultBuckets) + val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor( + monitor, trigger, currentAlerts, 
aggResultBuckets, emptyList() + ) // Completed Alerts are what remains in currentAlerts after categorization val completedAlerts = currentAlerts.values.toList() assertAlertsExistForBucketKeys(listOf(listOf("a")), categorizedAlerts[AlertCategory.DEDUPED] ?: error("Deduped alerts not found")) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt b/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt index bacdd2c40..952c7f1db 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt @@ -779,7 +779,8 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { """ "properties" : { "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, - "test_field" : { "type" : "keyword" } + "test_field" : { "type" : "keyword" }, + "number" : { "type" : "keyword" } } """.trimIndent() ) @@ -866,7 +867,8 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { val testDoc = """ { "test_strict_date_time": "$testTime", - "test_field": "$value" + "test_field": "$value", + "number": "$i" } """.trimIndent() // Indexing documents with deterministic doc id to allow for easy selected deletion during testing diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerServiceIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerServiceIT.kt index 007fcf4b0..8b99ec5dd 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerServiceIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerServiceIT.kt @@ -25,6 +25,7 @@ import org.opensearch.commons.alerting.model.Alert.State.ACKNOWLEDGED import org.opensearch.commons.alerting.model.Alert.State.ACTIVE import org.opensearch.commons.alerting.model.Alert.State.COMPLETED import org.opensearch.commons.alerting.model.Alert.State.ERROR +import org.opensearch.commons.alerting.model.DataSources import 
org.opensearch.commons.alerting.model.IntervalSchedule import org.opensearch.commons.alerting.model.Monitor import org.opensearch.commons.alerting.model.SearchInput @@ -1321,6 +1322,143 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { ) } + fun `test bucket-level monitor with findings enabled`() { + val testIndex = createTestIndex() + insertSampleTimeSerializedData( + testIndex, + listOf( + "test_value_1", + "test_value_2" + ) + ) + + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field").field("test_field") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) + val triggerScript = """ + params.docCount > 0 + """.trimIndent() + + // For the Actions ensure that there is at least one and any PER_ALERT actions contain ACTIVE, DEDUPED and COMPLETED in its policy + // so that the assertions done later in this test don't fail. + // The config is being mutated this way to still maintain the randomness in configuration (like including other ActionExecutionScope). 
+ val actions = randomActionsForBucketLevelTrigger(min = 1).map { + if (it.actionExecutionPolicy?.actionExecutionScope is PerAlertActionScope) { + it.copy( + actionExecutionPolicy = ActionExecutionPolicy( + PerAlertActionScope(setOf(AlertCategory.NEW, AlertCategory.DEDUPED, AlertCategory.COMPLETED)) + ) + ) + } else { + it + } + } + var trigger = randomBucketLevelTrigger(actions = actions) + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null + ) + ) + val monitor = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger), + dataSources = DataSources(findingsEnabled = true) + ) + ) + executeMonitor(monitor.id) + + // Check created Alerts + var currentAlerts = searchAlerts(monitor) + assertEquals("Alerts not saved", 2, currentAlerts.size) + currentAlerts.forEach { alert -> + Assert.assertEquals("expected findings for alert", alert.findingIds.size, 1) + } + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("2")) + } + + fun `test bucket-level monitor with findings enabled for multiple group by fields`() { + val testIndex = createTestIndex() + insertSampleTimeSerializedData( + testIndex, + listOf( + "test_value_1", + "test_value_2" + ) + ) + + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field").field("test_field"), + TermsValuesSourceBuilder("number").field("number") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", 
compositeSources) + val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) + val triggerScript = """ + params.docCount > 0 + """.trimIndent() + + // For the Actions ensure that there is at least one and any PER_ALERT actions contain ACTIVE, DEDUPED and COMPLETED in its policy + // so that the assertions done later in this test don't fail. + // The config is being mutated this way to still maintain the randomness in configuration (like including other ActionExecutionScope). + val actions = randomActionsForBucketLevelTrigger(min = 1).map { + if (it.actionExecutionPolicy?.actionExecutionScope is PerAlertActionScope) { + it.copy( + actionExecutionPolicy = ActionExecutionPolicy( + PerAlertActionScope(setOf(AlertCategory.NEW, AlertCategory.DEDUPED, AlertCategory.COMPLETED)) + ) + ) + } else { + it + } + } + var trigger = randomBucketLevelTrigger(actions = actions) + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null + ) + ) + val monitor = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger), + dataSources = DataSources(findingsEnabled = true) + ) + ) + executeMonitor(monitor.id) + + // Check created Alerts + var currentAlerts = searchAlerts(monitor) + assertEquals("Alerts not saved", 2, currentAlerts.size) + currentAlerts.forEach { alert -> + Assert.assertEquals("expected findings for alert", alert.findingIds.size, 0) + } + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 0, findings.size) + } + @Suppress("UNCHECKED_CAST") fun `test bucket-level monitor with one good action and one bad action`() { val testIndex = createTestIndex() diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt 
b/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt index 2b99b514a..02a2b9d5d 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt @@ -135,6 +135,32 @@ fun randomBucketLevelMonitor( ) } +fun randomBucketLevelMonitor( + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + user: User = randomUser(), + inputs: List = listOf( + SearchInput( + emptyList(), + SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) + .aggregation(TermsAggregationBuilder("test_agg").field("test_field")) + ) + ), + schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + enabled: Boolean = randomBoolean(), + triggers: List = (1..randomInt(10)).map { randomBucketLevelTrigger() }, + enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, + lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + withMetadata: Boolean = false, + dataSources: DataSources +): Monitor { + return Monitor( + name = name, monitorType = Monitor.MonitorType.BUCKET_LEVEL_MONITOR, enabled = enabled, inputs = inputs, + schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, + uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf(), + dataSources = dataSources + ) +} + fun randomClusterMetricsMonitor( name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), user: User = randomUser(), From b1dee168a27bbcc0b847989eba260ae2953c7968 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Mon, 7 Nov 2022 11:17:19 -0800 Subject: [PATCH 15/18] update release notes (#654) (#655) Signed-off-by: Surya Sashank Nistala Signed-off-by: Surya Sashank Nistala (cherry picked from commit f842a046f4970e81b215fd650da38cec784a21ab) Co-authored-by: Surya Sashank Nistala --- 
release-notes/opensearch-alerting.release-notes-2.4.0.0.md | 1 + 1 file changed, 1 insertion(+) diff --git a/release-notes/opensearch-alerting.release-notes-2.4.0.0.md b/release-notes/opensearch-alerting.release-notes-2.4.0.0.md index 97546edbe..3b364c7cd 100644 --- a/release-notes/opensearch-alerting.release-notes-2.4.0.0.md +++ b/release-notes/opensearch-alerting.release-notes-2.4.0.0.md @@ -12,6 +12,7 @@ Compatible with OpenSearch 2.4.0 * Custom history indices ([#616](https://github.com/opensearch-project/alerting/pull/616])) * adds filtering on owner field in search monitor action ([#641](https://github.com/opensearch-project/alerting/pull/641])) * Support to specify backend roles for monitors ([#635](https://github.com/opensearch-project/alerting/pull/635])) +* Adds findings in bucket level monitor ([#636](https://github.com/opensearch-project/alerting/pull/636])) ### Refactoring * moving over data models from alerting to common-utils ([#556](https://github.com/opensearch-project/alerting/pull/556])) From 32f03b8b2eb7a3b66a91903416cd186152e2636e Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Mon, 7 Nov 2022 12:57:34 -0800 Subject: [PATCH 16/18] =?UTF-8?q?Added=20exception=20check=20once=20the=20?= =?UTF-8?q?.opendistro-alerting-config=20index=20is=20b=E2=80=A6=20(#650)?= =?UTF-8?q?=20(#658)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Added exception check once the .opendistro-alerting-config index is being created During .opendistro-alerting-config index creation, if ResourceAlreadyExists exception is being raised, the flow will check first if the index is in yellow state and then it will re-try to index monitor Signed-off-by: Stevan Buzejic * Formating of the file fixed Signed-off-by: Stevan Buzejic Signed-off-by: Stevan Buzejic (cherry picked from commit 
ceff6093dc32aa45ecb75acef6388f4b477e1177) Co-authored-by: Stevan Buzejic <30922513+stevanbz@users.noreply.github.com> --- .../transport/TransportIndexMonitorAction.kt | 34 ++++++++++++++++--- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt index 0546d0733..6c943cef8 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt @@ -9,10 +9,15 @@ import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchException import org.opensearch.OpenSearchSecurityException import org.opensearch.OpenSearchStatusException +import org.opensearch.ResourceAlreadyExistsException import org.opensearch.action.ActionListener import org.opensearch.action.ActionRequest +import org.opensearch.action.admin.cluster.health.ClusterHealthAction +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse import org.opensearch.action.admin.indices.create.CreateIndexResponse import org.opensearch.action.admin.indices.get.GetIndexRequest import org.opensearch.action.admin.indices.get.GetIndexResponse @@ -297,10 +302,30 @@ class TransportIndexMonitorAction @Inject constructor( if (!scheduledJobIndices.scheduledJobIndexExists()) { scheduledJobIndices.initScheduledJobIndex(object : ActionListener { override fun onResponse(response: CreateIndexResponse) { - onCreateMappingsResponse(response) + onCreateMappingsResponse(response.isAcknowledged) } override fun onFailure(t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) + // 
https://github.com/opensearch-project/alerting/issues/646 + if (t is ResourceAlreadyExistsException && t.message?.contains("already exists") == true) { + scope.launch { + // Wait for the yellow status + val request = ClusterHealthRequest() + .indices(SCHEDULED_JOBS_INDEX) + .waitForYellowStatus() + val response: ClusterHealthResponse = client.suspendUntil { + execute(ClusterHealthAction.INSTANCE, request, it) + } + if (response.isTimedOut) { + actionListener.onFailure( + OpenSearchException("Cannot determine that the $SCHEDULED_JOBS_INDEX index is healthy") + ) + } + // Retry mapping of monitor + onCreateMappingsResponse(true) + } + } else { + actionListener.onFailure(AlertingException.wrap(t)) + } } }) } else if (!IndexUtils.scheduledJobIndexUpdated) { @@ -346,6 +371,7 @@ class TransportIndexMonitorAction @Inject constructor( val query = QueryBuilders.boolQuery().filter(QueryBuilders.termQuery("${Monitor.MONITOR_TYPE}.type", Monitor.MONITOR_TYPE)) val searchSource = SearchSourceBuilder().query(query).timeout(requestTimeout) val searchRequest = SearchRequest(SCHEDULED_JOBS_INDEX).source(searchSource) + client.search( searchRequest, object : ActionListener { @@ -401,8 +427,8 @@ class TransportIndexMonitorAction @Inject constructor( } } - private fun onCreateMappingsResponse(response: CreateIndexResponse) { - if (response.isAcknowledged) { + private fun onCreateMappingsResponse(isAcknowledged: Boolean) { + if (isAcknowledged) { log.info("Created $SCHEDULED_JOBS_INDEX with mappings.") prepareMonitorIndexing() IndexUtils.scheduledJobIndexUpdated() From 46cd1cf5c383a8b8f7e6d34b5b5f20d661147fcd Mon Sep 17 00:00:00 2001 From: Subhobrata Dey Date: Tue, 8 Nov 2022 11:52:07 -0800 Subject: [PATCH 17/18] create findingIndex bugfix (#663) Signed-off-by: Petar Dzepina --- .../alerting/alerts/AlertIndices.kt | 2 +- .../alerting/MonitorDataSourcesIT.kt | 87 +++++++++++++++---- .../transport/AlertingSingleNodeTestCase.kt | 11 +++ 3 files changed, 83 insertions(+), 17 
deletions(-) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt index c1f14ea83..813b90f3b 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt @@ -326,7 +326,7 @@ class AlertIndices( } val findingsIndex = dataSources.findingsIndex val findingsIndexPattern = dataSources.findingsIndexPattern ?: FINDING_HISTORY_INDEX_PATTERN - if (!clusterService.state().metadata().hasAlias(findingsIndexPattern)) { + if (!clusterService.state().metadata().hasAlias(findingsIndex)) { createIndex( findingsIndexPattern, findingMapping(), diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt index 43c9dff3e..f4575cbbe 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt @@ -8,6 +8,7 @@ package org.opensearch.alerting import org.junit.Assert import org.opensearch.action.admin.cluster.state.ClusterStateRequest import org.opensearch.action.admin.indices.create.CreateIndexRequest +import org.opensearch.action.admin.indices.refresh.RefreshRequest import org.opensearch.action.search.SearchRequest import org.opensearch.action.support.WriteRequest import org.opensearch.alerting.action.SearchMonitorAction @@ -17,6 +18,7 @@ import org.opensearch.alerting.transport.AlertingSingleNodeTestCase import org.opensearch.common.settings.Settings import org.opensearch.commons.alerting.action.AcknowledgeAlertRequest import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.DeleteMonitorRequest import org.opensearch.commons.alerting.action.GetAlertsRequest import org.opensearch.commons.alerting.model.Alert import 
org.opensearch.commons.alerting.model.DataSources @@ -25,6 +27,8 @@ import org.opensearch.commons.alerting.model.DocLevelQuery import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX import org.opensearch.commons.alerting.model.Table import org.opensearch.index.query.MatchQueryBuilder +import org.opensearch.index.query.QueryBuilders +import org.opensearch.search.builder.SearchSourceBuilder import org.opensearch.test.OpenSearchTestCase import java.time.ZonedDateTime import java.time.format.DateTimeFormatter @@ -195,17 +199,67 @@ class MonitorDataSourcesIT : AlertingSingleNodeTestCase() { Assert.assertTrue(mapping?.source()?.string()?.contains("\"analyzer\":\"$analyzer\"") == true) } - fun `test execute monitor with custom findings index`() { + fun `test delete monitor deletes all queries and metadata too`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customQueryIndex = "custom_alerts_index" + val analyzer = "whitespace" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + queryIndexMappingsByType = mapOf(Pair("text", mapOf(Pair("analyzer", analyzer)))), + ) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val monitorId = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, monitorId, false) + 
Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(monitorId) + val clusterStateResponse = client().admin().cluster().state(ClusterStateRequest().indices(customQueryIndex).metadata(true)).get() + val mapping = clusterStateResponse.state.metadata.index(customQueryIndex).mapping() + Assert.assertTrue(mapping?.source()?.string()?.contains("\"analyzer\":\"$analyzer\"") == true) + // Verify queries exist + var searchResponse = client().search( + SearchRequest(customQueryIndex).source(SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) + ).get() + assertNotEquals(0, searchResponse.hits.hits.size) + client().execute( + AlertingActions.DELETE_MONITOR_ACTION_TYPE, DeleteMonitorRequest(monitorId, WriteRequest.RefreshPolicy.IMMEDIATE) + ).get() + client().admin().indices().refresh(RefreshRequest(customQueryIndex)).get() + // Verify queries are deleted + searchResponse = client().search( + SearchRequest(customQueryIndex).source(SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) + ).get() + assertEquals(0, searchResponse.hits.hits.size) + } + + fun `test execute monitor with custom findings index and pattern`() { val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "" var monitor = randomDocumentLevelMonitor( inputs = listOf(docLevelInput), triggers = listOf(trigger), - dataSources = DataSources(findingsIndex = customFindingsIndex) + dataSources = DataSources(findingsIndex = customFindingsIndex, findingsIndexPattern = customFindingsIndexPattern) ) val monitorResponse = createMonitor(monitor) + client().admin().indices().refresh(RefreshRequest("*")) val testTime = 
DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) val testDoc = """{ "message" : "This is an error from IAD region", @@ -216,24 +270,25 @@ class MonitorDataSourcesIT : AlertingSingleNodeTestCase() { monitor = monitorResponse!!.monitor indexDoc(index, "1", testDoc) val id = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, id, false) + var executeMonitorResponse = executeMonitor(monitor, id, false) Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(id) - val findings = searchFindings(id, customFindingsIndex) + + var findings = searchFindings(id, "custom_findings_index*", true) assertEquals("Findings saved for test monitor", 1, findings.size) assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - val table = Table("asc", "id", null, 1, 0, "") - var getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) + + indexDoc(index, "2", testDoc) + executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + findings = searchFindings(id, "custom_findings_index*", true) + assertEquals("Findings saved for test monitor", 2, findings.size) + assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("2")) + + val 
indices = getAllIndicesFromPattern("custom_findings_index*") + Assert.assertTrue(indices.isNotEmpty()) } fun `test execute pre-existing monitorand update`() { diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/transport/AlertingSingleNodeTestCase.kt b/alerting/src/test/kotlin/org/opensearch/alerting/transport/AlertingSingleNodeTestCase.kt index b1f3da648..61e788a32 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/transport/AlertingSingleNodeTestCase.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/transport/AlertingSingleNodeTestCase.kt @@ -6,6 +6,8 @@ package org.opensearch.alerting.transport import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope +import org.opensearch.action.admin.indices.get.GetIndexRequestBuilder +import org.opensearch.action.admin.indices.get.GetIndexResponse import org.opensearch.action.admin.indices.refresh.RefreshAction import org.opensearch.action.admin.indices.refresh.RefreshRequest import org.opensearch.action.support.WriteRequest @@ -54,6 +56,15 @@ abstract class AlertingSingleNodeTestCase : OpenSearchSingleNodeTestCase() { createTestIndex() } + protected fun getAllIndicesFromPattern(pattern: String): List { + val getIndexResponse = ( + client().admin().indices().prepareGetIndex() + .setIndices(pattern) as GetIndexRequestBuilder + ).get() as GetIndexResponse + getIndexResponse + return getIndexResponse.indices().toList() + } + protected fun executeMonitor(monitor: Monitor, id: String, dryRun: Boolean = true): ExecuteMonitorResponse? 
{ val request = ExecuteMonitorRequest(dryRun, TimeValue(Instant.now().toEpochMilli()), id, monitor) return client().execute(ExecuteMonitorAction.INSTANCE, request).get() From bf63b34ab54f1b81a9d13ca01efdc64b1f1307c2 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 8 Nov 2022 16:11:39 -0800 Subject: [PATCH 18/18] fix bucket level monitor findings to support term aggs in query (#666) (#667) Signed-off-by: Surya Sashank Nistala (cherry picked from commit 0fb9dd04ccd1356b67e320173954cbc928eba4fd) Co-authored-by: Surya Sashank Nistala --- .../alerting/BucketLevelMonitorRunner.kt | 58 +++++++++++----- .../alerting/MonitorRunnerServiceIT.kt | 69 ++++++++++++++++++- 2 files changed, 110 insertions(+), 17 deletions(-) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt b/alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt index d3f534528..3c4fc6425 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt @@ -46,6 +46,7 @@ import org.opensearch.script.ScriptType import org.opensearch.script.TemplateScript import org.opensearch.search.aggregations.AggregatorFactories import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder import org.opensearch.search.builder.SearchSourceBuilder import java.time.Instant import java.util.UUID @@ -71,6 +72,9 @@ object BucketLevelMonitorRunner : MonitorRunner() { val currentAlerts = try { monitorCtx.alertIndices!!.createOrUpdateAlertIndex(monitor.dataSources) monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex(monitor.dataSources) + if (monitor.dataSources.findingsEnabled == true) { + 
monitorCtx.alertIndices!!.createOrUpdateInitialFindingHistoryIndex(monitor.dataSources) + } monitorCtx.alertService!!.loadCurrentAlertsForBucketLevelMonitor(monitor) } catch (e: Exception) { // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts @@ -142,15 +146,19 @@ object BucketLevelMonitorRunner : MonitorRunner() { */ if (triggerResults[trigger.id]?.error != null) continue val findings = - if (monitor.triggers.size == 1 && monitor.dataSources.findingsEnabled == true) createFindings( - triggerResult, - monitor, - monitorCtx, - periodStart, - periodEnd, - !dryrun && monitor.id != Monitor.NO_ID - ) - else emptyList() + if (monitor.triggers.size == 1 && monitor.dataSources.findingsEnabled == true) { + logger.debug("Creating bucket level findings") + createFindings( + triggerResult, + monitor, + monitorCtx, + periodStart, + periodEnd, + !dryrun && monitor.id != Monitor.NO_ID + ) + } else { + emptyList() + } // TODO: Should triggerResult's aggregationResultBucket be a list? If not, getCategorizedAlertsForBucketLevelMonitor can // be refactored to use a map instead val categorizedAlerts = monitorCtx.alertService!!.getCategorizedAlertsForBucketLevelMonitor( @@ -334,15 +342,30 @@ object BucketLevelMonitorRunner : MonitorRunner() { val bucketValues: Set = triggerResult.aggregationResultBuckets.keys val query = input.query var fieldName = "" - var grouByFields = 0 // if number of fields used to group by > 1 we won't calculate findings + for (aggFactory in (query.aggregations() as AggregatorFactories.Builder).aggregatorFactories) { - val sources = (aggFactory as CompositeAggregationBuilder).sources() - for (source in sources) { - if (grouByFields > 0) { + when (aggFactory) { + is CompositeAggregationBuilder -> { + var grouByFields = 0 // if number of fields used to group by > 1 we won't calculate findings + val sources = aggFactory.sources() + for (source in sources) { + if (grouByFields > 0) { + logger.error("grouByFields > 0. 
not generating findings for bucket level monitor ${monitor.id}") + return listOf() + } + grouByFields++ + fieldName = source.field() + } + } + is TermsAggregationBuilder -> { + fieldName = aggFactory.field() + } + else -> { + logger.error( + "Bucket level monitor findings supported only for composite and term aggs. Found [{${aggFactory.type}}]" + ) return listOf() } - grouByFields++ - fieldName = source.field() } } if (fieldName != "") { @@ -370,6 +393,8 @@ object BucketLevelMonitorRunner : MonitorRunner() { } val searchResponse: SearchResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.search(sr, it) } return createFindingPerIndex(searchResponse, monitor, monitorCtx, shouldCreateFinding) + } else { + logger.error("Couldn't resolve groupBy field. Not generating bucket level monitor findings for monitor %${monitor.id}") } } } @@ -403,8 +428,9 @@ object BucketLevelMonitorRunner : MonitorRunner() { ) val findingStr = finding.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS).string() - logger.debug("Findings: $findingStr") + logger.debug("Bucket level monitor ${monitor.id} Findings: $findingStr") if (shouldCreateFinding) { + logger.debug("Saving bucket level monitor findings for monitor ${monitor.id}") val indexRequest = IndexRequest(monitor.dataSources.findingsIndex) .source(findingStr, XContentType.JSON) .id(finding.id) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerServiceIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerServiceIT.kt index 8b99ec5dd..4ce7dcd23 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerServiceIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerServiceIT.kt @@ -40,6 +40,7 @@ import org.opensearch.rest.RestStatus import org.opensearch.script.Script import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder import 
org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder import org.opensearch.search.builder.SearchSourceBuilder import java.net.URLEncoder import java.time.Instant @@ -1322,7 +1323,73 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { ) } - fun `test bucket-level monitor with findings enabled`() { + fun `test bucket-level monitor with findings enabled on term agg`() { + val testIndex = createTestIndex() + insertSampleTimeSerializedData( + testIndex, + listOf( + "test_value_1", + "test_value_2" + ) + ) + + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val termAgg = TermsAggregationBuilder("test_field").field("test_field") + val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(termAgg)) + val triggerScript = """ + params.docCount > 0 + """.trimIndent() + + // For the Actions ensure that there is at least one and any PER_ALERT actions contain ACTIVE, DEDUPED and COMPLETED in its policy + // so that the assertions done later in this test don't fail. + // The config is being mutated this way to still maintain the randomness in configuration (like including other ActionExecutionScope). 
+ val actions = randomActionsForBucketLevelTrigger(min = 1).map { + if (it.actionExecutionPolicy?.actionExecutionScope is PerAlertActionScope) { + it.copy( + actionExecutionPolicy = ActionExecutionPolicy( + PerAlertActionScope(setOf(AlertCategory.NEW, AlertCategory.DEDUPED, AlertCategory.COMPLETED)) + ) + ) + } else { + it + } + } + var trigger = randomBucketLevelTrigger(actions = actions) + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "test_field", + filter = null + ) + ) + val monitor = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger), + dataSources = DataSources(findingsEnabled = true) + ) + ) + executeMonitor(monitor.id) + + // Check created Alerts + var currentAlerts = searchAlerts(monitor) + assertEquals("Alerts not saved", 2, currentAlerts.size) + currentAlerts.forEach { alert -> + Assert.assertEquals("expected findings for alert", alert.findingIds.size, 1) + } + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("2")) + } + + fun `test bucket-level monitor with findings enabled on composite agg`() { val testIndex = createTestIndex() insertSampleTimeSerializedData( testIndex,