Backport removal of runBlocking and Stats API fix (opensearch-project#287)

* Remove runBlocking calls from MonitorRunner (opensearch-project#281)

* Remove runBlocking calls from MonitorRunner

Signed-off-by: Mohammad Qureshi <[email protected]>

* Add security tests with partial user permissions to check that the user context is applied correctly

Signed-off-by: Mohammad Qureshi <[email protected]>

* Fix ktlint style issues

Signed-off-by: Mohammad Qureshi <[email protected]>

* Add back opendistro-prefixed sweeper enabled field to alerting stats response (opensearch-project#283)

* Add back opendistro-prefixed sweeper enabled field to alerting stats response

Signed-off-by: Mohammad Qureshi <[email protected]>

* Rename stats response field variables used in tests

Signed-off-by: Mohammad Qureshi <[email protected]>
Signed-off-by: AWSHurneyt <[email protected]>
qreshi authored and AWSHurneyt committed Mar 30, 2022
1 parent 44fde63 commit 2df48f8
Showing 7 changed files with 210 additions and 25 deletions.
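
For context on the MonitorRunner change that follows: the runner already executes monitors inside its own coroutine scope, so wrapping the user-context injection in runBlocking parks the calling thread until input collection finishes. A minimal sketch of the difference is below; RolesInjector and collectInputs are hypothetical stand-ins for the plugin's InjectorContextElement and input service, not its actual API.

import kotlinx.coroutines.runBlocking
import kotlinx.coroutines.withContext
import kotlin.coroutines.AbstractCoroutineContextElement
import kotlin.coroutines.CoroutineContext

// Hypothetical stand-in for the plugin's InjectorContextElement.
class RolesInjector : AbstractCoroutineContextElement(Key) {
    companion object Key : CoroutineContext.Key<RolesInjector>
}

// Placeholder for inputService.collectInputResults(monitor, periodStart, periodEnd).
suspend fun collectInputs(): String = "input results"

// Before: blocks the coroutine's worker thread until the inputs are collected.
fun collectBlocking(roles: RolesInjector): String =
    runBlocking(roles) { collectInputs() }

// After: suspends under the injected context instead, leaving the thread free.
suspend fun collectSuspending(roles: RolesInjector): String =
    withContext(roles) { collectInputs() }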
@@ -31,7 +31,6 @@ import kotlinx.coroutines.Dispatchers
 import kotlinx.coroutines.Job
 import kotlinx.coroutines.SupervisorJob
 import kotlinx.coroutines.launch
-import kotlinx.coroutines.runBlocking
 import kotlinx.coroutines.withContext
 import org.apache.logging.log4j.LogManager
 import org.opensearch.action.bulk.BackoffPolicy
@@ -41,6 +40,7 @@ import org.opensearch.alerting.core.JobRunner
 import org.opensearch.alerting.core.model.ScheduledJob
 import org.opensearch.alerting.elasticapi.InjectorContextElement
 import org.opensearch.alerting.elasticapi.retry
+import org.opensearch.alerting.elasticapi.withClosableContext
 import org.opensearch.alerting.model.ActionRunResult
 import org.opensearch.alerting.model.Alert
 import org.opensearch.alerting.model.AlertingConfigAccessor
@@ -295,7 +295,7 @@ object MonitorRunner : JobRunner, CoroutineScope, AbstractLifecycleComponent() {
             return monitorResult.copy(error = e)
         }
         if (!isADMonitor(monitor)) {
-            runBlocking(InjectorContextElement(monitor.id, settings, threadPool.threadContext, roles)) {
+            withClosableContext(InjectorContextElement(monitor.id, settings, threadPool.threadContext, roles)) {
                 monitorResult = monitorResult.copy(inputResults = inputService.collectInputResults(monitor, periodStart, periodEnd))
             }
         } else {
@@ -384,7 +384,7 @@
             // If a setting is imposed that limits buckets that can be processed for Bucket-Level Monitors, we'd need to iterate over
             // the buckets until we hit that threshold. In that case, we'd want to exit the execution without creating any alerts since the
             // buckets we iterate over before hitting the limit is not deterministic. Is there a better way to fail faster in this case?
-            runBlocking(InjectorContextElement(monitor.id, settings, threadPool.threadContext, roles)) {
+            withClosableContext(InjectorContextElement(monitor.id, settings, threadPool.threadContext, roles)) {
                 // Storing the first page of results in the case of pagination input results to prevent empty results
                 // in the final output of monitorResult which occurs when all pages have been exhausted.
                 // If it's favorable to return the last page, will need to check how to accomplish that with multiple aggregation paths

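withClosableContext comes from the plugin's elasticapi package alongside InjectorContextElement. A minimal sketch of what such a helper could look like, assuming the injected element holds resources (for example a stashed thread context) that must be released once the block finishes; this is an illustration, not the plugin's actual implementation:

import kotlinx.coroutines.withContext
import kotlin.coroutines.CoroutineContext

// Assumed shape: a coroutine context element that also owns resources to release.
interface ClosableContextElement : CoroutineContext.Element, AutoCloseable

// Runs the block as a suspending call under the injected context, so no thread is
// blocked the way runBlocking would, and closes the element when the block completes.
suspend fun <T> withClosableContext(element: ClosableContextElement, block: suspend () -> T): T =
    try {
        withContext(element) { block() }
    } finally {
        element.close()
    }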
@@ -90,6 +90,9 @@ abstract class AlertingRestTestCase : ODFERestTestCase() {
     protected val numberOfNodes = System.getProperty("cluster.number_of_nodes", "1")!!.toInt()
     protected val isMultiNode = numberOfNodes > 1

+    protected val statsResponseOpendistroSweeperEnabledField = "opendistro.scheduled_jobs.enabled"
+    protected val statsResponseOpenSearchSweeperEnabledField = "plugins.scheduled_jobs.enabled"
+
     override fun xContentRegistry(): NamedXContentRegistry {
         return NamedXContentRegistry(
             mutableListOf(
@@ -846,6 +849,26 @@ abstract class AlertingRestTestCase : ODFERestTestCase() {
         client().performRequest(request)
     }

+    fun createIndexRoleWithDocLevelSecurity(name: String, index: String, dlsQuery: String) {
+        val request = Request("PUT", "/_plugins/_security/api/roles/$name")
+        val entity = """
+            {
+                "cluster_permissions": [],
+                "index_permissions": [{
+                    "index_patterns": [
+                        "$index"
+                    ],
+                    "dls": "$dlsQuery",
+                    "allowed_actions": [
+                        "read"
+                    ]
+                }]
+            }
+        """.trimIndent()
+        request.setJsonEntity(entity)
+        client().performRequest(request)
+    }
+
     fun createUserRolesMapping(role: String, users: Array<String>) {
         val request = Request("PUT", "/_plugins/_security/api/rolesmapping/$role")
         val usersStr = users.joinToString { it -> "\"$it\"" }
@@ -877,6 +900,19 @@ abstract class AlertingRestTestCase : ODFERestTestCase() {
         createUserRolesMapping(role, arrayOf(user))
     }

+    fun createUserWithDocLevelSecurityTestData(
+        user: String,
+        index: String,
+        role: String,
+        backendRole: String,
+        dlsQuery: String
+    ) {
+        createUser(user, user, arrayOf(backendRole))
+        createTestIndex(index)
+        createIndexRoleWithDocLevelSecurity(role, index, dlsQuery)
+        createUserRolesMapping(role, arrayOf(user))
+    }
+
     companion object {
         internal interface IProxy {
             val version: String?

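The two helpers added above let a test stand up a user whose reads are filtered by document-level security; the new tests in SecureMonitorRestApiIT later in this commit use exactly this pattern. A hedged usage sketch, where the index, role, and user names are arbitrary examples:

// Inside a test extending AlertingRestTestCase: create a user mapped to a role that
// can only read documents in "hr_data" matching the DLS query, then also map the
// user to alerting_full_access so it can run monitors.
fun setUpPartialPermissionUser() {
    createUserWithDocLevelSecurityTestData(
        user = "hr_user",
        index = "hr_data",
        role = "hr_role",
        backendRole = "HR",
        dlsQuery = "{\"term\": { \"accessible\": true}}"
    )
    createUserRolesMapping("alerting_full_access", arrayOf("hr_user"))
}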
@@ -1011,7 +1011,6 @@ class MonitorRunnerIT : AlertingRestTestCase() {
         val monitor = createMonitor(randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger)))
         val response = executeMonitor(monitor.id, params = DRYRUN_MONITOR)
         val output = entityAsMap(response)
-        // print("Output is: $output")

         assertEquals(monitor.name, output["monitor_name"])
         @Suppress("UNCHECKED_CAST")

@@ -592,8 +592,7 @@ class MonitorRestApiIT : AlertingRestTestCase() {
         )

         val acknowledgedAlertsList = acknowledgedAlerts.toString()
-        alertsToAcknowledge.forEach {
-            alert ->
+        alertsToAcknowledge.forEach { alert ->
             assertTrue("Alert with ID ${alert.id} not found in failed list.", acknowledgedAlertsList.contains(alert.id))
         }

@@ -627,25 +626,21 @@
         )

         val acknowledgedAlertsList = acknowledgedAlerts.toString()
-        alertsGroup2.forEach {
-            alert ->
+        alertsGroup2.forEach { alert ->
             assertTrue("Alert with ID ${alert.id} not found in failed list.", acknowledgedAlertsList.contains(alert.id))
         }
-        alertsGroup1.forEach {
-            alert ->
+        alertsGroup1.forEach { alert ->
             assertFalse("Alert with ID ${alert.id} found in failed list.", acknowledgedAlertsList.contains(alert.id))
         }

         val failedResponse = responseMap["failed"] as List<String>
         assertTrue("Expected ${alertsGroup1.size} alerts to fail acknowledgment.", failedResponse.size == alertsGroup1.size)

         val failedResponseList = failedResponse.toString()
-        alertsGroup1.forEach {
-            alert ->
+        alertsGroup1.forEach { alert ->
             assertTrue("Alert with ID ${alert.id} not found in failed list.", failedResponseList.contains(alert.id))
         }
-        alertsGroup2.forEach {
-            alert ->
+        alertsGroup2.forEach { alert ->
             assertFalse("Alert with ID ${alert.id} found in failed list.", failedResponseList.contains(alert.id))
         }
     }
@@ -907,8 +902,7 @@ class MonitorRestApiIT : AlertingRestTestCase() {
         disableScheduledJob()

         val responseMap = getAlertingStats()
-        // assertEquals("Cluster name is incorrect", responseMap["cluster_name"], "alerting_integTestCluster")
-        assertEquals("Scheduled job is not enabled", false, responseMap[ScheduledJobSettings.SWEEPER_ENABLED.key])
+        assertAlertingStatsSweeperEnabled(responseMap, false)
         assertEquals("Scheduled job index exists but there are no scheduled jobs.", false, responseMap["scheduled_job_index_exists"])
         val _nodes = responseMap["_nodes"] as Map<String, Int>
         validateAlertingStatsNodeResponse(_nodes)
@@ -920,7 +914,7 @@
         val monitorId = createMonitor(randomQueryLevelMonitor(enabled = true), refresh = true).id

         var alertingStats = getAlertingStats()
-        assertEquals("Scheduled job is not enabled", true, alertingStats[ScheduledJobSettings.SWEEPER_ENABLED.key])
+        assertAlertingStatsSweeperEnabled(alertingStats, true)
         assertEquals("Scheduled job index does not exist", true, alertingStats["scheduled_job_index_exists"])
         assertEquals("Scheduled job index is not yellow", "yellow", alertingStats["scheduled_job_index_status"])
         assertEquals("Nodes are not on schedule", numberOfNodes, alertingStats["nodes_on_schedule"])
@@ -937,7 +931,7 @@
         disableScheduledJob()

         alertingStats = getAlertingStats()
-        assertEquals("Scheduled job is still enabled", false, alertingStats[ScheduledJobSettings.SWEEPER_ENABLED.key])
+        assertAlertingStatsSweeperEnabled(alertingStats, false)
         assertFalse(
             "Monitor [$monitorId] was still scheduled based on the alerting stats response: $alertingStats",
             isMonitorScheduled(monitorId, alertingStats)
@@ -950,7 +944,7 @@
         Thread.sleep(2000)

         alertingStats = getAlertingStats()
-        assertEquals("Scheduled job is not enabled", true, alertingStats[ScheduledJobSettings.SWEEPER_ENABLED.key])
+        assertAlertingStatsSweeperEnabled(alertingStats, true)
         assertTrue(
             "Monitor [$monitorId] was not re-scheduled based on the alerting stats response: $alertingStats",
             isMonitorScheduled(monitorId, alertingStats)
@@ -962,8 +956,7 @@
         enableScheduledJob()

         val responseMap = getAlertingStats()
-        // assertEquals("Cluster name is incorrect", responseMap["cluster_name"], "alerting_integTestCluster")
-        assertEquals("Scheduled job is not enabled", true, responseMap[ScheduledJobSettings.SWEEPER_ENABLED.key])
+        assertAlertingStatsSweeperEnabled(responseMap, true)
         assertEquals("Scheduled job index exists but there are no scheduled jobs.", false, responseMap["scheduled_job_index_exists"])
         val _nodes = responseMap["_nodes"] as Map<String, Int>
         validateAlertingStatsNodeResponse(_nodes)
@@ -975,8 +968,7 @@
         createRandomMonitor(refresh = true)

         val responseMap = getAlertingStats()
-        // assertEquals("Cluster name is incorrect", responseMap["cluster_name"], "alerting_integTestCluster")
-        assertEquals("Scheduled job is not enabled", true, responseMap[ScheduledJobSettings.SWEEPER_ENABLED.key])
+        assertAlertingStatsSweeperEnabled(responseMap, true)
         assertEquals("Scheduled job index does not exist", true, responseMap["scheduled_job_index_exists"])
         assertEquals("Scheduled job index is not yellow", "yellow", responseMap["scheduled_job_index_status"])
         assertEquals("Nodes are not on schedule", numberOfNodes, responseMap["nodes_on_schedule"])
@@ -1004,8 +996,7 @@ class MonitorRestApiIT : AlertingRestTestCase() {
         createRandomMonitor(refresh = true)

         val responseMap = getAlertingStats("/jobs_info")
-        // assertEquals("Cluster name is incorrect", responseMap["cluster_name"], "alerting_integTestCluster")
-        assertEquals("Scheduled job is not enabled", true, responseMap[ScheduledJobSettings.SWEEPER_ENABLED.key])
+        assertAlertingStatsSweeperEnabled(responseMap, true)
         assertEquals("Scheduled job index does not exist", true, responseMap["scheduled_job_index_exists"])
         assertEquals("Scheduled job index is not yellow", "yellow", responseMap["scheduled_job_index_status"])
         assertEquals("Nodes not on schedule", numberOfNodes, responseMap["nodes_on_schedule"])
@@ -1123,4 +1114,17 @@ class MonitorRestApiIT : AlertingRestTestCase() {

         return false
     }
+
+    private fun assertAlertingStatsSweeperEnabled(alertingStatsResponse: Map<String, Any>, expected: Boolean) {
+        assertEquals(
+            "Legacy scheduled job enabled field is not set to $expected",
+            expected,
+            alertingStatsResponse[statsResponseOpendistroSweeperEnabledField]
+        )
+        assertEquals(
+            "Scheduled job is not ${if (expected) "enabled" else "disabled"}",
+            expected,
+            alertingStatsResponse[statsResponseOpenSearchSweeperEnabledField]
+        )
+    }
 }

@@ -19,12 +19,15 @@ import org.opensearch.alerting.ALERTING_BASE_URI
 import org.opensearch.alerting.ALWAYS_RUN
 import org.opensearch.alerting.AlertingRestTestCase
 import org.opensearch.alerting.DRYRUN_MONITOR
+import org.opensearch.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder
 import org.opensearch.alerting.assertUserNull
 import org.opensearch.alerting.core.model.SearchInput
 import org.opensearch.alerting.makeRequest
 import org.opensearch.alerting.model.Alert
 import org.opensearch.alerting.randomAction
 import org.opensearch.alerting.randomAlert
+import org.opensearch.alerting.randomBucketLevelMonitor
+import org.opensearch.alerting.randomBucketLevelTrigger
 import org.opensearch.alerting.randomQueryLevelMonitor
 import org.opensearch.alerting.randomQueryLevelTrigger
 import org.opensearch.alerting.randomTemplateScript
@@ -39,6 +42,9 @@ import org.opensearch.commons.authuser.User
 import org.opensearch.commons.rest.SecureRestClientBuilder
 import org.opensearch.index.query.QueryBuilders
 import org.opensearch.rest.RestStatus
+import org.opensearch.script.Script
+import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder
+import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder
 import org.opensearch.search.builder.SearchSourceBuilder
 import org.opensearch.test.junit.annotations.TestLogging

@@ -521,4 +527,129 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() {
             deleteRoleMapping("alerting_full_access")
         }
     }
+
+    fun `test execute query-level monitor with user having partial index permissions`() {
+        if (!securityEnabled()) return
+
+        val testIndex = "hr_data"
+        createUserWithDocLevelSecurityTestData(
+            user,
+            testIndex,
+            "hr_role",
+            "HR",
+            "{\"term\": { \"accessible\": true}}"
+        )
+        createUserRolesMapping("alerting_full_access", arrayOf(user))
+
+        // Add a doc that is accessible to the user
+        indexDoc(
+            testIndex, "1",
+            """
+            {
+                "test_field": "a",
+                "accessible": true
+            }
+            """.trimIndent()
+        )
+
+        // Add a second doc that is not accessible to the user
+        indexDoc(
+            testIndex, "2",
+            """
+            {
+                "test_field": "b",
+                "accessible": false
+            }
+            """.trimIndent()
+        )
+
+        val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))
+        val triggerScript = """
+            // make sure there is exactly one hit
+            return ctx.results[0].hits.hits.size() == 1
+        """.trimIndent()
+
+        val trigger = randomQueryLevelTrigger(condition = Script(triggerScript))
+        val monitor = randomQueryLevelMonitor(inputs = listOf(input), triggers = listOf(trigger))
+
+        try {
+            executeMonitor(monitor.id, params = DRYRUN_MONITOR)
+            val alerts = searchAlerts(monitor)
+            assertEquals("Incorrect number of alerts", 1, alerts.size)
+        } finally {
+            deleteRoleMapping("hr_role")
+            deleteRole("hr_role")
+            deleteRoleMapping("alerting_full_access")
+        }
+    }
+
+    fun `test execute bucket-level monitor with user having partial index permissions`() {
+        if (!securityEnabled()) return
+
+        val testIndex = "hr_data"
+        createUserWithDocLevelSecurityTestData(
+            user,
+            testIndex,
+            "hr_role",
+            "HR",
+            "{\"term\": { \"accessible\": true}}"
+        )
+        createUserRolesMapping("alerting_full_access", arrayOf(user))
+
+        // Add a doc that is accessible to the user
+        indexDoc(
+            testIndex, "1",
+            """
+            {
+                "test_field": "a",
+                "accessible": true
+            }
+            """.trimIndent()
+        )
+
+        // Add a second doc that is not accessible to the user
+        indexDoc(
+            testIndex, "2",
+            """
+            {
+                "test_field": "b",
+                "accessible": false
+            }
+            """.trimIndent()
+        )
+
+        val compositeSources = listOf(
+            TermsValuesSourceBuilder("test_field").field("test_field")
+        )
+        val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources)
+        val input = SearchInput(
+            indices = listOf(testIndex),
+            query = SearchSourceBuilder().size(0).query(QueryBuilders.matchAllQuery()).aggregation(compositeAgg)
+        )
+        val triggerScript = """
+            params.docCount > 0
+        """.trimIndent()
+
+        var trigger = randomBucketLevelTrigger()
+        trigger = trigger.copy(
+            bucketSelector = BucketSelectorExtAggregationBuilder(
+                name = trigger.id,
+                bucketsPathsMap = mapOf("docCount" to "_count"),
+                script = Script(triggerScript),
+                parentBucketPath = "composite_agg",
+                filter = null
+            )
+        )
+        val monitor = createMonitor(randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger)))
+
+        try {
+            executeMonitor(monitor.id, params = DRYRUN_MONITOR)
+            val alerts = searchAlerts(monitor)
+            assertEquals("Incorrect number of alerts", 1, alerts.size)
+        } finally {
+            deleteRoleMapping("hr_role")
+            deleteRole("hr_role")
+            deleteRoleMapping("alerting_full_access")
+        }
+    }
 }

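Both new tests index two documents but expect exactly one alert: the security plugin evaluates the DLS query as the monitor's user, so the document with accessible set to false never reaches the monitor's query or aggregation. For the bucket-level case, the composite_agg buckets the trigger iterates over would reduce to roughly the following, an assumed shape for illustration:

// Hypothetical view of the "composite_agg" buckets visible to the restricted user:
// only the document with "accessible": true contributes a bucket.
val visibleBuckets = listOf(
    mapOf(
        "key" to mapOf("test_field" to "a"),
        "doc_count" to 1
    )
)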
@@ -28,6 +28,7 @@ package org.opensearch.alerting.core.action.node

 import org.opensearch.action.FailedNodeException
 import org.opensearch.action.support.nodes.BaseNodesResponse
+import org.opensearch.alerting.core.settings.LegacyOpenDistroScheduledJobSettings
 import org.opensearch.alerting.core.settings.ScheduledJobSettings
 import org.opensearch.cluster.ClusterName
 import org.opensearch.cluster.health.ClusterIndexHealth
@@ -77,6 +78,7 @@ class ScheduledJobsStatsResponse : BaseNodesResponse<ScheduledJobStats>, ToXCont
     }

     override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
+        builder.field(LegacyOpenDistroScheduledJobSettings.SWEEPER_ENABLED.key, scheduledJobEnabled)
         builder.field(ScheduledJobSettings.SWEEPER_ENABLED.key, scheduledJobEnabled)
         builder.field("scheduled_job_index_exists", indexExists)
         builder.field("scheduled_job_index_status", indexHealth?.status?.name?.toLowerCase())

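With both field calls in toXContent, the node stats payload reports the sweeper state under the legacy opendistro key and the new plugins key, which is what assertAlertingStatsSweeperEnabled checks in the tests above. An assumed fragment of the stats response, with illustrative values and other fields elided:

val statsFragment = mapOf(
    "opendistro.scheduled_jobs.enabled" to true, // LegacyOpenDistroScheduledJobSettings.SWEEPER_ENABLED.key
    "plugins.scheduled_jobs.enabled" to true, // ScheduledJobSettings.SWEEPER_ENABLED.key
    "scheduled_job_index_exists" to true,
    "scheduled_job_index_status" to "yellow"
)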