From 5ab54692949d3520a2e2ab7163648dfb9827b0bf Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 19 Jul 2022 15:17:32 +0530 Subject: [PATCH 01/84] Use the published zip for security plugin (#455) (#457) Signed-off-by: Ankit Kala (cherry picked from commit 27898e2f166416992b2cf852903822352762c54e) Co-authored-by: Ankit Kala --- .github/workflows/security-tests.yml | 12 ------------ build.gradle | 18 +++++++++++++++++- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/.github/workflows/security-tests.yml b/.github/workflows/security-tests.yml index ee435d15..9b7bcb3f 100644 --- a/.github/workflows/security-tests.yml +++ b/.github/workflows/security-tests.yml @@ -22,18 +22,6 @@ jobs: # This step uses the checkout Github action: https://github.com/actions/checkout - name: Checkout Branch uses: actions/checkout@v2 - # Security plugin dependency - - name: Checkout security - uses: actions/checkout@v2 - with: - repository: 'opensearch-project/security' - path: security - ref: 'main' - - name: Build security - working-directory: ./security - run: | - ./gradlew clean build -Dbuild.snapshot=false -x test - cp build/distributions/opensearch-security-*.zip ../src/test/resources/security/plugin/opensearch-security.zip - name: Build and run Replication tests run: | ls -al src/test/resources/security/plugin diff --git a/build.gradle b/build.gradle index 4ff5e3fe..fbd17a12 100644 --- a/build.gradle +++ b/build.gradle @@ -56,6 +56,14 @@ buildscript { common_utils_version = System.getProperty("common_utils.version", opensearch_build) kotlin_version = System.getProperty("kotlin.version", "1.6.0") + // For fetching security zip from Maven. + // https://ci.opensearch.org/ci/dbc/distribution-build-opensearch/2.1.0/latest/linux/x64/tar/builds/opensearch/plugins/opensearch-security-2.1.0.0.zip + opensearch_no_snapshot = opensearch_version.replace("-SNAPSHOT","") + security_no_snapshot = opensearch_build.replace("-SNAPSHOT","") + security_plugin_path = "build/dependencies/security" + security_plugin_download_url = 'https://ci.opensearch.org/ci/dbc/distribution-build-opensearch/' + opensearch_no_snapshot + + '/latest/linux/x64/tar/builds/opensearch/plugins/opensearch-security-' + security_no_snapshot + '.zip' + } repositories { @@ -63,6 +71,7 @@ buildscript { mavenCentral() maven { url "https://aws.oss.sonatype.org/content/repositories/snapshots" } maven { url "https://plugins.gradle.org/m2/" } + maven { url "https://d1nvenhzbhpy0q.cloudfront.net/snapshots/lucene/" } } dependencies { @@ -209,7 +218,14 @@ def securityPluginFile = new Callable() { return new RegularFile() { @Override File getAsFile() { - return fileTree("$projectDir/src/test/resources/security/plugin/opensearch-security.zip").getSingleFile() + if (new File("$project.rootDir/$security_plugin_path").exists()) { + project.delete(files("$project.rootDir/$security_plugin_path")) + } + project.mkdir security_plugin_path + ant.get(src: security_plugin_download_url, + dest: security_plugin_path, + httpusecaches: false) + return fileTree(security_plugin_path).getSingleFile() } } } From 9a0d7980f69dc0703d844122c64bf2c2662d65da Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 3 Aug 2022 17:28:40 +0530 Subject: [PATCH 02/84] Adding Index Settings validation before starting replication (#461) (#470) * Adding Index Settings validation before starting replication 
Signed-off-by: Gaurav Bafna * Retrieving default index settings before starting replication Signed-off-by: Gaurav Bafna (cherry picked from commit 93b43f4504b3be58a4083b4da29c79729940a1d4) Co-authored-by: Gaurav Bafna --- .../index/TransportReplicateIndexAction.kt | 15 +++++++++---- .../replication/util/ValidationUtil.kt | 21 +++++++++++++++++++ .../integ/rest/StartReplicationIT.kt | 20 ++++++++++++++++++ 3 files changed, 52 insertions(+), 4 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt b/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt index 4557e44d..563a9996 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt @@ -33,7 +33,7 @@ import org.opensearch.action.support.HandledTransportAction import org.opensearch.action.support.IndicesOptions import org.opensearch.client.Client import org.opensearch.cluster.ClusterState -import org.opensearch.cluster.metadata.IndexMetadata +import org.opensearch.cluster.metadata.MetadataCreateIndexService import org.opensearch.common.inject.Inject import org.opensearch.common.settings.Settings import org.opensearch.env.Environment @@ -48,7 +48,8 @@ class TransportReplicateIndexAction @Inject constructor(transportService: Transp val threadPool: ThreadPool, actionFilters: ActionFilters, private val client : Client, - private val environment: Environment) : + private val environment: Environment, + private val metadataCreateIndexService: MetadataCreateIndexService) : HandledTransportAction(ReplicateIndexAction.NAME, transportService, actionFilters, ::ReplicateIndexRequest), CoroutineScope by GlobalScope { @@ -102,7 +103,13 @@ class TransportReplicateIndexAction @Inject constructor(transportService: Transp throw IllegalArgumentException("Cannot replicate k-NN index - ${request.leaderIndex}") } - ValidationUtil.validateAnalyzerSettings(environment, leaderSettings, request.settings) + ValidationUtil.validateIndexSettings( + environment, + request.followerIndex, + leaderSettings, + request.settings, + metadataCreateIndexService + ) // Setup checks are successful and trigger replication for the index // permissions evaluation to trigger replication is based on the current security context set @@ -128,7 +135,7 @@ class TransportReplicateIndexAction @Inject constructor(transportService: Transp private suspend fun getLeaderIndexSettings(leaderAlias: String, leaderIndex: String): Settings { val remoteClient = client.getRemoteClusterClient(leaderAlias) - val getSettingsRequest = GetSettingsRequest().includeDefaults(false).indices(leaderIndex) + val getSettingsRequest = GetSettingsRequest().includeDefaults(true).indices(leaderIndex) val settingsResponse = remoteClient.suspending(remoteClient.admin().indices()::getSettings, injectSecurityContext = true)(getSettingsRequest) return settingsResponse.indexToSettings.get(leaderIndex) ?: throw IndexNotFoundException("${leaderAlias}:${leaderIndex}") diff --git a/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt b/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt index ad4c20d4..515c96ec 100644 --- a/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt +++ b/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt @@ -31,6 +31,27 @@ object ValidationUtil { private val log = LogManager.getLogger(ValidationUtil::class.java) + fun 
validateIndexSettings( + environment: Environment, + followerIndex: String, + leaderSettings: Settings, + overriddenSettings: Settings, + metadataCreateIndexService: MetadataCreateIndexService + ) { + val settingsList = arrayOf(leaderSettings, overriddenSettings) + val desiredSettingsBuilder = Settings.builder() + // Desired settings are taking leader Settings and then overriding them with desired settings + for (settings in settingsList) { + for (key in settings.keySet()) { + desiredSettingsBuilder.copy(key, settings); + } + } + val desiredSettings = desiredSettingsBuilder.build() + + metadataCreateIndexService.validateIndexSettings(followerIndex,desiredSettings, false) + validateAnalyzerSettings(environment, leaderSettings, overriddenSettings) + } + fun validateAnalyzerSettings(environment: Environment, leaderSettings: Settings, overriddenSettings: Settings) { val analyserSettings = leaderSettings.filter { k: String? -> k!!.matches(Regex("index.analysis.*path")) } for (analyserSetting in analyserSettings.keySet()) { diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt index e9bc717e..954b637d 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt @@ -1168,6 +1168,26 @@ class StartReplicationIT: MultiClusterRestTestCase() { } } + fun `test start replication invalid settings`() { + val followerClient = getClientForCluster(FOLLOWER) + val leaderClient = getClientForCluster(LEADER) + + createConnectionBetweenClusters(FOLLOWER, LEADER) + + val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) + assertThat(createIndexResponse.isAcknowledged).isTrue() + val settings = Settings.builder() + .put("index.data_path", "/random-path/invalid-setting") + .build() + + try { + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, settings = settings)) + } catch (e: ResponseException) { + Assert.assertEquals(400, e.response.statusLine.statusCode) + Assert.assertTrue(e.message!!.contains("Validation Failed: 1: custom path [/random-path/invalid-setting] is not a sub-path of path.shared_data")) + } + } + fun `test that replication is not started when all primary shards are not in active state`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) From 3ffd3e0491d27deabb8796e29eb0925d9c0f8ea2 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 4 Aug 2022 13:26:54 +0530 Subject: [PATCH 03/84] Changes to support replication plugin on OpenSearch v2.2.0 (#469) (#473) Signed-off-by: Sai Kumar (cherry picked from commit db1b226f7b4bc7c940d44e9b0f65f6e6ed4931c6) Co-authored-by: Sai Kumar --- .github/workflows/security-tests.yml | 4 ++-- build.gradle | 6 +++--- .../replication/metadata/TransportUpdateMetadataAction.kt | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/security-tests.yml b/.github/workflows/security-tests.yml index 9b7bcb3f..a1415749 100644 --- a/.github/workflows/security-tests.yml +++ b/.github/workflows/security-tests.yml @@ -15,10 +15,10 @@ jobs: runs-on: ubuntu-latest steps: # This step uses the setup-java Github action: https://github.com/actions/setup-java - - name: Set Up JDK 11 + 
- name: Set Up JDK 17 uses: actions/setup-java@v1 with: - java-version: 11 + java-version: 17 # This step uses the checkout Github action: https://github.com/actions/checkout - name: Checkout Branch uses: actions/checkout@v2 diff --git a/build.gradle b/build.gradle index fbd17a12..c0d3cba0 100644 --- a/build.gradle +++ b/build.gradle @@ -36,7 +36,7 @@ import org.opensearch.gradle.test.RestIntegTestTask buildscript { ext { isSnapshot = "true" == System.getProperty("build.snapshot", "true") - opensearch_version = System.getProperty("opensearch.version", "2.1.0-SNAPSHOT") + opensearch_version = System.getProperty("opensearch.version", "2.2.0-SNAPSHOT") buildVersionQualifier = System.getProperty("build.version_qualifier", "") // e.g. 2.0.0-rc1-SNAPSHOT -> 2.0.0.0-rc1-SNAPSHOT version_tokens = opensearch_version.tokenize('-') @@ -50,7 +50,7 @@ buildscript { // for bwc tests - opensearch_previous_version = System.getProperty("bwc_older_version", "1.3.1") + opensearch_previous_version = System.getProperty("bwc_older_version", "2.1.0") plugin_previous_version = opensearch_previous_version.replaceAll(/(\.\d)([^\d]*)$/, '$1.0$2') common_utils_version = System.getProperty("common_utils.version", opensearch_build) @@ -62,7 +62,7 @@ buildscript { security_no_snapshot = opensearch_build.replace("-SNAPSHOT","") security_plugin_path = "build/dependencies/security" security_plugin_download_url = 'https://ci.opensearch.org/ci/dbc/distribution-build-opensearch/' + opensearch_no_snapshot + - '/latest/linux/x64/tar/builds/opensearch/plugins/opensearch-security-' + security_no_snapshot + '.zip' + '/5858/linux/x64/tar/builds/opensearch/plugins/opensearch-security-' + security_no_snapshot + '.zip' } diff --git a/src/main/kotlin/org/opensearch/replication/metadata/TransportUpdateMetadataAction.kt b/src/main/kotlin/org/opensearch/replication/metadata/TransportUpdateMetadataAction.kt index 7a7fb09d..2fab74ab 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/TransportUpdateMetadataAction.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/TransportUpdateMetadataAction.kt @@ -29,8 +29,8 @@ import org.opensearch.action.admin.indices.settings.put.UpdateSettingsClusterSta import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.IndicesOptions +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction import org.opensearch.action.support.master.AcknowledgedResponse -import org.opensearch.action.support.master.TransportMasterNodeAction import org.opensearch.cluster.ClusterState import org.opensearch.cluster.ack.ClusterStateUpdateResponse import org.opensearch.cluster.ack.OpenIndexClusterStateUpdateResponse @@ -61,7 +61,7 @@ class TransportUpdateMetadataAction @Inject constructor( val updateSettingsService: MetadataUpdateSettingsService, val indexAliasService: MetadataIndexAliasesService, val indexStateService: MetadataIndexStateService -) : TransportMasterNodeAction(UpdateMetadataAction.NAME, +) : TransportClusterManagerNodeAction(UpdateMetadataAction.NAME, transportService, clusterService, threadPool, actionFilters, ::UpdateMetadataRequest, indexNameExpressionResolver) { companion object { @@ -75,7 +75,7 @@ class TransportUpdateMetadataAction @Inject constructor( return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE) } - override fun masterOperation( + override fun clusterManagerOperation( task: Task, request: UpdateMetadataRequest, 
state: ClusterState, @@ -286,7 +286,7 @@ class TransportUpdateMetadataAction @Inject constructor( } } - override fun masterOperation(request: UpdateMetadataRequest?, state: ClusterState?, listener: ActionListener?) { + override fun clusterManagerOperation(request: UpdateMetadataRequest?, state: ClusterState?, listener: ActionListener?) { throw UnsupportedOperationException("The task parameter is required") } From e0b90029892e65f658462807152a189f216406be Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Mon, 8 Aug 2022 11:37:00 +0530 Subject: [PATCH 04/84] Modified security artifacts to fetch from latest build version (#474) (#475) Signed-off-by: Sai Kumar (cherry picked from commit a1d17d3415fcd44844da53a649d9d3dea53aa2f6) Co-authored-by: Sai Kumar --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index c0d3cba0..6bd89770 100644 --- a/build.gradle +++ b/build.gradle @@ -62,7 +62,7 @@ buildscript { security_no_snapshot = opensearch_build.replace("-SNAPSHOT","") security_plugin_path = "build/dependencies/security" security_plugin_download_url = 'https://ci.opensearch.org/ci/dbc/distribution-build-opensearch/' + opensearch_no_snapshot + - '/5858/linux/x64/tar/builds/opensearch/plugins/opensearch-security-' + security_no_snapshot + '.zip' + '/latest/linux/x64/tar/builds/opensearch/plugins/opensearch-security-' + security_no_snapshot + '.zip' } From 5959754d619920a6ef883f42a7047487d95eb647 Mon Sep 17 00:00:00 2001 From: Prudhvi Godithi Date: Tue, 23 Aug 2022 23:36:07 -0700 Subject: [PATCH 05/84] add updateVersion task (#489) Signed-off-by: prudhvigodithi Signed-off-by: prudhvigodithi --- build.gradle | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 6bd89770..c5e8e0e3 100644 --- a/build.gradle +++ b/build.gradle @@ -910,4 +910,15 @@ publishing { } } } -} \ No newline at end of file +} + +// updateVersion: Task to auto increment to the next development iteration +task updateVersion { + onlyIf { System.getProperty('newVersion') } + doLast { + ext.newVersion = System.getProperty('newVersion') + println "Setting version to ${newVersion}." 
+ // String tokenization to support -SNAPSHOT + ant.replaceregexp(file:'build.gradle', match: '"opensearch.version", "\\d.*"', replace: '"opensearch.version", "' + newVersion.tokenize('-')[0] + '-SNAPSHOT"', flags:'g', byline:true) + } +} From 862e79a025e2df8f358b4f7a363ca78ecff9d238 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 26 Aug 2022 09:17:12 +0530 Subject: [PATCH 06/84] Modified _stop replication API to remove any stale replication settings on existing index (#410) (#506) (cherry picked from commit 0bad3074ea75527af227cd9228e088b1877dae95) Co-authored-by: Sai Kumar --- .../TransportStopIndexReplicationAction.kt | 29 ++++--- .../replication/MultiClusterRestTestCase.kt | 31 ++++++- .../integ/rest/StopReplicationIT.kt | 82 ++++++++++++++++++- 3 files changed, 128 insertions(+), 14 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt b/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt index dcc5e9e5..fd82bba4 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt @@ -22,10 +22,7 @@ import org.opensearch.replication.metadata.UpdateMetadataAction import org.opensearch.replication.metadata.UpdateMetadataRequest import org.opensearch.replication.metadata.state.REPLICATION_LAST_KNOWN_OVERALL_STATE import org.opensearch.replication.metadata.state.getReplicationStateParamsForIndex -import org.opensearch.replication.metadata.store.ReplicationMetadata import org.opensearch.replication.seqno.RemoteClusterRetentionLeaseHelper -import org.opensearch.replication.task.index.IndexReplicationParams -import org.opensearch.replication.util.completeWith import org.opensearch.replication.util.coroutineContext import org.opensearch.replication.util.suspendExecute import org.opensearch.replication.util.suspending @@ -39,7 +36,6 @@ import org.opensearch.OpenSearchException import org.opensearch.action.ActionListener import org.opensearch.action.admin.indices.open.OpenIndexRequest import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedResponse import org.opensearch.action.support.master.TransportMasterNodeAction import org.opensearch.client.Client @@ -57,8 +53,6 @@ import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.settings.Settings -import org.opensearch.index.IndexNotFoundException -import org.opensearch.index.shard.ShardId import org.opensearch.replication.util.stackTraceToString import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportService @@ -99,7 +93,7 @@ class TransportStopIndexReplicationAction @Inject constructor(transportService: throw OpenSearchException("Failed to remove index block on ${request.indexName}") } - validateStopReplicationRequest(request) + validateReplicationStateOfIndex(request) // Index will be deleted if replication is stopped while it is restoring. 
So no need to close/reopen val restoring = clusterService.state().custom(RestoreInProgress.TYPE, RestoreInProgress.EMPTY).any { entry -> @@ -117,8 +111,9 @@ class TransportStopIndexReplicationAction @Inject constructor(transportService: throw OpenSearchException("Unable to close index: ${request.indexName}") } } - val replMetadata = replicationMetadataManager.getIndexReplicationMetadata(request.indexName) + try { + val replMetadata = replicationMetadataManager.getIndexReplicationMetadata(request.indexName) val remoteClient = client.getRemoteClusterClient(replMetadata.connectionName) val retentionLeaseHelper = RemoteClusterRetentionLeaseHelper(clusterService.clusterName.value(), remoteClient) retentionLeaseHelper.attemptRemoveRetentionLease(clusterService, replMetadata, request.indexName) @@ -127,12 +122,12 @@ class TransportStopIndexReplicationAction @Inject constructor(transportService: } val clusterStateUpdateResponse : AcknowledgedResponse = - clusterService.waitForClusterStateUpdate("stop_replication") { l -> StopReplicationTask(request, l)} + clusterService.waitForClusterStateUpdate("stop_replication") { l -> StopReplicationTask(request, l)} if (!clusterStateUpdateResponse.isAcknowledged) { throw OpenSearchException("Failed to update cluster state") } - // Index will be deleted if stop is called while it is restoring. So no need to reopen + // Index will be deleted if stop is called while it is restoring. So no need to reopen if (!restoring && state.routingTable.hasIndex(request.indexName)) { val reopenResponse = client.suspending(client.admin().indices()::open, injectSecurityContext = true)(OpenIndexRequest(request.indexName)) @@ -149,7 +144,15 @@ class TransportStopIndexReplicationAction @Inject constructor(transportService: } } - private fun validateStopReplicationRequest(request: StopIndexReplicationRequest) { + private fun validateReplicationStateOfIndex(request: StopIndexReplicationRequest) { + // If replication blocks/settings are present, Stop action should proceed with the clean-up + // This can happen during settings of follower index are carried over in the snapshot and the restore is + // performed using this snapshot. 
+ if (clusterService.state().blocks.hasIndexBlock(request.indexName, INDEX_REPLICATION_BLOCK) + || clusterService.state().metadata.index(request.indexName)?.settings?.get(REPLICATED_INDEX_SETTING.key) != null) { + return + } + val replicationStateParams = getReplicationStateParamsForIndex(clusterService, request.indexName) ?: throw IllegalArgumentException("No replication in progress for index:${request.indexName}") @@ -187,13 +190,15 @@ class TransportStopIndexReplicationAction @Inject constructor(transportService: val mdBuilder = Metadata.builder(currentState.metadata) // remove replicated index setting val currentIndexMetadata = currentState.metadata.index(request.indexName) - if (currentIndexMetadata != null) { + if (currentIndexMetadata != null && + currentIndexMetadata.settings[REPLICATED_INDEX_SETTING.key] != null) { val newIndexMetadata = IndexMetadata.builder(currentIndexMetadata) .settings(Settings.builder().put(currentIndexMetadata.settings).putNull(REPLICATED_INDEX_SETTING.key)) .settingsVersion(1 + currentIndexMetadata.settingsVersion) mdBuilder.put(newIndexMetadata) } newState.metadata(mdBuilder) + return newState.build() } diff --git a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt index 8f09b766..888d8af9 100644 --- a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt +++ b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt @@ -108,6 +108,9 @@ abstract class MultiClusterRestTestCase : OpenSearchTestCase() { val lowLevelClient = restClient.lowLevelClient!! var defaultSecuritySetupCompleted = false + companion object { + const val FS_SNAPSHOT_REPO = "repl_repo" + } } companion object { @@ -253,7 +256,33 @@ abstract class MultiClusterRestTestCase : OpenSearchTestCase() { */ @Before fun setup() { - testClusters.values.forEach { if(it.securityEnabled && !it.defaultSecuritySetupCompleted) setupDefaultSecurityRoles(it) } + testClusters.values.forEach { + registerSnapshotRepository(it) + if(it.securityEnabled && !it.defaultSecuritySetupCompleted) + setupDefaultSecurityRoles(it) + } + } + + /** + * Register snapshot repo - "fs" type on all the clusters + */ + private fun registerSnapshotRepository(testCluster: TestCluster) { + val getResponse: Map = OpenSearchRestTestCase.entityAsMap(testCluster.lowLevelClient.performRequest( + Request("GET", "/_cluster/settings?include_defaults=true&flat_settings=true"))) + val configuredRepositories = (getResponse["defaults"] as Map<*, *>)["path.repo"] as List<*> + if(configuredRepositories.isEmpty()) { + return + } + val repo = configuredRepositories[0] as String + val repoConfig = """ + { + "type": "fs", + "settings": { + "location": "$repo" + } + } + """.trimIndent() + triggerRequest(testCluster.lowLevelClient, "PUT", "_snapshot/${TestCluster.FS_SNAPSHOT_REPO}", repoConfig) } /** diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt index 970d6109..09b797ae 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt @@ -17,22 +17,30 @@ import org.opensearch.replication.MultiClusterRestTestCase import org.opensearch.replication.StartReplicationRequest import org.opensearch.replication.startReplication import org.opensearch.replication.stopReplication +import 
org.opensearch.replication.replicationStatus +import org.opensearch.replication.getShardReplicationTasks +import org.opensearch.replication.`validate status syncing response` import org.apache.http.util.EntityUtils import org.assertj.core.api.Assertions.assertThat import org.assertj.core.api.Assertions.assertThatThrownBy +import org.junit.Assert import org.opensearch.OpenSearchStatusException import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest import org.opensearch.action.index.IndexRequest import org.opensearch.client.Request import org.opensearch.client.RequestOptions import org.opensearch.client.ResponseException import org.opensearch.client.indices.CreateIndexRequest import org.opensearch.client.indices.GetIndexRequest +import org.opensearch.cluster.SnapshotsInProgress import org.opensearch.cluster.metadata.IndexMetadata import org.opensearch.common.settings.Settings import org.opensearch.common.unit.TimeValue import org.opensearch.index.mapper.MapperService -import org.opensearch.test.OpenSearchTestCase.assertBusy +import java.util.Random import java.util.concurrent.TimeUnit @@ -234,4 +242,76 @@ class StopReplicationIT: MultiClusterRestTestCase() { val sourceMap = mapOf("name" to randomAlphaOfLength(5)) followerClient.index(IndexRequest(followerIndexName).id("2").source(sourceMap), RequestOptions.DEFAULT) } + + fun `test stop replication with stale replication settings at leader cluster`() { + val followerClient = getClientForCluster(FOLLOWER) + val leaderClient = getClientForCluster(LEADER) + createConnectionBetweenClusters(FOLLOWER, LEADER, "source") + + val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) + assertThat(createIndexResponse.isAcknowledged).isTrue() + val snapshotSuffix = Random().nextInt(1000).toString() + + try { + followerClient.startReplication( + StartReplicationRequest("source", leaderIndexName, followerIndexName), + TimeValue.timeValueSeconds(10), + true + ) + + assertBusy({ + var statusResp = followerClient.replicationStatus(followerIndexName) + `validate status syncing response`(statusResp) + assertThat(followerClient.getShardReplicationTasks(followerIndexName)).isNotEmpty() + }, 60, TimeUnit.SECONDS) + + // Trigger snapshot on the follower cluster + val createSnapshotRequest = CreateSnapshotRequest(TestCluster.FS_SNAPSHOT_REPO, "test-$snapshotSuffix") + createSnapshotRequest.waitForCompletion(true) + followerClient.snapshot().create(createSnapshotRequest, RequestOptions.DEFAULT) + + assertBusy { + var snapshotStatusResponse = followerClient.snapshot().status(SnapshotsStatusRequest(TestCluster.FS_SNAPSHOT_REPO, + arrayOf("test-$snapshotSuffix")), RequestOptions.DEFAULT) + for (snapshotStatus in snapshotStatusResponse.snapshots) { + Assert.assertEquals(SnapshotsInProgress.State.SUCCESS, snapshotStatus.state) + } + } + + // Restore follower index on leader cluster + val restoreSnapshotRequest = RestoreSnapshotRequest(TestCluster.FS_SNAPSHOT_REPO, "test-$snapshotSuffix") + restoreSnapshotRequest.indices(followerIndexName) + restoreSnapshotRequest.waitForCompletion(true) + restoreSnapshotRequest.renamePattern("(.+)") + restoreSnapshotRequest.renameReplacement("restored-\$1") + 
leaderClient.snapshot().restore(restoreSnapshotRequest, RequestOptions.DEFAULT) + + assertBusy { + assertThat(leaderClient.indices().exists(GetIndexRequest("restored-$followerIndexName"), RequestOptions.DEFAULT)).isEqualTo(true) + } + + // Invoke stop on the new leader cluster index + assertThatThrownBy { leaderClient.stopReplication("restored-$followerIndexName") } + .isInstanceOf(ResponseException::class.java) + .hasMessageContaining("Metadata for restored-$followerIndexName doesn't exist") + + // Start replication on the new leader index + followerClient.startReplication( + StartReplicationRequest("source", "restored-$followerIndexName", "restored-$followerIndexName"), + TimeValue.timeValueSeconds(10), + true, true + ) + + assertBusy({ + var statusResp = followerClient.replicationStatus("restored-$followerIndexName") + `validate status syncing response`(statusResp) + assertThat(followerClient.getShardReplicationTasks("restored-$followerIndexName")).isNotEmpty() + }, 60, TimeUnit.SECONDS) + + } finally { + followerClient.stopReplication("restored-$followerIndexName") + followerClient.stopReplication(followerIndexName) + } + + } } From 9df963906ce0a9c45d71f82a4ab4cb33bc3affad Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Fri, 26 Aug 2022 10:46:16 +0530 Subject: [PATCH 07/84] Updating filters as well during Alias update (#491) (#499) Testing : Integ Test, Local Signed-off-by: Gaurav Bafna --- .../task/index/IndexReplicationTask.kt | 19 ++++++++++++------- .../integ/rest/StartReplicationIT.kt | 10 +++++++--- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt index 33a311c0..156c2732 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt @@ -518,13 +518,18 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript for (alias in toAdd) { log.info("Adding alias ${alias.alias} from $followerIndexName") // Copying writeIndex from leader doesn't cause any issue as writes will be blocked anyways - request.addAliasAction(AliasActions.add().index(followerIndexName) - .alias(alias.alias) - .indexRouting(alias.indexRouting) - .searchRouting(alias.searchRouting) - .writeIndex(alias.writeIndex()) - .isHidden(alias.isHidden) - ) + var aliasAction = AliasActions.add().index(followerIndexName) + .alias(alias.alias) + .indexRouting(alias.indexRouting) + .searchRouting(alias.searchRouting) + .writeIndex(alias.writeIndex()) + .isHidden(alias.isHidden) + + if (alias.filteringRequired()) { + aliasAction = aliasAction.filter(alias.filter.string()) + } + + request.addAliasAction(aliasAction) } var toRemove = followerAliases - leaderAliases diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt index 954b637d..f989859c 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt @@ -65,6 +65,7 @@ import org.opensearch.index.mapper.MapperService import org.opensearch.repositories.fs.FsRepository import org.opensearch.test.OpenSearchTestCase.assertBusy import org.junit.Assert +import org.opensearch.cluster.metadata.AliasMetadata import 
org.opensearch.common.xcontent.DeprecationHandler import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.replication.ReplicationPlugin.Companion.REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING @@ -344,8 +345,10 @@ class StartReplicationIT: MultiClusterRestTestCase() { createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName).alias(Alias("leaderAlias")), RequestOptions.DEFAULT) - assertThat(createIndexResponse.isAcknowledged).isTrue() + val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName) + .alias(Alias("leaderAlias").filter("{\"term\":{\"year\":2016}}").routing("1")) + , RequestOptions.DEFAULT) + assertThat(createIndexResponse.isAcknowledged).isTrue try { followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) @@ -361,6 +364,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { followerClient.indices().getAlias(GetAliasesRequest().indices(followerIndexName), RequestOptions.DEFAULT).aliases[followerIndexName] ) + }, 30L, TimeUnit.SECONDS) } finally { followerClient.stopReplication(followerIndexName) @@ -541,7 +545,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { var indicesAliasesRequest = IndicesAliasesRequest() var aliasAction = IndicesAliasesRequest.AliasActions.add() .index(leaderIndexName) - .alias("alias1") + .alias("alias1").filter("{\"term\":{\"year\":2016}}").routing("1") indicesAliasesRequest.addAliasAction(aliasAction) leaderClient.indices().updateAliases(indicesAliasesRequest, RequestOptions.DEFAULT) From d373ac4e84f588242b9ae697784e44efdf47b3ae Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 26 Aug 2022 15:48:26 +0530 Subject: [PATCH 08/84] Fix for missing ShardReplicationTasks on new nodes (#497) (#524) Signed-off-by: Ankit Kala Signed-off-by: Ankit Kala (cherry picked from commit 805f686a34393cf2cf26cf7011b71396c26f4fe3) Co-authored-by: Ankit Kala --- .../task/index/IndexReplicationTask.kt | 53 ++++++++-------- .../task/index/IndexReplicationTaskTests.kt | 61 ++++++++++++++++++- 2 files changed, 85 insertions(+), 29 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt index 156c2732..906312ac 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt @@ -183,7 +183,8 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript ReplicationState.INIT_FOLLOW -> { log.info("Starting shard tasks") addIndexBlockForReplication() - startShardFollowTasks(emptyMap()) + FollowingState(startNewOrMissingShardTasks()) + } ReplicationState.FOLLOWING -> { if (currentTaskState is FollowingState) { @@ -206,8 +207,8 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript // Tasks need to be started state } else { - state = pollShardTaskStatus((followingTaskState as FollowingState).shardReplicationTasks) - followingTaskState = startMissingShardTasks((followingTaskState as FollowingState).shardReplicationTasks) + state = pollShardTaskStatus() + followingTaskState = FollowingState(startNewOrMissingShardTasks()) when (state) { is MonitoringState -> { updateMetadata() @@ -285,24 
+286,7 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript clusterService.addListener(this) } - private suspend fun startMissingShardTasks(shardTasks: Map>): IndexReplicationState { - val persistentTasks = clusterService.state().metadata.custom(PersistentTasksCustomMetadata.TYPE) - - val runningShardTasks = persistentTasks.findTasks(ShardReplicationExecutor.TASK_NAME, Predicate { true }).stream() - .map { task -> task.params as ShardReplicationParams } - .collect(Collectors.toList()) - - val runningTasksForCurrentIndex = shardTasks.filter { entry -> runningShardTasks.find { task -> task.followerShardId == entry.key } != null} - - val numMissingTasks = shardTasks.size - runningTasksForCurrentIndex.size - if (numMissingTasks > 0) { - log.info("Starting $numMissingTasks missing shard task(s)") - return startShardFollowTasks(runningTasksForCurrentIndex) - } - return FollowingState(shardTasks) - } - - private suspend fun pollShardTaskStatus(shardTasks: Map>): IndexReplicationState { + private suspend fun pollShardTaskStatus(): IndexReplicationState { val failedShardTasks = findAllReplicationFailedShardTasks(followerIndexName, clusterService.state()) if (failedShardTasks.isNotEmpty()) { log.info("Failed shard tasks - ", failedShardTasks) @@ -343,11 +327,16 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript registerCloseListeners() val clusterState = clusterService.state() val persistentTasks = clusterState.metadata.custom(PersistentTasksCustomMetadata.TYPE) - val runningShardTasks = persistentTasks.findTasks(ShardReplicationExecutor.TASK_NAME, Predicate { true }).stream() + + val followerShardIds = clusterService.state().routingTable.indicesRouting().get(followerIndexName).shards() + .map { shard -> shard.value.shardId } + .stream().collect(Collectors.toSet()) + val runningShardTasksForIndex = persistentTasks.findTasks(ShardReplicationExecutor.TASK_NAME, Predicate { true }).stream() .map { task -> task.params as ShardReplicationParams } + .filter {taskParam -> followerShardIds.contains(taskParam.followerShardId) } .collect(Collectors.toList()) - if (runningShardTasks.size == 0) { + if (runningShardTasksForIndex.size != followerShardIds.size) { return InitFollowState } @@ -696,19 +685,27 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript } } - private suspend fun - startShardFollowTasks(tasks: Map>): FollowingState { + suspend fun startNewOrMissingShardTasks(): Map> { assert(clusterService.state().routingTable.hasIndex(followerIndexName)) { "Can't find index $followerIndexName" } val shards = clusterService.state().routingTable.indicesRouting().get(followerIndexName).shards() - val newTasks = shards.map { + val persistentTasks = clusterService.state().metadata.custom(PersistentTasksCustomMetadata.TYPE) + val runningShardTasks = persistentTasks.findTasks(ShardReplicationExecutor.TASK_NAME, Predicate { true }).stream() + .map { task -> task as PersistentTask } + .filter { task -> task.params!!.followerShardId.indexName == followerIndexName} + .collect(Collectors.toMap( + {t: PersistentTask -> t.params!!.followerShardId}, + {t: PersistentTask -> t})) + + val tasks = shards.map { it.value.shardId }.associate { shardId -> - val task = tasks.getOrElse(shardId) { + val task = runningShardTasks.getOrElse(shardId) { startReplicationTask(ShardReplicationParams(leaderAlias, ShardId(leaderIndex, shardId.id), shardId)) } return@associate shardId to task } - return FollowingState(newTasks) + + return tasks } 
private suspend fun cancelRestore() { diff --git a/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt b/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt index 95b3f6f2..569eadf8 100644 --- a/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt +++ b/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt @@ -55,7 +55,6 @@ import org.opensearch.tasks.TaskManager import org.opensearch.test.ClusterServiceUtils import org.opensearch.test.ClusterServiceUtils.setState import org.opensearch.test.OpenSearchTestCase -import org.opensearch.test.OpenSearchTestCase.assertBusy import org.opensearch.threadpool.TestThreadPool import java.util.* import java.util.concurrent.TimeUnit @@ -150,6 +149,66 @@ class IndexReplicationTaskTests : OpenSearchTestCase() { } + fun testStartNewShardTasks() = runBlocking { + val replicationTask: IndexReplicationTask = spy(createIndexReplicationTask()) + var taskManager = Mockito.mock(TaskManager::class.java) + replicationTask.setPersistent(taskManager) + var rc = ReplicationContext(followerIndex) + var rm = ReplicationMetadata(connectionName, ReplicationStoreMetadataType.INDEX.name, ReplicationOverallState.RUNNING.name, "reason", rc, rc, Settings.EMPTY) + replicationTask.setReplicationMetadata(rm) + + // Build cluster state + val indices: MutableList = ArrayList() + indices.add(followerIndex) + var metadata = Metadata.builder() + .put(IndexMetadata.builder(REPLICATION_CONFIG_SYSTEM_INDEX).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) + .put(IndexMetadata.builder(followerIndex).settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0)) + .build() + var routingTableBuilder = RoutingTable.builder() + .addAsNew(metadata.index(REPLICATION_CONFIG_SYSTEM_INDEX)) + .addAsNew(metadata.index(followerIndex)) + var newClusterState = ClusterState.builder(clusterService.state()).routingTable(routingTableBuilder.build()).build() + setState(clusterService, newClusterState) + + // Try starting shard tasks + val shardTasks = replicationTask.startNewOrMissingShardTasks() + assertThat(shardTasks.size == 2).isTrue + } + + + fun testStartMissingShardTasks() = runBlocking { + val replicationTask: IndexReplicationTask = spy(createIndexReplicationTask()) + var taskManager = Mockito.mock(TaskManager::class.java) + replicationTask.setPersistent(taskManager) + var rc = ReplicationContext(followerIndex) + var rm = ReplicationMetadata(connectionName, ReplicationStoreMetadataType.INDEX.name, ReplicationOverallState.RUNNING.name, "reason", rc, rc, Settings.EMPTY) + replicationTask.setReplicationMetadata(rm) + + // Build cluster state + val indices: MutableList = ArrayList() + indices.add(followerIndex) + + val tasks = PersistentTasksCustomMetadata.builder() + var sId = ShardId(Index(followerIndex, "_na_"), 0) + tasks.addTask( "replication:0", ShardReplicationExecutor.TASK_NAME, ShardReplicationParams("remoteCluster", sId, sId), + PersistentTasksCustomMetadata.Assignment("other_node_", "test assignment on other node")) + + var metadata = Metadata.builder() + .put(IndexMetadata.builder(REPLICATION_CONFIG_SYSTEM_INDEX).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) + .put(IndexMetadata.builder(followerIndex).settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0)) + .putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()) + .build() + var routingTableBuilder = RoutingTable.builder() + 
.addAsNew(metadata.index(REPLICATION_CONFIG_SYSTEM_INDEX)) + .addAsNew(metadata.index(followerIndex)) + var newClusterState = ClusterState.builder(clusterService.state()).routingTable(routingTableBuilder.build()).build() + setState(clusterService, newClusterState) + + // Try starting shard tasks + val shardTasks = replicationTask.startNewOrMissingShardTasks() + assertThat(shardTasks.size == 2).isTrue + } + private fun createIndexReplicationTask() : IndexReplicationTask { var threadPool = TestThreadPool("IndexReplicationTask") //Hack Alert : Though it is meant to force rejection , this is to make overallTaskScope not null From 61aaa49025504401d1927310b1b4f70342012c63 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 26 Aug 2022 18:32:26 +0530 Subject: [PATCH 09/84] Add lastExecutionTime for autofollow coroutine (#508) (#516) Signed-off-by: Ankit Kala Signed-off-by: Ankit Kala (cherry picked from commit 8f0a55c9a7e32f643345a972d28e4829d7e3c530) Co-authored-by: Ankit Kala --- .../task/autofollow/AutoFollowTask.kt | 6 +++++ .../integ/rest/UpdateAutoFollowPatternIT.kt | 23 +++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt index 570e39b7..0685b79d 100644 --- a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt @@ -74,6 +74,7 @@ class AutoFollowTask(id: Long, type: String, action: String, description: String try { addRetryScheduler() pollForIndices() + stat.lastExecutionTime = System.currentTimeMillis() delay(replicationSettings.autofollowFetchPollDuration.millis) } catch(e: OpenSearchException) { @@ -252,6 +253,7 @@ class AutoFollowStat: Task.Status { var failCounterForRun :Long=0 var successCount: Long=0 var failedLeaderCall :Long=0 + var lastExecutionTime : Long=0 constructor(name: String, pattern: String) { @@ -266,6 +268,7 @@ class AutoFollowStat: Task.Status { failedIndices = inp.readSet(StreamInput::readString) successCount = inp.readLong() failedLeaderCall = inp.readLong() + lastExecutionTime = inp.readLong() } override fun writeTo(out: StreamOutput) { @@ -275,6 +278,7 @@ class AutoFollowStat: Task.Status { out.writeCollection(failedIndices, StreamOutput::writeString) out.writeLong(successCount) out.writeLong(failedLeaderCall) + out.writeLong(lastExecutionTime) } override fun getWriteableName(): String { @@ -289,6 +293,8 @@ class AutoFollowStat: Task.Status { builder.field("num_failed_start_replication", failCount) builder.field("num_failed_leader_calls", failedLeaderCall) builder.field("failed_indices", failedIndices) + builder.field("last_execution_time", lastExecutionTime) return builder.endObject() } + } diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt index d1a757bd..82db8fb5 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt @@ -91,6 +91,7 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { .isEqualTo(true) followerClient.waitForShardTaskStart(leaderIndexNameNew, waitForShardTask) followerClient.waitForShardTaskStart(leaderIndexName, 
waitForShardTask) + var stats = followerClient.AutoFollowStats() var af_stats = stats.get("autofollow_stats")!! as ArrayList> for (key in af_stats) { @@ -118,8 +119,13 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { TimeValue.timeValueSeconds(30)) val clusterUpdateSetttingsReq = ClusterUpdateSettingsRequest().persistentSettings(settings) val clusterUpdateResponse = followerClient.cluster().putSettings(clusterUpdateSetttingsReq, RequestOptions.DEFAULT) + + var lastExecutionTime = 0L + var stats = followerClient.AutoFollowStats() + Assert.assertTrue(clusterUpdateResponse.isAcknowledged) followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern) + leaderIndexNameNew = createRandomIndex(leaderClient) // Verify that newly created index on leader which match the pattern are also replicated. assertBusy({ @@ -127,8 +133,25 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { .exists(GetIndexRequest(leaderIndexNameNew), RequestOptions.DEFAULT)) .isEqualTo(true) followerClient.waitForShardTaskStart(leaderIndexNameNew, waitForShardTask) + var af_stats = stats.get("autofollow_stats")!! as ArrayList> + for (key in af_stats) { + if(key["name"] == indexPatternName) { + Assertions.assertThat(key["last_execution_time"]!! as Long).isNotEqualTo(0L) + lastExecutionTime = key["last_execution_time"]!! as Long + } + } + }, 30, TimeUnit.SECONDS) + assertBusy({ + var af_stats = stats.get("autofollow_stats")!! as ArrayList> + for (key in af_stats) { + if(key["name"] == indexPatternName) { + Assertions.assertThat(key["last_execution_time"]!! as Long).isNotEqualTo(lastExecutionTime) + } + } + }, 40, TimeUnit.SECONDS) + } finally { followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName) From ff4fe663443456fdfba1d8df57223d5265e6e9dc Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 30 Aug 2022 08:38:13 +0530 Subject: [PATCH 10/84] For segrep enabled indices, use NRTReplicationEngine for replica shards (#486) (#533) * Update engine factory to return NRTReplicationEngine for replica shards Signed-off-by: Suraj Singh * Address review comments Signed-off-by: Suraj Singh Signed-off-by: Suraj Singh (cherry picked from commit 81c2002c889094ae4071e403e76393c00c9b567c) Co-authored-by: Suraj Singh --- .../org/opensearch/replication/ReplicationPlugin.kt | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt index 3fa602b0..53c77866 100644 --- a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt +++ b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt @@ -143,6 +143,9 @@ import org.opensearch.watcher.ResourceWatcherService import java.util.Optional import java.util.function.Supplier +import org.opensearch.index.engine.NRTReplicationEngine + + @OpenForTesting internal class ReplicationPlugin : Plugin(), ActionPlugin, PersistentTaskPlugin, RepositoryPlugin, EnginePlugin { @@ -359,7 +362,14 @@ internal class ReplicationPlugin : Plugin(), ActionPlugin, PersistentTaskPlugin, override fun getEngineFactory(indexSettings: IndexSettings): Optional { return if (indexSettings.settings.get(REPLICATED_INDEX_SETTING.key) != null) { - Optional.of(EngineFactory { config -> ReplicationEngine(config) }) + Optional.of(EngineFactory { config -> + // Use NRTSegmentReplicationEngine for SEGMENT replication type 
indices replica shards + if (config.isReadOnlyReplica) { + NRTReplicationEngine(config) + } else { + ReplicationEngine(config) + } + }) } else { Optional.empty() } From 4006ef6af1b6b09ecc66690edbd4fd6c2ccb4949 Mon Sep 17 00:00:00 2001 From: Aman Khare <85096200+amkhar@users.noreply.github.com> Date: Wed, 7 Sep 2022 13:55:10 +0530 Subject: [PATCH 11/84] Bumped snakeyaml version to address CVE-2022-25857 (#540) Signed-off-by: Aman Khare Signed-off-by: Aman Khare Co-authored-by: Aman Khare --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index c5e8e0e3..e56ddd5c 100644 --- a/build.gradle +++ b/build.gradle @@ -121,7 +121,7 @@ configurations.all { force 'org.apache.httpcomponents.client5:httpclient5:5.0.3' force 'org.apache.httpcomponents.client5:httpclient5-osgi:5.0.3' force 'com.fasterxml.jackson.core:jackson-databind:2.12.6' - force 'org.yaml:snakeyaml:1.26' + force 'org.yaml:snakeyaml:1.31' force 'org.codehaus.plexus:plexus-utils:3.0.24' } } From 0a44d0b41c4c4b3bdb38bdfb2997203e1020700c Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 7 Sep 2022 14:20:30 +0530 Subject: [PATCH 12/84] Increment version to 2.3.0-SNAPSHOT (#536) Signed-off-by: opensearch-ci-bot Signed-off-by: opensearch-ci-bot Co-authored-by: opensearch-ci-bot --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index e56ddd5c..4e0895fd 100644 --- a/build.gradle +++ b/build.gradle @@ -36,7 +36,7 @@ import org.opensearch.gradle.test.RestIntegTestTask buildscript { ext { isSnapshot = "true" == System.getProperty("build.snapshot", "true") - opensearch_version = System.getProperty("opensearch.version", "2.2.0-SNAPSHOT") + opensearch_version = System.getProperty("opensearch.version", "2.3.0-SNAPSHOT") buildVersionQualifier = System.getProperty("build.version_qualifier", "") // e.g. 
2.0.0-rc1-SNAPSHOT -> 2.0.0.0-rc1-SNAPSHOT version_tokens = opensearch_version.tokenize('-') From 78a6fb7d9b106306be660763611e193c534bbda7 Mon Sep 17 00:00:00 2001 From: Ankit Kala Date: Wed, 7 Sep 2022 15:08:48 +0530 Subject: [PATCH 13/84] 2.3.0 release notes (#547) Signed-off-by: Ankit Kala Signed-off-by: Ankit Kala --- ...s-cluster-replication.release-notes-2.3.0.0.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 release-notes/opensearch-cross-cluster-replication.release-notes-2.3.0.0.md diff --git a/release-notes/opensearch-cross-cluster-replication.release-notes-2.3.0.0.md b/release-notes/opensearch-cross-cluster-replication.release-notes-2.3.0.0.md new file mode 100644 index 00000000..6a48a941 --- /dev/null +++ b/release-notes/opensearch-cross-cluster-replication.release-notes-2.3.0.0.md @@ -0,0 +1,15 @@ +## Version 2.3.0.0 Release Notes + +Compatible with OpenSearch 2.3.0 + +### Bug Fixes +* Updating filters as well during Alias update ([#491](https://github.com/opensearch-project/cross-cluster-replication/pull/491)) +* Modified _stop replication API to remove any stale replication settings on existing index ([#410](https://github.com/opensearch-project/cross-cluster-replication/pull/410)) +* Fix for missing ShardReplicationTasks on new nodes ([#497](https://github.com/opensearch-project/cross-cluster-replication/pull/497)) +* For segrep enabled indices, use NRTReplicationEngine for replica shards ([#486](https://github.com/opensearch-project/cross-cluster-replication/pull/486)) + +### Enhancements +* Add lastExecutionTime for autofollow coroutine ([#508](https://github.com/opensearch-project/cross-cluster-replication/pull/508)) +* Modified security artifacts to fetch from latest build version ([#474](https://github.com/opensearch-project/cross-cluster-replication/pull/474)) +* add updateVersion task ([#489](https://github.com/opensearch-project/cross-cluster-replication/pull/489)) +* Bumped snakeyaml version to address CVE-2022-25857 ([#540](https://github.com/opensearch-project/cross-cluster-replication/pull/540)) \ No newline at end of file From 569c0aba8e08e88ea69539cf6f27c5b3c4bc3c71 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 4 Oct 2022 14:41:29 +0530 Subject: [PATCH 14/84] Upgrade Snakeyml and Jackson (#574) Signed-off-by: Sooraj Sinha (cherry picked from commit bf7ac767d24d89cc44b4d11236c210932eb850bd) Co-authored-by: Sooraj Sinha --- build.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.gradle b/build.gradle index 4e0895fd..1256bc32 100644 --- a/build.gradle +++ b/build.gradle @@ -120,8 +120,8 @@ configurations.all { force 'org.apache.httpcomponents:httpclient-osgi:4.5.13' force 'org.apache.httpcomponents.client5:httpclient5:5.0.3' force 'org.apache.httpcomponents.client5:httpclient5-osgi:5.0.3' - force 'com.fasterxml.jackson.core:jackson-databind:2.12.6' - force 'org.yaml:snakeyaml:1.31' + force 'com.fasterxml.jackson.core:jackson-databind:2.13.4' + force 'org.yaml:snakeyaml:1.32' force 'org.codehaus.plexus:plexus-utils:3.0.24' } } From 3603326a379ee2d3d66126e7b2ebaf9fcee4feec Mon Sep 17 00:00:00 2001 From: Ankit Kala Date: Fri, 21 Oct 2022 09:51:08 +0530 Subject: [PATCH 15/84] Merge pull request #589 from prudhvigodithi/main (#593) add groupId = org.opensearch.plugin (cherry picked from commit 5882c09221306c5eff140fc29939a0d2395d4b25) Signed-off-by: Ankit 
Kala Co-authored-by: sricharanvuppu <113983630+sricharanvuppu@users.noreply.github.com> --- build.gradle | 1 + 1 file changed, 1 insertion(+) diff --git a/build.gradle b/build.gradle index 1256bc32..30ab15a0 100644 --- a/build.gradle +++ b/build.gradle @@ -895,6 +895,7 @@ publishing { pom { name = opensearchplugin.name description = opensearchplugin.description + groupId = "org.opensearch.plugin" licenses { license { name = "The Apache License, Version 2.0" From 93c767b621e4843b3c9f45ed74bc03bf05aaa2c6 Mon Sep 17 00:00:00 2001 From: Mohit Kumar <113413713+mohitamg@users.noreply.github.com> Date: Thu, 20 Oct 2022 17:01:30 +0530 Subject: [PATCH 16/84] Updated jackson databind version to 2.13.4.2 (#596) Signed-off-by: Mohit Kumar Signed-off-by: Mohit Kumar (cherry picked from commit 0ec98b11c18ae06ab4454cbeb5e9f270891ac297) --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 30ab15a0..3bf5f3e3 100644 --- a/build.gradle +++ b/build.gradle @@ -120,7 +120,7 @@ configurations.all { force 'org.apache.httpcomponents:httpclient-osgi:4.5.13' force 'org.apache.httpcomponents.client5:httpclient5:5.0.3' force 'org.apache.httpcomponents.client5:httpclient5-osgi:5.0.3' - force 'com.fasterxml.jackson.core:jackson-databind:2.13.4' + force 'com.fasterxml.jackson.core:jackson-databind:2.13.4.2' force 'org.yaml:snakeyaml:1.32' force 'org.codehaus.plexus:plexus-utils:3.0.24' } From 278c1ef46babac17132f8575da3fecdabc3a363f Mon Sep 17 00:00:00 2001 From: Mohit Kumar Date: Wed, 26 Oct 2022 10:35:50 +0530 Subject: [PATCH 17/84] Changed jacoco tool version to 0.8.7 Signed-off-by: Mohit Kumar --- build.gradle | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 3bf5f3e3..d53c7db9 100644 --- a/build.gradle +++ b/build.gradle @@ -79,7 +79,7 @@ buildscript { classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:${kotlin_version}" classpath "org.jetbrains.kotlin:kotlin-allopen:${kotlin_version}" classpath "io.gitlab.arturbosch.detekt:detekt-gradle-plugin:1.0.0-RC15" - classpath "org.jacoco:org.jacoco.agent:0.8.5" + classpath "org.jacoco:org.jacoco.agent:0.8.7" } } @@ -92,6 +92,9 @@ plugins { allprojects { group = "org.opensearch" version = "${opensearch_build}" + // Have resolve the jacoco version here to work with kotlin + // Ref: https://github.com/jacoco/jacoco/issues/1187 + jacoco.toolVersion = "0.8.7" } apply plugin: 'java' From 6766bed82a434a19e3e5de1acee05c6429a17936 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 26 Oct 2022 12:32:11 +0530 Subject: [PATCH 18/84] Increment version to 2.4.0-SNAPSHOT (#553) Signed-off-by: opensearch-ci-bot Signed-off-by: opensearch-ci-bot Co-authored-by: opensearch-ci-bot --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 30ab15a0..a1280c79 100644 --- a/build.gradle +++ b/build.gradle @@ -36,7 +36,7 @@ import org.opensearch.gradle.test.RestIntegTestTask buildscript { ext { isSnapshot = "true" == System.getProperty("build.snapshot", "true") - opensearch_version = System.getProperty("opensearch.version", "2.3.0-SNAPSHOT") + opensearch_version = System.getProperty("opensearch.version", "2.4.0-SNAPSHOT") buildVersionQualifier = System.getProperty("build.version_qualifier", "") // e.g. 
2.0.0-rc1-SNAPSHOT -> 2.0.0.0-rc1-SNAPSHOT version_tokens = opensearch_version.tokenize('-') From 967961b337b090686119b3ea281d9d2b05080cf8 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 26 Oct 2022 13:03:34 +0530 Subject: [PATCH 19/84] Add windows & mac build (#591) (#599) Signed-off-by: Ankit Kala Signed-off-by: Ankit Kala (cherry picked from commit d12271a2909f32cf766d2405f7b1d3b8b1746c44) Co-authored-by: Ankit Kala --- .github/workflows/build-and-test.yml | 51 ++++++++++++++++++++++++++++ .github/workflows/build.yml | 32 ++++++----------- 2 files changed, 61 insertions(+), 22 deletions(-) create mode 100644 .github/workflows/build-and-test.yml diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml new file mode 100644 index 00000000..e8a6ace5 --- /dev/null +++ b/.github/workflows/build-and-test.yml @@ -0,0 +1,51 @@ +name: CCR Test Workflow +# This workflow is triggered on pull requests to main branch +on: + pull_request: + branches: + - '*' + push: + branches: + - '*' + +# We build for all combinations but run tests only on one combination (linux & latest java) +jobs: + build: + continue-on-error: true + strategy: + matrix: + java: + - 11 + - 17 + # Job name + name: Run integration tests on linux with Java ${{ matrix.java }} + runs-on: ubuntu-latest + steps: + # This step uses the setup-java Github action: https://github.com/actions/setup-java + - name: Set Up JDK ${{ matrix.java }} + uses: actions/setup-java@v1 + with: + java-version: ${{ matrix.java }} + # This step uses the checkout Github action: https://github.com/actions/checkout + - name: Checkout Branch + uses: actions/checkout@v2 + - name: Build and run Replication tests + run: | + ./gradlew clean release -D"build.snapshot=true" + - name: Upload failed logs + uses: actions/upload-artifact@v2 + if: failure() + with: + name: logs + path: | + build/testclusters/integTest-*/logs/* + build/testclusters/leaderCluster-*/logs/* + build/testclusters/followCluster-*/logs/* + - name: Create Artifact Path + run: | + mkdir -p cross-cluster-replication-artifacts + cp ./build/distributions/*.zip cross-cluster-replication-artifacts + - name: Uploads coverage + with: + fetch-depth: 2 + uses: codecov/codecov-action@v1.2.1 diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c3f599a0..1faf6511 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,4 +1,4 @@ -name: Test and Build Workflow +name: Build Replication plugin # This workflow is triggered on pull requests to main branch on: pull_request: @@ -8,16 +8,21 @@ on: branches: - '*' +# We build for other platforms except linux which is already covered in build-and-test. +# Also, We're not running tests here as those are already covered with linux build. 
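+# As a rough local equivalent of what the matrix jobs below run (assuming only the Gradle
+# wrapper already checked into this repository), one could invoke:
+#   ./gradlew clean release -D"build.snapshot=true" -x test -x IntegTest
+# i.e. build the distribution while skipping the unit and integration test tasks.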
jobs: build: + continue-on-error: true strategy: matrix: java: - - 11 - 17 + os: + - windows-latest + - macos-latest # Job name - name: Build Replication plugin - runs-on: ubuntu-latest + name: Java ${{ matrix.java }} On ${{ matrix.os }} + runs-on: ${{ matrix.os }} steps: # This step uses the setup-java Github action: https://github.com/actions/setup-java - name: Set Up JDK ${{ matrix.java }} @@ -29,21 +34,4 @@ jobs: uses: actions/checkout@v2 - name: Build and run Replication tests run: | - ./gradlew clean release -Dbuild.snapshot=true - - name: Upload failed logs - uses: actions/upload-artifact@v2 - if: failure() - with: - name: logs - path: | - build/testclusters/integTest-*/logs/* - build/testclusters/leaderCluster-*/logs/* - build/testclusters/followCluster-*/logs/* - - name: Create Artifact Path - run: | - mkdir -p cross-cluster-replication-artifacts - cp ./build/distributions/*.zip cross-cluster-replication-artifacts - - name: Uploads coverage - with: - fetch-depth: 2 - uses: codecov/codecov-action@v1.2.1 + ./gradlew clean release -D"build.snapshot=true" -x test -x IntegTest \ No newline at end of file From eab13029dd5de9a13ddacbaa81a0757c9276965a Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Mon, 31 Oct 2022 14:17:09 +0530 Subject: [PATCH 20/84] Include default index settings during leader setting validation (#601) (#602) Signed-off-by: Ankit Kala Signed-off-by: Ankit Kala (cherry picked from commit 67a7073e77d62e1d4b5cb75fc44ff00d06b2a6cc) Co-authored-by: Ankit Kala --- .../index/TransportReplicateIndexAction.kt | 21 +++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt b/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt index 563a9996..80b0e30b 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt @@ -93,7 +93,9 @@ class TransportReplicateIndexAction @Inject constructor(transportService: Transp !leaderSettings.get(ReplicationPlugin.REPLICATED_INDEX_SETTING.key).isNullOrBlank()) { throw IllegalArgumentException("Cannot Replicate a Replicated Index ${request.leaderIndex}") } - if (!leaderSettings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.key, true)) { + + // Soft deletes should be enabled for replication to work. 
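+        // Note: the default value here flips from true to false. Leader settings are now fetched
+        // with includeDefaults(true) (see getLeaderIndexSettings below), so the soft-deletes flag
+        // is expected to always be present in the response; if it is somehow missing we fail
+        // closed and reject replication instead of assuming soft deletes are enabled.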
+ if (!leaderSettings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.key, false)) { throw IllegalArgumentException("Cannot Replicate an index where the setting ${IndexSettings.INDEX_SOFT_DELETES_SETTING.key} is disabled") } @@ -120,6 +122,7 @@ class TransportReplicateIndexAction @Inject constructor(transportService: Transp } } + private suspend fun getLeaderClusterState(leaderAlias: String, leaderIndex: String): ClusterState { val remoteClusterClient = client.getRemoteClusterClient(leaderAlias) val clusterStateRequest = remoteClusterClient.admin().cluster().prepareState() @@ -136,8 +139,18 @@ class TransportReplicateIndexAction @Inject constructor(transportService: Transp private suspend fun getLeaderIndexSettings(leaderAlias: String, leaderIndex: String): Settings { val remoteClient = client.getRemoteClusterClient(leaderAlias) val getSettingsRequest = GetSettingsRequest().includeDefaults(true).indices(leaderIndex) - val settingsResponse = remoteClient.suspending(remoteClient.admin().indices()::getSettings, - injectSecurityContext = true)(getSettingsRequest) - return settingsResponse.indexToSettings.get(leaderIndex) ?: throw IndexNotFoundException("${leaderAlias}:${leaderIndex}") + val settingsResponse = remoteClient.suspending( + remoteClient.admin().indices()::getSettings, + injectSecurityContext = true + )(getSettingsRequest) + + val leaderSettings = settingsResponse.indexToSettings.get(leaderIndex) + ?: throw IndexNotFoundException("${leaderAlias}:${leaderIndex}") + val leaderDefaultSettings = settingsResponse.indexToDefaultSettings.get(leaderIndex) + ?: throw IndexNotFoundException("${leaderAlias}:${leaderIndex}") + + // Since we want user configured as well as default settings, we combine both by putting default settings + // and then the explicitly set ones to override the default settings. 
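+        // For example (hypothetical values): if the defaults report index.soft_deletes.enabled=true
+        // and the leader explicitly sets index.number_of_replicas=2, the merged Settings carries
+        // both keys, and for any key present in both the explicitly set value wins since it is
+        // applied last.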
+ return Settings.builder().put(leaderDefaultSettings).put(leaderSettings).build() } } From dab654677e4e5aa717c0e9377f20ce3b75df3712 Mon Sep 17 00:00:00 2001 From: Ankit Kala Date: Mon, 31 Oct 2022 14:19:51 +0530 Subject: [PATCH 21/84] Add 2.4 release notes (#617) Signed-off-by: Ankit Kala Signed-off-by: Ankit Kala --- ...ross-cluster-replication.release-notes-2.4.0.0.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 release-notes/opensearch-cross-cluster-replication.release-notes-2.4.0.0.md diff --git a/release-notes/opensearch-cross-cluster-replication.release-notes-2.4.0.0.md b/release-notes/opensearch-cross-cluster-replication.release-notes-2.4.0.0.md new file mode 100644 index 00000000..c88eba10 --- /dev/null +++ b/release-notes/opensearch-cross-cluster-replication.release-notes-2.4.0.0.md @@ -0,0 +1,12 @@ +## Version 2.4.0.0 Release Notes + +Compatible with OpenSearch 2.4.0 + +### Bug Fixes +* Updated jackson databind version to 2.13.4.2 ([597](https://github.com/opensearch-project/cross-cluster-replication/pull/597)) +* Include default index settings during leader setting validation ([601](https://github.com/opensearch-project/cross-cluster-replication/pull/601)) + +### Infrastructure +* Add support for windows & mac build ([591](https://github.com/opensearch-project/cross-cluster-replication/pull/591)) +* add groupId = org.opensearch.plugin ([589](https://github.com/opensearch-project/cross-cluster-replication/pull/589)) +* Upgrade Snakeyml and Jackson ([574](https://github.com/opensearch-project/cross-cluster-replication/pull/574)) \ No newline at end of file From a7e86d9ab5779031ccaebd097d100e56c2ee3772 Mon Sep 17 00:00:00 2001 From: sricharanvuppu <113983630+sricharanvuppu@users.noreply.github.com> Date: Mon, 21 Nov 2022 19:03:36 +0530 Subject: [PATCH 22/84] stopReplication API removed from integ test cases (#625) Signed-off-by: sricharanvuppu cherry-picked from commit https://github.com/opensearch-project/cross-cluster-replication/commit/da8d0928c8988d4d8bb44928c83a93f8b143c15c --- .../replication/BasicReplicationIT.kt | 135 +-- .../replication/MultiClusterRestTestCase.kt | 24 + .../integ/rest/ClusterRerouteFollowerIT.kt | 69 +- .../integ/rest/ClusterRerouteLeaderIT.kt | 67 +- .../integ/rest/PauseReplicationIT.kt | 201 ++-- .../integ/rest/ReplicationStatusIT.kt | 14 +- .../integ/rest/ResumeReplicationIT.kt | 207 ++-- .../replication/integ/rest/SecurityBase.kt | 3 - .../integ/rest/SecurityCustomRolesIT.kt | 242 ++-- .../integ/rest/SecurityCustomRolesLeaderIT.kt | 15 - .../integ/rest/SecurityDlsFlsIT.kt | 170 +-- .../integ/rest/StartReplicationIT.kt | 1055 +++++++---------- .../integ/rest/StopReplicationIT.kt | 107 +- .../integ/rest/UpdateAutoFollowPatternIT.kt | 104 +- .../replication/task/TaskCancellationIT.kt | 48 +- .../shard/TransportReplayChangesActionIT.kt | 96 +- 16 files changed, 996 insertions(+), 1561 deletions(-) diff --git a/src/test/kotlin/org/opensearch/replication/BasicReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/BasicReplicationIT.kt index 7d426ace..304f88f6 100644 --- a/src/test/kotlin/org/opensearch/replication/BasicReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/BasicReplicationIT.kt @@ -44,120 +44,97 @@ class BasicReplicationIT : MultiClusterRestTestCase() { val follower = getClientForCluster(FOLL) val leader = getClientForCluster(LEADER) createConnectionBetweenClusters(FOLL, LEADER) - val leaderIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT) val 
followerIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT) // Create an empty index on the leader and trigger replication on it val createIndexResponse = leader.indices().create(CreateIndexRequest(leaderIndex), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - follower.startReplication(StartReplicationRequest("source", leaderIndex, followerIndex), waitForRestore=true) - val source = mapOf("name" to randomAlphaOfLength(20), "age" to randomInt().toString()) - var response = leader.index(IndexRequest(leaderIndex).id("1").source(source), RequestOptions.DEFAULT) + follower.startReplication(StartReplicationRequest("source", leaderIndex, followerIndex), waitForRestore=true) + val source = mapOf("name" to randomAlphaOfLength(20), "age" to randomInt().toString()) + var response = leader.index(IndexRequest(leaderIndex).id("1").source(source), RequestOptions.DEFAULT) + assertThat(response.result).isEqualTo(Result.CREATED) + assertBusy({ + val getResponse = follower.get(GetRequest(followerIndex, "1"), RequestOptions.DEFAULT) + assertThat(getResponse.isExists).isTrue() + assertThat(getResponse.sourceAsMap).isEqualTo(source) + }, 60L, TimeUnit.SECONDS) + // Ensure force merge on leader doesn't impact replication + for (i in 2..5) { + response = leader.index(IndexRequest(leaderIndex).id("$i").source(source), RequestOptions.DEFAULT) assertThat(response.result).isEqualTo(Result.CREATED) - - assertBusy({ - val getResponse = follower.get(GetRequest(followerIndex, "1"), RequestOptions.DEFAULT) + } + leader.indices().forcemerge(ForceMergeRequest(leaderIndex), RequestOptions.DEFAULT) + for (i in 6..10) { + response = leader.index(IndexRequest(leaderIndex).id("$i").source(source), RequestOptions.DEFAULT) + assertThat(response.result).isEqualTo(Result.CREATED) + } + assertBusy({ + for (i in 2..10) { + val getResponse = follower.get(GetRequest(followerIndex, "$i"), RequestOptions.DEFAULT) assertThat(getResponse.isExists).isTrue() assertThat(getResponse.sourceAsMap).isEqualTo(source) - }, 60L, TimeUnit.SECONDS) - - // Ensure force merge on leader doesn't impact replication - for (i in 2..5) { - response = leader.index(IndexRequest(leaderIndex).id("$i").source(source), RequestOptions.DEFAULT) - assertThat(response.result).isEqualTo(Result.CREATED) - } - leader.indices().forcemerge(ForceMergeRequest(leaderIndex), RequestOptions.DEFAULT) - for (i in 6..10) { - response = leader.index(IndexRequest(leaderIndex).id("$i").source(source), RequestOptions.DEFAULT) - assertThat(response.result).isEqualTo(Result.CREATED) } - assertBusy({ - for (i in 2..10) { - val getResponse = follower.get(GetRequest(followerIndex, "$i"), RequestOptions.DEFAULT) - assertThat(getResponse.isExists).isTrue() - assertThat(getResponse.sourceAsMap).isEqualTo(source) - } - }, 60L, TimeUnit.SECONDS) - - // Force merge on follower however isn't allowed due to WRITE block - Assertions.assertThatThrownBy { - follower.indices().forcemerge(ForceMergeRequest(followerIndex), RequestOptions.DEFAULT) - }.isInstanceOf(OpenSearchStatusException::class.java) - .hasMessage("OpenSearch exception [type=cluster_block_exception, reason=index [$followerIndex] " + - "blocked by: [FORBIDDEN/1000/index read-only(cross-cluster-replication)];]") - - } finally { - follower.stopReplication(followerIndex) - } + }, 60L, TimeUnit.SECONDS) + // Force merge on follower however isn't allowed due to WRITE block + Assertions.assertThatThrownBy { + follower.indices().forcemerge(ForceMergeRequest(followerIndex), RequestOptions.DEFAULT) + 
}.isInstanceOf(OpenSearchStatusException::class.java) + .hasMessage("OpenSearch exception [type=cluster_block_exception, reason=index [$followerIndex] " + + "blocked by: [FORBIDDEN/1000/index read-only(cross-cluster-replication)];]") } fun `test existing index replication`() { val follower = getClientForCluster(FOLL) val leader = getClientForCluster(LEADER) createConnectionBetweenClusters(FOLL, LEADER) - // Create an index with data before commencing replication val leaderIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT) val followerIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT) val source = mapOf("name" to randomAlphaOfLength(20), "age" to randomInt().toString()) val response = leader.index(IndexRequest(leaderIndex).id("1").source(source), RequestOptions.DEFAULT) assertThat(response.result).withFailMessage("Failed to create leader data").isEqualTo(Result.CREATED) - follower.startReplication(StartReplicationRequest("source", leaderIndex, followerIndex), waitForRestore=true) - assertBusy { val getResponse = follower.get(GetRequest(followerIndex, "1"), RequestOptions.DEFAULT) assertThat(getResponse.isExists).isTrue() assertThat(getResponse.sourceAsMap).isEqualTo(source) } - follower.stopReplication(followerIndex) } fun `test that index operations are replayed to follower during replication`() { val followerClient = getClientForCluster(FOLL) val leaderClient = getClientForCluster(LEADER) createConnectionBetweenClusters(FOLL, LEADER) - + val leaderIndexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT) + val followerIndexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT) val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore=true) - - // Create document - var source = mapOf("name" to randomAlphaOfLength(20), "age" to randomInt().toString()) - var response = leaderClient.index(IndexRequest(leaderIndexName).id("1").source(source), RequestOptions.DEFAULT) - assertThat(response.result).withFailMessage("Failed to create leader data").isEqualTo(Result.CREATED) - - assertBusy({ - val getResponse = followerClient.get(GetRequest(followerIndexName, "1"), RequestOptions.DEFAULT) - assertThat(getResponse.isExists).isTrue() - assertThat(getResponse.sourceAsMap).isEqualTo(source) - }, 60L, TimeUnit.SECONDS) - - // Update document - source = mapOf("name" to randomAlphaOfLength(20), "age" to randomInt().toString()) - response = leaderClient.index(IndexRequest(leaderIndexName).id("1").source(source), RequestOptions.DEFAULT) - assertThat(response.result).withFailMessage("Failed to update leader data").isEqualTo(Result.UPDATED) - - assertBusy({ - val getResponse = followerClient.get(GetRequest(followerIndexName, "1"), RequestOptions.DEFAULT) - assertThat(getResponse.isExists).isTrue() - assertThat(getResponse.sourceAsMap).isEqualTo(source) - },60L, TimeUnit.SECONDS) - - // Delete document - val deleteResponse = leaderClient.delete(DeleteRequest(leaderIndexName).id("1"), RequestOptions.DEFAULT) - assertThat(deleteResponse.result).withFailMessage("Failed to delete leader data").isEqualTo(Result.DELETED) - - assertBusy({ - val getResponse = followerClient.get(GetRequest(followerIndexName, "1"), RequestOptions.DEFAULT) - assertThat(getResponse.isExists).isFalse() - }, 60L, TimeUnit.SECONDS) - } finally { - 
followerClient.stopReplication(followerIndexName) - } + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore=true) + // Create document + var source = mapOf("name" to randomAlphaOfLength(20), "age" to randomInt().toString()) + var response = leaderClient.index(IndexRequest(leaderIndexName).id("1").source(source), RequestOptions.DEFAULT) + assertThat(response.result).withFailMessage("Failed to create leader data").isEqualTo(Result.CREATED) + assertBusy({ + val getResponse = followerClient.get(GetRequest(followerIndexName, "1"), RequestOptions.DEFAULT) + assertThat(getResponse.isExists).isTrue() + assertThat(getResponse.sourceAsMap).isEqualTo(source) + }, 60L, TimeUnit.SECONDS) + // Update document + source = mapOf("name" to randomAlphaOfLength(20), "age" to randomInt().toString()) + response = leaderClient.index(IndexRequest(leaderIndexName).id("1").source(source), RequestOptions.DEFAULT) + assertThat(response.result).withFailMessage("Failed to update leader data").isEqualTo(Result.UPDATED) + assertBusy({ + val getResponse = followerClient.get(GetRequest(followerIndexName, "1"), RequestOptions.DEFAULT) + assertThat(getResponse.isExists).isTrue() + assertThat(getResponse.sourceAsMap).isEqualTo(source) + },60L, TimeUnit.SECONDS) + // Delete document + val deleteResponse = leaderClient.delete(DeleteRequest(leaderIndexName).id("1"), RequestOptions.DEFAULT) + assertThat(deleteResponse.result).withFailMessage("Failed to delete leader data").isEqualTo(Result.DELETED) + assertBusy({ + val getResponse = followerClient.get(GetRequest(followerIndexName, "1"), RequestOptions.DEFAULT) + assertThat(getResponse.isExists).isFalse() + }, 60L, TimeUnit.SECONDS) } } diff --git a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt index 888d8af9..36fbd3fd 100644 --- a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt +++ b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt @@ -11,6 +11,7 @@ package org.opensearch.replication +import com.nhaarman.mockitokotlin2.stub import org.opensearch.replication.MultiClusterAnnotations.ClusterConfiguration import org.opensearch.replication.MultiClusterAnnotations.ClusterConfigurations import org.opensearch.replication.MultiClusterAnnotations.getAnnotationsFromClass @@ -421,6 +422,29 @@ abstract class MultiClusterRestTestCase : OpenSearchTestCase() { } protected fun wipeIndicesFromCluster(testCluster: TestCluster) { + + val indicesResponse = testCluster.lowLevelClient.performRequest((Request("GET","/_cat/indices/*,-.*?format=json&pretty"))) + val indicesResponseEntity = EntityUtils.toString(indicesResponse.entity) + var parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, indicesResponseEntity) + parser.list().forEach{ item-> + val str = item.toString() + val map = str.subSequence(1,str.length-1).split(",").associate { + val (key, value) = it.trim().split("=") + key to value + } + val ind = map.get("index") + try { + val stopRequest = Request("POST","/_plugins/_replication/" + ind.toString() + "/_stop") + stopRequest.setJsonEntity("{}") + stopRequest.setOptions(RequestOptions.DEFAULT) + val response=testCluster.lowLevelClient.performRequest(stopRequest) + } + catch (e:ResponseException){ + if(e.response.statusLine.statusCode!=400) { + throw e + } + } + } try { val deleteRequest = Request("DELETE", "*,-.*") 
// All except system indices val response = testCluster.lowLevelClient.performRequest(deleteRequest) diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteFollowerIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteFollowerIT.kt index 87cb313b..3f0a8627 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteFollowerIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteFollowerIT.kt @@ -36,44 +36,35 @@ class ClusterRerouteFollowerIT : MultiClusterRestTestCase() { fun `test replication works after rerouting a shard from one node to another in follower cluster`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - try { - changeTemplate(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) - Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName)) - insertDocToIndex(LEADER, "1", "dummy data 1",leaderIndexName) - - //Querying ES cluster throws random exceptions like ClusterManagerNotDiscovered or ShardsFailed etc, so catching them and retrying - assertBusy ({ - try { - Assertions.assertThat(docs(FOLLOWER, followerIndexName)).contains("dummy data 1") - } catch (ex: Exception) { - Assert.fail("Exception while querying follower cluster. Failing to retry again") - } - }, 1, TimeUnit.MINUTES) - - val nodes = getNodesInCluster(FOLLOWER) - - val primaryNode = getPrimaryNodeForShard(FOLLOWER,followerIndexName, "0") - val unassignedNode = nodes.filter{!it.equals(primaryNode)}.stream().findFirst().get() - rerouteShard(FOLLOWER, "0", followerIndexName, primaryNode, unassignedNode) - - assertBusy ({ - Assertions.assertThat(getPrimaryNodeForShard(FOLLOWER,followerIndexName, "0")).isEqualTo(unassignedNode) - }, 1, TimeUnit.MINUTES) - logger.info("rereouted shard is " + getPrimaryNodeForShard(FOLLOWER,followerIndexName, "0")) - insertDocToIndex(LEADER, "2", "dummy data 2",leaderIndexName) - - assertBusy ({ - try { - Assertions.assertThat(docs(FOLLOWER, followerIndexName)).contains("dummy data 2") - } catch (ex: Exception) { - Assert.fail("Exception while querying follower cluster. Failing to retry again") - } - }, 1, TimeUnit.MINUTES) - } finally { - followerClient.stopReplication(followerIndexName) - } + changeTemplate(LEADER) + createConnectionBetweenClusters(FOLLOWER, LEADER) + val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) + Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName)) + insertDocToIndex(LEADER, "1", "dummy data 1",leaderIndexName) + //Querying ES cluster throws random exceptions like ClusterManagerNotDiscovered or ShardsFailed etc, so catching them and retrying + assertBusy ({ + try { + Assertions.assertThat(docs(FOLLOWER, followerIndexName)).contains("dummy data 1") + } catch (ex: Exception) { + Assert.fail("Exception while querying follower cluster. 
Failing to retry again") + } + }, 1, TimeUnit.MINUTES) + val nodes = getNodesInCluster(FOLLOWER) + val primaryNode = getPrimaryNodeForShard(FOLLOWER,followerIndexName, "0") + val unassignedNode = nodes.filter{!it.equals(primaryNode)}.stream().findFirst().get() + rerouteShard(FOLLOWER, "0", followerIndexName, primaryNode, unassignedNode) + assertBusy ({ + Assertions.assertThat(getPrimaryNodeForShard(FOLLOWER,followerIndexName, "0")).isEqualTo(unassignedNode) + }, 1, TimeUnit.MINUTES) + logger.info("rereouted shard is " + getPrimaryNodeForShard(FOLLOWER,followerIndexName, "0")) + insertDocToIndex(LEADER, "2", "dummy data 2",leaderIndexName) + assertBusy ({ + try { + Assertions.assertThat(docs(FOLLOWER, followerIndexName)).contains("dummy data 2") + } catch (ex: Exception) { + Assert.fail("Exception while querying follower cluster. Failing to retry again") + } + }, 1, TimeUnit.MINUTES) } } diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteLeaderIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteLeaderIT.kt index 3d40ca89..6c50f782 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteLeaderIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteLeaderIT.kt @@ -36,43 +36,34 @@ class ClusterRerouteLeaderIT : MultiClusterRestTestCase() { fun `test replication works after rerouting a shard from one node to another in leader cluster`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - try { - changeTemplate(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) - Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName)) - insertDocToIndex(LEADER, "1", "dummy data 1",leaderIndexName) - - //Querying ES cluster throws random exceptions like ClusterManagerNotDiscovered or ShardsFailed etc, so catching them and retrying - assertBusy ({ - try { - Assertions.assertThat(docs(FOLLOWER, followerIndexName)).contains("dummy data 1") - } catch (ex: Exception) { - Assert.fail("Exception while querying follower cluster. Failing to retry again") - } - }, 1, TimeUnit.MINUTES) - - val nodes = getNodesInCluster(LEADER) - val primaryNode = getPrimaryNodeForShard(LEADER,leaderIndexName, "0") - val unassignedNode = nodes.filter{!it.equals(primaryNode)}.stream().findFirst().get() - rerouteShard(LEADER, "0", leaderIndexName, primaryNode, unassignedNode) - - assertBusy ({ - Assertions.assertThat(getPrimaryNodeForShard(LEADER,leaderIndexName, "0")).isEqualTo(unassignedNode) - }, 1, TimeUnit.MINUTES) - - insertDocToIndex(LEADER, "2", "dummy data 2",leaderIndexName) - - assertBusy ({ - try { - Assertions.assertThat(docs(FOLLOWER, followerIndexName)).contains("dummy data 2") - } catch (ex: Exception) { - Assert.fail("Exception while querying follower cluster. 
Failing to retry again") - } - }, 1, TimeUnit.MINUTES) - } finally { - followerClient.stopReplication(followerIndexName) - } + changeTemplate(LEADER) + createConnectionBetweenClusters(FOLLOWER, LEADER) + val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) + Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName)) + insertDocToIndex(LEADER, "1", "dummy data 1",leaderIndexName) + //Querying ES cluster throws random exceptions like ClusterManagerNotDiscovered or ShardsFailed etc, so catching them and retrying + assertBusy ({ + try { + Assertions.assertThat(docs(FOLLOWER, followerIndexName)).contains("dummy data 1") + } catch (ex: Exception) { + Assert.fail("Exception while querying follower cluster. Failing to retry again") + } + }, 1, TimeUnit.MINUTES) + val nodes = getNodesInCluster(LEADER) + val primaryNode = getPrimaryNodeForShard(LEADER,leaderIndexName, "0") + val unassignedNode = nodes.filter{!it.equals(primaryNode)}.stream().findFirst().get() + rerouteShard(LEADER, "0", leaderIndexName, primaryNode, unassignedNode) + assertBusy ({ + Assertions.assertThat(getPrimaryNodeForShard(LEADER,leaderIndexName, "0")).isEqualTo(unassignedNode) + }, 1, TimeUnit.MINUTES) + insertDocToIndex(LEADER, "2", "dummy data 2",leaderIndexName) + assertBusy ({ + try { + Assertions.assertThat(docs(FOLLOWER, followerIndexName)).contains("dummy data 2") + } catch (ex: Exception) { + Assert.fail("Exception while querying follower cluster. Failing to retry again") + } + }, 1, TimeUnit.MINUTES) } } diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/PauseReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/PauseReplicationIT.kt index 2f582605..e622b5cb 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/PauseReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/PauseReplicationIT.kt @@ -56,38 +56,28 @@ class PauseReplicationIT: MultiClusterRestTestCase() { val leaderClient = getClientForCluster(LEADER) val followerIndexName = "pause_index_follow_state" createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) - - val myReason = "I want to pause!" - - /* At this point, the follower cluster should be in FOLLOWING state. 
Next, we pause replication - and verify the same - */ - followerClient.pauseReplication(followerIndexName, myReason) - // Since, we were still in FOLLOWING phase when pause was called, the index - // in follower index should not have been deleted in follower cluster - assertBusy { - assertThat(followerClient.indices() - .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) - .isEqualTo(true) - } - - val statusResp = followerClient.replicationStatus(followerIndexName) - `validate paused status response`(statusResp, myReason) - - var settings = Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .build() - - followerClient.updateReplication( followerIndexName, settings) - followerClient.resumeReplication(followerIndexName) - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) + val myReason = "I want to pause!" + /* At this point, the follower cluster should be in FOLLOWING state. Next, we pause replication + and verify the same + */ + followerClient.pauseReplication(followerIndexName, myReason) + // Since, we were still in FOLLOWING phase when pause was called, the index + // in follower index should not have been deleted in follower cluster + assertBusy { + assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) } + val statusResp = followerClient.replicationStatus(followerIndexName) + `validate paused status response`(statusResp, myReason) + var settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + followerClient.updateReplication( followerIndexName, settings) + followerClient.resumeReplication(followerIndexName) } fun `test pause replication in restoring state with multiple shards`() { @@ -113,7 +103,6 @@ class PauseReplicationIT: MultiClusterRestTestCase() { val leaderClient = getClientForCluster(LEADER) val followerIndexName = "pause_index_restore_state" createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName).settings(settings), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() @@ -123,24 +112,20 @@ class PauseReplicationIT: MultiClusterRestTestCase() { assertThat(leaderClient.indices() .exists(GetIndexRequest(leaderIndexName), RequestOptions.DEFAULT)) } - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), - TimeValue.timeValueSeconds(10), - false) - //Given the size of index, the replication should be in RESTORING phase at this point - assertThatThrownBy { - followerClient.pauseReplication(followerIndexName) - }.isInstanceOf(ResponseException::class.java) - .hasMessageContaining("Index is in restore phase currently for index: ${followerIndexName}") - // wait for the shard tasks to be up as the replication block is added before adding shard replication tasks - // During intermittent test failures, stop replication under finally block executes before this without removing - // replication block (even though next call to _stop replication API can succeed in removing this block). 
- assertBusy({ - assertTrue(followerClient.getShardReplicationTasks(followerIndexName).isNotEmpty()) - }, 30L, TimeUnit.SECONDS) - } finally { - followerClient.stopReplication(followerIndexName) - } + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), + TimeValue.timeValueSeconds(10), + false) + //Given the size of index, the replication should be in RESTORING phase at this point + assertThatThrownBy { + followerClient.pauseReplication(followerIndexName) + }.isInstanceOf(ResponseException::class.java) + .hasMessageContaining("Index is in restore phase currently for index: ${followerIndexName}") + // wait for the shard tasks to be up as the replication block is added before adding shard replication tasks + // During intermittent test failures, stop replication under finally block executes before this without removing + // replication block (even though next call to _stop replication API can succeed in removing this block). + assertBusy({ + assertTrue(followerClient.getShardReplicationTasks(followerIndexName).isNotEmpty()) + }, 30L, TimeUnit.SECONDS) } fun `test pause without replication in progress`() { @@ -166,12 +151,10 @@ class PauseReplicationIT: MultiClusterRestTestCase() { val leaderClient = getClientForCluster(LEADER) val followerIndexName = "pause_index_with_stop" createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() try { followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) - /* At this point, the follower cluster should be in FOLLOWING state. Next, we pause replication and verify the same */ @@ -195,38 +178,31 @@ class PauseReplicationIT: MultiClusterRestTestCase() { fun `test pause replication when leader cluster is unavailable`() { val followerClient = getClientForCluster(FOLLOWER) val followerIndexName = "pause_index_leader_down" - try { - val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) - assertThat(createIndexResponse.isAcknowledged).isTrue() - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), - waitForRestore = true) - // Need to wait till index blocks appear into state - assertBusy({ - val clusterBlocksResponse = followerClient.lowLevelClient.performRequest(Request("GET", "/_cluster/state/blocks")) - val clusterResponseString = EntityUtils.toString(clusterBlocksResponse.entity) - assertThat(clusterResponseString.contains("cross-cluster-replication")) - .withFailMessage("Cant find replication block after starting replication") - .isTrue() - }, 10, TimeUnit.SECONDS) - - // setting an invalid seed so that leader cluster is unavailable - val settings: Settings = Settings.builder() - .putList("cluster.remote.source.seeds", "127.0.0.1:9305") - .build() - val updateSettingsRequest = ClusterUpdateSettingsRequest() - updateSettingsRequest.persistentSettings(settings) - followerClient.cluster().putSettings(updateSettingsRequest, RequestOptions.DEFAULT) - - followerClient.pauseReplication(followerIndexName) - - val statusResp = followerClient.replicationStatus(followerIndexName) - `validate paused status response`(statusResp) - - } finally { - 
followerClient.stopReplication(followerIndexName) - } + val leaderClient = getClientForCluster(LEADER) + createConnectionBetweenClusters(FOLLOWER, LEADER) + val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) + assertThat(createIndexResponse.isAcknowledged).isTrue() + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), + waitForRestore = true) + // Need to wait till index blocks appear into state + assertBusy({ + val clusterBlocksResponse = followerClient.lowLevelClient.performRequest(Request("GET", "/_cluster/state/blocks")) + val clusterResponseString = EntityUtils.toString(clusterBlocksResponse.entity) + assertThat(clusterResponseString.contains("cross-cluster-replication")) + .withFailMessage("Cant find replication block after starting replication") + .isTrue() + }, 10, TimeUnit.SECONDS) + + // setting an invalid seed so that leader cluster is unavailable + val settings: Settings = Settings.builder() + .putList("cluster.remote.source.seeds", "127.0.0.1:9305") + .build() + val updateSettingsRequest = ClusterUpdateSettingsRequest() + updateSettingsRequest.persistentSettings(settings) + followerClient.cluster().putSettings(updateSettingsRequest, RequestOptions.DEFAULT) + followerClient.pauseReplication(followerIndexName) + val statusResp = followerClient.replicationStatus(followerIndexName) + `validate paused status response`(statusResp) } fun `test auto pause of index replication when leader index is unavailable`() { @@ -236,41 +212,30 @@ class PauseReplicationIT: MultiClusterRestTestCase() { val leaderIndexName2 = "leader2" val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - try { - createConnectionBetweenClusters(FOLLOWER, LEADER) - var createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName1), RequestOptions.DEFAULT) - assertThat(createIndexResponse.isAcknowledged).isTrue() - createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName2), RequestOptions.DEFAULT) - assertThat(createIndexResponse.isAcknowledged).isTrue() - - // For followerIndexName1 - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName1, - followerIndexName1), waitForRestore = true) - - // For followerIndexName2 - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName2, - followerIndexName2), waitForRestore = true) - - val deleteResponse = leaderClient.indices().delete(DeleteIndexRequest(leaderIndexName1), RequestOptions.DEFAULT) - assertThat(deleteResponse.isAcknowledged) - - // followerIndexName1 -> autopause - assertBusy({ - var statusResp = followerClient.replicationStatus(followerIndexName1) - assertThat(statusResp.containsKey("status")) - assertThat(statusResp.containsKey("reason")) - `validate paused status response due to leader index deleted`(statusResp) - }, 30, TimeUnit.SECONDS) - - // followerIndexName2 -> Syncing state - assertBusy({ - var statusResp = followerClient.replicationStatus(followerIndexName2) - `validate status syncing response`(statusResp) - }, 30, TimeUnit.SECONDS) - - } finally { - followerClient.stopReplication(followerIndexName2) - followerClient.stopReplication(followerIndexName1) - } + createConnectionBetweenClusters(FOLLOWER, LEADER) + var createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName1), RequestOptions.DEFAULT) + assertThat(createIndexResponse.isAcknowledged).isTrue() + 
createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName2), RequestOptions.DEFAULT) + assertThat(createIndexResponse.isAcknowledged).isTrue() + // For followerIndexName1 + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName1, + followerIndexName1), waitForRestore = true) + // For followerIndexName2 + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName2, + followerIndexName2), waitForRestore = true) + val deleteResponse = leaderClient.indices().delete(DeleteIndexRequest(leaderIndexName1), RequestOptions.DEFAULT) + assertThat(deleteResponse.isAcknowledged) + // followerIndexName1 -> autopause + assertBusy({ + var statusResp = followerClient.replicationStatus(followerIndexName1) + assertThat(statusResp.containsKey("status")) + assertThat(statusResp.containsKey("reason")) + `validate paused status response due to leader index deleted`(statusResp) + }, 30, TimeUnit.SECONDS) + // followerIndexName2 -> Syncing state + assertBusy({ + var statusResp = followerClient.replicationStatus(followerIndexName2) + `validate status syncing response`(statusResp) + }, 30, TimeUnit.SECONDS) } } diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/ReplicationStatusIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/ReplicationStatusIT.kt index 4b4e3762..46a4c2f7 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/ReplicationStatusIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/ReplicationStatusIT.kt @@ -38,15 +38,11 @@ class ReplicationStatusIT: MultiClusterRestTestCase() { createConnectionBetweenClusters(FOLLOWER, LEADER) val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(indexName), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", indexName, indexName), waitForRestore = true) - assertBusy({ - var statusResp = followerClient.replicationStatus(indexName) - `validate status syncing response`(statusResp) - }, 30, TimeUnit.SECONDS) - } finally { - followerClient.stopReplication(indexName) - } + followerClient.startReplication(StartReplicationRequest("source", indexName, indexName), waitForRestore = true) + assertBusy({ + var statusResp = followerClient.replicationStatus(indexName) + `validate status syncing response`(statusResp) + }, 30, TimeUnit.SECONDS) } fun `test replication status without valid params`() { diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/ResumeReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/ResumeReplicationIT.kt index a67b46f5..dfc062aa 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/ResumeReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/ResumeReplicationIT.kt @@ -59,26 +59,19 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { fun `test pause and resume replication in following state and empty index`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) - - /* At this point, the follower cluster should be in 
FOLLOWING state. Next, we pause replication - and verify the same - */ - followerClient.pauseReplication(followerIndexName) - var statusResp = followerClient.replicationStatus(followerIndexName) - `validate paused status response`(statusResp) - statusResp = followerClient.replicationStatus(followerIndexName,false) - `validate aggregated paused status response`(statusResp) - followerClient.resumeReplication(followerIndexName) - } finally { - followerClient.stopReplication(followerIndexName) - } + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) + /* At this point, the follower cluster should be in FOLLOWING state. Next, we pause replication + and verify the same + */ + followerClient.pauseReplication(followerIndexName) + var statusResp = followerClient.replicationStatus(followerIndexName) + `validate paused status response`(statusResp) + statusResp = followerClient.replicationStatus(followerIndexName,false) + `validate aggregated paused status response`(statusResp) + followerClient.resumeReplication(followerIndexName) } @@ -86,127 +79,88 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) - - assertThatThrownBy { - var statusResp = followerClient.replicationStatus(followerIndexName) - `validate status syncing response`(statusResp) - statusResp = followerClient.replicationStatus(followerIndexName,false) - `validate status syncing aggregated response`(statusResp) - followerClient.resumeReplication(followerIndexName) - statusResp = followerClient.replicationStatus(followerIndexName) - `validate not paused status response`(statusResp) - statusResp = followerClient.replicationStatus(followerIndexName,false) - `validate not paused status aggregated response`(statusResp) - }.isInstanceOf(ResponseException::class.java) - .hasMessageContaining("Replication on Index ${followerIndexName} is already running") - } finally { - followerClient.stopReplication(followerIndexName) - } + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) + assertThatThrownBy { + var statusResp = followerClient.replicationStatus(followerIndexName) + `validate status syncing response`(statusResp) + statusResp = followerClient.replicationStatus(followerIndexName,false) + `validate status syncing aggregated response`(statusResp) + followerClient.resumeReplication(followerIndexName) + statusResp = followerClient.replicationStatus(followerIndexName) + `validate not paused status response`(statusResp) + statusResp = followerClient.replicationStatus(followerIndexName,false) + `validate not paused status aggregated response`(statusResp) + }.isInstanceOf(ResponseException::class.java) + .hasMessageContaining("Replication on Index ${followerIndexName} is already running") } fun `test resume without retention lease`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - var createIndexResponse = 
leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) - - - followerClient.pauseReplication(followerIndexName) - - // If we delete the existing index and recreate the index with same name, retention leases should be lost - val deleteIndexResponse = leaderClient.indices().delete(DeleteIndexRequest(leaderIndexName), RequestOptions.DEFAULT) - assertThat(deleteIndexResponse.isAcknowledged).isTrue() - createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) - assertThat(createIndexResponse.isAcknowledged).isTrue() - - assertThatThrownBy { - followerClient.resumeReplication(followerIndexName) - }.isInstanceOf(ResponseException::class.java) - .hasMessageContaining("Retention lease doesn't exist. Replication can't be resumed for $followerIndexName") - } finally { - followerClient.stopReplication(followerIndexName) - } + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) + followerClient.pauseReplication(followerIndexName) + // If we delete the existing index and recreate the index with same name, retention leases should be lost + val deleteIndexResponse = leaderClient.indices().delete(DeleteIndexRequest(leaderIndexName), RequestOptions.DEFAULT) + assertThat(deleteIndexResponse.isAcknowledged).isTrue() + createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) + assertThat(createIndexResponse.isAcknowledged).isTrue() + assertThatThrownBy { + followerClient.resumeReplication(followerIndexName) + }.isInstanceOf(ResponseException::class.java) + .hasMessageContaining("Retention lease doesn't exist. Replication can't be resumed for $followerIndexName") } fun `test pause and resume replication amid leader index close and open`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) - - /* At this point, the follower cluster should be in FOLLOWING state. 
Next, we pause replication - and verify the same - */ - followerClient.pauseReplication(followerIndexName) - - leaderClient.indices().close(CloseIndexRequest(leaderIndexName), RequestOptions.DEFAULT); - leaderClient.indices().open(OpenIndexRequest(leaderIndexName), RequestOptions.DEFAULT); - - followerClient.resumeReplication(followerIndexName) - - //Update mapping post resume assert - val sourceMap : MutableMap = HashMap() - sourceMap["x"] = "y" - val indexResponse = leaderClient.index(IndexRequest(leaderIndexName).id("2").source(sourceMap), RequestOptions.DEFAULT) - assertThat(indexResponse.result).isIn(DocWriteResponse.Result.CREATED, DocWriteResponse.Result.UPDATED) - assertBusy ({ - Assert.assertEquals( - leaderClient.indices().getMapping(GetMappingsRequest().indices(leaderIndexName), RequestOptions.DEFAULT) - .mappings()[leaderIndexName], - followerClient.indices().getMapping(GetMappingsRequest().indices(followerIndexName), RequestOptions.DEFAULT) - .mappings()[followerIndexName] - ) - }, 60, TimeUnit.SECONDS) - - } finally { - followerClient.stopReplication(followerIndexName) - } + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) + + /* At this point, the follower cluster should be in FOLLOWING state. Next, we pause replication + and verify the same + */ + followerClient.pauseReplication(followerIndexName) + leaderClient.indices().close(CloseIndexRequest(leaderIndexName), RequestOptions.DEFAULT); + leaderClient.indices().open(OpenIndexRequest(leaderIndexName), RequestOptions.DEFAULT); + followerClient.resumeReplication(followerIndexName) + //Update mapping post resume assert + val sourceMap : MutableMap = HashMap() + sourceMap["x"] = "y" + val indexResponse = leaderClient.index(IndexRequest(leaderIndexName).id("2").source(sourceMap), RequestOptions.DEFAULT) + assertThat(indexResponse.result).isIn(DocWriteResponse.Result.CREATED, DocWriteResponse.Result.UPDATED) + assertBusy ({ + Assert.assertEquals( + leaderClient.indices().getMapping(GetMappingsRequest().indices(leaderIndexName), RequestOptions.DEFAULT) + .mappings()[leaderIndexName], + followerClient.indices().getMapping(GetMappingsRequest().indices(followerIndexName), RequestOptions.DEFAULT) + .mappings()[followerIndexName] + ) + }, 60, TimeUnit.SECONDS) } fun `test pause and resume replication amid index close`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) - - /* At this point, the follower cluster should be in FOLLOWING state. 
Next, we pause replication - and verify the same - */ - followerClient.pauseReplication(followerIndexName) - - leaderClient.indices().close(CloseIndexRequest(leaderIndexName), RequestOptions.DEFAULT); - - assertThatThrownBy { - followerClient.resumeReplication(followerIndexName) - }.isInstanceOf(ResponseException::class.java) - .hasMessageContaining("closed") - } finally { - try { - followerClient.stopReplication(followerIndexName) - } catch (e: Exception) { - // DO nothing - } - } + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) + /* At this point, the follower cluster should be in FOLLOWING state. Next, we pause replication + and verify the same + */ + followerClient.pauseReplication(followerIndexName) + leaderClient.indices().close(CloseIndexRequest(leaderIndexName), RequestOptions.DEFAULT); + assertThatThrownBy { + followerClient.resumeReplication(followerIndexName) + }.isInstanceOf(ResponseException::class.java) + .hasMessageContaining("closed") } fun `test that replication fails to resume when custom analyser is not present in follower`() { @@ -217,24 +171,20 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { val followerClient = getClientForCluster(FOLLOWER) try { Files.copy(synonyms, synonymPath) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - createConnectionBetweenClusters(FOLLOWER, LEADER) followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) followerClient.pauseReplication(followerIndexName) leaderClient.indices().close(CloseIndexRequest(leaderIndexName), RequestOptions.DEFAULT); val settings: Settings = Settings.builder().loadFromStream(synonymsJson, javaClass.getResourceAsStream(synonymsJson), false) .build() - try { leaderClient.indices().putSettings(UpdateSettingsRequest(leaderIndexName).settings(settings), RequestOptions.DEFAULT) } catch (e: Exception) { assumeNoException("Ignored test as analyzer setting could not be added", e) } leaderClient.indices().open(OpenIndexRequest(leaderIndexName), RequestOptions.DEFAULT); - assertThatThrownBy { followerClient.resumeReplication(followerIndexName) }.isInstanceOf(ResponseException::class.java).hasMessageContaining("resource_not_found_exception") @@ -242,11 +192,6 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { if (Files.exists(synonymPath)) { Files.delete(synonymPath) } - try { - followerClient.stopReplication(followerIndexName) - } catch (e: Exception) { - // DO nothing - } } } @@ -261,26 +206,21 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { val followerClient = getClientForCluster(FOLLOWER) try { Files.copy(synonyms, synonymPath) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - createConnectionBetweenClusters(FOLLOWER, LEADER) - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) followerClient.pauseReplication(followerIndexName) leaderClient.indices().close(CloseIndexRequest(leaderIndexName), RequestOptions.DEFAULT); Files.copy(synonyms, followerSynonymPath) val settings: Settings = Settings.builder().loadFromStream(synonymsJson, javaClass.getResourceAsStream(synonymsJson), false) .build() - try { 
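            // Adding the analyzer setting may fail depending on the cluster / synonyms file setup;
            // the catch block below converts such a failure into a skipped test (assumeNoException)
            // rather than a test failure.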
leaderClient.indices().putSettings(UpdateSettingsRequest(leaderIndexName).settings(settings), RequestOptions.DEFAULT) } catch (e: Exception) { assumeNoException("Ignored test as analyzer setting could not be added", e) } leaderClient.indices().open(OpenIndexRequest(leaderIndexName), RequestOptions.DEFAULT); - followerClient.resumeReplication(followerIndexName) var statusResp = followerClient.replicationStatus(followerIndexName) `validate status syncing response`(statusResp) @@ -291,11 +231,6 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { if (Files.exists(followerSynonymPath)) { Files.delete(followerSynonymPath) } - try { - followerClient.stopReplication(followerIndexName) - } catch (e: Exception) { - // DO nothing - } } } @@ -312,7 +247,6 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { try { Files.copy(synonyms, synonymPath) Files.copy(synonyms, followerSynonymPath) - var settings: Settings = Settings.builder().loadFromStream(synonymsJson, javaClass.getResourceAsStream(synonymsJson), false) .build() try { @@ -321,7 +255,6 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { } catch (e: Exception) { assumeNoException("Ignored test as analyzer setting could not be added", e) } - createConnectionBetweenClusters(FOLLOWER, LEADER) val overriddenSettings: Settings = Settings.builder() .put("index.analysis.filter.my_filter.synonyms_path", followerSynonymFilename) @@ -329,7 +262,6 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, overriddenSettings), waitForRestore = true) followerClient.pauseReplication(followerIndexName) leaderClient.indices().close(CloseIndexRequest(leaderIndexName), RequestOptions.DEFAULT); - Files.copy(synonyms, newSynonymPath) settings = Settings.builder() .put("index.analysis.filter.my_filter.synonyms_path", "synonyms_new.txt") @@ -340,7 +272,6 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { assumeNoException("Ignored test as analyzer setting could not be added", e) } leaderClient.indices().open(OpenIndexRequest(leaderIndexName), RequestOptions.DEFAULT); - followerClient.resumeReplication(followerIndexName) var statusResp = followerClient.replicationStatus(followerIndexName) `validate status syncing response`(statusResp) @@ -354,12 +285,6 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { if (Files.exists(newSynonymPath)) { Files.delete(newSynonymPath) } - try { - followerClient.stopReplication(followerIndexName) - } catch (e: Exception) { - // DO nothing - } } } - } diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityBase.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityBase.kt index df37573c..cf12cb6c 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityBase.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityBase.kt @@ -243,7 +243,6 @@ abstract class SecurityBase : MultiClusterRestTestCase() { private fun createRoleWithPermissions(indexPattern: String, role: String) { val followerClient = testClusters.get(FOLLOWER) val persistentConnectionRequest = Request("PUT", "_plugins/_security/api/roles/"+role) - val entityAsString = """ { "cluster_permissions": [ @@ -278,7 +277,6 @@ abstract class SecurityBase : MultiClusterRestTestCase() { val entityAsString = """ {"users": ["$user"]} """.trimMargin() - persistentConnectionRequest.entity = NStringEntity(entityAsString, ContentType.APPLICATION_JSON) val persistentConnectionResponse = 
followerClient!!.lowLevelClient.performRequest(persistentConnectionRequest) assertTrue(HttpStatus.SC_CREATED.toLong() == persistentConnectionResponse.statusLine.statusCode.toLong() || @@ -307,7 +305,6 @@ abstract class SecurityBase : MultiClusterRestTestCase() { "password":"$password" } """.trimMargin() - persistentConnectionRequest.entity = NStringEntity(entityAsString, ContentType.APPLICATION_JSON) val persistentConnectionResponse = followerClient!!.lowLevelClient.performRequest(persistentConnectionRequest) assertTrue(HttpStatus.SC_CREATED.toLong() == persistentConnectionResponse.statusLine.statusCode.toLong() || diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesIT.kt index 8d5be9cc..8c302953 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesIT.kt @@ -56,17 +56,14 @@ class SecurityCustomRolesIT: SecurityBase() { val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, - useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) - followerClient.startReplication(startReplicationRequest, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"), waitForRestore = true) - assertBusy { - Assertions.assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) - } - } finally { - followerClient.stopReplication(followerIndexName) + var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, + useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) + + followerClient.startReplication(startReplicationRequest, + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"), waitForRestore = true) + assertBusy { + Assertions.assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) } } @@ -116,26 +113,23 @@ class SecurityCustomRolesIT: SecurityBase() { val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, - useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) - var requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password") - followerClient.startReplication(startReplicationRequest, waitForRestore = true, - requestOptions = requestOptions) - - /* At this point, the follower cluster should be in FOLLOWING state. 
Next, we pause replication - and verify the same - */ - followerClient.pauseReplication(followerIndexName, - requestOptions = requestOptions) - // Validate paused replication using Status Api - assertBusy { - `validate aggregated paused status response`(followerClient.replicationStatus(followerIndexName, - requestOptions = requestOptions)) - } - } finally { - followerClient.stopReplication(followerIndexName) + var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, + useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) + var requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password") + followerClient.startReplication(startReplicationRequest, waitForRestore = true, + requestOptions = requestOptions) + + /* At this point, the follower cluster should be in FOLLOWING state. Next, we pause replication + and verify the same + */ + followerClient.pauseReplication(followerIndexName, + requestOptions = requestOptions) + + // Validate paused replication using Status Api + assertBusy { + `validate aggregated paused status response`(followerClient.replicationStatus(followerIndexName, + requestOptions = requestOptions)) } } @@ -147,22 +141,18 @@ class SecurityCustomRolesIT: SecurityBase() { val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, - useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) - followerClient.startReplication(startReplicationRequest, waitForRestore = true, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, + useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) - Assertions.assertThatThrownBy { - followerClient.pauseReplication(followerIndexName, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser2","password")) - }.isInstanceOf(ResponseException::class.java) - .hasMessageContaining("403 Forbidden") + followerClient.startReplication(startReplicationRequest, waitForRestore = true, + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) - } finally { - followerClient.stopReplication(followerIndexName) - } + Assertions.assertThatThrownBy { + followerClient.pauseReplication(followerIndexName, + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser2","password")) + }.isInstanceOf(ResponseException::class.java) + .hasMessageContaining("403 Forbidden") } fun `test for FOLLOWER that STATUS Api works for user with valid permissions`() { @@ -173,19 +163,16 @@ class SecurityCustomRolesIT: SecurityBase() { val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, - useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) - followerClient.startReplication(startReplicationRequest, waitForRestore = true, - requestOptions = 
RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, + useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) - assertBusy { - `validate status syncing response`(followerClient.replicationStatus(followerIndexName, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"))) - } - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(startReplicationRequest, waitForRestore = true, + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + + assertBusy { + `validate status syncing response`(followerClient.replicationStatus(followerIndexName, + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"))) } } @@ -197,21 +184,18 @@ class SecurityCustomRolesIT: SecurityBase() { val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, - useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) - followerClient.startReplication(startReplicationRequest, waitForRestore = true, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, + useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) - Assertions.assertThatThrownBy { - followerClient.replicationStatus(followerIndexName, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser2","password")) - }.isInstanceOf(ResponseException::class.java) - .hasMessageContaining("403 Forbidden") - } finally { - followerClient.stopReplication(followerIndexName) - } + followerClient.startReplication(startReplicationRequest, waitForRestore = true, + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + + Assertions.assertThatThrownBy { + followerClient.replicationStatus(followerIndexName, + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser2","password")) + }.isInstanceOf(ResponseException::class.java) + .hasMessageContaining("403 Forbidden") } fun `test for FOLLOWER that UPDATE settings works for user with valid permissions`() { @@ -229,89 +213,76 @@ class SecurityCustomRolesIT: SecurityBase() { val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName).settings(settings), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, - useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")), + + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, + useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")), + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + assertBusy { + Assertions.assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), 
RequestOptions.DEFAULT)) + .isEqualTo(true) + } + val getSettingsRequest = GetSettingsRequest() + getSettingsRequest.indices(followerIndexName) + Assert.assertEquals( + "1", + followerClient.indices() + .getSettings(getSettingsRequest, RequestOptions.DEFAULT) + .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + ) + + settings = Settings.builder() + .put("index.shard.check_on_startup", "checksum") + .build() + followerClient.updateReplication(followerIndexName, settings, requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) - assertBusy { - Assertions.assertThat(followerClient.indices() - .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) - .isEqualTo(true) - } - val getSettingsRequest = GetSettingsRequest() - getSettingsRequest.indices(followerIndexName) + + // Wait for the settings to get updated at follower cluster. + assertBusy ({ Assert.assertEquals( - "1", + "checksum", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + .indexToSettings[followerIndexName]["index.shard.check_on_startup"] ) - - settings = Settings.builder() - .put("index.shard.check_on_startup", "checksum") - .build() - followerClient.updateReplication(followerIndexName, settings, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) - - // Wait for the settings to get updated at follower cluster. - assertBusy ({ - Assert.assertEquals( - "checksum", - followerClient.indices() - .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName]["index.shard.check_on_startup"] - ) - }, 30L, TimeUnit.SECONDS) - } finally { - followerClient.stopReplication(followerIndexName) - } + }, 30L, TimeUnit.SECONDS) } fun `test for FOLLOWER that UPDATE settings is forbidden for user with invalid permissions`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) val followerIndexName = "follower-index1-settings-invalid-perm" - setMetadataSyncDelay() - createConnectionBetweenClusters(FOLLOWER, LEADER) - var settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .build() - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName).settings(settings), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, - useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")), - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"), waitForRestore = true) - assertBusy { - Assertions.assertThat(followerClient.indices() - .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) - .isEqualTo(true) - } - val getSettingsRequest = GetSettingsRequest() - getSettingsRequest.indices(followerIndexName) - Assert.assertEquals( - "1", - followerClient.indices() - .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] - ) - - settings = Settings.builder() - .put("index.shard.check_on_startup", "checksum") - .build() - - Assertions.assertThatThrownBy { - followerClient.updateReplication(followerIndexName, settings, - requestOptions = 
RequestOptions.DEFAULT.addBasicAuthHeader("testUser2","password")) - }.isInstanceOf(ResponseException::class.java) - .hasMessageContaining("403 Forbidden") - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, + useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")), + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"), waitForRestore = true) + assertBusy { + Assertions.assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) } + val getSettingsRequest = GetSettingsRequest() + getSettingsRequest.indices(followerIndexName) + Assert.assertEquals( + "1", + followerClient.indices() + .getSettings(getSettingsRequest, RequestOptions.DEFAULT) + .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + ) + settings = Settings.builder() + .put("index.shard.check_on_startup", "checksum") + .build() + Assertions.assertThatThrownBy { + followerClient.updateReplication(followerIndexName, settings, + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser2","password")) + }.isInstanceOf(ResponseException::class.java) + .hasMessageContaining("403 Forbidden") } fun `test for FOLLOWER that AutoFollow works for user with valid permissions`() { @@ -324,12 +295,10 @@ class SecurityCustomRolesIT: SecurityBase() { val leaderIndexName = createRandomIndex(indexPrefix, leaderClient) var leaderIndexNameNew = "" createConnectionBetweenClusters(FOLLOWER, LEADER, connectionAlias) - try { followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms"), requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) - // Verify that existing index matching the pattern are replicated. assertBusy ({ Assertions.assertThat(followerClient.indices() @@ -337,7 +306,6 @@ class SecurityCustomRolesIT: SecurityBase() { .isEqualTo(true) }, 30, TimeUnit.SECONDS) Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) - leaderIndexNameNew = createRandomIndex(indexPrefix, leaderClient) // Verify that newly created index on leader which match the pattern are also replicated. 
assertBusy ({ @@ -347,8 +315,6 @@ class SecurityCustomRolesIT: SecurityBase() { }, 60, TimeUnit.SECONDS) } finally { followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName) - followerClient.stopReplication(leaderIndexName, false) - followerClient.stopReplication(leaderIndexNameNew) } } @@ -358,7 +324,6 @@ class SecurityCustomRolesIT: SecurityBase() { val indexPattern = "follower-index1*" val indexPatternName = "test_pattern" createConnectionBetweenClusters(FOLLOWER, LEADER, connectionAlias) - Assertions.assertThatThrownBy { followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleNoPerms"), @@ -388,16 +353,13 @@ class SecurityCustomRolesIT: SecurityBase() { val leaderClient = getClientForCluster(LEADER) val followerIndexName = "follower-index1" createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() try { var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) - followerClient.startReplication(startReplicationRequest, waitForRestore = true, requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) - insertDocToIndex(LEADER, "1", "dummy data 1",leaderIndexName) //Querying ES cluster throws random exceptions like ClusterManagerNotDiscovered or ShardsFailed etc, so catching them and retrying assertBusy ({ @@ -407,7 +369,6 @@ class SecurityCustomRolesIT: SecurityBase() { Assert.fail("Exception while querying follower cluster. 
Failing to retry again") } }, 1, TimeUnit.MINUTES) - assertBusy { `validate status syncing response`(followerClient.replicationStatus(followerIndexName, requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"))) @@ -422,7 +383,6 @@ class SecurityCustomRolesIT: SecurityBase() { }, 100, TimeUnit.SECONDS) } finally { updateRole(followerIndexName,"followerRoleValidPerms", true) - followerClient.stopReplication(followerIndexName) } } diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesLeaderIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesLeaderIT.kt index af81a6b5..12df07a5 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesLeaderIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesLeaderIT.kt @@ -42,13 +42,10 @@ class SecurityCustomRolesLeaderIT: SecurityBase() { val leaderClient = getClientForCluster(LEADER) val followerIndexName = "follower-index1" createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleNoPerms",followerClusterRole = "followerRoleValidPerms")) - Assertions.assertThatThrownBy { followerClient.startReplication(startReplicationRequest, requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser6","password")) } .isInstanceOf(ResponseException::class.java) @@ -61,16 +58,13 @@ class SecurityCustomRolesLeaderIT: SecurityBase() { val leaderClient = getClientForCluster(LEADER) val followerIndexName = "follower-index1" createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() try { var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) - followerClient.startReplication(startReplicationRequest, waitForRestore = true, requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) - insertDocToIndex(LEADER, "1", "dummy data 1",leaderIndexName) //Querying ES cluster throws random exceptions like ClusterManagerNotDiscovered or ShardsFailed etc, so catching them and retrying assertBusy ({ @@ -80,23 +74,18 @@ class SecurityCustomRolesLeaderIT: SecurityBase() { Assert.fail("Exception while querying follower cluster. 
Failing to retry again") } }, 1, TimeUnit.MINUTES) - assertBusy { `validate status syncing response`(followerClient.replicationStatus(followerIndexName, requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"))) } - updateRole(followerIndexName,"leaderRoleValidPerms", false) insertDocToIndex(LEADER, "2", "dummy data 2",leaderIndexName) - assertBusy ({ validatePausedState(followerClient.replicationStatus(followerIndexName, requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"))) }, 100, TimeUnit.SECONDS) - } finally { updateRole(followerIndexName,"leaderRoleValidPerms", true) - followerClient.stopReplication(followerIndexName) } } @@ -105,17 +94,14 @@ class SecurityCustomRolesLeaderIT: SecurityBase() { val leaderClient = getClientForCluster(LEADER) val followerIndexName = "follower-index1" createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() try { var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) - updateFileChunkPermissions("","leaderRoleValidPerms", false) followerClient.startReplication(startReplicationRequest, requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) - assertBusy ({ validateFailedState(followerClient.replicationStatus(followerIndexName, requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"))) @@ -125,7 +111,6 @@ class SecurityCustomRolesLeaderIT: SecurityBase() { Assert.assertNull(ex) } finally { updateFileChunkPermissions("","leaderRoleValidPerms", true) - followerClient.stopReplication(followerIndexName) } } diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityDlsFlsIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityDlsFlsIT.kt index 191471ae..82e7465d 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityDlsFlsIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityDlsFlsIT.kt @@ -42,16 +42,12 @@ class SecurityDlsFlsIT: SecurityBase() { fun `test for FOLLOWER that START replication is forbidden for user with DLS or FLS enabled`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - val followerIndexName = "follower-index1-dlsfls-enabled" createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerDlsRole")) - Assertions.assertThatThrownBy { followerClient.startReplication(startReplicationRequest, requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3","password")) } .isInstanceOf(ResponseException::class.java) @@ -61,7 +57,6 @@ class SecurityDlsFlsIT: SecurityBase() { fun `test for FOLLOWER that STOP replication is forbidden for user with DLS or FLS enabled`() { val followerClient = getClientForCluster(FOLLOWER) - Assertions.assertThatThrownBy { followerClient.stopReplication("follower-index1-stop-forbidden", 
requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3","password")) @@ -75,25 +70,18 @@ class SecurityDlsFlsIT: SecurityBase() { val leaderClient = getClientForCluster(LEADER) val followerIndexName = "follower-index1-pause-forbidden" createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, - useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) - - followerClient.startReplication(startReplicationRequest, waitForRestore = true, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) - - Assertions.assertThatThrownBy { - followerClient.pauseReplication(followerIndexName, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3","password")) - }.isInstanceOf(ResponseException::class.java) - .hasMessageContaining(DLS_FLS_EXCEPTION_MESSAGE) - .hasMessageContaining("403 Forbidden") - } finally { - followerClient.stopReplication(followerIndexName) - } + var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, + useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) + followerClient.startReplication(startReplicationRequest, waitForRestore = true, + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + Assertions.assertThatThrownBy { + followerClient.pauseReplication(followerIndexName, + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3","password")) + }.isInstanceOf(ResponseException::class.java) + .hasMessageContaining(DLS_FLS_EXCEPTION_MESSAGE) + .hasMessageContaining("403 Forbidden") } fun `test for FOLLOWER that STATUS Api is forbidden for user with DLS or FLS enabled`() { @@ -101,88 +89,67 @@ class SecurityDlsFlsIT: SecurityBase() { val leaderClient = getClientForCluster(LEADER) val followerIndexName = "follower-index1-status-forbidden" createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, - useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) - - followerClient.startReplication(startReplicationRequest, waitForRestore = true, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) - - Assertions.assertThatThrownBy { - followerClient.replicationStatus(followerIndexName, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3","password")) - }.isInstanceOf(ResponseException::class.java) - .hasMessageContaining(DLS_FLS_EXCEPTION_MESSAGE) - .hasMessageContaining("403 Forbidden") - } finally { - followerClient.stopReplication(followerIndexName) - } + var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, + useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) + followerClient.startReplication(startReplicationRequest, waitForRestore = true, + requestOptions= 
RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + Assertions.assertThatThrownBy { + followerClient.replicationStatus(followerIndexName, + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3","password")) + }.isInstanceOf(ResponseException::class.java) + .hasMessageContaining(DLS_FLS_EXCEPTION_MESSAGE) + .hasMessageContaining("403 Forbidden") } fun `test for FOLLOWER that UPDATE settings is forbidden for user with DLS or FLS enabled`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) val followerIndexName = "follower-index1-update-forbidden" - setMetadataSyncDelay() - createConnectionBetweenClusters(FOLLOWER, LEADER) - var settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .build() - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName).settings(settings), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, - useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")), - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"), waitForRestore = true) - assertBusy { - Assertions.assertThat(followerClient.indices() - .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) - .isEqualTo(true) - } - val getSettingsRequest = GetSettingsRequest() - getSettingsRequest.indices(followerIndexName) - Assert.assertEquals( - "1", - followerClient.indices() - .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] - ) - - settings = Settings.builder() - .put("index.shard.check_on_startup", "checksum") - .build() - - Assertions.assertThatThrownBy { - followerClient.updateReplication(followerIndexName, settings, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3","password")) - }.isInstanceOf(ResponseException::class.java) - .hasMessageContaining(DLS_FLS_EXCEPTION_MESSAGE) - .hasMessageContaining("403 Forbidden") - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, + useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")), + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"), waitForRestore = true) + assertBusy { + Assertions.assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) } + val getSettingsRequest = GetSettingsRequest() + getSettingsRequest.indices(followerIndexName) + Assert.assertEquals( + "1", + followerClient.indices() + .getSettings(getSettingsRequest, RequestOptions.DEFAULT) + .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + ) + settings = Settings.builder() + .put("index.shard.check_on_startup", "checksum") + .build() + Assertions.assertThatThrownBy { + followerClient.updateReplication(followerIndexName, settings, + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3","password")) + }.isInstanceOf(ResponseException::class.java) + .hasMessageContaining(DLS_FLS_EXCEPTION_MESSAGE) + .hasMessageContaining("403 Forbidden") } fun `test for FOLLOWER that START replication is forbidden for user 
with FLS enabled`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - val followerIndexName = "follower-index1-start-forbidden" createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerFlsRole")) - Assertions.assertThatThrownBy { followerClient.startReplication(startReplicationRequest, requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser4","password")) } .isInstanceOf(ResponseException::class.java) @@ -193,16 +160,12 @@ class SecurityDlsFlsIT: SecurityBase() { fun `test for FOLLOWER that START replication is forbidden for user with Field Masking enabled`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - val followerIndexName = "follower-index1-start-only-fls" createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerFieldMaskRole")) - Assertions.assertThatThrownBy { followerClient.startReplication(startReplicationRequest, requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser5","password")) } .isInstanceOf(ResponseException::class.java) @@ -213,36 +176,27 @@ class SecurityDlsFlsIT: SecurityBase() { fun `test for FOLLOWER that START replication works for user with Field Masking enabled on a different index pattern`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - val followerIndexName = "follower-index1-allow-start" createConnectionBetweenClusters(FOLLOWER, LEADER) - - try { - val createIndexResponse = - leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) - Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() - - var startReplicationRequest = StartReplicationRequest( - "source", leaderIndexName, followerIndexName, - useRoles = UseRoles( - leaderClusterRole = "leaderRoleValidPerms", - followerClusterRole = "followerFieldMaskRole2" - ) - ) - followerClient.startReplication( - startReplicationRequest, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser7", "password"), - waitForRestore = true + val createIndexResponse = + leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) + Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() + var startReplicationRequest = StartReplicationRequest( + "source", leaderIndexName, followerIndexName, + useRoles = UseRoles( + leaderClusterRole = "leaderRoleValidPerms", + followerClusterRole = "followerFieldMaskRole2" ) - - OpenSearchTestCase.assertBusy { - Assertions.assertThat( - followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT) - ).isEqualTo(true) - } - } - finally { - followerClient.stopReplication(followerIndexName) + ) + followerClient.startReplication( + startReplicationRequest, + 
requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser7", "password"), + waitForRestore = true + ) + OpenSearchTestCase.assertBusy { + Assertions.assertThat( + followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT) + ).isEqualTo(true) } } } \ No newline at end of file diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt index f989859c..6946303c 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt @@ -73,6 +73,7 @@ import org.opensearch.replication.followerStats import org.opensearch.replication.leaderStats import org.opensearch.replication.updateReplicationStartBlockSetting import java.nio.file.Files +import java.util.* import java.util.concurrent.TimeUnit @@ -96,52 +97,39 @@ class StartReplicationIT: MultiClusterRestTestCase() { fun `test start replication in following state and empty index`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) - assertBusy { - assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) - } - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) + assertBusy { + assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) } } fun `test start replication with settings`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() val settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 3) .build() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, settings = settings), waitForRestore = true) - assertBusy { - assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) - } - - val getSettingsRequest = GetSettingsRequest() - getSettingsRequest.indices(followerIndexName) - getSettingsRequest.includeDefaults(true) - assertBusy ({ - Assert.assertEquals( - "3", - followerClient.indices() - .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] - ) - }, 15, TimeUnit.SECONDS) - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, settings = settings), waitForRestore = true) + assertBusy { + assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) } + val getSettingsRequest 
= GetSettingsRequest() + getSettingsRequest.indices(followerIndexName) + getSettingsRequest.includeDefaults(true) + assertBusy ({ + Assert.assertEquals( + "3", + followerClient.indices() + .getSettings(getSettingsRequest, RequestOptions.DEFAULT) + .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + ) + }, 15, TimeUnit.SECONDS) } @@ -151,22 +139,18 @@ class StartReplicationIT: MultiClusterRestTestCase() { createConnectionBetweenClusters(FOLLOWER, LEADER) val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) - assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) - leaderClient.lowLevelClient.performRequest(Request("POST", "/" + leaderIndexName + "/_close")) - assertBusy ({ - try { - assertThat(followerClient.replicationStatus(followerIndexName)).containsKey("status") - var statusResp = followerClient.replicationStatus(followerIndexName) - `validate paused status on closed index`(statusResp) - } catch (e : Exception) { - Assert.fail() - } - },30, TimeUnit.SECONDS) - } finally { - followerClient.stopReplication(followerIndexName) - } + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) + assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) + leaderClient.lowLevelClient.performRequest(Request("POST", "/" + leaderIndexName + "/_close")) + assertBusy ({ + try { + assertThat(followerClient.replicationStatus(followerIndexName)).containsKey("status") + var statusResp = followerClient.replicationStatus(followerIndexName) + `validate paused status on closed index`(statusResp) + } catch (e : Exception) { + Assert.fail() + } + },30, TimeUnit.SECONDS) } @@ -176,52 +160,38 @@ class StartReplicationIT: MultiClusterRestTestCase() { createConnectionBetweenClusters(FOLLOWER, LEADER) val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) - followerClient.pauseReplication(followerIndexName) - leaderClient.lowLevelClient.performRequest(Request("POST", "/" + leaderIndexName + "/_close")) - leaderClient.lowLevelClient.performRequest(Request("POST", "/" + leaderIndexName + "/_open")) - followerClient.resumeReplication(followerIndexName) - var statusResp = followerClient.replicationStatus(followerIndexName) - `validate not paused status response`(statusResp) - - } finally { - followerClient.stopReplication(followerIndexName) - } + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) + followerClient.pauseReplication(followerIndexName) + leaderClient.lowLevelClient.performRequest(Request("POST", "/" + leaderIndexName + "/_close")) + leaderClient.lowLevelClient.performRequest(Request("POST", "/" + leaderIndexName + "/_open")) + followerClient.resumeReplication(followerIndexName) + var statusResp = followerClient.replicationStatus(followerIndexName) + `validate not paused status response`(statusResp) } fun `test 
start replication fails when replication has already been started for the same index`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - - try { - val createIndexResponse = - leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) - assertThat(createIndexResponse.isAcknowledged).isTrue() - followerClient.startReplication( - StartReplicationRequest("source", leaderIndexName, followerIndexName), - waitForRestore = true - ) - assertThatThrownBy { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName)) - }.isInstanceOf(ResponseException::class.java).hasMessageContaining( - "Cant use same index again for replication." + - " Delete the index:$followerIndexName" - ) - } - finally { - followerClient.stopReplication(followerIndexName) - } + val createIndexResponse = + leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) + assertThat(createIndexResponse.isAcknowledged).isTrue() + followerClient.startReplication( + StartReplicationRequest("source", leaderIndexName, followerIndexName), + waitForRestore = true + ) + assertThatThrownBy { + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName)) + }.isInstanceOf(ResponseException::class.java).hasMessageContaining( + "Cant use same index again for replication." + + " Delete the index:$followerIndexName" + ) } fun `test start replication fails when remote cluster alias does not exist`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() assertThatThrownBy { @@ -233,9 +203,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { fun `test start replication fails when index does not exist`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() assertThatThrownBy { @@ -247,9 +215,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { fun `test start replication fails when the follower cluster is write blocked or metadata blocked`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() addClusterMetadataBlock(FOLLOWER, "true") @@ -264,122 +230,99 @@ class StartReplicationIT: MultiClusterRestTestCase() { fun `test that follower index has same mapping as leader index`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", 
leaderIndexName, followerIndexName), waitForRestore = true) - assertBusy { - assertThat(followerClient.indices() - .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) - .isEqualTo(true) - } + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) + assertBusy { + assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) + } + Assert.assertEquals( + leaderClient.indices().getMapping(GetMappingsRequest().indices(leaderIndexName), RequestOptions.DEFAULT) + .mappings()[leaderIndexName], + followerClient.indices().getMapping(GetMappingsRequest().indices(followerIndexName), RequestOptions.DEFAULT) + .mappings()[followerIndexName] + ) + // test that new mapping created on leader is also propagated to follower + val putMappingRequest = PutMappingRequest(leaderIndexName) + putMappingRequest.source("{\"properties\":{\"name\":{\"type\":\"keyword\"}}}", XContentType.JSON) + leaderClient.indices().putMapping(putMappingRequest, RequestOptions.DEFAULT) + val sourceMap = mapOf("name" to randomAlphaOfLength(5)) + leaderClient.index(IndexRequest(leaderIndexName).id("1").source(sourceMap), RequestOptions.DEFAULT) + val leaderMappings = leaderClient.indices().getMapping(GetMappingsRequest().indices(leaderIndexName), RequestOptions.DEFAULT) + .mappings()[leaderIndexName] + assertBusy({ Assert.assertEquals( - leaderClient.indices().getMapping(GetMappingsRequest().indices(leaderIndexName), RequestOptions.DEFAULT) - .mappings()[leaderIndexName], - followerClient.indices().getMapping(GetMappingsRequest().indices(followerIndexName), RequestOptions.DEFAULT) - .mappings()[followerIndexName] + leaderMappings, + followerClient.indices().getMapping(GetMappingsRequest().indices(followerIndexName), RequestOptions.DEFAULT) + .mappings()[followerIndexName] ) - // test that new mapping created on leader is also propagated to follower - val putMappingRequest = PutMappingRequest(leaderIndexName) - putMappingRequest.source("{\"properties\":{\"name\":{\"type\":\"keyword\"}}}", XContentType.JSON) - leaderClient.indices().putMapping(putMappingRequest, RequestOptions.DEFAULT) - val sourceMap = mapOf("name" to randomAlphaOfLength(5)) - leaderClient.index(IndexRequest(leaderIndexName).id("1").source(sourceMap), RequestOptions.DEFAULT) - val leaderMappings = leaderClient.indices().getMapping(GetMappingsRequest().indices(leaderIndexName), RequestOptions.DEFAULT) - .mappings()[leaderIndexName] - assertBusy({ - Assert.assertEquals( - leaderMappings, - followerClient.indices().getMapping(GetMappingsRequest().indices(followerIndexName), RequestOptions.DEFAULT) - .mappings()[followerIndexName] - ) - }, 30L, TimeUnit.SECONDS) - - } finally { - followerClient.stopReplication(followerIndexName) - } + }, 30L, TimeUnit.SECONDS) } fun `test that index settings are getting replicated`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .build() - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName).settings(settings), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), - waitForRestore = true) - assertBusy { - 
assertThat(followerClient.indices() - .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) - .isEqualTo(true) - } - val getSettingsRequest = GetSettingsRequest() - getSettingsRequest.indices(followerIndexName) - getSettingsRequest.names(IndexMetadata.SETTING_NUMBER_OF_REPLICAS) - assertBusy({ - Assert.assertEquals( - "0", - followerClient.indices() - .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] - ) - }, 30L, TimeUnit.SECONDS) - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), + waitForRestore = true) + assertBusy { + assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) } + val getSettingsRequest = GetSettingsRequest() + getSettingsRequest.indices(followerIndexName) + getSettingsRequest.names(IndexMetadata.SETTING_NUMBER_OF_REPLICAS) + assertBusy({ + Assert.assertEquals( + "0", + followerClient.indices() + .getSettings(getSettingsRequest, RequestOptions.DEFAULT) + .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + ) + }, 30L, TimeUnit.SECONDS) } fun `test that aliases settings are getting replicated`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName) .alias(Alias("leaderAlias").filter("{\"term\":{\"year\":2016}}").routing("1")) , RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), - waitForRestore = true) - assertBusy { - assertThat(followerClient.indices() - .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) - .isEqualTo(true) - } - assertBusy({ - Assert.assertEquals( - leaderClient.indices().getAlias(GetAliasesRequest().indices(leaderIndexName), - RequestOptions.DEFAULT).aliases[leaderIndexName], - followerClient.indices().getAlias(GetAliasesRequest().indices(followerIndexName), - RequestOptions.DEFAULT).aliases[followerIndexName] - ) - - }, 30L, TimeUnit.SECONDS) - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), + waitForRestore = true) + assertBusy { + assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) } + assertBusy({ + Assert.assertEquals( + leaderClient.indices().getAlias(GetAliasesRequest().indices(leaderIndexName), + RequestOptions.DEFAULT).aliases[leaderIndexName], + followerClient.indices().getAlias(GetAliasesRequest().indices(followerIndexName), + RequestOptions.DEFAULT).aliases[followerIndexName] + ) + + }, 30L, TimeUnit.SECONDS) } fun `test that replication cannot be started on leader alias directly`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER, "source") - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName).alias(Alias("leader_alias")), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { 
followerClient.startReplication(StartReplicationRequest("source", "leader_alias", followerIndexName)) fail("Expected startReplication to fail") @@ -393,89 +336,67 @@ class StartReplicationIT: MultiClusterRestTestCase() { fun `test that translog settings are set on leader and not on follower`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), - waitForRestore = true) - assertBusy { - assertThat(followerClient.indices() - .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) - .isEqualTo(true) - assertThat(followerClient.indices() - .getSettings(GetSettingsRequest().indices(followerIndexName), RequestOptions.DEFAULT) - .getSetting(followerIndexName, - REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING.key) - .isNullOrEmpty()) - } - - assertThat(leaderClient.indices() - .getSettings(GetSettingsRequest().indices(leaderIndexName), RequestOptions.DEFAULT) - .getSetting(leaderIndexName, - REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING.key) == "true") - - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), + waitForRestore = true) + assertBusy { + assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) + assertThat(followerClient.indices() + .getSettings(GetSettingsRequest().indices(followerIndexName), RequestOptions.DEFAULT) + .getSetting(followerIndexName, + REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING.key) + .isNullOrEmpty()) } + assertThat(leaderClient.indices() + .getSettings(GetSettingsRequest().indices(leaderIndexName), RequestOptions.DEFAULT) + .getSetting(leaderIndexName, + REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING.key) == "true") } fun `test that translog settings are set on leader`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), - waitForRestore = true) - - val leaderSettings = leaderClient.indices() - .getSettings(GetSettingsRequest().indices(leaderIndexName), RequestOptions.DEFAULT) - assertThat(leaderSettings.getSetting(leaderIndexName, - REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING.key) == "true") - assertThat(leaderSettings.getSetting(leaderIndexName, - IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING.key) == "32mb") - - } finally { - followerClient.stopReplication(followerIndexName) - } + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), + waitForRestore = true) + val leaderSettings = leaderClient.indices() + .getSettings(GetSettingsRequest().indices(leaderIndexName), RequestOptions.DEFAULT) + assertThat(leaderSettings.getSetting(leaderIndexName, + REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING.key) == "true") + 
assertThat(leaderSettings.getSetting(leaderIndexName, + IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING.key) == "32mb") } fun `test that replication continues after removing translog settings based on retention lease`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), - waitForRestore = true) - assertBusy { - assertThat(followerClient.indices() - .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) - .isEqualTo(true) - } - // Turn-off the settings and index doc - val settingsBuilder = Settings.builder() - .put(REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING.key, false) - val settingsUpdateResponse = leaderClient.indices().putSettings(UpdateSettingsRequest(leaderIndexName) - .settings(settingsBuilder.build()), RequestOptions.DEFAULT) - Assert.assertEquals(settingsUpdateResponse.isAcknowledged, true) - val sourceMap = mapOf("name" to randomAlphaOfLength(5)) - leaderClient.index(IndexRequest(leaderIndexName).id("2").source(sourceMap), RequestOptions.DEFAULT) - assertBusy({ - followerClient.get(GetRequest(followerIndexName).id("2"), RequestOptions.DEFAULT).isExists - }, 30L, TimeUnit.SECONDS) - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), + waitForRestore = true) + assertBusy { + assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) } + // Turn-off the settings and index doc + val settingsBuilder = Settings.builder() + .put(REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING.key, false) + val settingsUpdateResponse = leaderClient.indices().putSettings(UpdateSettingsRequest(leaderIndexName) + .settings(settingsBuilder.build()), RequestOptions.DEFAULT) + Assert.assertEquals(settingsUpdateResponse.isAcknowledged, true) + val sourceMap = mapOf("name" to randomAlphaOfLength(5)) + leaderClient.index(IndexRequest(leaderIndexName).id("2").source(sourceMap), RequestOptions.DEFAULT) + assertBusy({ + followerClient.get(GetRequest(followerIndexName).id("2"), RequestOptions.DEFAULT).isExists + }, 30L, TimeUnit.SECONDS) } private fun addClusterMetadataBlock(clusterName: String, blockValue: String) { @@ -500,203 +421,163 @@ class StartReplicationIT: MultiClusterRestTestCase() { fun `test that dynamic index settings and alias are getting replicated `() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - setMetadataSyncDelay() - createConnectionBetweenClusters(FOLLOWER, LEADER) - var settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .build() - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName).settings(settings), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), - waitForRestore = true) - assertBusy { - assertThat(followerClient.indices() - .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) - .isEqualTo(true) - } - - settings 
= Settings.builder() - .build() - - followerClient.updateReplication( followerIndexName, settings) - - val getSettingsRequest = GetSettingsRequest() - getSettingsRequest.indices(followerIndexName) - getSettingsRequest.includeDefaults(true) - Assert.assertEquals( - "0", - followerClient.indices() - .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] - ) - - settings = Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) - .put("routing.allocation.enable", "none") - .build() - - leaderClient.indices().putSettings(UpdateSettingsRequest(leaderIndexName).settings(settings), RequestOptions.DEFAULT) - - var indicesAliasesRequest = IndicesAliasesRequest() - var aliasAction = IndicesAliasesRequest.AliasActions.add() - .index(leaderIndexName) - .alias("alias1").filter("{\"term\":{\"year\":2016}}").routing("1") - indicesAliasesRequest.addAliasAction(aliasAction) - leaderClient.indices().updateAliases(indicesAliasesRequest, RequestOptions.DEFAULT) - - TimeUnit.SECONDS.sleep(SLEEP_TIME_BETWEEN_SYNC) - getSettingsRequest.indices(followerIndexName) - // Leader setting is copied - assertBusy({ - Assert.assertEquals( - "2", - followerClient.indices() + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), + waitForRestore = true) + assertBusy { + assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) + } + settings = Settings.builder() + .build() + followerClient.updateReplication( followerIndexName, settings) + val getSettingsRequest = GetSettingsRequest() + getSettingsRequest.indices(followerIndexName) + getSettingsRequest.includeDefaults(true) + Assert.assertEquals( + "0", + followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] - ) - assertEqualAliases() - }, 30L, TimeUnit.SECONDS) - - - // Case 2 : Blocklisted setting are not copied - Assert.assertNull(followerClient.indices() + ) + settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) + .put("routing.allocation.enable", "none") + .build() + leaderClient.indices().putSettings(UpdateSettingsRequest(leaderIndexName).settings(settings), RequestOptions.DEFAULT) + var indicesAliasesRequest = IndicesAliasesRequest() + var aliasAction = IndicesAliasesRequest.AliasActions.add() + .index(leaderIndexName) + .alias("alias1").filter("{\"term\":{\"year\":2016}}").routing("1") + indicesAliasesRequest.addAliasAction(aliasAction) + leaderClient.indices().updateAliases(indicesAliasesRequest, RequestOptions.DEFAULT) + TimeUnit.SECONDS.sleep(SLEEP_TIME_BETWEEN_SYNC) + getSettingsRequest.indices(followerIndexName) + // Leader setting is copied + assertBusy({ + Assert.assertEquals( + "2", + followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName].get("index.routing.allocation.enable")) - - //Alias test case 2: Update existing alias - aliasAction = IndicesAliasesRequest.AliasActions.add() - .index(leaderIndexName) - .routing("2") - .alias("alias1") - .writeIndex(true) - .isHidden(false) - indicesAliasesRequest.addAliasAction(aliasAction) - leaderClient.indices().updateAliases(indicesAliasesRequest, RequestOptions.DEFAULT) - - //Use Update API - settings = Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 3) - 
.put("index.routing.allocation.enable", "none") - .put("index.search.idle.after", "10s") - .build() - - followerClient.updateReplication( followerIndexName, settings) - TimeUnit.SECONDS.sleep(SLEEP_TIME_BETWEEN_SYNC) - - // Case 3 : Updated Settings take higher priority. Blocklisted settins shouldn't matter for that - assertBusy({ - Assert.assertEquals( - "3", - followerClient.indices() - .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] - ) - - Assert.assertEquals( - "10s", - followerClient.indices() - .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName]["index.search.idle.after"] - ) - - Assert.assertEquals( - "none", - followerClient.indices() - .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName]["index.routing.allocation.enable"] - ) - - assertEqualAliases() - }, 30L, TimeUnit.SECONDS) - - //Clear the settings - settings = Settings.builder() - .build() - followerClient.updateReplication( followerIndexName, settings) - - //Alias test case 3: Delete one alias and add another alias - aliasAction = IndicesAliasesRequest.AliasActions.remove() - .index(leaderIndexName) - .alias("alias1") - indicesAliasesRequest.addAliasAction(aliasAction + .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] ) - leaderClient.indices().updateAliases(indicesAliasesRequest, RequestOptions.DEFAULT) - var aliasAction2 = IndicesAliasesRequest.AliasActions.add() - .index(leaderIndexName) - .routing("12") - .alias("alias2") - .indexRouting("indexRouting") - indicesAliasesRequest.addAliasAction(aliasAction2) - - TimeUnit.SECONDS.sleep(SLEEP_TIME_BETWEEN_SYNC) - - assertBusy({ - Assert.assertEquals( - null, - followerClient.indices() - .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName]["index.search.idle.after"] - ) - assertEqualAliases() - }, 30L, TimeUnit.SECONDS) - - } finally { - followerClient.stopReplication(followerIndexName) - } - + assertEqualAliases() + }, 30L, TimeUnit.SECONDS) + // Case 2 : Blocklisted setting are not copied + Assert.assertNull(followerClient.indices() + .getSettings(getSettingsRequest, RequestOptions.DEFAULT) + .indexToSettings[followerIndexName].get("index.routing.allocation.enable")) + //Alias test case 2: Update existing alias + aliasAction = IndicesAliasesRequest.AliasActions.add() + .index(leaderIndexName) + .routing("2") + .alias("alias1") + .writeIndex(true) + .isHidden(false) + indicesAliasesRequest.addAliasAction(aliasAction) + leaderClient.indices().updateAliases(indicesAliasesRequest, RequestOptions.DEFAULT) + //Use Update API + settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 3) + .put("index.routing.allocation.enable", "none") + .put("index.search.idle.after", "10s") + .build() + followerClient.updateReplication( followerIndexName, settings) + TimeUnit.SECONDS.sleep(SLEEP_TIME_BETWEEN_SYNC) + // Case 3 : Updated Settings take higher priority. 
Blocklisted settins shouldn't matter for that + assertBusy({ + Assert.assertEquals( + "3", + followerClient.indices() + .getSettings(getSettingsRequest, RequestOptions.DEFAULT) + .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + ) + Assert.assertEquals( + "10s", + followerClient.indices() + .getSettings(getSettingsRequest, RequestOptions.DEFAULT) + .indexToSettings[followerIndexName]["index.search.idle.after"] + ) + Assert.assertEquals( + "none", + followerClient.indices() + .getSettings(getSettingsRequest, RequestOptions.DEFAULT) + .indexToSettings[followerIndexName]["index.routing.allocation.enable"] + ) + assertEqualAliases() + }, 30L, TimeUnit.SECONDS) + //Clear the settings + settings = Settings.builder() + .build() + followerClient.updateReplication( followerIndexName, settings) + //Alias test case 3: Delete one alias and add another alias + aliasAction = IndicesAliasesRequest.AliasActions.remove() + .index(leaderIndexName) + .alias("alias1") + indicesAliasesRequest.addAliasAction(aliasAction + ) + leaderClient.indices().updateAliases(indicesAliasesRequest, RequestOptions.DEFAULT) + var aliasAction2 = IndicesAliasesRequest.AliasActions.add() + .index(leaderIndexName) + .routing("12") + .alias("alias2") + .indexRouting("indexRouting") + indicesAliasesRequest.addAliasAction(aliasAction2) + TimeUnit.SECONDS.sleep(SLEEP_TIME_BETWEEN_SYNC) + assertBusy({ + Assert.assertEquals( + null, + followerClient.indices() + .getSettings(getSettingsRequest, RequestOptions.DEFAULT) + .indexToSettings[followerIndexName]["index.search.idle.after"] + ) + assertEqualAliases() + }, 30L, TimeUnit.SECONDS) } fun `test that static index settings are getting replicated `() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - setMetadataSyncDelay() - createConnectionBetweenClusters(FOLLOWER, LEADER) - var settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .build() - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName).settings(settings), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), - waitForRestore = true) - assertBusy { - assertThat(followerClient.indices() - .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) - .isEqualTo(true) - } - val getSettingsRequest = GetSettingsRequest() - getSettingsRequest.indices(followerIndexName) - Assert.assertEquals( - "1", - followerClient.indices() - .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] - ) - - settings = Settings.builder() - .put("index.shard.check_on_startup", "checksum") - .build() - followerClient.updateReplication(followerIndexName, settings) - - TimeUnit.SECONDS.sleep(SLEEP_TIME_BETWEEN_SYNC) - Assert.assertEquals( - "checksum", - followerClient.indices() - .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName]["index.shard.check_on_startup"] - ) - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), + waitForRestore = true) + assertBusy { + assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) } + val getSettingsRequest = 
GetSettingsRequest() + getSettingsRequest.indices(followerIndexName) + Assert.assertEquals( + "1", + followerClient.indices() + .getSettings(getSettingsRequest, RequestOptions.DEFAULT) + .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + ) + settings = Settings.builder() + .put("index.shard.check_on_startup", "checksum") + .build() + followerClient.updateReplication(followerIndexName, settings) + TimeUnit.SECONDS.sleep(SLEEP_TIME_BETWEEN_SYNC) + Assert.assertEquals( + "checksum", + followerClient.indices() + .getSettings(getSettingsRequest, RequestOptions.DEFAULT) + .indexToSettings[followerIndexName]["index.shard.check_on_startup"] + ) } fun `test that replication fails to start when custom analyser is not present in follower`() { @@ -709,7 +590,6 @@ class StartReplicationIT: MultiClusterRestTestCase() { val settings: Settings = Settings.builder().loadFromStream(synonymsJson, javaClass.getResourceAsStream(synonymsJson), false) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .build() - val leaderClient = getClientForCluster(LEADER) val followerClient = getClientForCluster(FOLLOWER) try { @@ -719,9 +599,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { } catch (e: Exception) { assumeNoException("Ignored test as analyzer setting could not be added", e) } - createConnectionBetweenClusters(FOLLOWER, LEADER) - assertThatThrownBy { followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName)) }.isInstanceOf(ResponseException::class.java).hasMessageContaining("resource_not_found_exception") @@ -741,11 +619,9 @@ class StartReplicationIT: MultiClusterRestTestCase() { try { Files.copy(synonyms, leaderSynonymPath) Files.copy(synonyms, followerSynonymPath) - val settings: Settings = Settings.builder().loadFromStream(synonymsJson, javaClass.getResourceAsStream(synonymsJson), false) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .build() - val leaderClient = getClientForCluster(LEADER) val followerClient = getClientForCluster(FOLLOWER) try { @@ -755,17 +631,11 @@ class StartReplicationIT: MultiClusterRestTestCase() { } catch (e: Exception) { assumeNoException("Ignored test as analyzer setting could not be added", e) } - createConnectionBetweenClusters(FOLLOWER, LEADER) - - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), - waitForRestore = true) - assertBusy { - assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) - } - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), + waitForRestore = true) + assertBusy { + assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) } } finally { if (Files.exists(leaderSynonymPath)) { @@ -787,11 +657,9 @@ class StartReplicationIT: MultiClusterRestTestCase() { try { Files.copy(synonyms, leaderSynonymPath) Files.copy(synonyms, followerSynonymPath) - val settings: Settings = Settings.builder().loadFromStream(synonymsJson, javaClass.getResourceAsStream(synonymsJson), false) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .build() - val leaderClient = getClientForCluster(LEADER) val followerClient = getClientForCluster(FOLLOWER) try { @@ -801,20 +669,14 @@ class StartReplicationIT: MultiClusterRestTestCase() { } catch (e: Exception) { assumeNoException("Ignored test as analyzer 
setting could not be added", e) } - createConnectionBetweenClusters(FOLLOWER, LEADER) - - try { - val overriddenSettings: Settings = Settings.builder() - .put("index.analysis.filter.my_filter.synonyms_path", synonymFollowerFilename) - .build() - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, overriddenSettings), - waitForRestore = true) - assertBusy { - assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) - } - } finally { - followerClient.stopReplication(followerIndexName) + val overriddenSettings: Settings = Settings.builder() + .put("index.analysis.filter.my_filter.synonyms_path", synonymFollowerFilename) + .build() + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, overriddenSettings), + waitForRestore = true) + assertBusy { + assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) } } finally { if (Files.exists(leaderSynonymPath)) { @@ -829,73 +691,58 @@ class StartReplicationIT: MultiClusterRestTestCase() { fun `test that follower index cannot be deleted after starting replication`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), - waitForRestore = true) - assertBusy { - assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) - } - // Need to wait till index blocks appear into state - assertBusy { - val clusterBlocksResponse = followerClient.lowLevelClient.performRequest(Request("GET", "/_cluster/state/blocks")) - val clusterResponseString = EntityUtils.toString(clusterBlocksResponse.entity) - assertThat(clusterResponseString.contains("cross-cluster-replication")) - .withFailMessage("Cant find replication block afer starting replication") - .isTrue() - } - // Delete index - assertThatThrownBy { - followerClient.indices().delete(DeleteIndexRequest(followerIndexName), RequestOptions.DEFAULT) - }.isInstanceOf(OpenSearchStatusException::class.java).hasMessageContaining("cluster_block_exception") - // Close index - assertThatThrownBy { - followerClient.indices().close(CloseIndexRequest(followerIndexName), RequestOptions.DEFAULT) - }.isInstanceOf(OpenSearchStatusException::class.java).hasMessageContaining("cluster_block_exception") - // Index document - assertThatThrownBy { - val sourceMap = mapOf("name" to randomAlphaOfLength(5)) - followerClient.index(IndexRequest(followerIndexName).id("1").source(sourceMap), RequestOptions.DEFAULT) - }.isInstanceOf(OpenSearchStatusException::class.java).hasMessageContaining("cluster_block_exception") - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), + waitForRestore = true) + assertBusy { + assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) } + // Need to wait till index blocks appear into state + assertBusy { + val clusterBlocksResponse = 
followerClient.lowLevelClient.performRequest(Request("GET", "/_cluster/state/blocks")) + val clusterResponseString = EntityUtils.toString(clusterBlocksResponse.entity) + assertThat(clusterResponseString.contains("cross-cluster-replication")) + .withFailMessage("Cant find replication block afer starting replication") + .isTrue() + } + // Delete index + assertThatThrownBy { + followerClient.indices().delete(DeleteIndexRequest(followerIndexName), RequestOptions.DEFAULT) + }.isInstanceOf(OpenSearchStatusException::class.java).hasMessageContaining("cluster_block_exception") + // Close index + assertThatThrownBy { + followerClient.indices().close(CloseIndexRequest(followerIndexName), RequestOptions.DEFAULT) + }.isInstanceOf(OpenSearchStatusException::class.java).hasMessageContaining("cluster_block_exception") + // Index document + assertThatThrownBy { + val sourceMap = mapOf("name" to randomAlphaOfLength(5)) + followerClient.index(IndexRequest(followerIndexName).id("1").source(sourceMap), RequestOptions.DEFAULT) + }.isInstanceOf(OpenSearchStatusException::class.java).hasMessageContaining("cluster_block_exception") } fun `test that replication gets paused if the leader index is deleted`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), - waitForRestore = true) - assertBusy { - assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) - } - assertBusy { - var statusResp = followerClient.replicationStatus(followerIndexName) - `validate status syncing response`(statusResp) - } - val deleteIndexResponse = leaderClient.indices().delete(DeleteIndexRequest(leaderIndexName), RequestOptions.DEFAULT) - assertThat(deleteIndexResponse.isAcknowledged).isTrue() - - assertBusy({ - var statusResp = followerClient.replicationStatus(followerIndexName) - `validate paused status response due to leader index deleted`(statusResp) - }, 15, TimeUnit.SECONDS) - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), + waitForRestore = true) + assertBusy { + assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) } + assertBusy { + var statusResp = followerClient.replicationStatus(followerIndexName) + `validate status syncing response`(statusResp) + } + val deleteIndexResponse = leaderClient.indices().delete(DeleteIndexRequest(leaderIndexName), RequestOptions.DEFAULT) + assertThat(deleteIndexResponse.isAcknowledged).isTrue() + assertBusy({ + var statusResp = followerClient.replicationStatus(followerIndexName) + `validate paused status response due to leader index deleted`(statusResp) + }, 15, TimeUnit.SECONDS) } fun `test forcemerge on leader during replication bootstrap`() { @@ -907,7 +754,6 @@ class StartReplicationIT: MultiClusterRestTestCase() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = 
leaderClient.indices().create(CreateIndexRequest(leaderIndexName).settings(settings), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() @@ -917,28 +763,22 @@ class StartReplicationIT: MultiClusterRestTestCase() { assertThat(leaderClient.indices() .exists(GetIndexRequest(leaderIndexName), RequestOptions.DEFAULT)) } - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), - TimeValue.timeValueSeconds(10), - false) - //Given the size of index, the replication should be in RESTORING phase at this point - leaderClient.indices().forcemerge(ForceMergeRequest(leaderIndexName), RequestOptions.DEFAULT) - - assertBusy { - var statusResp = followerClient.replicationStatus(followerIndexName) - `validate status syncing response`(statusResp) - } - TimeUnit.SECONDS.sleep(30) - - assertBusy ({ - Assert.assertEquals(leaderClient.count(CountRequest(leaderIndexName), RequestOptions.DEFAULT).toString(), - followerClient.count(CountRequest(followerIndexName), RequestOptions.DEFAULT).toString()) - }, - 30, TimeUnit.SECONDS - ) - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), + TimeValue.timeValueSeconds(10), + false) + //Given the size of index, the replication should be in RESTORING phase at this point + leaderClient.indices().forcemerge(ForceMergeRequest(leaderIndexName), RequestOptions.DEFAULT) + assertBusy { + var statusResp = followerClient.replicationStatus(followerIndexName) + `validate status syncing response`(statusResp) } + TimeUnit.SECONDS.sleep(30) + assertBusy ({ + Assert.assertEquals(leaderClient.count(CountRequest(leaderIndexName), RequestOptions.DEFAULT).toString(), + followerClient.count(CountRequest(followerIndexName), RequestOptions.DEFAULT).toString()) + }, + 30, TimeUnit.SECONDS + ) } fun `test that snapshot on leader does not affect replication during bootstrap`() { @@ -950,15 +790,11 @@ class StartReplicationIT: MultiClusterRestTestCase() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) createConnectionBetweenClusters(FOLLOWER, LEADER) - val repoPath = PathUtils.get(buildDir, repoPath) - val putRepositoryRequest = PutRepositoryRequest("my-repo") .type(FsRepository.TYPE) .settings("{\"location\": \"$repoPath\"}", XContentType.JSON) - leaderClient.snapshot().createRepository(putRepositoryRequest, RequestOptions.DEFAULT) - val createIndexResponse = leaderClient.indices().create( CreateIndexRequest(leaderIndexName).settings(settings), RequestOptions.DEFAULT @@ -972,152 +808,122 @@ class StartReplicationIT: MultiClusterRestTestCase() { .exists(GetIndexRequest(leaderIndexName), RequestOptions.DEFAULT) ) } - try { - followerClient.startReplication( - StartReplicationRequest("source", leaderIndexName, followerIndexName), - TimeValue.timeValueSeconds(10), - false - ) - //Given the size of index, the replication should be in RESTORING phase at this point - leaderClient.snapshot().create(CreateSnapshotRequest("my-repo", "snapshot_1").indices(leaderIndexName), RequestOptions.DEFAULT) - - assertBusy({ - var statusResp = followerClient.replicationStatus(followerIndexName) - `validate status syncing response`(statusResp) - }, 30, TimeUnit.SECONDS - ) - assertBusy({ - Assert.assertEquals( - leaderClient.count(CountRequest(leaderIndexName), RequestOptions.DEFAULT).toString(), - followerClient.count(CountRequest(followerIndexName), 
RequestOptions.DEFAULT).toString() - )}, - 30, TimeUnit.SECONDS - ) - } finally { - followerClient.stopReplication(followerIndexName) - } + followerClient.startReplication( + StartReplicationRequest("source", leaderIndexName, followerIndexName), + TimeValue.timeValueSeconds(10), + false + ) + //Given the size of index, the replication should be in RESTORING phase at this point + leaderClient.snapshot().create(CreateSnapshotRequest("my-repo", "snapshot_1").indices(leaderIndexName), RequestOptions.DEFAULT) + assertBusy({ + var statusResp = followerClient.replicationStatus(followerIndexName) + `validate status syncing response`(statusResp) + }, 30, TimeUnit.SECONDS + ) + assertBusy({ + Assert.assertEquals( + leaderClient.count(CountRequest(leaderIndexName), RequestOptions.DEFAULT).toString(), + followerClient.count(CountRequest(followerIndexName), RequestOptions.DEFAULT).toString() + )}, + 30, TimeUnit.SECONDS + ) } fun `test leader stats`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) .build() - val createIndexResponse = leaderClient.indices().create( CreateIndexRequest(leaderIndexName).settings(settings), RequestOptions.DEFAULT ) assertThat(createIndexResponse.isAcknowledged).isTrue() - - try { - followerClient.startReplication( - StartReplicationRequest("source", leaderIndexName, followerIndexName), - TimeValue.timeValueSeconds(10), - true - ) - - val docCount = 50 - - for (i in 1..docCount) { - val sourceMap = mapOf("name" to randomAlphaOfLength(5)) - leaderClient.index(IndexRequest(leaderIndexName).id(i.toString()).source(sourceMap), RequestOptions.DEFAULT) - } - - // Have to wait until the new operations are available to read at the leader cluster - assertBusy({ - val stats = leaderClient.leaderStats() - assertThat(stats.size).isEqualTo(9) - assertThat(stats.getValue("num_replicated_indices").toString()).isEqualTo("1") - assertThat(stats.getValue("operations_read").toString()).isEqualTo(docCount.toString()) - assertThat(stats.getValue("operations_read_lucene").toString()).isEqualTo("0") - assertThat(stats.getValue("operations_read_translog").toString()).isEqualTo(docCount.toString()) - assertThat(stats.containsKey("index_stats")) - }, 60L, TimeUnit.SECONDS) - - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication( + StartReplicationRequest("source", leaderIndexName, followerIndexName), + TimeValue.timeValueSeconds(10), + true + ) + val docCount = 50 + for (i in 1..docCount) { + val sourceMap = mapOf("name" to randomAlphaOfLength(5)) + leaderClient.index(IndexRequest(leaderIndexName).id(i.toString()).source(sourceMap), RequestOptions.DEFAULT) } + // Have to wait until the new operations are available to read at the leader cluster + assertBusy({ + val stats = leaderClient.leaderStats() + assertThat(stats.size).isEqualTo(9) + assertThat(stats.getValue("num_replicated_indices").toString()).isEqualTo("1") + assertThat(stats.getValue("operations_read").toString()).isEqualTo(docCount.toString()) + assertThat(stats.getValue("operations_read_lucene").toString()).isEqualTo("0") + assertThat(stats.getValue("operations_read_translog").toString()).isEqualTo(docCount.toString()) + assertThat(stats.containsKey("index_stats")) + }, 60L, TimeUnit.SECONDS) } @AwaitsFix(bugUrl = "https://github.com/opensearch-project/cross-cluster-replication/issues/176") fun `test 
follower stats`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - - val followerIndex2 = "follower_index_2" - val followerIndex3 = "follower_index_3" - + val leaderIndexName2 = randomAlphaOfLength(10).toLowerCase(Locale.ROOT)+"leader" + val followerIndexName2 = randomAlphaOfLength(10).toLowerCase(Locale.ROOT)+"follower" + val leaderIndexName3 = randomAlphaOfLength(10).toLowerCase(Locale.ROOT)+"leader" + val followerIndexName3 = randomAlphaOfLength(10).toLowerCase(Locale.ROOT)+"follower" +// val followerIndex2 = "follower_index_2" +// val followerIndex3 = "follower_index_3" createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create( CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT ) assertThat(createIndexResponse.isAcknowledged).isTrue() - - try { - followerClient.startReplication( - StartReplicationRequest("source", leaderIndexName, followerIndexName), - TimeValue.timeValueSeconds(10), - true - ) - followerClient.startReplication( - StartReplicationRequest("source", leaderIndexName, followerIndex2), - TimeValue.timeValueSeconds(10), - true - ) - followerClient.startReplication( - StartReplicationRequest("source", leaderIndexName, followerIndex3), - TimeValue.timeValueSeconds(10), - true - ) - val docCount = 50 - for (i in 1..docCount) { - val sourceMap = mapOf("name" to randomAlphaOfLength(5)) - leaderClient.index(IndexRequest(leaderIndexName).id(i.toString()).source(sourceMap), RequestOptions.DEFAULT) - } - - followerClient.pauseReplication(followerIndex2) - followerClient.stopReplication(followerIndex3) - - - val stats = followerClient.followerStats() - assertThat(stats.getValue("num_syncing_indices").toString()).isEqualTo("1") - assertThat(stats.getValue("num_paused_indices").toString()).isEqualTo("1") - assertThat(stats.getValue("num_failed_indices").toString()).isEqualTo("0") - assertThat(stats.getValue("num_shard_tasks").toString()).isEqualTo("1") - assertThat(stats.getValue("operations_written").toString()).isEqualTo("50") - assertThat(stats.getValue("operations_read").toString()).isEqualTo("50") - assertThat(stats.getValue("failed_read_requests").toString()).isEqualTo("0") - assertThat(stats.getValue("failed_write_requests").toString()).isEqualTo("0") - assertThat(stats.containsKey("index_stats")) - assertThat(stats.size).isEqualTo(16) - - } finally { - followerClient.stopReplication(followerIndexName) - followerClient.stopReplication(followerIndex2) + followerClient.startReplication( + StartReplicationRequest("source", leaderIndexName, followerIndexName), + TimeValue.timeValueSeconds(10), + true + ) + followerClient.startReplication( + StartReplicationRequest("source", leaderIndexName2, followerIndexName2), + TimeValue.timeValueSeconds(10), + true + ) + followerClient.startReplication( + StartReplicationRequest("source", leaderIndexName3, followerIndexName3), + TimeValue.timeValueSeconds(10), + true + ) + val docCount = 50 + for (i in 1..docCount) { + val sourceMap = mapOf("name" to randomAlphaOfLength(5)) + leaderClient.index(IndexRequest(leaderIndexName).id(i.toString()).source(sourceMap), RequestOptions.DEFAULT) } + followerClient.pauseReplication(followerIndexName2) + val stats = followerClient.followerStats() + assertThat(stats.getValue("num_syncing_indices").toString()).isEqualTo("1") + assertThat(stats.getValue("num_paused_indices").toString()).isEqualTo("1") + assertThat(stats.getValue("num_failed_indices").toString()).isEqualTo("0") + 
assertThat(stats.getValue("num_shard_tasks").toString()).isEqualTo("1") + assertThat(stats.getValue("operations_written").toString()).isEqualTo("50") + assertThat(stats.getValue("operations_read").toString()).isEqualTo("50") + assertThat(stats.getValue("failed_read_requests").toString()).isEqualTo("0") + assertThat(stats.getValue("failed_write_requests").toString()).isEqualTo("0") + assertThat(stats.getValue("follower_checkpoint").toString()).isEqualTo((docCount-1).toString()) + assertThat(stats.containsKey("index_stats")) + assertThat(stats.size).isEqualTo(16) } fun `test that replication cannot be started on invalid indexName`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName).alias(Alias("leaderAlias")), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - assertValidationFailure(followerClient, "leaderIndex", followerIndexName, "Value leaderIndex must be lowercase") assertValidationFailure(followerClient, "leaderindex", "followerIndex", "Value followerIndex must be lowercase") - assertValidationFailure(followerClient, "test*", followerIndexName, "Value test* must not contain the following characters") assertValidationFailure(followerClient, "test#", followerIndexName, @@ -1128,10 +934,8 @@ class StartReplicationIT: MultiClusterRestTestCase() { "Value . must not be '.' or '..'") assertValidationFailure(followerClient, "..", followerIndexName, "Value .. must not be '.' or '..'") - assertValidationFailure(followerClient, "_leader", followerIndexName, "Value _leader must not start with '_' or '-' or '+'") - assertValidationFailure(followerClient, "-leader", followerIndexName, "Value -leader must not start with '_' or '-' or '+'") assertValidationFailure(followerClient, "+leader", followerIndexName, @@ -1145,45 +949,33 @@ class StartReplicationIT: MultiClusterRestTestCase() { fun `test that replication is not started when start block is set`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) val createIndexResponse = leaderClient.indices().create( CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT ) assertThat(createIndexResponse.isAcknowledged).isTrue() - // Setting to add replication start block followerClient.updateReplicationStartBlockSetting(true) - assertThatThrownBy { followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) } .isInstanceOf(ResponseException::class.java) .hasMessageContaining("[FORBIDDEN] Replication START block is set") - // Remove replication start block and start replication followerClient.updateReplicationStartBlockSetting(false) - - try { followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) - } finally { - followerClient.stopReplication(followerIndexName) - } } fun `test start replication invalid settings`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() val settings = Settings.builder() .put("index.data_path", 
"/random-path/invalid-setting") .build() - try { followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, settings = settings)) } catch (e: ResponseException) { @@ -1195,7 +987,6 @@ class StartReplicationIT: MultiClusterRestTestCase() { fun `test that replication is not started when all primary shards are not in active state`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) // Exclude leader cluster nodes to stop assignment for the new shards excludeAllClusterNodes(LEADER) @@ -1211,13 +1002,11 @@ class StartReplicationIT: MultiClusterRestTestCase() { assertBusy { assertThat(leaderClient.indices().exists(GetIndexRequest(leaderIndexName), RequestOptions.DEFAULT)).isEqualTo(true) } - // start repilcation should fail as the shards are not active on the leader cluster assertThatThrownBy { followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore = true) } .isInstanceOf(ResponseException::class.java) .hasMessageContaining("Primary shards in the Index[source:${leaderIndexName}] are not active") - } private fun excludeAllClusterNodes(clusterName: String) { @@ -1262,10 +1051,8 @@ class StartReplicationIT: MultiClusterRestTestCase() { var getAliasesRequest = GetAliasesRequest().indices(followerIndexName) var aliasRespone = followerClient.indices().getAlias(getAliasesRequest, RequestOptions.DEFAULT) var followerAliases = aliasRespone.aliases.get(followerIndexName) - aliasRespone = leaderClient.indices().getAlias(GetAliasesRequest().indices(leaderIndexName), RequestOptions.DEFAULT) var leaderAliases = aliasRespone.aliases.get(leaderIndexName) - Assert.assertEquals(followerAliases, leaderAliases) } } diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt index 09b797ae..abfe247c 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt @@ -229,7 +229,6 @@ class StopReplicationIT: MultiClusterRestTestCase() { .withFailMessage("Cant find replication block after starting replication") .isTrue() }, 10, TimeUnit.SECONDS) - // Remove leader cluster from settings val settings: Settings = Settings.builder() .putNull("cluster.remote.source.seeds") @@ -237,7 +236,6 @@ class StopReplicationIT: MultiClusterRestTestCase() { val updateSettingsRequest = ClusterUpdateSettingsRequest() updateSettingsRequest.persistentSettings(settings) followerClient.cluster().putSettings(updateSettingsRequest, RequestOptions.DEFAULT) - followerClient.stopReplication(followerIndexName) val sourceMap = mapOf("name" to randomAlphaOfLength(5)) followerClient.index(IndexRequest(followerIndexName).id("2").source(sourceMap), RequestOptions.DEFAULT) @@ -247,71 +245,54 @@ class StopReplicationIT: MultiClusterRestTestCase() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) createConnectionBetweenClusters(FOLLOWER, LEADER, "source") - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() val snapshotSuffix = Random().nextInt(1000).toString() - - try { - followerClient.startReplication( - StartReplicationRequest("source", leaderIndexName, followerIndexName), 
- TimeValue.timeValueSeconds(10), - true - ) - - assertBusy({ - var statusResp = followerClient.replicationStatus(followerIndexName) - `validate status syncing response`(statusResp) - assertThat(followerClient.getShardReplicationTasks(followerIndexName)).isNotEmpty() - }, 60, TimeUnit.SECONDS) - - // Trigger snapshot on the follower cluster - val createSnapshotRequest = CreateSnapshotRequest(TestCluster.FS_SNAPSHOT_REPO, "test-$snapshotSuffix") - createSnapshotRequest.waitForCompletion(true) - followerClient.snapshot().create(createSnapshotRequest, RequestOptions.DEFAULT) - - assertBusy { - var snapshotStatusResponse = followerClient.snapshot().status(SnapshotsStatusRequest(TestCluster.FS_SNAPSHOT_REPO, - arrayOf("test-$snapshotSuffix")), RequestOptions.DEFAULT) - for (snapshotStatus in snapshotStatusResponse.snapshots) { - Assert.assertEquals(SnapshotsInProgress.State.SUCCESS, snapshotStatus.state) - } - } - - // Restore follower index on leader cluster - val restoreSnapshotRequest = RestoreSnapshotRequest(TestCluster.FS_SNAPSHOT_REPO, "test-$snapshotSuffix") - restoreSnapshotRequest.indices(followerIndexName) - restoreSnapshotRequest.waitForCompletion(true) - restoreSnapshotRequest.renamePattern("(.+)") - restoreSnapshotRequest.renameReplacement("restored-\$1") - leaderClient.snapshot().restore(restoreSnapshotRequest, RequestOptions.DEFAULT) - - assertBusy { - assertThat(leaderClient.indices().exists(GetIndexRequest("restored-$followerIndexName"), RequestOptions.DEFAULT)).isEqualTo(true) + followerClient.startReplication( + StartReplicationRequest("source", leaderIndexName, followerIndexName), + TimeValue.timeValueSeconds(10), + true + ) + assertBusy({ + var statusResp = followerClient.replicationStatus(followerIndexName) + `validate status syncing response`(statusResp) + assertThat(followerClient.getShardReplicationTasks(followerIndexName)).isNotEmpty() + }, 60, TimeUnit.SECONDS) + // Trigger snapshot on the follower cluster + val createSnapshotRequest = CreateSnapshotRequest(TestCluster.FS_SNAPSHOT_REPO, "test-$snapshotSuffix") + createSnapshotRequest.waitForCompletion(true) + followerClient.snapshot().create(createSnapshotRequest, RequestOptions.DEFAULT) + assertBusy { + var snapshotStatusResponse = followerClient.snapshot().status(SnapshotsStatusRequest(TestCluster.FS_SNAPSHOT_REPO, + arrayOf("test-$snapshotSuffix")), RequestOptions.DEFAULT) + for (snapshotStatus in snapshotStatusResponse.snapshots) { + Assert.assertEquals(SnapshotsInProgress.State.SUCCESS, snapshotStatus.state) } - - // Invoke stop on the new leader cluster index - assertThatThrownBy { leaderClient.stopReplication("restored-$followerIndexName") } - .isInstanceOf(ResponseException::class.java) - .hasMessageContaining("Metadata for restored-$followerIndexName doesn't exist") - - // Start replication on the new leader index - followerClient.startReplication( - StartReplicationRequest("source", "restored-$followerIndexName", "restored-$followerIndexName"), - TimeValue.timeValueSeconds(10), - true, true - ) - - assertBusy({ - var statusResp = followerClient.replicationStatus("restored-$followerIndexName") - `validate status syncing response`(statusResp) - assertThat(followerClient.getShardReplicationTasks("restored-$followerIndexName")).isNotEmpty() - }, 60, TimeUnit.SECONDS) - - } finally { - followerClient.stopReplication("restored-$followerIndexName") - followerClient.stopReplication(followerIndexName) } - + // Restore follower index on leader cluster + val restoreSnapshotRequest = 
RestoreSnapshotRequest(TestCluster.FS_SNAPSHOT_REPO, "test-$snapshotSuffix") + restoreSnapshotRequest.indices(followerIndexName) + restoreSnapshotRequest.waitForCompletion(true) + restoreSnapshotRequest.renamePattern("(.+)") + restoreSnapshotRequest.renameReplacement("restored-\$1") + leaderClient.snapshot().restore(restoreSnapshotRequest, RequestOptions.DEFAULT) + assertBusy { + assertThat(leaderClient.indices().exists(GetIndexRequest("restored-$followerIndexName"), RequestOptions.DEFAULT)).isEqualTo(true) + } + // Invoke stop on the new leader cluster index + assertThatThrownBy { leaderClient.stopReplication("restored-$followerIndexName") } + .isInstanceOf(ResponseException::class.java) + .hasMessageContaining("Metadata for restored-$followerIndexName doesn't exist") + // Start replication on the new leader index + followerClient.startReplication( + StartReplicationRequest("source", "restored-$followerIndexName", "restored-$followerIndexName"), + TimeValue.timeValueSeconds(10), + true, true + ) + assertBusy({ + var statusResp = followerClient.replicationStatus("restored-$followerIndexName") + `validate status syncing response`(statusResp) + assertThat(followerClient.getShardReplicationTasks("restored-$followerIndexName")).isNotEmpty() + }, 60, TimeUnit.SECONDS) } } diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt index 82db8fb5..cb0d332e 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt @@ -68,21 +68,17 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { val leaderIndexName = createRandomIndex(leaderClient) var leaderIndexNameNew = "" createConnectionBetweenClusters(FOLLOWER, LEADER, connectionAlias) - try { followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern) - // Verify that existing index matching the pattern are replicated. assertBusy ({ Assertions.assertThat(followerClient.indices() .exists(GetIndexRequest(leaderIndexName), RequestOptions.DEFAULT)) .isEqualTo(true) }, 30, TimeUnit.SECONDS) - assertBusy ({ Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) }, 10, TimeUnit.SECONDS) - leaderIndexNameNew = createRandomIndex(leaderClient) // Verify that newly created index on leader which match the pattern are also replicated. 
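The UpdateAutoFollowPatternIT hunks that follow keep one shape: register a pattern, wait for matching leader indices to show up on the follower, and always drop the pattern in finally. In outline, it looks like the sketch below, built only from the helper calls visible in this diff; connectionAlias, indexPatternName and indexPattern are fields of this test class.

    followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern)
    try {
        // Existing leader indices matching the pattern are picked up...
        assertBusy({
            Assertions.assertThat(followerClient.indices()
                .exists(GetIndexRequest(leaderIndexName), RequestOptions.DEFAULT))
                .isEqualTo(true)
        }, 30, TimeUnit.SECONDS)
        // ...and exactly one auto-follow background task should be running.
        assertBusy({
            Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1)
        }, 10, TimeUnit.SECONDS)
    } finally {
        // Only the pattern is removed here; the per-test stopReplication cleanup is dropped by this patch.
        followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName)
    }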
assertBusy ({ @@ -102,8 +98,6 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { }, 60, TimeUnit.SECONDS) } finally { followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName) - followerClient.stopReplication(leaderIndexName, false) - followerClient.stopReplication(leaderIndexNameNew) } } @@ -112,20 +106,16 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { val leaderClient = getClientForCluster(LEADER) var leaderIndexNameNew = "" createConnectionBetweenClusters(FOLLOWER, LEADER, connectionAlias) - try { // Set poll duration to 30sec from 60sec (default) val settings = Settings.builder().put(ReplicationPlugin.REPLICATION_AUTOFOLLOW_REMOTE_INDICES_POLL_INTERVAL.key, TimeValue.timeValueSeconds(30)) val clusterUpdateSetttingsReq = ClusterUpdateSettingsRequest().persistentSettings(settings) val clusterUpdateResponse = followerClient.cluster().putSettings(clusterUpdateSetttingsReq, RequestOptions.DEFAULT) - var lastExecutionTime = 0L var stats = followerClient.AutoFollowStats() - Assert.assertTrue(clusterUpdateResponse.isAcknowledged) followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern) - leaderIndexNameNew = createRandomIndex(leaderClient) // Verify that newly created index on leader which match the pattern are also replicated. assertBusy({ @@ -140,9 +130,7 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { lastExecutionTime = key["last_execution_time"]!! as Long } } - }, 30, TimeUnit.SECONDS) - assertBusy({ var af_stats = stats.get("autofollow_stats")!! as ArrayList> for (key in af_stats) { @@ -151,11 +139,8 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { } } }, 40, TimeUnit.SECONDS) - - } finally { followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName) - followerClient.stopReplication(leaderIndexNameNew) } } @@ -165,14 +150,11 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { val leaderClient = getClientForCluster(LEADER) val leaderIndexName = createRandomIndex(leaderClient) createConnectionBetweenClusters(FOLLOWER, LEADER, connectionAlias) - try { val settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 3) .build() - followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern, settings) - // Verify that existing index matching the pattern are replicated. 
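One of the hunks above also covers patterns that carry follower-side setting overrides. Roughly, the intent is the following sketch: the four-argument updateAutoFollowPattern call and the GetSettingsRequest usage are taken from this diff, while the expected value "3" is inferred from the override set in this test, and the auto-followed copy keeps the leader's index name.

    val overrides = Settings.builder()
        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 3)
        .build()
    followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern, overrides)

    val getSettingsRequest = GetSettingsRequest()
    getSettingsRequest.indices(leaderIndexName)      // auto-followed index keeps the leader's name
    getSettingsRequest.includeDefaults(true)
    assertBusy({
        // The override supplied with the pattern should win on the follower copy.
        Assert.assertEquals(
            "3",
            followerClient.indices()
                .getSettings(getSettingsRequest, RequestOptions.DEFAULT)
                .indexToSettings[leaderIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS])
    }, 15, TimeUnit.SECONDS)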
assertBusy ({ Assertions.assertThat(followerClient.indices() @@ -180,8 +162,6 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { .isEqualTo(true) }, 30, TimeUnit.SECONDS) Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) - - val getSettingsRequest = GetSettingsRequest() getSettingsRequest.indices(leaderIndexName) getSettingsRequest.includeDefaults(true) @@ -196,7 +176,6 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { }, 15, TimeUnit.SECONDS) } finally { followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName) - followerClient.stopReplication(leaderIndexName) } } @@ -204,27 +183,22 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { val indexPatternName2 = "test_pattern2" val indexPattern2 = "lead_index*" val leaderIndexName2 = "lead_index1" - val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) val leaderIndexName = createRandomIndex(leaderClient) leaderClient.indices().create(CreateIndexRequest(leaderIndexName2), RequestOptions.DEFAULT) createConnectionBetweenClusters(FOLLOWER, LEADER, connectionAlias) - try { followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern) followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName2, indexPattern2) - assertBusy ({ Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(2) }, 30, TimeUnit.SECONDS) - // Verify that existing index matching the pattern are replicated. assertBusy ({ Assertions.assertThat(followerClient.indices() .exists(GetIndexRequest(leaderIndexName2), RequestOptions.DEFAULT)) .isEqualTo(true) - var stats = followerClient.AutoFollowStats() Assertions.assertThat(stats.size).isEqualTo(5) assert(stats["num_success_start_replication"]!! 
as Int == 2) @@ -238,9 +212,7 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName) followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName2) followerClient.waitForShardTaskStart(leaderIndexName) - followerClient.stopReplication(leaderIndexName) followerClient.waitForShardTaskStart(leaderIndexName2) - followerClient.stopReplication(leaderIndexName2) } } @@ -249,35 +221,26 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { val leaderClient = getClientForCluster(LEADER) val leaderIndexName = createRandomIndex(leaderClient) createConnectionBetweenClusters(FOLLOWER, LEADER, connectionAlias) - + followerClient.startReplication(StartReplicationRequest(connectionAlias, leaderIndexName, leaderIndexName), + TimeValue.timeValueSeconds(10),true, waitForRestore = true) + assertBusy({ + Assertions.assertThat(followerClient.indices() + .exists(GetIndexRequest(leaderIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) + }, 30, TimeUnit.SECONDS) + // Assert that there is no auto follow task & one index replication task + Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(0) + Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(1) try { - followerClient.startReplication(StartReplicationRequest(connectionAlias, leaderIndexName, leaderIndexName), - TimeValue.timeValueSeconds(10),true, waitForRestore = true) - + followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern) assertBusy({ - Assertions.assertThat(followerClient.indices() - .exists(GetIndexRequest(leaderIndexName), RequestOptions.DEFAULT)) - .isEqualTo(true) - }, 30, TimeUnit.SECONDS) - - // Assert that there is no auto follow task & one index replication task - Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(0) - Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(1) - - try { - followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern) - - assertBusy({ - // Assert that there is still only one index replication task - Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) - Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(1) - followerClient.waitForShardTaskStart(leaderIndexName, waitForShardTask) - },30, TimeUnit.SECONDS) - } finally { - followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName) - } + // Assert that there is still only one index replication task + Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) + Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(1) + followerClient.waitForShardTaskStart(leaderIndexName, waitForShardTask) + },30, TimeUnit.SECONDS) } finally { - followerClient.stopReplication(leaderIndexName) + followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName) } } @@ -293,7 +256,6 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { fun `test auto follow should fail on pattern name validation failure`() { val followerClient = getClientForCluster(FOLLOWER) createConnectionBetweenClusters(FOLLOWER, LEADER, connectionAlias) - assertPatternNameValidation(followerClient, "testPattern", "Value testPattern must be lowercase") assertPatternNameValidation(followerClient, "testPattern*", @@ -306,10 +268,8 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { "Value . must not be '.' or '..'") assertPatternNameValidation(followerClient, "..", "Value .. 
must not be '.' or '..'") - assertPatternNameValidation(followerClient, "_leader", "Value _leader must not start with '_' or '-' or '+'") - assertPatternNameValidation(followerClient, "-leader", "Value -leader must not start with '_' or '-' or '+'") assertPatternNameValidation(followerClient, "+leader", @@ -332,9 +292,7 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) createConnectionBetweenClusters(FOLLOWER, LEADER, connectionAlias) - val leaderIndexName = createRandomIndex(leaderClient) - try { followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern) @@ -344,52 +302,42 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { .exists(GetIndexRequest(leaderIndexName), RequestOptions.DEFAULT)) .isEqualTo(true) } - Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(1) followerClient.waitForShardTaskStart(leaderIndexName, waitForShardTask) } finally { followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName) } - // Verify that auto follow tasks is stopped but the shard replication task remains. assertBusy ({ Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(0) }, 30, TimeUnit.SECONDS) - Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(1) - followerClient.stopReplication(leaderIndexName) } fun `test autofollow task with start replication block`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) createConnectionBetweenClusters(FOLLOWER, LEADER, connectionAlias) - val leaderIndexName = createRandomIndex(leaderClient) try { - // Add replication start block followerClient.updateReplicationStartBlockSetting(true) followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern) sleep(30000) // Default poll for auto follow in worst case - // verify both index replication tasks and autofollow tasks // Replication shouldn't have been started - 0 tasks // Autofollow task should still be up - 1 task Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(0) Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) - // Remove replication start block followerClient.updateReplicationStartBlockSetting(false) sleep(45000) // poll for auto follow in worst case - // Index should be replicated and autofollow task should be present Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(1) Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) } finally { followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName) - followerClient.stopReplication(leaderIndexName) } } @@ -397,38 +345,29 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) createConnectionBetweenClusters(FOLLOWER, LEADER, connectionAlias) - // create two leader indices and test autofollow to trigger to trigger jobs based on setting val leaderIndexName1 = createRandomIndex(leaderClient) val leaderIndexName2 = createRandomIndex(leaderClient) - followerClient.updateAutoFollowConcurrentStartReplicationJobSetting(2) try { followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern) - // Verify that existing index matching the pattern are replicated. 
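The next hunk checks that deleting a pattern stops only the auto-follow job, while replication already in flight keeps running. Condensed, and sketched from the task-listing helpers used in this diff:

    followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern)
    // ... a matching leader index gets replicated, then the pattern is removed ...
    followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName)

    // The auto-follow task winds down, but the per-index replication task survives.
    assertBusy({
        Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(0)
    }, 30, TimeUnit.SECONDS)
    Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(1)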
assertBusy { Assertions.assertThat(followerClient.indices() .exists(GetIndexRequest(leaderIndexName1), RequestOptions.DEFAULT)) .isEqualTo(true) } - assertBusy { Assertions.assertThat(followerClient.indices() .exists(GetIndexRequest(leaderIndexName2), RequestOptions.DEFAULT)) .isEqualTo(true) } - sleep(30000) // Default poll for auto follow in worst case - Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) - } finally { // Reset default autofollow setting followerClient.updateAutoFollowConcurrentStartReplicationJobSetting(null) followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName) - followerClient.stopReplication(leaderIndexName1) - followerClient.stopReplication(leaderIndexName2) } } @@ -436,37 +375,28 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) createConnectionBetweenClusters(FOLLOWER, LEADER, connectionAlias) - // create two leader indices and test autofollow to trigger to trigger jobs based on setting val leaderIndexName1 = createRandomIndex(leaderClient) val leaderIndexName2 = createRandomIndex(leaderClient) - followerClient.updateAutoFollowConcurrentStartReplicationJobSetting(1) try { followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern) - // Verify that existing index matching the pattern are replicated. assertBusy { // check that the index replication task is created for only index Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(1) } - sleep(30000) // Default poll for auto follow in worst case - assertBusy { // check that the index replication task is created for only index Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(2) } - sleep(30000) // Default poll for auto follow in worst case Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) - } finally { // Reset default autofollow setting followerClient.updateAutoFollowConcurrentStartReplicationJobSetting(null) followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName) - followerClient.stopReplication(leaderIndexName1) - followerClient.stopReplication(leaderIndexName2) } } diff --git a/src/test/kotlin/org/opensearch/replication/task/TaskCancellationIT.kt b/src/test/kotlin/org/opensearch/replication/task/TaskCancellationIT.kt index 586290dc..5bbfef7e 100644 --- a/src/test/kotlin/org/opensearch/replication/task/TaskCancellationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/task/TaskCancellationIT.kt @@ -45,68 +45,52 @@ class TaskCancellationIT : MultiClusterRestTestCase() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) val primaryShards = 3 - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create( CreateIndexRequest(leaderIndexName).settings(Settings.builder().put("index.number_of_shards", primaryShards).build()), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { - followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName)) - // Wait for Shard tasks to come up. - var tasks = Collections.emptyList() - assertBusy { - tasks = followerClient.getShardReplicationTasks(followerIndexName) - Assert.assertEquals(tasks.size, primaryShards) - } - - // Cancel one shard task - val cancelTasksRequest = CancelTasksRequest.Builder().withTaskId(TaskId(tasks[0])). 
- withWaitForCompletion(true).build() - followerClient.tasks().cancel(cancelTasksRequest, RequestOptions.DEFAULT) - - // Verify that replication is continuing and the shards tasks are up and running - assertBusy { - Assert.assertEquals(followerClient.getShardReplicationTasks(followerIndexName).size, primaryShards) - assertThat(followerClient.getIndexReplicationTask(followerIndexName).isNotBlank()).isTrue() - `validate status due shard task cancellation`(followerClient.replicationStatus(followerIndexName)) - } - } finally { - followerClient.stopReplication(followerIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName)) + // Wait for Shard tasks to come up. + var tasks = Collections.emptyList() + assertBusy { + tasks = followerClient.getShardReplicationTasks(followerIndexName) + Assert.assertEquals(tasks.size, primaryShards) + } + // Cancel one shard task + val cancelTasksRequest = CancelTasksRequest.Builder().withTaskId(TaskId(tasks[0])). + withWaitForCompletion(true).build() + followerClient.tasks().cancel(cancelTasksRequest, RequestOptions.DEFAULT) + // Verify that replication is continuing and the shards tasks are up and running + assertBusy { + Assert.assertEquals(followerClient.getShardReplicationTasks(followerIndexName).size, primaryShards) + assertThat(followerClient.getIndexReplicationTask(followerIndexName).isNotBlank()).isTrue() + `validate status due shard task cancellation`(followerClient.replicationStatus(followerIndexName)) } } fun `test user triggering cancel on an index task`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - createConnectionBetweenClusters(FOLLOWER, LEADER) - val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - try { followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName)) // Wait for Shard tasks to come up. assertBusy { assertThat(followerClient.getShardReplicationTasks(followerIndexName).isEmpty()).isEqualTo(false) } - // Cancel the index replication task var task = followerClient.getIndexReplicationTask(followerIndexName) assertThat(task.isNullOrBlank()).isFalse() val cancelTasksRequest = CancelTasksRequest.Builder().withTaskId(TaskId(task)). withWaitForCompletion(true).build() followerClient.tasks().cancel(cancelTasksRequest, RequestOptions.DEFAULT) - // Verify that replication has paused. 
assertBusy { assertThat(followerClient.getShardReplicationTasks(followerIndexName).isEmpty()).isTrue() assertThat(followerClient.getIndexReplicationTask(followerIndexName).isNullOrBlank()).isTrue() `validate status due index task cancellation`(followerClient.replicationStatus(followerIndexName)) } - } finally { - followerClient.stopReplication(followerIndexName) - } } } diff --git a/src/test/kotlin/org/opensearch/replication/task/shard/TransportReplayChangesActionIT.kt b/src/test/kotlin/org/opensearch/replication/task/shard/TransportReplayChangesActionIT.kt index 8df58e23..58a56112 100644 --- a/src/test/kotlin/org/opensearch/replication/task/shard/TransportReplayChangesActionIT.kt +++ b/src/test/kotlin/org/opensearch/replication/task/shard/TransportReplayChangesActionIT.kt @@ -36,60 +36,48 @@ class TransportReplayChangesActionIT : MultiClusterRestTestCase() { // Create a leader/follower index val leaderIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT) val followerIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT) - - try { - val doc1 = mapOf("name" to randomAlphaOfLength(20)) - // Create Leader Index - val response = leader.index(IndexRequest(leaderIndex).id("1").source(doc1), RequestOptions.DEFAULT) - Assertions.assertThat(response.result) - .withFailMessage("Failed to create leader data").isEqualTo(DocWriteResponse.Result.CREATED) - - // Setup Mapping on leader - var putMappingRequest = PutMappingRequest(leaderIndex) - putMappingRequest.source( - "{\"dynamic\":\"strict\",\"properties\":{\"name\":{\"type\":\"text\"}}}", - XContentType.JSON - ) - leader.indices().putMapping(putMappingRequest, RequestOptions.DEFAULT) - - // Start replication - follower.startReplication( - StartReplicationRequest("source", leaderIndex, followerIndex), - waitForRestore = true - ) - assertBusy { - val getResponse = follower.get(GetRequest(followerIndex, "1"), RequestOptions.DEFAULT) - Assertions.assertThat(getResponse.isExists).isTrue() - Assertions.assertThat(getResponse.sourceAsMap).isEqualTo(doc1) - } - - // Add a new field in mapping. - putMappingRequest = PutMappingRequest(leaderIndex) - putMappingRequest.source( - "{\"dynamic\":\"strict\",\"properties\":{\"name\":{\"type\":\"text\"},\"place\":{\"type\":\"text\"}}}", - XContentType.JSON - ) - leader.indices().putMapping(putMappingRequest, RequestOptions.DEFAULT) - - // Ingest a doc on the leader - val doc2 = mapOf("name" to randomAlphaOfLength(5), "place" to randomAlphaOfLength(5)) - leader.index(IndexRequest(leaderIndex).id("2").source(doc2), RequestOptions.DEFAULT) - - // Verify that replication is working as expected. 
- assertBusy ({ - Assert.assertEquals(leader.count(CountRequest(leaderIndex), RequestOptions.DEFAULT).toString(), - follower.count(CountRequest(followerIndex), RequestOptions.DEFAULT).toString()) - `validate status syncing response`(follower.replicationStatus(followerIndex)) - val getResponse = follower.get(GetRequest(followerIndex, "2"), RequestOptions.DEFAULT) - Assertions.assertThat(getResponse.isExists).isTrue() - Assertions.assertThat(getResponse.sourceAsMap).isEqualTo(doc2) - }, - 30, TimeUnit.SECONDS - ) - - } finally { - follower.stopReplication(followerIndex) + val doc1 = mapOf("name" to randomAlphaOfLength(20)) + // Create Leader Index + val response = leader.index(IndexRequest(leaderIndex).id("1").source(doc1), RequestOptions.DEFAULT) + Assertions.assertThat(response.result) + .withFailMessage("Failed to create leader data").isEqualTo(DocWriteResponse.Result.CREATED) + // Setup Mapping on leader + var putMappingRequest = PutMappingRequest(leaderIndex) + putMappingRequest.source( + "{\"dynamic\":\"strict\",\"properties\":{\"name\":{\"type\":\"text\"}}}", + XContentType.JSON + ) + leader.indices().putMapping(putMappingRequest, RequestOptions.DEFAULT) + // Start replication + follower.startReplication( + StartReplicationRequest("source", leaderIndex, followerIndex), + waitForRestore = true + ) + assertBusy { + val getResponse = follower.get(GetRequest(followerIndex, "1"), RequestOptions.DEFAULT) + Assertions.assertThat(getResponse.isExists).isTrue() + Assertions.assertThat(getResponse.sourceAsMap).isEqualTo(doc1) } - + // Add a new field in mapping. + putMappingRequest = PutMappingRequest(leaderIndex) + putMappingRequest.source( + "{\"dynamic\":\"strict\",\"properties\":{\"name\":{\"type\":\"text\"},\"place\":{\"type\":\"text\"}}}", + XContentType.JSON + ) + leader.indices().putMapping(putMappingRequest, RequestOptions.DEFAULT) + // Ingest a doc on the leader + val doc2 = mapOf("name" to randomAlphaOfLength(5), "place" to randomAlphaOfLength(5)) + leader.index(IndexRequest(leaderIndex).id("2").source(doc2), RequestOptions.DEFAULT) + // Verify that replication is working as expected. 
+ assertBusy ({ + Assert.assertEquals(leader.count(CountRequest(leaderIndex), RequestOptions.DEFAULT).toString(), + follower.count(CountRequest(followerIndex), RequestOptions.DEFAULT).toString()) + `validate status syncing response`(follower.replicationStatus(followerIndex)) + val getResponse = follower.get(GetRequest(followerIndex, "2"), RequestOptions.DEFAULT) + Assertions.assertThat(getResponse.isExists).isTrue() + Assertions.assertThat(getResponse.sourceAsMap).isEqualTo(doc2) + }, + 30, TimeUnit.SECONDS + ) } } \ No newline at end of file From e3d0868107ccaed8ff7488483d6b435ae62e0ec7 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 9 Dec 2022 12:27:57 +0530 Subject: [PATCH 23/84] Increment version to 2.5.0-SNAPSHOT (#621) Signed-off-by: opensearch-ci-bot Signed-off-by: opensearch-ci-bot Co-authored-by: opensearch-ci-bot --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 6b1428e5..776968e6 100644 --- a/build.gradle +++ b/build.gradle @@ -36,7 +36,7 @@ import org.opensearch.gradle.test.RestIntegTestTask buildscript { ext { isSnapshot = "true" == System.getProperty("build.snapshot", "true") - opensearch_version = System.getProperty("opensearch.version", "2.4.0-SNAPSHOT") + opensearch_version = System.getProperty("opensearch.version", "2.5.0-SNAPSHOT") buildVersionQualifier = System.getProperty("build.version_qualifier", "") // e.g. 2.0.0-rc1-SNAPSHOT -> 2.0.0.0-rc1-SNAPSHOT version_tokens = opensearch_version.tokenize('-') From 93fe3fc608ad8daca881a6167f9135ac46bd7793 Mon Sep 17 00:00:00 2001 From: sricharanvuppu <113983630+sricharanvuppu@users.noreply.github.com> Date: Tue, 27 Dec 2022 14:06:07 +0530 Subject: [PATCH 24/84] stopping replication before clean up of indices (#635) Signed-off-by: sricharanvuppu --- .../org/opensearch/replication/BasicReplicationIT.kt | 1 - .../replication/MultiClusterRestTestCase.kt | 12 +++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/test/kotlin/org/opensearch/replication/BasicReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/BasicReplicationIT.kt index 304f88f6..ff3bfa83 100644 --- a/src/test/kotlin/org/opensearch/replication/BasicReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/BasicReplicationIT.kt @@ -49,7 +49,6 @@ class BasicReplicationIT : MultiClusterRestTestCase() { // Create an empty index on the leader and trigger replication on it val createIndexResponse = leader.indices().create(CreateIndexRequest(leaderIndex), RequestOptions.DEFAULT) assertThat(createIndexResponse.isAcknowledged).isTrue() - follower.startReplication(StartReplicationRequest("source", leaderIndex, followerIndex), waitForRestore=true) val source = mapOf("name" to randomAlphaOfLength(20), "age" to randomInt().toString()) var response = leader.index(IndexRequest(leaderIndex).id("1").source(source), RequestOptions.DEFAULT) diff --git a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt index 36fbd3fd..982cc6dc 100644 --- a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt +++ b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt @@ -57,6 +57,7 @@ import org.junit.After import org.junit.AfterClass import org.junit.Before import org.junit.BeforeClass +import org.opensearch.index.mapper.ObjectMapper import java.nio.file.Files import 
java.security.KeyManagementException import java.security.KeyStore @@ -420,9 +421,7 @@ abstract class MultiClusterRestTestCase : OpenSearchTestCase() { testCluster.lowLevelClient.performRequest(request) } } - - protected fun wipeIndicesFromCluster(testCluster: TestCluster) { - + private fun stopAllReplicationJobs(testCluster: TestCluster) { val indicesResponse = testCluster.lowLevelClient.performRequest((Request("GET","/_cat/indices/*,-.*?format=json&pretty"))) val indicesResponseEntity = EntityUtils.toString(indicesResponse.entity) var parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, indicesResponseEntity) @@ -433,18 +432,21 @@ abstract class MultiClusterRestTestCase : OpenSearchTestCase() { key to value } val ind = map.get("index") - try { + try { val stopRequest = Request("POST","/_plugins/_replication/" + ind.toString() + "/_stop") stopRequest.setJsonEntity("{}") stopRequest.setOptions(RequestOptions.DEFAULT) val response=testCluster.lowLevelClient.performRequest(stopRequest) - } + } catch (e:ResponseException){ if(e.response.statusLine.statusCode!=400) { throw e } } } + } + protected fun wipeIndicesFromCluster(testCluster: TestCluster) { + stopAllReplicationJobs(testCluster) try { val deleteRequest = Request("DELETE", "*,-.*") // All except system indices val response = testCluster.lowLevelClient.performRequest(deleteRequest) From 475d8da7456fca22c0f022027b8802a14ddb0150 Mon Sep 17 00:00:00 2001 From: sricharanvuppu <113983630+sricharanvuppu@users.noreply.github.com> Date: Tue, 10 Jan 2023 14:50:59 +0530 Subject: [PATCH 25/84] Updating multi-field mapping at follower (#671) (#680) * Updating multi-field mapping at follower Signed-off-by: sricharanvuppu Signed-off-by: sricharanvuppu (cherry picked from commit 4d12d6cfb17e2dbac02fdee815aff9bc95062b0a) --- .../task/index/IndexReplicationTask.kt | 34 +++++++++ .../integ/rest/StartReplicationIT.kt | 76 +++++++++++++++++++ 2 files changed, 110 insertions(+) diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt index 906312ac..b4662aa3 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt @@ -55,6 +55,9 @@ import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest import org.opensearch.action.admin.indices.delete.DeleteIndexRequest import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest +import org.opensearch.action.admin.indices.mapping.get.GetMappingsRequest +import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest +import org.opensearch.action.support.IndicesOptions import org.opensearch.client.Client import org.opensearch.client.Requests import org.opensearch.cluster.ClusterChangedEvent @@ -75,6 +78,7 @@ import org.opensearch.common.unit.ByteSizeValue import org.opensearch.common.xcontent.ToXContent import org.opensearch.common.xcontent.ToXContentObject import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.common.xcontent.XContentType import org.opensearch.index.Index import org.opensearch.index.IndexService import org.opensearch.index.IndexSettings @@ -88,6 +92,7 @@ import org.opensearch.persistent.PersistentTasksCustomMetadata.PersistentTask import 
org.opensearch.persistent.PersistentTasksNodeService import org.opensearch.persistent.PersistentTasksService import org.opensearch.replication.ReplicationException +import org.opensearch.replication.MappingNotAvailableException import org.opensearch.replication.ReplicationPlugin.Companion.REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING import org.opensearch.rest.RestStatus import org.opensearch.tasks.TaskId @@ -100,6 +105,7 @@ import kotlin.coroutines.resume import kotlin.coroutines.resumeWithException import kotlin.coroutines.suspendCoroutine import kotlin.streams.toList +import org.opensearch.cluster.DiffableUtils open class IndexReplicationTask(id: Long, type: String, action: String, description: String, parentTask: TaskId, @@ -395,6 +401,19 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript } } + private suspend fun UpdateFollowereMapping(followerIndex: String,mappingSource: String) { + + val options = IndicesOptions.strictSingleIndexNoExpandForbidClosed() + if (null == mappingSource) { + throw MappingNotAvailableException("MappingSource is not available") + } + val putMappingRequest = PutMappingRequest().indices(followerIndex).indicesOptions(options) + .source(mappingSource, XContentType.JSON) + val updateMappingRequest = UpdateMetadataRequest(followerIndex, UpdateMetadataRequest.Type.MAPPING, putMappingRequest) + client.suspendExecute(UpdateMetadataAction.INSTANCE, updateMappingRequest, injectSecurityContext = true) + log.debug("Mappings synced for $followerIndex") + } + private suspend fun pollForMetadata(scope: CoroutineScope) { while (scope.isActive) { try { @@ -535,6 +554,21 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript } else { metadataUpdate = null } + val options = IndicesOptions.strictSingleIndexNoExpandForbidClosed() + var gmr = GetMappingsRequest().indices(this.leaderIndex.name).indicesOptions(options) + var mappingResponse = remoteClient.suspending(remoteClient.admin().indices()::getMappings, injectSecurityContext = true)(gmr) + var leaderMappingSource = mappingResponse.mappings.get(this.leaderIndex.name).source().toString() + val leaderProperties = mappingResponse.mappings().get(this.leaderIndex.name).sourceAsMap().toMap().get("properties") as Map + gmr = GetMappingsRequest().indices(this.followerIndexName).indicesOptions(options) + mappingResponse = client.suspending(client.admin().indices()::getMappings, injectSecurityContext = true)(gmr) + val followerProperties = mappingResponse.mappings().get(this.followerIndexName).sourceAsMap().toMap().get("properties") as Map + for(iter in followerProperties) { + if(leaderProperties.containsKey(iter.key) && leaderProperties.getValue(iter.key).toString()!=(iter.value).toString()){ + log.info("Updating Multi-field Mapping at Follower") + UpdateFollowereMapping(this.followerIndexName,leaderMappingSource) + break; + } + } } catch (e: Exception) { log.error("Error in getting the required metadata ${e.stackTraceToString()}") diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt index 6946303c..01f280e0 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt @@ -12,6 +12,7 @@ package org.opensearch.replication.integ.rest +import kotlinx.coroutines.delay import org.opensearch.replication.IndexUtil import 
org.opensearch.replication.MultiClusterAnnotations import org.opensearch.replication.MultiClusterRestTestCase @@ -77,6 +78,7 @@ import java.util.* import java.util.concurrent.TimeUnit + @MultiClusterAnnotations.ClusterConfigurations( MultiClusterAnnotations.ClusterConfiguration(clusterName = LEADER), MultiClusterAnnotations.ClusterConfiguration(clusterName = FOLLOWER) @@ -1009,6 +1011,80 @@ class StartReplicationIT: MultiClusterRestTestCase() { .hasMessageContaining("Primary shards in the Index[source:${leaderIndexName}] are not active") } + fun `test that follower index mapping updates when leader index gets multi-field mapping`() { + val followerClient = getClientForCluster(FOLLOWER) + val leaderClient = getClientForCluster(LEADER) + createConnectionBetweenClusters(FOLLOWER, LEADER) + val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) + assertThat(createIndexResponse.isAcknowledged).isTrue() + var putMappingRequest = PutMappingRequest(leaderIndexName) + putMappingRequest.source("{\"properties\":{\"field1\":{\"type\":\"text\"}}}", XContentType.JSON) + leaderClient.indices().putMapping(putMappingRequest, RequestOptions.DEFAULT) + val sourceMap = mapOf("field1" to randomAlphaOfLength(5)) + leaderClient.index(IndexRequest(leaderIndexName).id("1").source(sourceMap), RequestOptions.DEFAULT) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), + waitForRestore = true) + assertBusy { + assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) + } + Assert.assertEquals( + leaderClient.indices().getMapping(GetMappingsRequest().indices(leaderIndexName), RequestOptions.DEFAULT) + .mappings()[leaderIndexName], + followerClient.indices().getMapping(GetMappingsRequest().indices(followerIndexName), RequestOptions.DEFAULT) + .mappings()[followerIndexName] + ) + putMappingRequest = PutMappingRequest(leaderIndexName) + putMappingRequest.source("{\"properties\":{\"field1\":{\"type\":\"text\",\"fields\":{\"field2\":{\"type\":\"text\",\"analyzer\":\"standard\"},\"field3\":{\"type\":\"text\",\"analyzer\":\"standard\"}}}}}",XContentType.JSON) + leaderClient.indices().putMapping(putMappingRequest, RequestOptions.DEFAULT) + val leaderMappings = leaderClient.indices().getMapping(GetMappingsRequest().indices(leaderIndexName), RequestOptions.DEFAULT) + .mappings()[leaderIndexName] + TimeUnit.MINUTES.sleep(2) + Assert.assertEquals( + leaderMappings, + followerClient.indices().getMapping(GetMappingsRequest().indices(followerIndexName), RequestOptions.DEFAULT) + .mappings()[followerIndexName] + ) + } + + fun `test that follower index mapping does not update when only new fields are added but not respective docs in leader index`() { + val followerClient = getClientForCluster(FOLLOWER) + val leaderClient = getClientForCluster(LEADER) + createConnectionBetweenClusters(FOLLOWER, LEADER) + val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) + assertThat(createIndexResponse.isAcknowledged).isTrue() + var putMappingRequest = PutMappingRequest(leaderIndexName) + putMappingRequest.source("{\"properties\":{\"name\":{\"type\":\"text\"}}}", XContentType.JSON) + leaderClient.indices().putMapping(putMappingRequest, RequestOptions.DEFAULT) + val sourceMap = mapOf("name" to randomAlphaOfLength(5)) + leaderClient.index(IndexRequest(leaderIndexName).id("1").source(sourceMap), 
RequestOptions.DEFAULT) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), + waitForRestore = true) + assertBusy { + assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) + } + Assert.assertEquals( + leaderClient.indices().getMapping(GetMappingsRequest().indices(leaderIndexName), RequestOptions.DEFAULT) + .mappings()[leaderIndexName], + followerClient.indices().getMapping(GetMappingsRequest().indices(followerIndexName), RequestOptions.DEFAULT) + .mappings()[followerIndexName] + ) + putMappingRequest = PutMappingRequest(leaderIndexName) + putMappingRequest.source("{\"properties\":{\"name\":{\"type\":\"text\"},\"age\":{\"type\":\"integer\"}}}",XContentType.JSON) + leaderClient.indices().putMapping(putMappingRequest, RequestOptions.DEFAULT) + val leaderMappings = leaderClient.indices().getMapping(GetMappingsRequest().indices(leaderIndexName), RequestOptions.DEFAULT) + .mappings()[leaderIndexName] + TimeUnit.MINUTES.sleep(2) + Assert.assertNotEquals( + leaderMappings, + followerClient.indices().getMapping(GetMappingsRequest().indices(followerIndexName), RequestOptions.DEFAULT) + .mappings()[followerIndexName] + ) + } + private fun excludeAllClusterNodes(clusterName: String) { val transientSettingsRequest = Request("PUT", "_cluster/settings") // Get IPs directly from the cluster to handle all cases - single node cluster, multi node cluster and remote test cluster. From bc3890ec0d707baa071cbcbafc8d894360acfd44 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 7 Feb 2023 13:46:20 +0530 Subject: [PATCH 26/84] Increment version to 2.6.0-SNAPSHOT (#676) Signed-off-by: opensearch-ci-bot Co-authored-by: opensearch-ci-bot --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 776968e6..1ec6f1a9 100644 --- a/build.gradle +++ b/build.gradle @@ -36,7 +36,7 @@ import org.opensearch.gradle.test.RestIntegTestTask buildscript { ext { isSnapshot = "true" == System.getProperty("build.snapshot", "true") - opensearch_version = System.getProperty("opensearch.version", "2.5.0-SNAPSHOT") + opensearch_version = System.getProperty("opensearch.version", "2.6.0-SNAPSHOT") buildVersionQualifier = System.getProperty("build.version_qualifier", "") // e.g. 
2.0.0-rc1-SNAPSHOT -> 2.0.0.0-rc1-SNAPSHOT
         version_tokens = opensearch_version.tokenize('-')

From 0aa83028e996f7407463f9a25e2aa29cf4aa3902 Mon Sep 17 00:00:00 2001
From: Ankit Kala
Date: Tue, 21 Feb 2023 13:24:59 +0530
Subject: [PATCH 27/84] Adding 2.6.0 release notes (#716)

Signed-off-by: Ankit Kala
---
 ...ch-cross-cluster-replication.release-notes-2.6.0.0.md | 9 +++++++++
 1 file changed, 9 insertions(+)
 create mode 100644 release-notes/opensearch-cross-cluster-replication.release-notes-2.6.0.0.md

diff --git a/release-notes/opensearch-cross-cluster-replication.release-notes-2.6.0.0.md b/release-notes/opensearch-cross-cluster-replication.release-notes-2.6.0.0.md
new file mode 100644
index 00000000..e9868167
--- /dev/null
+++ b/release-notes/opensearch-cross-cluster-replication.release-notes-2.6.0.0.md
@@ -0,0 +1,9 @@
+## Version 2.6.0.0 Release Notes
+
+Compatible with OpenSearch 2.6.0
+
+### Enhancements
+* Stopping replication before clean up of indices ([635](https://github.com/opensearch-project/cross-cluster-replication/pull/635))
+
+### Bug Fixes
+* Updating multi-field mapping at follower ([686](https://github.com/opensearch-project/cross-cluster-replication/pull/686))
\ No newline at end of file

From f11c87652f229b8dac9f963cd5b0aef99834fdfa Mon Sep 17 00:00:00 2001
From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com>
Date: Tue, 21 Feb 2023 13:25:13 +0530
Subject: [PATCH 28/84] Add github workflow for auto release (#712) (#714)

Signed-off-by: Ankit Kala
(cherry picked from commit 1a470d0b2d021ead0bcf6b6493780d80f4c234d7)

Co-authored-by: Ankit Kala
---
 .github/workflows/auto-release.yml | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)
 create mode 100644 .github/workflows/auto-release.yml

diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml
new file mode 100644
index 00000000..24eeb273
--- /dev/null
+++ b/.github/workflows/auto-release.yml
@@ -0,0 +1,29 @@
+name: Releases
+
+on:
+  push:
+    tags:
+      - '*'
+
+jobs:
+
+  build:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+    steps:
+      - name: GitHub App token
+        id: github_app_token
+        uses: tibdex/github-app-token@v1.5.0
+        with:
+          app_id: ${{ secrets.APP_ID }}
+          private_key: ${{ secrets.APP_PRIVATE_KEY }}
+          installation_id: 22958780
+      - name: Get tag
+        id: tag
+        uses: dawidd6/action-get-tag@v1
+      - uses: actions/checkout@v2
+      - uses: ncipollo/release-action@v1
+        with:
+          github_token: ${{ steps.github_app_token.outputs.token }}
+          bodyFile: release-notes/opensearch.release-notes-${{steps.tag.outputs.tag}}.md
\ No newline at end of file

From e4f2143f294c32936f9d3652f3db4a7eaefd274a Mon Sep 17 00:00:00 2001
From: Ankit Kala
Date: Tue, 21 Feb 2023 10:27:31 +0530
Subject: [PATCH 29/84] Add github action to publish snapshots to maven

Signed-off-by: Ankit Kala
---
 .github/workflows/maven-publish.yml | 34 +++++++++++++++++++++++++++++
 build.gradle | 10 +++++++++
 2 files changed, 44 insertions(+)
 create mode 100644 .github/workflows/maven-publish.yml

diff --git a/.github/workflows/maven-publish.yml b/.github/workflows/maven-publish.yml
new file mode 100644
index 00000000..ba482727
--- /dev/null
+++ b/.github/workflows/maven-publish.yml
@@ -0,0 +1,34 @@
+name: Publish snapshots to maven
+
+on:
+  workflow_dispatch:
+  push:
+    branches: [
+      main
+      1.*
+      2.*
+    ]
+
+jobs:
+  build-and-publish-snapshots:
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write
+      contents: write
+    steps:
+      - uses: actions/setup-java@v3
+        with:
+          distribution: temurin # Temurin is a distribution of adoptium
+          java-version: 17
+      - uses: actions/checkout@v3
+      - uses: aws-actions/configure-aws-credentials@v1
+        with:
+          role-to-assume: ${{ secrets.PUBLISH_SNAPSHOTS_ROLE }}
+          aws-region: us-east-1
+      - name: publish snapshots to maven
+        run: |
+          export SONATYPE_USERNAME=$(aws secretsmanager get-secret-value --secret-id maven-snapshots-username --query SecretString --output text)
+          export SONATYPE_PASSWORD=$(aws secretsmanager get-secret-value --secret-id maven-snapshots-password --query SecretString --output text)
+          echo "::add-mask::$SONATYPE_USERNAME"
+          echo "::add-mask::$SONATYPE_PASSWORD"
+          ./gradlew publishPluginZipPublicationToSnapshotsRepository
\ No newline at end of file
diff --git a/build.gradle b/build.gradle
index 1ec6f1a9..b0695df3 100644
--- a/build.gradle
+++ b/build.gradle
@@ -914,6 +914,16 @@ publishing {
             }
         }
     }
+    repositories {
+        maven {
+            name = "Snapshots"
+            url = "https://aws.oss.sonatype.org/content/repositories/snapshots"
+            credentials {
+                username "$System.env.SONATYPE_USERNAME"
+                password "$System.env.SONATYPE_PASSWORD"
+            }
+        }
+    }
 }

 // updateVersion: Task to auto increment to the next development iteration

From a8a31d2d9c7089465ad3f06b9491d5e53eb7185f Mon Sep 17 00:00:00 2001
From: Ankit Kala
Date: Fri, 3 Feb 2023 16:46:17 +0530
Subject: [PATCH 30/84] Build Fix: Update Strings.toString to pass the JSON XContentType (#699)

Signed-off-by: Ankit Kala
---
 .../replication/task/index/IndexReplicationParams.kt | 3 ++-
 .../replication/task/shard/ShardReplicationParams.kt | 5 +++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt
index 0452bc24..0c4f4a53 100644
--- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt
+++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt
@@ -20,6 +20,7 @@ import org.opensearch.common.xcontent.ObjectParser
 import org.opensearch.common.xcontent.ToXContent
 import org.opensearch.common.xcontent.XContentBuilder
 import org.opensearch.common.xcontent.XContentParser
+import org.opensearch.common.xcontent.XContentType
 import org.opensearch.index.Index
 import org.opensearch.persistent.PersistentTaskParams
 import java.io.IOException
@@ -80,6 +81,6 @@ class IndexReplicationParams : PersistentTaskParams {
     }

     override fun toString(): String {
-        return Strings.toString(this)
+        return Strings.toString(XContentType.JSON, this)
     }
 }

diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt
index 12432eae..eb5c23b1 100644
--- a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt
+++ b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt
@@ -20,6 +20,7 @@ import org.opensearch.common.xcontent.ObjectParser
 import org.opensearch.common.xcontent.ToXContent
 import org.opensearch.common.xcontent.XContentBuilder
 import org.opensearch.common.xcontent.XContentParser
+import org.opensearch.common.xcontent.XContentType
 import org.opensearch.index.shard.ShardId
 import org.opensearch.persistent.PersistentTaskParams
 import java.io.IOException
@@ -84,7 +85,7 @@ class ShardReplicationParams : PersistentTaskParams {
     }

     override fun toString(): String {
-        return Strings.toString(this)
+        return Strings.toString(XContentType.JSON,
this) } class Builder { @@ -122,4 +123,4 @@ class ShardReplicationParams : PersistentTaskParams { followerShardIdObj.id)) } } -} \ No newline at end of file +} From 56a9b2700af2cb0952cb1a83712bd7b311808383 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 28 Feb 2023 17:50:07 +0530 Subject: [PATCH 31/84] Update release notes file path in auto release workflow (#726) (#727) Signed-off-by: Ankit Kala (cherry picked from commit c3fddb6f2b42ae5659e28de5a69789edd089635c) Co-authored-by: Ankit Kala --- .github/workflows/auto-release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml index 24eeb273..5e6ac8c3 100644 --- a/.github/workflows/auto-release.yml +++ b/.github/workflows/auto-release.yml @@ -26,4 +26,4 @@ jobs: - uses: ncipollo/release-action@v1 with: github_token: ${{ steps.github_app_token.outputs.token }} - bodyFile: release-notes/opensearch.release-notes-${{steps.tag.outputs.tag}}.md \ No newline at end of file + bodyFile: release-notes/opensearch-cross-cluster-replication.release-notes-${{steps.tag.outputs.tag}}.md \ No newline at end of file From 0d6052f7512e2e956bcb27126e345c9b0c9bb0c5 Mon Sep 17 00:00:00 2001 From: Monu Singh Date: Thu, 3 Nov 2022 11:02:32 +0530 Subject: [PATCH 32/84] Merge pull request #543 from monu-aws/main Added support for integTest remote on Remote clusters Signed-off-by: Monu Singh (cherry picked from commit b8e52ee6f2b23fdc42262d1ee5bddcc92554d1ec) --- .DS_Store | Bin 0 -> 8196 bytes build.gradle | 29 +++++-- scripts/.DS_Store | Bin 0 -> 6148 bytes scripts/integtest.sh | 82 +++++++++++------- .../replication/MultiClusterRestTestCase.kt | 7 ++ .../integ/rest/ResumeReplicationIT.kt | 17 +++- .../integ/rest/StartReplicationIT.kt | 21 +++++ .../integ/rest/StopReplicationIT.kt | 5 ++ .../singleCluster/SingleClusterSanityIT.kt | 2 +- 9 files changed, 124 insertions(+), 39 deletions(-) create mode 100644 .DS_Store create mode 100644 scripts/.DS_Store diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..96c2ac8285d47f36e1cbefd8a93881cb3c2028b0 GIT binary patch literal 8196 zcmeHMQBTuQ6h4HThR&v)*<-#Kk}3lWLhpu0k}Ktu)(#&QYOgu>VP z7%Nk%=LS%KPeI4_0vl)LFN*;EYuD~sI7-Er8$XejJnneXa&X<;Jtf}Y;wup@KXEt$GEcmM;1xD3abI^ zJnGUi?NXoGXnEvhct0|{YqU;VRHp{DfKO2K`6haH&}zch0d>N1 z%qDP?o=#+c$*}X7XFweaGo$>@*iE0ks;S+KZ6Rq(!?b5&?Y<|pE2XV5Ya8lC85S#- zW<7%^R;mWYw&?l9)v{Je);#S|gufmwVa*;Q=8%u`Wr>HtPZUCrOuPlFmh@Ag7tnYe zaoZ$|8tJ(T)-Gv1OD{23j!9Et1F^Cu%~)!g%fPL)kfLTSikeE?P1tc-{F4RJ$`3r5i>w#qk;j+Ry)tS*`>_tz@Ni{{b8yOl-r-s;-%anZPb`_7~7*1mV( z2VaptC_=r?X(m2Dt-qm_QQ^?)`(EJloye>bXC?A;M?Mpp^kz~_W&V6l!wo|;xbEAX!h zh-s>lW0;>?iWPj50)qt)cgQCQh;<)3Y7jTzAJwQ$%NMagG7l_PD>nO^)=4+n!!Z|VM%m" + usage + exit 1 + fi + + data=$(python3 -c "import json; cluster=$ENDPOINT_LIST ; data_nodes=cluster; print(data_nodes[0][\"data_nodes\"][0][\"endpoint\"],':',data_nodes[0][\"data_nodes\"][0][\"port\"],':',data_nodes[0][\"data_nodes\"][0][\"transport\"],',',data_nodes[1][\"data_nodes\"][0][\"endpoint\"],':',data_nodes[1][\"data_nodes\"][0][\"port\"],':',data_nodes[1][\"data_nodes\"][0][\"transport\"])" | tr -d "[:blank:]") + + + leader=$(echo $data | cut -d ',' -f1 | cut -d ':' -f1,2 ) + follower=$(echo $data | cut -d ',' -f2 | cut -d ':' -f1,2 ) + + LTRANSPORT_PORT=$(echo $data | cut -d ',' -f1 | cut -d ':' -f1,3 ) + FTRANSPORT_PORT=$(echo $data | cut -d ',' -f2 | cut -d ':' -f1,3 ) + eval "./gradlew 
integTestRemote -Dleader.http_host=\"$leader\" -Dfollower.http_host=\"$follower\" -Dfollower.transport_host=\"$FTRANSPORT_PORT\" -Dleader.transport_host=\"$LTRANSPORT_PORT\" -Dsecurity_enabled=\"$SECURITY_ENABLED\" -Duser=\"$USERNAME\" -Dpassword=\"$PASSWORD\" --console=plain " + +else + # Single cluster + if [ -z "$TRANSPORT_PORT" ] + then + TRANSPORT_PORT="9300" + fi + ./gradlew singleClusterSanityTest -Dfollower.http_host="$BIND_ADDRESS:$BIND_PORT" -Dfollower.transport_host="$BIND_ADDRESS:$TRANSPORT_PORT" -Dsecurity_enabled=$SECURITY_ENABLED -Duser=$USERNAME -Dpassword=$PASSWORD --console=plain +fi diff --git a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt index 982cc6dc..c40af9a7 100644 --- a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt +++ b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt @@ -635,4 +635,11 @@ abstract class MultiClusterRestTestCase : OpenSearchTestCase() { updateSettingsRequest.transientSettings(Collections.singletonMap(ReplicationPlugin.REPLICATION_METADATA_SYNC_INTERVAL.key, "5s")) followerClient.cluster().putSettings(updateSettingsRequest, RequestOptions.DEFAULT) } + +// TODO Find a way to skip tests when tests are run for remote clusters + protected fun checkifIntegTestRemote(): Boolean { + val systemProperties = BootstrapInfo.getSystemProperties() + val integTestRemote = systemProperties.get("tests.integTestRemote") as String? + return integTestRemote.equals("true") + } } diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/ResumeReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/ResumeReplicationIT.kt index dfc062aa..2a3b3bae 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/ResumeReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/ResumeReplicationIT.kt @@ -42,7 +42,7 @@ import org.opensearch.common.settings.Settings import org.junit.Assert import java.nio.file.Files import java.util.concurrent.TimeUnit - +import org.opensearch.bootstrap.BootstrapInfo @MultiClusterAnnotations.ClusterConfigurations( MultiClusterAnnotations.ClusterConfiguration(clusterName = LEADER), @@ -164,6 +164,11 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { } fun `test that replication fails to resume when custom analyser is not present in follower`() { + + if(checkifIntegTestRemote()){ + return; + } + val synonyms = javaClass.getResourceAsStream("/analyzers/synonyms.txt") val config = PathUtils.get(buildDir, leaderClusterPath, "config") val synonymPath = config.resolve("synonyms.txt") @@ -196,6 +201,11 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { } fun `test that replication resumes when custom analyser is present in follower`() { + + if(checkifIntegTestRemote()){ + return; + } + val synonyms = javaClass.getResourceAsStream("/analyzers/synonyms.txt") val config = PathUtils.get(buildDir, leaderClusterPath, "config") val synonymFilename = "synonyms.txt" @@ -235,6 +245,11 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { } fun `test that replication resumes when custom analyser is overridden and present in follower`() { + + if(checkifIntegTestRemote()){ + return; + } + val synonyms = javaClass.getResourceAsStream("/analyzers/synonyms.txt") val config = PathUtils.get(buildDir, leaderClusterPath, "config") val synonymPath = config.resolve("synonyms.txt") diff --git 
a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt index 01f280e0..af2f9d09 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt @@ -76,6 +76,7 @@ import org.opensearch.replication.updateReplicationStartBlockSetting import java.nio.file.Files import java.util.* import java.util.concurrent.TimeUnit +import org.opensearch.bootstrap.BootstrapInfo @@ -583,6 +584,11 @@ class StartReplicationIT: MultiClusterRestTestCase() { } fun `test that replication fails to start when custom analyser is not present in follower`() { + + if(checkifIntegTestRemote()){ + return; + } + val synonyms = javaClass.getResourceAsStream("/analyzers/synonyms.txt") val config = PathUtils.get(buildDir, leaderClusterPath, "config") val synonymPath = config.resolve("synonyms.txt") @@ -613,6 +619,11 @@ class StartReplicationIT: MultiClusterRestTestCase() { } fun `test that replication starts successfully when custom analyser is present in follower`() { + + if(checkifIntegTestRemote()){ + return; + } + val synonyms = javaClass.getResourceAsStream("/analyzers/synonyms.txt") val leaderConfig = PathUtils.get(buildDir, leaderClusterPath, "config") val leaderSynonymPath = leaderConfig.resolve("synonyms.txt") @@ -650,6 +661,11 @@ class StartReplicationIT: MultiClusterRestTestCase() { } fun `test that replication starts successfully when custom analyser is overridden and present in follower`() { + + if(checkifIntegTestRemote()){ + return; + } + val synonyms = javaClass.getResourceAsStream("/analyzers/synonyms.txt") val leaderConfig = PathUtils.get(buildDir, leaderClusterPath, "config") val leaderSynonymPath = leaderConfig.resolve("synonyms.txt") @@ -784,6 +800,11 @@ class StartReplicationIT: MultiClusterRestTestCase() { } fun `test that snapshot on leader does not affect replication during bootstrap`() { + + if(checkifIntegTestRemote()){ + return; + } + val settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 20) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt index abfe247c..22e780ec 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt @@ -242,6 +242,11 @@ class StopReplicationIT: MultiClusterRestTestCase() { } fun `test stop replication with stale replication settings at leader cluster`() { + + if(checkifIntegTestRemote()){ + return; + } + val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) createConnectionBetweenClusters(FOLLOWER, LEADER, "source") diff --git a/src/test/kotlin/org/opensearch/replication/singleCluster/SingleClusterSanityIT.kt b/src/test/kotlin/org/opensearch/replication/singleCluster/SingleClusterSanityIT.kt index f7021ccb..9760e7c2 100644 --- a/src/test/kotlin/org/opensearch/replication/singleCluster/SingleClusterSanityIT.kt +++ b/src/test/kotlin/org/opensearch/replication/singleCluster/SingleClusterSanityIT.kt @@ -20,7 +20,7 @@ class SingleClusterSanityIT : MultiClusterRestTestCase() { companion object { private val log = LogManager.getLogger(SingleClusterSanityIT::class.java) - private const val followerClusterName = "follower" + private const val 
followerClusterName = "followCluster" private const val REPLICATION_PLUGIN_NAME = "opensearch-cross-cluster-replication" private const val SAMPLE_INDEX = "sample_test_index" From 58c6cbec1d05a41bb52b7fe3d4668ee5628c6f56 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 1 Mar 2023 10:48:11 +0530 Subject: [PATCH 33/84] Increment version to 2.7.0-SNAPSHOT (#724) Signed-off-by: opensearch-ci-bot Co-authored-by: opensearch-ci-bot --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index b0695df3..a8f52fd1 100644 --- a/build.gradle +++ b/build.gradle @@ -36,7 +36,7 @@ import org.opensearch.gradle.test.RestIntegTestTask buildscript { ext { isSnapshot = "true" == System.getProperty("build.snapshot", "true") - opensearch_version = System.getProperty("opensearch.version", "2.6.0-SNAPSHOT") + opensearch_version = System.getProperty("opensearch.version", "2.7.0-SNAPSHOT") buildVersionQualifier = System.getProperty("build.version_qualifier", "") // e.g. 2.0.0-rc1-SNAPSHOT -> 2.0.0.0-rc1-SNAPSHOT version_tokens = opensearch_version.tokenize('-') From 72082ad4637005f464b54c7f86d84d90f9f3a847 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 9 Mar 2023 10:28:30 +0530 Subject: [PATCH 34/84] Modified autofollow stats to rely on single source for failed indices (#708) (#736) Modified autofollow stats to rely on single source for failed indices and further improved logging for the initial failures during leader calls. Signed-off-by: Sai Kumar (cherry picked from commit be023e490f5cd647f652bc5bd5b5953ac7b48657) Co-authored-by: Sai Kumar --- .../replication/ReplicationPlugin.kt | 2 +- .../task/autofollow/AutoFollowTask.kt | 27 +++++++++++-------- .../replication/ReplicationHelpers.kt | 10 +++++++ .../integ/rest/UpdateAutoFollowPatternIT.kt | 12 ++++++++- 4 files changed, 38 insertions(+), 13 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt index 53c77866..7e5933c0 100644 --- a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt +++ b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt @@ -178,7 +178,7 @@ internal class ReplicationPlugin : Plugin(), ActionPlugin, PersistentTaskPlugin, TimeValue.timeValueSeconds(1), Setting.Property.Dynamic, Setting.Property.NodeScope) val REPLICATION_AUTOFOLLOW_REMOTE_INDICES_POLL_INTERVAL = Setting.timeSetting ("plugins.replication.autofollow.fetch_poll_interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(30), TimeValue.timeValueHours(1), Setting.Property.Dynamic, Setting.Property.NodeScope) - val REPLICATION_AUTOFOLLOW_REMOTE_INDICES_RETRY_POLL_INTERVAL = Setting.timeSetting ("plugins.replication.autofollow.retry_poll_interval", TimeValue.timeValueHours(1), TimeValue.timeValueMinutes(30), + val REPLICATION_AUTOFOLLOW_REMOTE_INDICES_RETRY_POLL_INTERVAL = Setting.timeSetting ("plugins.replication.autofollow.retry_poll_interval", TimeValue.timeValueHours(1), TimeValue.timeValueMinutes(1), TimeValue.timeValueHours(4), Setting.Property.Dynamic, Setting.Property.NodeScope) val REPLICATION_METADATA_SYNC_INTERVAL = Setting.timeSetting("plugins.replication.follower.metadata_sync_interval", TimeValue.timeValueSeconds(60), TimeValue.timeValueSeconds(5), diff --git 
a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt index 0685b79d..1ac2f7de 100644 --- a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt @@ -63,7 +63,6 @@ class AutoFollowTask(id: Long, type: String, action: String, description: String override val followerIndexName: String = params.patternName //Special case for auto follow override val log = Loggers.getLogger(javaClass, leaderAlias) private var trackingIndicesOnTheCluster = setOf() - private var failedIndices = ConcurrentSkipListSet() // Failed indices for replication from this autofollow task private var replicationJobsQueue = ConcurrentSkipListSet() // To keep track of outstanding replication jobs for this autofollow task private var retryScheduler: Scheduler.ScheduledCancellable? = null lateinit var stat: AutoFollowStat @@ -91,14 +90,21 @@ class AutoFollowTask(id: Long, type: String, action: String, description: String } private fun addRetryScheduler() { + log.debug("Adding retry scheduler") if(retryScheduler != null && !retryScheduler!!.isCancelled) { return } - retryScheduler = try { - threadPool.schedule({ failedIndices.clear() }, replicationSettings.autofollowRetryPollDuration, ThreadPool.Names.GENERIC) + try { + retryScheduler = threadPool.schedule( + { + log.debug("Clearing failed indices to schedule for the next retry") + stat.failedIndices.clear() + }, + replicationSettings.autofollowRetryPollDuration, + ThreadPool.Names.SAME) } catch (e: Exception) { log.error("Error scheduling retry on failed autofollow indices ${e.stackTraceToString()}") - null + retryScheduler = null } } @@ -123,10 +129,10 @@ class AutoFollowTask(id: Long, type: String, action: String, description: String } catch (e: Exception) { // Ideally, Calls to the remote cluster shouldn't fail and autofollow task should be able to pick-up the newly created indices // matching the pattern. Should be safe to retry after configured delay. - stat.failedLeaderCall++ - if(stat.failedLeaderCall > 0 && stat.failedLeaderCall.rem(10) == 0L) { + if(stat.failedLeaderCall >= 0 && stat.failedLeaderCall.rem(10) == 0L) { log.error("Fetching remote indices failed with error - ${e.stackTraceToString()}") } + stat.failedLeaderCall++ } var currentIndices = clusterService.state().metadata().concreteAllIndices.asIterable() // All indices - open and closed on the cluster @@ -138,7 +144,7 @@ class AutoFollowTask(id: Long, type: String, action: String, description: String trackingIndicesOnTheCluster = currentIndices.toSet() } } - remoteIndices = remoteIndices.minus(currentIndices).minus(failedIndices).minus(replicationJobsQueue) + remoteIndices = remoteIndices.minus(currentIndices).minus(stat.failedIndices).minus(replicationJobsQueue) stat.failCounterForRun = 0 startReplicationJobs(remoteIndices) @@ -207,8 +213,6 @@ class AutoFollowTask(id: Long, type: String, action: String, description: String } catch (e: OpenSearchSecurityException) { // For permission related failures, Adding as part of failed indices as autofollow role doesn't have required permissions. 
log.trace("Cannot start replication on $leaderIndex due to missing permissions $e") - failedIndices.add(leaderIndex) - } catch (e: Exception) { // Any failure other than security exception can be safely retried and not adding to the failed indices log.warn("Failed to start replication for $leaderAlias:$leaderIndex -> $leaderIndex.", e) @@ -249,7 +253,7 @@ class AutoFollowStat: Task.Status { val name :String val pattern :String var failCount: Long=0 - var failedIndices :MutableSet = mutableSetOf() + var failedIndices = ConcurrentSkipListSet() // Failed indices for replication from this autofollow task var failCounterForRun :Long=0 var successCount: Long=0 var failedLeaderCall :Long=0 @@ -265,7 +269,8 @@ class AutoFollowStat: Task.Status { name = inp.readString() pattern = inp.readString() failCount = inp.readLong() - failedIndices = inp.readSet(StreamInput::readString) + val inpFailedIndices = inp.readList(StreamInput::readString) + failedIndices = ConcurrentSkipListSet(inpFailedIndices) successCount = inp.readLong() failedLeaderCall = inp.readLong() lastExecutionTime = inp.readLong() diff --git a/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt b/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt index d5ebf0c9..884d8b01 100644 --- a/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt +++ b/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt @@ -381,6 +381,16 @@ fun RestHighLevelClient.updateReplicationStartBlockSetting(enabled: Boolean) { assertThat(response.isAcknowledged).isTrue() } +fun RestHighLevelClient.updateAutofollowRetrySetting(duration: String) { + var settings: Settings = Settings.builder() + .put("plugins.replication.autofollow.retry_poll_interval", duration) + .build() + var updateSettingsRequest = ClusterUpdateSettingsRequest() + updateSettingsRequest.persistentSettings(settings) + val response = this.cluster().putSettings(updateSettingsRequest, RequestOptions.DEFAULT) + assertThat(response.isAcknowledged).isTrue() +} + fun RestHighLevelClient.updateAutoFollowConcurrentStartReplicationJobSetting(concurrentJobs: Int?) 
{ val settings = if(concurrentJobs != null) { Settings.builder() diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt index cb0d332e..9f12bbfb 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt @@ -42,6 +42,7 @@ import org.opensearch.cluster.metadata.MetadataCreateIndexService import org.opensearch.replication.AutoFollowStats import org.opensearch.replication.ReplicationPlugin import org.opensearch.replication.updateReplicationStartBlockSetting +import org.opensearch.replication.updateAutofollowRetrySetting import org.opensearch.replication.updateAutoFollowConcurrentStartReplicationJobSetting import org.opensearch.replication.waitForShardTaskStart import org.opensearch.test.OpenSearchTestCase.assertBusy @@ -321,6 +322,8 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { createConnectionBetweenClusters(FOLLOWER, LEADER, connectionAlias) val leaderIndexName = createRandomIndex(leaderClient) try { + //modify retry duration to account for autofollow trigger in next retry + followerClient.updateAutofollowRetrySetting("1m") // Add replication start block followerClient.updateReplicationStartBlockSetting(true) followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern) @@ -330,12 +333,19 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { // Autofollow task should still be up - 1 task Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(0) Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) + + var stats = followerClient.AutoFollowStats() + var failedIndices = stats["failed_indices"] as List<*> + assert(failedIndices.size == 1) // Remove replication start block followerClient.updateReplicationStartBlockSetting(false) - sleep(45000) // poll for auto follow in worst case + sleep(60000) // wait for auto follow trigger in the worst case // Index should be replicated and autofollow task should be present Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(1) Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) + stats = followerClient.AutoFollowStats() + failedIndices = stats["failed_indices"] as List<*> + assert(failedIndices.isEmpty()) } finally { followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName) } From ac2040ab1a9ee011c1a9f32f86cd3b64db5e014a Mon Sep 17 00:00:00 2001 From: Monu Singh Date: Thu, 23 Mar 2023 09:06:46 +0530 Subject: [PATCH 35/84] Merge pull request #745 from opensearch-project/monu-aws-patch-1 Signed-off-by: Monu Singh (cherry picked from commit f9746e54d030828a1ea0721456fe10dd094010cb) --- .../replication/integ/rest/UpdateAutoFollowPatternIT.kt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt index 9f12bbfb..b38d0dce 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt @@ -197,6 +197,8 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { }, 30, TimeUnit.SECONDS) // Verify that existing index matching the pattern are replicated. 
assertBusy ({ + followerClient.waitForShardTaskStart(leaderIndexName) + followerClient.waitForShardTaskStart(leaderIndexName2) Assertions.assertThat(followerClient.indices() .exists(GetIndexRequest(leaderIndexName2), RequestOptions.DEFAULT)) .isEqualTo(true) @@ -208,12 +210,10 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { assert(key["num_success_start_replication"]!! as Int == 1) } assertTrue(af_stats.size == 2) - }, 30, TimeUnit.SECONDS) + }, 60, TimeUnit.SECONDS) } finally { followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName) followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName2) - followerClient.waitForShardTaskStart(leaderIndexName) - followerClient.waitForShardTaskStart(leaderIndexName2) } } @@ -451,4 +451,4 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { assertEquals(HttpStatus.SC_OK.toLong(), persistentConnectionResponse.statusLine.statusCode.toLong()) } -} \ No newline at end of file +} From 88ce8be22f496be9c78aa6c730965457e9173415 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 30 Mar 2023 12:26:22 +0530 Subject: [PATCH 36/84] Update imports from org.opensearch.common.xcontent to org.opensearch.core.xcontent (#718) (#761) Signed-off-by: Ankit Kala (cherry picked from commit d29f88607269eb528588228acdf0857f0c49d397) Co-authored-by: Ankit Kala --- .../opensearch/replication/ReplicationPlugin.kt | 6 +++--- .../AutoFollowClusterManagerNodeRequest.kt | 6 +++--- .../autofollow/UpdateAutoFollowPatternRequest.kt | 12 ++++++------ .../ReplicateIndexClusterManagerNodeRequest.kt | 6 +++--- .../action/index/ReplicateIndexRequest.kt | 14 +++++++------- .../action/index/block/UpdateIndexBlockRequest.kt | 10 +++++----- .../action/pause/PauseIndexReplicationRequest.kt | 12 ++++++------ .../action/resume/ResumeIndexReplicationRequest.kt | 2 +- .../replication/action/setup/SetupChecksRequest.kt | 6 +++--- .../action/setup/ValidatePermissionsRequest.kt | 6 +++--- .../action/stats/AutoFollowStatsAction.kt | 8 ++++---- .../action/stats/FollowerStatsResponse.kt | 8 ++++---- .../action/stats/LeaderStatsResponse.kt | 8 ++++---- .../action/status/ReplicationStatusResponse.kt | 6 +++--- .../replication/action/status/ShardInfoRequest.kt | 6 +++--- .../replication/action/status/ShardInfoResponse.kt | 8 ++++---- .../action/stop/StopIndexReplicationRequest.kt | 4 ++-- .../action/update/UpdateIndexReplicationRequest.kt | 4 ++-- .../metadata/state/ReplicationStateMetadata.kt | 6 +++--- .../metadata/store/ReplicationMetadata.kt | 10 +++++----- .../metadata/store/ReplicationMetadataStore.kt | 8 +++++++- .../replication/rest/AutoFollowStatsHandler.kt | 4 ++-- .../replication/rest/FollowerStatsHandler.kt | 4 ++-- .../replication/rest/LeaderStatsHandler.kt | 4 ++-- .../replication/seqno/RemoteClusterStats.kt | 8 ++++---- .../task/CrossClusterReplicationTask.kt | 6 +++--- .../replication/task/ReplicationState.kt | 6 +++--- .../task/autofollow/AutoFollowParams.kt | 10 +++++----- .../replication/task/autofollow/AutoFollowTask.kt | 4 ++-- .../task/index/IndexReplicationParams.kt | 10 +++++----- .../task/index/IndexReplicationState.kt | 10 +++++----- .../replication/task/index/IndexReplicationTask.kt | 6 +++--- .../replication/task/shard/FollowerClusterStats.kt | 6 +++--- .../task/shard/ShardReplicationParams.kt | 10 +++++----- .../task/shard/ShardReplicationState.kt | 10 +++++----- .../replication/MultiClusterRestTestCase.kt | 5 ++--- 
.../opensearch/replication/ReplicationHelpers.kt | 4 ++-- .../replication/integ/rest/StartReplicationIT.kt | 4 ++-- .../task/index/IndexReplicationTaskTests.kt | 2 +- .../replication/task/index/NoOpClient.kt | 2 +- 40 files changed, 138 insertions(+), 133 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt index 7e5933c0..9b50bb2b 100644 --- a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt +++ b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt @@ -82,7 +82,7 @@ import org.opensearch.cluster.metadata.RepositoryMetadata import org.opensearch.cluster.node.DiscoveryNodes import org.opensearch.cluster.service.ClusterService import org.opensearch.common.CheckedFunction -import org.opensearch.common.ParseField +import org.opensearch.core.ParseField import org.opensearch.common.component.LifecycleComponent import org.opensearch.common.io.stream.NamedWriteableRegistry import org.opensearch.common.io.stream.Writeable @@ -96,8 +96,8 @@ import org.opensearch.common.unit.ByteSizeUnit import org.opensearch.common.unit.ByteSizeValue import org.opensearch.common.unit.TimeValue import org.opensearch.common.util.concurrent.OpenSearchExecutors -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.XContentParser +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser import org.opensearch.commons.utils.OpenForTesting import org.opensearch.env.Environment import org.opensearch.env.NodeEnvironment diff --git a/src/main/kotlin/org/opensearch/replication/action/autofollow/AutoFollowClusterManagerNodeRequest.kt b/src/main/kotlin/org/opensearch/replication/action/autofollow/AutoFollowClusterManagerNodeRequest.kt index 80c05995..f9199ea0 100644 --- a/src/main/kotlin/org/opensearch/replication/action/autofollow/AutoFollowClusterManagerNodeRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/autofollow/AutoFollowClusterManagerNodeRequest.kt @@ -16,9 +16,9 @@ import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.support.master.MasterNodeRequest import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder class AutoFollowClusterManagerNodeRequest: MasterNodeRequest, ToXContentObject { var user: User? 
= null diff --git a/src/main/kotlin/org/opensearch/replication/action/autofollow/UpdateAutoFollowPatternRequest.kt b/src/main/kotlin/org/opensearch/replication/action/autofollow/UpdateAutoFollowPatternRequest.kt index 30d16b29..a7077793 100644 --- a/src/main/kotlin/org/opensearch/replication/action/autofollow/UpdateAutoFollowPatternRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/autofollow/UpdateAutoFollowPatternRequest.kt @@ -16,15 +16,15 @@ import org.opensearch.replication.metadata.store.KEY_SETTINGS import org.opensearch.replication.util.ValidationUtil.validateName import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.support.master.AcknowledgedRequest -import org.opensearch.common.ParseField +import org.opensearch.core.ParseField import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.ObjectParser -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser +import org.opensearch.core.xcontent.ObjectParser +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser import java.util.Collections import java.util.function.BiConsumer import java.util.function.BiFunction diff --git a/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexClusterManagerNodeRequest.kt b/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexClusterManagerNodeRequest.kt index 2c06b6ca..63f77023 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexClusterManagerNodeRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexClusterManagerNodeRequest.kt @@ -16,9 +16,9 @@ import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.support.master.MasterNodeRequest import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder class ReplicateIndexClusterManagerNodeRequest: MasterNodeRequest, ToXContentObject { diff --git a/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexRequest.kt b/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexRequest.kt index 894996db..bb0f9aaf 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexRequest.kt @@ -17,16 +17,16 @@ import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.IndicesRequest import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedRequest -import org.opensearch.common.ParseField +import org.opensearch.core.ParseField import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput import org.opensearch.common.settings.Settings -import 
org.opensearch.common.xcontent.ObjectParser -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContent.Params -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser +import org.opensearch.core.xcontent.ObjectParser +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContent.Params +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser import java.io.IOException import java.util.Collections import java.util.function.BiConsumer diff --git a/src/main/kotlin/org/opensearch/replication/action/index/block/UpdateIndexBlockRequest.kt b/src/main/kotlin/org/opensearch/replication/action/index/block/UpdateIndexBlockRequest.kt index 12eade15..d9b51933 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/block/UpdateIndexBlockRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/index/block/UpdateIndexBlockRequest.kt @@ -15,13 +15,13 @@ import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.IndicesRequest import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedRequest -import org.opensearch.common.ParseField +import org.opensearch.core.ParseField import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ObjectParser -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ObjectParser +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder import java.util.function.Supplier enum class IndexBlockUpdateType { diff --git a/src/main/kotlin/org/opensearch/replication/action/pause/PauseIndexReplicationRequest.kt b/src/main/kotlin/org/opensearch/replication/action/pause/PauseIndexReplicationRequest.kt index a1e6bf8f..e69dcb44 100644 --- a/src/main/kotlin/org/opensearch/replication/action/pause/PauseIndexReplicationRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/pause/PauseIndexReplicationRequest.kt @@ -16,14 +16,14 @@ import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.IndicesRequest import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedRequest -import org.opensearch.common.ParseField +import org.opensearch.core.ParseField import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ObjectParser -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser +import org.opensearch.core.xcontent.ObjectParser +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser class PauseIndexReplicationRequest : AcknowledgedRequest, IndicesRequest.Replaceable, ToXContentObject { diff --git 
a/src/main/kotlin/org/opensearch/replication/action/resume/ResumeIndexReplicationRequest.kt b/src/main/kotlin/org/opensearch/replication/action/resume/ResumeIndexReplicationRequest.kt index 9ebef602..dd219272 100644 --- a/src/main/kotlin/org/opensearch/replication/action/resume/ResumeIndexReplicationRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/resume/ResumeIndexReplicationRequest.kt @@ -17,7 +17,7 @@ import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedRequest import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.* +import org.opensearch.core.xcontent.* class ResumeIndexReplicationRequest : AcknowledgedRequest, IndicesRequest.Replaceable, ToXContentObject { diff --git a/src/main/kotlin/org/opensearch/replication/action/setup/SetupChecksRequest.kt b/src/main/kotlin/org/opensearch/replication/action/setup/SetupChecksRequest.kt index 4d14d10f..57b9fffb 100644 --- a/src/main/kotlin/org/opensearch/replication/action/setup/SetupChecksRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/setup/SetupChecksRequest.kt @@ -16,9 +16,9 @@ import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.support.master.AcknowledgedRequest import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder class SetupChecksRequest: AcknowledgedRequest, ToXContentObject { val followerContext: ReplicationContext diff --git a/src/main/kotlin/org/opensearch/replication/action/setup/ValidatePermissionsRequest.kt b/src/main/kotlin/org/opensearch/replication/action/setup/ValidatePermissionsRequest.kt index 1b51d95d..430fe9b8 100644 --- a/src/main/kotlin/org/opensearch/replication/action/setup/ValidatePermissionsRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/setup/ValidatePermissionsRequest.kt @@ -17,9 +17,9 @@ import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedRequest import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder class ValidatePermissionsRequest: AcknowledgedRequest, IndicesRequest.Replaceable, ToXContentObject { val cluster: String diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/AutoFollowStatsAction.kt b/src/main/kotlin/org/opensearch/replication/action/stats/AutoFollowStatsAction.kt index 678b6449..0878d377 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/AutoFollowStatsAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/AutoFollowStatsAction.kt @@ -18,10 +18,10 @@ import org.opensearch.action.support.tasks.BaseTasksResponse import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput import 
org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContent.EMPTY_PARAMS -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.replication.task.autofollow.AutoFollowStat import java.io.IOException diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsResponse.kt b/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsResponse.kt index 3b405bde..53271fe4 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsResponse.kt @@ -19,10 +19,10 @@ import org.opensearch.cluster.ClusterName import org.opensearch.common.Strings import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent.EMPTY_PARAMS -import org.opensearch.common.xcontent.ToXContent.Params -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS +import org.opensearch.core.xcontent.ToXContent.Params +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentFactory import org.opensearch.index.shard.ShardId import org.opensearch.replication.metadata.ReplicationOverallState diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsResponse.kt b/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsResponse.kt index 235331d5..47333152 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsResponse.kt @@ -19,10 +19,10 @@ import org.opensearch.cluster.ClusterName import org.opensearch.common.Strings import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent.EMPTY_PARAMS -import org.opensearch.common.xcontent.ToXContent.Params -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS +import org.opensearch.core.xcontent.ToXContent.Params +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentFactory import org.opensearch.replication.seqno.RemoteShardMetric import org.opensearch.replication.seqno.RemoteShardMetric.RemoteStats diff --git a/src/main/kotlin/org/opensearch/replication/action/status/ReplicationStatusResponse.kt b/src/main/kotlin/org/opensearch/replication/action/status/ReplicationStatusResponse.kt index b7b6fa73..a4832381 100644 --- a/src/main/kotlin/org/opensearch/replication/action/status/ReplicationStatusResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/status/ReplicationStatusResponse.kt @@ -16,9 +16,9 @@ import org.opensearch.action.support.DefaultShardOperationFailedException import org.opensearch.action.support.broadcast.BroadcastResponse 
import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent.Params -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent.Params +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder import java.io.IOException class ReplicationStatusResponse : BroadcastResponse, ToXContentObject { diff --git a/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoRequest.kt b/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoRequest.kt index 23549470..024dd976 100644 --- a/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoRequest.kt @@ -16,9 +16,9 @@ import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.broadcast.BroadcastRequest import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder class ShardInfoRequest : BroadcastRequest , ToXContentObject { diff --git a/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoResponse.kt b/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoResponse.kt index 4cadea82..af111889 100644 --- a/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoResponse.kt @@ -13,12 +13,12 @@ package org.opensearch.replication.action.status import org.opensearch.action.support.broadcast.BroadcastResponse import org.opensearch.action.support.broadcast.BroadcastShardResponse -import org.opensearch.common.ParseField +import org.opensearch.core.ParseField import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.index.shard.ShardId import java.io.IOException diff --git a/src/main/kotlin/org/opensearch/replication/action/stop/StopIndexReplicationRequest.kt b/src/main/kotlin/org/opensearch/replication/action/stop/StopIndexReplicationRequest.kt index 32d44b7a..2f447eb8 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stop/StopIndexReplicationRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stop/StopIndexReplicationRequest.kt @@ -15,10 +15,10 @@ import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.IndicesRequest import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedRequest -import org.opensearch.common.ParseField +import org.opensearch.core.ParseField import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.* +import 
org.opensearch.core.xcontent.* class StopIndexReplicationRequest : AcknowledgedRequest, IndicesRequest.Replaceable, ToXContentObject { diff --git a/src/main/kotlin/org/opensearch/replication/action/update/UpdateIndexReplicationRequest.kt b/src/main/kotlin/org/opensearch/replication/action/update/UpdateIndexReplicationRequest.kt index 1dde6ddc..753e2f62 100644 --- a/src/main/kotlin/org/opensearch/replication/action/update/UpdateIndexReplicationRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/update/UpdateIndexReplicationRequest.kt @@ -15,12 +15,12 @@ import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.IndicesRequest import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedRequest -import org.opensearch.common.ParseField +import org.opensearch.core.ParseField import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput import org.opensearch.common.settings.Settings import org.opensearch.common.settings.Settings.readSettingsFromStream -import org.opensearch.common.xcontent.* +import org.opensearch.core.xcontent.* import java.io.IOException import java.util.* diff --git a/src/main/kotlin/org/opensearch/replication/metadata/state/ReplicationStateMetadata.kt b/src/main/kotlin/org/opensearch/replication/metadata/state/ReplicationStateMetadata.kt index edfec793..d6d5c6d9 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/state/ReplicationStateMetadata.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/state/ReplicationStateMetadata.kt @@ -20,9 +20,9 @@ import org.opensearch.cluster.metadata.Metadata import org.opensearch.cluster.service.ClusterService import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser import java.io.IOException import java.util.EnumSet import kotlin.collections.HashMap diff --git a/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadata.kt b/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadata.kt index 892a86a9..86cd80c2 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadata.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadata.kt @@ -12,15 +12,15 @@ package org.opensearch.replication.metadata.store import org.opensearch.commons.authuser.User -import org.opensearch.common.ParseField +import org.opensearch.core.ParseField import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput import org.opensearch.common.io.stream.Writeable import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.ObjectParser -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser +import org.opensearch.core.xcontent.ObjectParser +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser import java.io.IOException import java.util.* import java.util.function.BiConsumer diff --git 
a/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt b/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt index 0ce5683f..ba0122ee 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt @@ -33,7 +33,13 @@ import org.opensearch.cluster.service.ClusterService import org.opensearch.common.component.AbstractLifecycleComponent import org.opensearch.common.settings.Settings import org.opensearch.common.util.concurrent.ThreadContext -import org.opensearch.common.xcontent.* +import org.opensearch.common.xcontent.XContentType +import org.opensearch.common.xcontent.XContentFactory +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentParser import org.opensearch.replication.util.suspendExecuteWithRetries class ReplicationMetadataStore constructor(val client: Client, val clusterService: ClusterService, diff --git a/src/main/kotlin/org/opensearch/replication/rest/AutoFollowStatsHandler.kt b/src/main/kotlin/org/opensearch/replication/rest/AutoFollowStatsHandler.kt index 2f09c4b1..42421eb4 100644 --- a/src/main/kotlin/org/opensearch/replication/rest/AutoFollowStatsHandler.kt +++ b/src/main/kotlin/org/opensearch/replication/rest/AutoFollowStatsHandler.kt @@ -2,8 +2,8 @@ package org.opensearch.replication.rest import org.apache.logging.log4j.LogManager import org.opensearch.client.node.NodeClient -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentFactory import org.opensearch.replication.action.stats.AutoFollowStatsAction import org.opensearch.replication.action.stats.AutoFollowStatsRequest diff --git a/src/main/kotlin/org/opensearch/replication/rest/FollowerStatsHandler.kt b/src/main/kotlin/org/opensearch/replication/rest/FollowerStatsHandler.kt index 12026466..ce5013f6 100644 --- a/src/main/kotlin/org/opensearch/replication/rest/FollowerStatsHandler.kt +++ b/src/main/kotlin/org/opensearch/replication/rest/FollowerStatsHandler.kt @@ -2,8 +2,8 @@ package org.opensearch.replication.rest import org.apache.logging.log4j.LogManager import org.opensearch.client.node.NodeClient -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentFactory import org.opensearch.replication.action.stats.FollowerStatsAction import org.opensearch.replication.action.stats.FollowerStatsRequest diff --git a/src/main/kotlin/org/opensearch/replication/rest/LeaderStatsHandler.kt b/src/main/kotlin/org/opensearch/replication/rest/LeaderStatsHandler.kt index 98a68c27..d71379bf 100644 --- a/src/main/kotlin/org/opensearch/replication/rest/LeaderStatsHandler.kt +++ b/src/main/kotlin/org/opensearch/replication/rest/LeaderStatsHandler.kt @@ -2,8 +2,8 @@ package org.opensearch.replication.rest import org.apache.logging.log4j.LogManager import org.opensearch.client.node.NodeClient -import org.opensearch.common.xcontent.ToXContent -import 
org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentFactory import org.opensearch.replication.action.stats.LeaderStatsAction import org.opensearch.replication.action.stats.LeaderStatsRequest diff --git a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterStats.kt b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterStats.kt index d6635288..cc565d03 100644 --- a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterStats.kt +++ b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterStats.kt @@ -15,10 +15,10 @@ import org.opensearch.common.component.AbstractLifecycleComponent import org.opensearch.common.inject.Singleton import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentFragment -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentFragment +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.index.shard.ShardId import java.util.concurrent.atomic.AtomicLong diff --git a/src/main/kotlin/org/opensearch/replication/task/CrossClusterReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/CrossClusterReplicationTask.kt index 933be175..75477625 100644 --- a/src/main/kotlin/org/opensearch/replication/task/CrossClusterReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/CrossClusterReplicationTask.kt @@ -36,9 +36,9 @@ import org.opensearch.client.Client import org.opensearch.cluster.service.ClusterService import org.opensearch.common.io.stream.StreamOutput import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.index.IndexService import org.opensearch.index.shard.IndexShard import org.opensearch.index.shard.ShardId diff --git a/src/main/kotlin/org/opensearch/replication/task/ReplicationState.kt b/src/main/kotlin/org/opensearch/replication/task/ReplicationState.kt index 57c41f64..3a81f74e 100644 --- a/src/main/kotlin/org/opensearch/replication/task/ReplicationState.kt +++ b/src/main/kotlin/org/opensearch/replication/task/ReplicationState.kt @@ -14,9 +14,9 @@ package org.opensearch.replication.task import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentFragment -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentFragment +import org.opensearch.core.xcontent.XContentBuilder /** * Enum that represents the state of replication of either shards or indices. 
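Note: the hunks in this patch only relocate imports from org.opensearch.common.xcontent to org.opensearch.core.xcontent; no call sites change. As a rough, hypothetical sketch of what a serializable type looks like against the relocated package (the class name and field below are illustrative and not part of this patch):

import org.opensearch.core.xcontent.ToXContent
import org.opensearch.core.xcontent.ToXContentObject
import org.opensearch.core.xcontent.XContentBuilder

// Illustrative only: after the migration, only the import lines change;
// the fluent startObject()/field()/endObject() builder calls stay the same.
class ExampleStatus(private val state: String) : ToXContentObject {
    override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
        return builder.startObject()
            .field("state", state)
            .endObject()
    }
}

Because only the package prefix moves, the diffs in this patch touch import blocks (and wildcard imports) exclusively, which is why no method bodies appear in the hunks above or below.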
diff --git a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowParams.kt b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowParams.kt index e85d323d..9bcecf64 100644 --- a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowParams.kt +++ b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowParams.kt @@ -12,13 +12,13 @@ package org.opensearch.replication.task.autofollow import org.opensearch.Version -import org.opensearch.common.ParseField +import org.opensearch.core.ParseField import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ObjectParser -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser +import org.opensearch.core.xcontent.ObjectParser +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser import org.opensearch.persistent.PersistentTaskParams import java.io.IOException diff --git a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt index 1ac2f7de..da89580e 100644 --- a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt @@ -32,8 +32,8 @@ import org.opensearch.cluster.service.ClusterService import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput import org.opensearch.common.logging.Loggers -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.persistent.PersistentTaskState import org.opensearch.replication.ReplicationException import org.opensearch.replication.action.status.ReplicationStatusAction diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt index 0c4f4a53..efaf2af6 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt @@ -12,14 +12,14 @@ package org.opensearch.replication.task.index import org.opensearch.Version -import org.opensearch.common.ParseField +import org.opensearch.core.ParseField import org.opensearch.common.Strings import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ObjectParser -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser +import org.opensearch.core.xcontent.ObjectParser +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser import org.opensearch.common.xcontent.XContentType import org.opensearch.index.Index import org.opensearch.persistent.PersistentTaskParams diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationState.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationState.kt index 
1ea7afaf..010d1447 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationState.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationState.kt @@ -13,13 +13,13 @@ package org.opensearch.replication.task.index import org.opensearch.replication.task.ReplicationState import org.opensearch.replication.task.shard.ShardReplicationParams -import org.opensearch.common.ParseField +import org.opensearch.core.ParseField import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ObjectParser -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser +import org.opensearch.core.xcontent.ObjectParser +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser import org.opensearch.index.shard.ShardId import org.opensearch.persistent.PersistentTaskState import org.opensearch.persistent.PersistentTasksCustomMetadata.PersistentTask diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt index b4662aa3..bc3cf5e0 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt @@ -75,9 +75,9 @@ import org.opensearch.common.settings.Settings import org.opensearch.common.settings.SettingsModule import org.opensearch.common.unit.ByteSizeUnit import org.opensearch.common.unit.ByteSizeValue -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentType import org.opensearch.index.Index import org.opensearch.index.IndexService diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/FollowerClusterStats.kt b/src/main/kotlin/org/opensearch/replication/task/shard/FollowerClusterStats.kt index 09a0bc60..db112a1f 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/FollowerClusterStats.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/FollowerClusterStats.kt @@ -15,9 +15,9 @@ import org.apache.logging.log4j.LogManager import org.opensearch.common.inject.Singleton import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentFragment -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentFragment +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentType import org.opensearch.index.shard.ShardId import java.util.concurrent.atomic.AtomicLong diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt index eb5c23b1..d8f790a1 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt +++ 
b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt @@ -12,14 +12,14 @@ package org.opensearch.replication.task.shard import org.opensearch.Version -import org.opensearch.common.ParseField +import org.opensearch.core.ParseField import org.opensearch.common.Strings import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ObjectParser -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser +import org.opensearch.core.xcontent.ObjectParser +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser import org.opensearch.common.xcontent.XContentType import org.opensearch.index.shard.ShardId import org.opensearch.persistent.PersistentTaskParams diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationState.kt b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationState.kt index 52849d92..f0c3fc88 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationState.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationState.kt @@ -13,13 +13,13 @@ package org.opensearch.replication.task.shard import org.opensearch.replication.task.ReplicationState import org.opensearch.OpenSearchException -import org.opensearch.common.ParseField +import org.opensearch.core.ParseField import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ObjectParser -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser +import org.opensearch.core.xcontent.ObjectParser +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser import org.opensearch.persistent.PersistentTaskState import java.io.IOException import java.lang.IllegalArgumentException diff --git a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt index c40af9a7..744b7223 100644 --- a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt +++ b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt @@ -42,8 +42,8 @@ import org.opensearch.common.io.PathUtils import org.opensearch.common.settings.Settings import org.opensearch.common.unit.TimeValue import org.opensearch.common.util.concurrent.ThreadContext -import org.opensearch.common.xcontent.DeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.DeprecationHandler +import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentHelper import org.opensearch.common.xcontent.XContentType import org.opensearch.common.xcontent.json.JsonXContent @@ -57,7 +57,6 @@ import org.junit.After import org.junit.AfterClass import org.junit.Before import org.junit.BeforeClass -import org.opensearch.index.mapper.ObjectMapper import java.nio.file.Files import java.security.KeyManagementException import java.security.KeyStore diff --git a/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt 
b/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt index 884d8b01..3fe4e11f 100644 --- a/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt +++ b/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt @@ -23,8 +23,8 @@ import org.opensearch.client.Response import org.opensearch.client.RestHighLevelClient import org.opensearch.common.settings.Settings import org.opensearch.common.unit.TimeValue -import org.opensearch.common.xcontent.DeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.DeprecationHandler +import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentType import org.opensearch.test.OpenSearchTestCase.assertBusy import org.opensearch.test.rest.OpenSearchRestTestCase diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt index af2f9d09..5b2595c5 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt @@ -67,8 +67,8 @@ import org.opensearch.repositories.fs.FsRepository import org.opensearch.test.OpenSearchTestCase.assertBusy import org.junit.Assert import org.opensearch.cluster.metadata.AliasMetadata -import org.opensearch.common.xcontent.DeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.DeprecationHandler +import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.replication.ReplicationPlugin.Companion.REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING import org.opensearch.replication.followerStats import org.opensearch.replication.leaderStats diff --git a/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt b/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt index 569eadf8..083533d9 100644 --- a/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt +++ b/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt @@ -30,7 +30,7 @@ import org.opensearch.cluster.routing.RoutingTable import org.opensearch.common.settings.Settings import org.opensearch.common.settings.SettingsModule import org.opensearch.common.unit.TimeValue -import org.opensearch.common.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.index.Index import org.opensearch.index.shard.ShardId import org.opensearch.persistent.PersistentTaskParams diff --git a/src/test/kotlin/org/opensearch/replication/task/index/NoOpClient.kt b/src/test/kotlin/org/opensearch/replication/task/index/NoOpClient.kt index 1c92c0bb..35af7cb4 100644 --- a/src/test/kotlin/org/opensearch/replication/task/index/NoOpClient.kt +++ b/src/test/kotlin/org/opensearch/replication/task/index/NoOpClient.kt @@ -32,7 +32,7 @@ import org.opensearch.common.UUIDs import org.opensearch.common.bytes.BytesReference import org.opensearch.common.collect.ImmutableOpenMap import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContent import org.opensearch.common.xcontent.XContentFactory import org.opensearch.index.Index import org.opensearch.index.get.GetResult From 04492693760fb733fb9ee72e276a05489a3d3e1a Mon Sep 17 00:00:00 2001 From: 
"opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 14 Apr 2023 14:29:49 +0530 Subject: [PATCH 37/84] Adding a proxy mode connection setup for CCR (#786) (#795) Adding a proxy mode feature for CCR setup (cherry picked from commit fcbbfb74d288c2eb5cd2f933214b2d205d825832) Signed-off-by: Ishank katiyar Co-authored-by: ishankka <111563763+ishankka@users.noreply.github.com> --- .../RemoteClusterRepositoriesService.kt | 16 +++- .../RemoteClusterRepositoriesServiceTests.kt | 90 +++++++++++++++++++ 2 files changed, 102 insertions(+), 4 deletions(-) create mode 100644 src/test/kotlin/org/opensearch/replication/repository/RemoteClusterRepositoriesServiceTests.kt diff --git a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepositoriesService.kt b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepositoriesService.kt index 46823482..c3995e47 100644 --- a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepositoriesService.kt +++ b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepositoriesService.kt @@ -13,6 +13,7 @@ package org.opensearch.replication.repository import org.opensearch.cluster.service.ClusterService import org.opensearch.common.settings.ClusterSettings import org.opensearch.repositories.RepositoriesService +import org.opensearch.transport.ProxyConnectionStrategy.PROXY_ADDRESS import org.opensearch.transport.SniffConnectionStrategy.REMOTE_CLUSTER_SEEDS import java.util.function.Supplier @@ -24,12 +25,12 @@ class RemoteClusterRepositoriesService(private val repositoriesService: Supplier } private fun listenForUpdates(clusterSettings: ClusterSettings) { - // TODO: Proxy support from ES 7.7. Needs additional handling based on those settings - clusterSettings.addAffixUpdateConsumer(REMOTE_CLUSTER_SEEDS, this::updateRepositoryDetails) { _, _ -> Unit } + clusterSettings.addAffixUpdateConsumer(REMOTE_CLUSTER_SEEDS, this::updateRepositoryDetailsForSeeds) { _, _ -> Unit } + clusterSettings.addAffixUpdateConsumer(PROXY_ADDRESS, this::updateRepositoryDetailsForProxy) { _, _ -> Unit } } - private fun updateRepositoryDetails(alias: String, seeds: List?) { - if(seeds == null || seeds.isEmpty()) { + private fun updateRepositoryDetailsForSeeds(alias: String, seeds: List?) { + if(seeds.isNullOrEmpty()) { repositoriesService.get().unregisterInternalRepository(REMOTE_REPOSITORY_PREFIX + alias) return } @@ -37,4 +38,11 @@ class RemoteClusterRepositoriesService(private val repositoriesService: Supplier repositoriesService.get().registerInternalRepository(REMOTE_REPOSITORY_PREFIX + alias, REMOTE_REPOSITORY_TYPE) } + private fun updateRepositoryDetailsForProxy(alias: String, proxyIp: String?) 
{ + if(proxyIp.isNullOrEmpty()) { + repositoriesService.get().unregisterInternalRepository(REMOTE_REPOSITORY_PREFIX + alias) + return + } + repositoriesService.get().registerInternalRepository(REMOTE_REPOSITORY_PREFIX + alias, REMOTE_REPOSITORY_TYPE) + } } \ No newline at end of file diff --git a/src/test/kotlin/org/opensearch/replication/repository/RemoteClusterRepositoriesServiceTests.kt b/src/test/kotlin/org/opensearch/replication/repository/RemoteClusterRepositoriesServiceTests.kt new file mode 100644 index 00000000..6ec80b9b --- /dev/null +++ b/src/test/kotlin/org/opensearch/replication/repository/RemoteClusterRepositoriesServiceTests.kt @@ -0,0 +1,90 @@ +package org.opensearch.replication.repository + +import com.nhaarman.mockitokotlin2.times +import org.mockito.Mockito +import org.opensearch.Version +import org.opensearch.cluster.node.DiscoveryNode +import org.opensearch.cluster.node.DiscoveryNodeRole +import org.opensearch.common.settings.ClusterSettings +import org.opensearch.common.settings.Settings +import org.opensearch.repositories.RepositoriesService +import org.opensearch.test.ClusterServiceUtils +import org.opensearch.test.OpenSearchTestCase +import org.opensearch.threadpool.TestThreadPool +import java.util.function.Supplier + +class RemoteClusterRepositoriesServiceTests : OpenSearchTestCase() { + + fun `test changes in seed_nodes`() { + var clusterSetting = ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + var threadPool = TestThreadPool("ReplicationPluginTest") + val discoveryNode = DiscoveryNode( + "node", + buildNewFakeTransportAddress(), emptyMap(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ) + var clusterService = ClusterServiceUtils.createClusterService(threadPool, discoveryNode, clusterSetting) + val repositoriesService = Mockito.mock(RepositoriesService::class.java) + RemoteClusterRepositoriesService(Supplier { repositoriesService }, clusterService) + clusterSetting.applySettings(Settings.builder().putList("cluster.remote.con-alias.seeds", "127.0.0.1:9300", "127.0.0.2:9300").build()) + Mockito.verify(repositoriesService, times(1)).registerInternalRepository(REMOTE_REPOSITORY_PREFIX + "con-alias", REMOTE_REPOSITORY_TYPE) + threadPool.shutdown() + } + + fun `test removal of seed_nodes`() { + var clusterSetting = ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + var threadPool = TestThreadPool("ReplicationPluginTest") + val discoveryNode = DiscoveryNode( + "node", + buildNewFakeTransportAddress(), emptyMap(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ) + var clusterService = ClusterServiceUtils.createClusterService(threadPool, discoveryNode, clusterSetting) + val repositoriesService = Mockito.mock(RepositoriesService::class.java) + RemoteClusterRepositoriesService(Supplier { repositoriesService }, clusterService) + clusterSetting.applySettings(Settings.builder().putList("cluster.remote.con-alias.seeds", "127.0.0.1:9300", "127.0.0.2:9300").build()) + Mockito.verify(repositoriesService, times(1)).registerInternalRepository(REMOTE_REPOSITORY_PREFIX + "con-alias", REMOTE_REPOSITORY_TYPE) + clusterSetting.applySettings(Settings.builder().putNull("cluster.remote.con-alias.seeds").build()) + Mockito.verify(repositoriesService, times(1)).unregisterInternalRepository(REMOTE_REPOSITORY_PREFIX + "con-alias") + threadPool.shutdown() + } + + fun `test changes in proxy_id for proxy-setup`() { + var clusterSetting = ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + var 
threadPool = TestThreadPool("ReplicationPluginTest") + val discoveryNode = DiscoveryNode( + "node", + buildNewFakeTransportAddress(), emptyMap(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ) + var clusterService = ClusterServiceUtils.createClusterService(threadPool, discoveryNode, clusterSetting) + val repositoriesService = Mockito.mock(RepositoriesService::class.java) + RemoteClusterRepositoriesService(Supplier { repositoriesService }, clusterService) + clusterSetting.applySettings(Settings.builder().put("cluster.remote.con-alias.mode", "proxy").put("cluster.remote.con-alias.proxy_address", "127.0.0.1:100").build()) + Mockito.verify(repositoriesService, times(1)).registerInternalRepository(REMOTE_REPOSITORY_PREFIX + "con-alias", REMOTE_REPOSITORY_TYPE) + threadPool.shutdown() + } + + fun `test removal of proxy_id for proxy-setup`() { + var clusterSetting = ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + var threadPool = TestThreadPool("ReplicationPluginTest") + val discoveryNode = DiscoveryNode( + "node", + buildNewFakeTransportAddress(), emptyMap(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ) + var clusterService = ClusterServiceUtils.createClusterService(threadPool, discoveryNode, clusterSetting) + val repositoriesService = Mockito.mock(RepositoriesService::class.java) + RemoteClusterRepositoriesService(Supplier { repositoriesService }, clusterService) + clusterSetting.applySettings(Settings.builder().put("cluster.remote.con-alias.mode", "proxy").put("cluster.remote.con-alias.proxy_address", "127.0.0.1:100").build()) + Mockito.verify(repositoriesService, times(1)).registerInternalRepository(REMOTE_REPOSITORY_PREFIX + "con-alias", REMOTE_REPOSITORY_TYPE) + clusterSetting.applySettings(Settings.builder().putNull("cluster.remote.con-alias.mode").build()) + clusterSetting.applySettings(Settings.builder().putNull("cluster.remote.con-alias.proxy_address").build()) + Mockito.verify(repositoriesService, times(1)).unregisterInternalRepository(REMOTE_REPOSITORY_PREFIX + "con-alias") + threadPool.shutdown() + } +} From f265458fb60c457c4f103f1f5a3be9e44a2db6dd Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 14 Apr 2023 16:00:04 +0530 Subject: [PATCH 38/84] handling null pointer exception in multi-filed mapping (#757) (#789) * handling null pointer exception in multi-filed mapping (cherry picked from commit 501bd283505f61ebecf617849da19d28e2724da9) Signed-off-by: sricharanvuppu Co-authored-by: sricharanvuppu <113983630+sricharanvuppu@users.noreply.github.com> --- .../task/index/IndexReplicationTask.kt | 24 +++++++++---------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt index bc3cf5e0..381c96d5 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt @@ -401,12 +401,9 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript } } - private suspend fun UpdateFollowereMapping(followerIndex: String,mappingSource: String) { + private suspend fun updateFollowerMapping(followerIndex: String,mappingSource: String?) 
{ val options = IndicesOptions.strictSingleIndexNoExpandForbidClosed() - if (null == mappingSource) { - throw MappingNotAvailableException("MappingSource is not available") - } val putMappingRequest = PutMappingRequest().indices(followerIndex).indicesOptions(options) .source(mappingSource, XContentType.JSON) val updateMappingRequest = UpdateMetadataRequest(followerIndex, UpdateMetadataRequest.Type.MAPPING, putMappingRequest) @@ -557,19 +554,20 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript val options = IndicesOptions.strictSingleIndexNoExpandForbidClosed() var gmr = GetMappingsRequest().indices(this.leaderIndex.name).indicesOptions(options) var mappingResponse = remoteClient.suspending(remoteClient.admin().indices()::getMappings, injectSecurityContext = true)(gmr) - var leaderMappingSource = mappingResponse.mappings.get(this.leaderIndex.name).source().toString() - val leaderProperties = mappingResponse.mappings().get(this.leaderIndex.name).sourceAsMap().toMap().get("properties") as Map + var leaderMappingSource = mappingResponse?.mappings?.get(this.leaderIndex.name)?.source()?.toString() + @Suppress("UNCHECKED_CAST") + val leaderProperties = mappingResponse?.mappings()?.get(this.leaderIndex.name)?.sourceAsMap()?.toMap()?.get("properties") as? Map? gmr = GetMappingsRequest().indices(this.followerIndexName).indicesOptions(options) mappingResponse = client.suspending(client.admin().indices()::getMappings, injectSecurityContext = true)(gmr) - val followerProperties = mappingResponse.mappings().get(this.followerIndexName).sourceAsMap().toMap().get("properties") as Map - for(iter in followerProperties) { - if(leaderProperties.containsKey(iter.key) && leaderProperties.getValue(iter.key).toString()!=(iter.value).toString()){ - log.info("Updating Multi-field Mapping at Follower") - UpdateFollowereMapping(this.followerIndexName,leaderMappingSource) - break; + @Suppress("UNCHECKED_CAST") + val followerProperties = mappingResponse?.mappings()?.get(this.followerIndexName)?.sourceAsMap()?.toMap()?.get("properties") as? Map? 
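// A minimal standalone sketch of the null-safe multi-field mapping comparison built up in this hunk,
// using plain Kotlin maps; the function name and parameters here are hypothetical, not part of the patch API.
fun multiFieldMappingOutOfSync(leaderProps: Map<String, Any>?, followerProps: Map<String, Any>?): Boolean {
    for ((field, followerDef) in followerProps ?: emptyMap()) {
        // A field whose definition on the leader differs from (or is missing for) the follower's copy
        // marks the follower mapping as stale and triggers a mapping update from the leader source.
        if (leaderProps?.get(field)?.toString() != followerDef.toString()) {
            return true
        }
    }
    return false
}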
+ for((key,value) in followerProperties?: emptyMap()) { + if (leaderProperties?.getValue(key).toString() != (value).toString()) { + log.debug("Updating Multi-field Mapping at Follower") + updateFollowerMapping(this.followerIndexName, leaderMappingSource) + break } } - } catch (e: Exception) { log.error("Error in getting the required metadata ${e.stackTraceToString()}") } finally { From 16908ccdc6221444e84f8ac6e1bfff0d0162e8ad Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 14 Apr 2023 16:04:28 +0530 Subject: [PATCH 39/84] Handled batch requests for replication metadata update under cluster state (#772) (#778) Signed-off-by: Sai Kumar (cherry picked from commit c5d4cdc5332c0a0839f29ffdea4f2d52f2307f0d) Co-authored-by: Sai Kumar --- .../metadata/UpdateReplicationMetadata.kt | 23 +++--- .../state/UpdateReplicationMetadataTests.kt | 75 +++++++++++++++++++ 2 files changed, 89 insertions(+), 9 deletions(-) create mode 100644 src/test/kotlin/org/opensearch/replication/metadata/state/UpdateReplicationMetadataTests.kt diff --git a/src/main/kotlin/org/opensearch/replication/metadata/UpdateReplicationMetadata.kt b/src/main/kotlin/org/opensearch/replication/metadata/UpdateReplicationMetadata.kt index 856adbb3..54d4663e 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/UpdateReplicationMetadata.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/UpdateReplicationMetadata.kt @@ -49,29 +49,34 @@ class UpdateReplicationStateDetailsTaskExecutor private constructor() override fun execute(currentState: ClusterState, tasks: List) : ClusterStateTaskExecutor.ClusterTasksResult { - return getClusterStateUpdateTaskResult(tasks[0], currentState) + log.debug("Executing replication state update for $tasks") + return getClusterStateUpdateTaskResult(tasks, currentState) } - private fun getClusterStateUpdateTaskResult(request: UpdateReplicationStateDetailsRequest, + private fun getClusterStateUpdateTaskResult(requests: List, currentState: ClusterState) : ClusterStateTaskExecutor.ClusterTasksResult { val currentMetadata = currentState.metadata().custom(ReplicationStateMetadata.NAME) ?: ReplicationStateMetadata.EMPTY - val newMetadata = getUpdatedReplicationMetadata(request, currentMetadata) - if (currentMetadata == newMetadata) { - return getStateUpdateTaskResultForClusterState(request, currentState) // no change + var updatedMetadata = currentMetadata + // compute metadata update for the batched requests + for(request in requests) { + updatedMetadata = getUpdatedReplicationMetadata(request, updatedMetadata) + } + if (currentMetadata == updatedMetadata) { + return getStateUpdateTaskResultForClusterState(requests, currentState) // no change } else { val mdBuilder = Metadata.builder(currentState.metadata) - .putCustom(ReplicationStateMetadata.NAME, newMetadata) + .putCustom(ReplicationStateMetadata.NAME, updatedMetadata) val newClusterState = ClusterState.Builder(currentState).metadata(mdBuilder).build() - return getStateUpdateTaskResultForClusterState(request, newClusterState) + return getStateUpdateTaskResultForClusterState(requests, newClusterState) } } - private fun getStateUpdateTaskResultForClusterState(request: UpdateReplicationStateDetailsRequest, + private fun getStateUpdateTaskResultForClusterState(requests: List, clusterState: ClusterState) : ClusterStateTaskExecutor.ClusterTasksResult { return ClusterStateTaskExecutor.ClusterTasksResult.builder() - .success(request).build(clusterState) + 
.successes(requests).build(clusterState) } private fun getUpdatedReplicationMetadata(request: UpdateReplicationStateDetailsRequest, diff --git a/src/test/kotlin/org/opensearch/replication/metadata/state/UpdateReplicationMetadataTests.kt b/src/test/kotlin/org/opensearch/replication/metadata/state/UpdateReplicationMetadataTests.kt new file mode 100644 index 00000000..a3a68430 --- /dev/null +++ b/src/test/kotlin/org/opensearch/replication/metadata/state/UpdateReplicationMetadataTests.kt @@ -0,0 +1,75 @@ +package org.opensearch.replication.metadata.state + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope +import org.junit.Assert +import org.opensearch.cluster.ClusterState +import org.opensearch.replication.action.replicationstatedetails.UpdateReplicationStateDetailsRequest +import org.opensearch.replication.metadata.ReplicationOverallState +import org.opensearch.replication.metadata.UpdateReplicationStateDetailsTaskExecutor +import org.opensearch.test.ClusterServiceUtils +import org.opensearch.test.OpenSearchTestCase +import org.opensearch.threadpool.TestThreadPool + +@ThreadLeakScope(ThreadLeakScope.Scope.NONE) +class UpdateReplicationMetadataTests : OpenSearchTestCase() { + + var threadPool = TestThreadPool("ReplicationPluginTest") + var clusterService = ClusterServiceUtils.createClusterService(threadPool) + + fun `test single task update`() { + val currentState: ClusterState = clusterService.state() + // single task + val tasks = arrayListOf(UpdateReplicationStateDetailsRequest("test-index", + hashMapOf("REPLICATION_LAST_KNOWN_OVERALL_STATE" to "RUNNING"), UpdateReplicationStateDetailsRequest.UpdateType.ADD)) + val tasksResult = UpdateReplicationStateDetailsTaskExecutor.INSTANCE.execute(currentState, tasks) + + val updatedReplicationDetails = tasksResult.resultingState?.metadata + ?.custom(ReplicationStateMetadata.NAME)?.replicationDetails + + Assert.assertNotNull(updatedReplicationDetails) + Assert.assertNotNull(updatedReplicationDetails?.get("test-index")) + val replicationStateParams = updatedReplicationDetails?.get("test-index") + + Assert.assertEquals(ReplicationOverallState.RUNNING.name, replicationStateParams?.get(REPLICATION_LAST_KNOWN_OVERALL_STATE)) + } + + fun `test multiple tasks to add replication metadata`() { + val currentState: ClusterState = clusterService.state() + // multiple tasks + val tasks = arrayListOf(UpdateReplicationStateDetailsRequest("test-index-1", + hashMapOf("REPLICATION_LAST_KNOWN_OVERALL_STATE" to "RUNNING"), UpdateReplicationStateDetailsRequest.UpdateType.ADD), + UpdateReplicationStateDetailsRequest("test-index-2", + hashMapOf("REPLICATION_LAST_KNOWN_OVERALL_STATE" to "RUNNING"), UpdateReplicationStateDetailsRequest.UpdateType.ADD)) + val tasksResult = UpdateReplicationStateDetailsTaskExecutor.INSTANCE.execute(currentState, tasks) + + val updatedReplicationDetails = tasksResult.resultingState?.metadata + ?.custom(ReplicationStateMetadata.NAME)?.replicationDetails + + Assert.assertNotNull(updatedReplicationDetails) + Assert.assertNotNull(updatedReplicationDetails?.get("test-index-1")) + var replicationStateParams = updatedReplicationDetails?.get("test-index-1") + Assert.assertEquals(ReplicationOverallState.RUNNING.name, replicationStateParams?.get(REPLICATION_LAST_KNOWN_OVERALL_STATE)) + Assert.assertNotNull(updatedReplicationDetails?.get("test-index-2")) + replicationStateParams = updatedReplicationDetails?.get("test-index-2") + Assert.assertEquals(ReplicationOverallState.RUNNING.name, 
replicationStateParams?.get(REPLICATION_LAST_KNOWN_OVERALL_STATE)) + } + + fun `test multiple tasks to add and delete replication metadata`() { + val currentState: ClusterState = clusterService.state() + // multiple tasks + val tasks = arrayListOf(UpdateReplicationStateDetailsRequest("test-index-1", + hashMapOf("REPLICATION_LAST_KNOWN_OVERALL_STATE" to "RUNNING"), UpdateReplicationStateDetailsRequest.UpdateType.ADD), + UpdateReplicationStateDetailsRequest("test-index-2", + hashMapOf("REPLICATION_LAST_KNOWN_OVERALL_STATE" to "RUNNING"), UpdateReplicationStateDetailsRequest.UpdateType.REMOVE)) + val tasksResult = UpdateReplicationStateDetailsTaskExecutor.INSTANCE.execute(currentState, tasks) + + val updatedReplicationDetails = tasksResult.resultingState?.metadata + ?.custom(ReplicationStateMetadata.NAME)?.replicationDetails + + Assert.assertNotNull(updatedReplicationDetails) + Assert.assertNotNull(updatedReplicationDetails?.get("test-index-1")) + var replicationStateParams = updatedReplicationDetails?.get("test-index-1") + Assert.assertEquals(ReplicationOverallState.RUNNING.name, replicationStateParams?.get(REPLICATION_LAST_KNOWN_OVERALL_STATE)) + Assert.assertNull(updatedReplicationDetails?.get("test-index-2")) + } +} \ No newline at end of file From 10e4449f230a50a0b4fd5c833517ade4cd4ebe3c Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 18 Apr 2023 10:29:59 +0530 Subject: [PATCH 40/84] Add setting to use document replication for system indices. (#802) (#803) Signed-off-by: Rishikesh1159 (cherry picked from commit 55b6968af90d739d8448c5d88ed5204a240d0f86) Co-authored-by: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> --- .../replication/metadata/store/ReplicationMetadataStore.kt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt b/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt index ba0122ee..18a0cd6c 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt @@ -40,6 +40,7 @@ import org.opensearch.common.xcontent.LoggingDeprecationHandler import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.XContentParser +import org.opensearch.indices.replication.common.ReplicationType import org.opensearch.replication.util.suspendExecuteWithRetries class ReplicationMetadataStore constructor(val client: Client, val clusterService: ClusterService, @@ -265,6 +266,7 @@ class ReplicationMetadataStore constructor(val client: Client, val clusterServic .put(IndexMetadata.INDEX_AUTO_EXPAND_REPLICAS_SETTING.key, "0-1") .put(IndexMetadata.INDEX_PRIORITY_SETTING.key, Int.MAX_VALUE) .put(IndexMetadata.INDEX_HIDDEN_SETTING.key, true) + .put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.key, ReplicationType.DOCUMENT) // System Indices should use Document Replication strategy .build() } From 794b6ec0afb71c196ffec3af0227642c73fa70f2 Mon Sep 17 00:00:00 2001 From: Monu Singh Date: Fri, 28 Apr 2023 16:37:43 +0530 Subject: [PATCH 41/84] Update Gradle Wrapper to 7.6.1 (#815) Upgrade Gradle version to 7.6.1 to solve build failure with jackson-core-2.15.jar Signed-off-by: Monu Singh --- gradle/wrapper/gradle-wrapper.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
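A minimal sketch, assuming only the OpenSearch settings constants used in the metadata-store change above, of how the replication metadata system index stays hidden, high-priority, and pinned to document replication; the helper function name is hypothetical.

import org.opensearch.cluster.metadata.IndexMetadata
import org.opensearch.common.settings.Settings
import org.opensearch.indices.replication.common.ReplicationType

// Hidden system index that auto-expands replicas and opts out of segment replication.
fun replicationMetadataStoreSettings(): Settings =
    Settings.builder()
        .put(IndexMetadata.INDEX_AUTO_EXPAND_REPLICAS_SETTING.key, "0-1")
        .put(IndexMetadata.INDEX_PRIORITY_SETTING.key, Int.MAX_VALUE)
        .put(IndexMetadata.INDEX_HIDDEN_SETTING.key, true)
        .put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.key, ReplicationType.DOCUMENT)
        .build()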
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index f51fff5e..68efe1de 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,6 +11,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.4.2-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.6.1-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists From 90285161eb3bb9bcdc0c4e8b408d4965b84c53ac Mon Sep 17 00:00:00 2001 From: Monu Singh Date: Tue, 2 May 2023 13:01:12 +0530 Subject: [PATCH 42/84] [Backport] Allow knn index to be used with ccr (#760) (#813) (#817) * Allow knn index to be used with ccr (#760) Signed-off-by: Monu Singh (cherry picked from commit 3b43e55ce05011eca30432bcbdd20a1c6b405d5a) --- .github/workflows/security-knn-tests.yml | 119 ++++++++++++++++++ build.gradle | 43 ++++++- .../replication/ReplicationPlugin.kt | 3 +- .../index/TransportReplicateIndexAction.kt | 11 +- .../TransportResumeIndexReplicationAction.kt | 10 +- .../replication/util/ValidationUtil.kt | 16 +++ .../replication/BasicReplicationIT.kt | 65 ++++++++++ .../replication/MultiClusterRestTestCase.kt | 2 + 8 files changed, 253 insertions(+), 16 deletions(-) create mode 100644 .github/workflows/security-knn-tests.yml diff --git a/.github/workflows/security-knn-tests.yml b/.github/workflows/security-knn-tests.yml new file mode 100644 index 00000000..de049f0c --- /dev/null +++ b/.github/workflows/security-knn-tests.yml @@ -0,0 +1,119 @@ +name: Security and knn tests +# This workflow is triggered on pull requests to main branch +on: + pull_request: + branches: + - '*' + push: + branches: + - '*' + +jobs: + req: + # Job name + name: plugin check + runs-on: ubuntu-latest + outputs: + isSecurityPluginAvailable: ${{ steps.plugin-availability-check.outputs.isSecurityPluginAvailable }} + isKnnPluginAvailable: ${{ steps.plugin-availability-check.outputs.isKnnPluginAvailable }} + steps: + # This step uses the checkout Github action: https://github.com/actions/checkout + - name: Checkout Branch + uses: actions/checkout@v2 + - id: plugin-availability-check + name: "plugin check" + run: | + opensearch_version=$(grep "System.getProperty(\"opensearch.version\", \"" build.gradle | grep '\([0-9]\|[.]\)\{5\}' -o) + opensearch_version=$opensearch_version".0-SNAPSHOT" + # we publish build artifacts to the below url + sec_plugin_url="https://aws.oss.sonatype.org/content/repositories/snapshots/org/opensearch/plugin/opensearch-security/"$opensearch_version"/" + sec_st=$(curl -s -o /dev/null -w "%{http_code}" $sec_plugin_url) + if [ "$sec_st" = "200" ]; then + echo "isSecurityPluginAvailable=True" >> $GITHUB_OUTPUT + cat $GITHUB_OUTPUT + else + echo "isSecurityPluginAvailable=False" >> $GITHUB_OUTPUT + cat $GITHUB_OUTPUT + fi + knn_plugin_url="https://aws.oss.sonatype.org/content/repositories/snapshots/org/opensearch/plugin/opensearch-knn/"$opensearch_version"/" + knn_st=$(curl -s -o /dev/null -w "%{http_code}" $knn_plugin_url) + if [ "$knn_st" = "200" ]; then + echo "isKnnPluginAvailable=True" >> $GITHUB_OUTPUT + cat $GITHUB_OUTPUT + else + echo "isKnnPluginAvailable=False" >> $GITHUB_OUTPUT + cat $GITHUB_OUTPUT + fi + + build: + needs: req + if: ${{ 'True' == needs.req.outputs.isSecurityPluginAvailable }} + # Job name + name: Build and Run Security tests + runs-on: ubuntu-latest + steps: + # This step uses the setup-java Github action: 
https://github.com/actions/setup-java + - name: Set Up JDK 17 + uses: actions/setup-java@v1 + with: + java-version: 17 + # This step uses the checkout Github action: https://github.com/actions/checkout + - name: Checkout Branch + uses: actions/checkout@v2 + - name: Build and run Replication tests + run: | + ls -al src/test/resources/security/plugin + ./gradlew clean release -Dbuild.snapshot=true -PnumNodes=1 -Psecurity=true + - name: Upload failed logs + uses: actions/upload-artifact@v2 + if: failure() + with: + name: logs + path: | + build/testclusters/integTest-*/logs/* + build/testclusters/leaderCluster-*/logs/* + build/testclusters/followCluster-*/logs/* + - name: Create Artifact Path + run: | + mkdir -p cross-cluster-replication-artifacts + cp ./build/distributions/*.zip cross-cluster-replication-artifacts + - name: Uploads coverage + with: + fetch-depth: 2 + uses: codecov/codecov-action@v1.2.1 + + knn-build: + needs: req + if: ${{ 'True' == needs.req.outputs.isKnnPluginAvailable }} + # Job name + name: Build and Run Knn tests + runs-on: ubuntu-latest + steps: + # This step uses the setup-java Github action: https://github.com/actions/setup-java + - name: Set Up JDK 17 + uses: actions/setup-java@v1 + with: + java-version: 17 + # This step uses the checkout Github action: https://github.com/actions/checkout + - name: Checkout Branch + uses: actions/checkout@v2 + - name: Build and run Replication tests + run: | + ./gradlew clean release -Dbuild.snapshot=true -PnumNodes=1 -Dtests.class=org.opensearch.replication.BasicReplicationIT -Dtests.method="test knn index replication" -Pknn=true + - name: Upload failed logs + uses: actions/upload-artifact@v2 + if: failure() + with: + name: logs + path: | + build/testclusters/integTest-*/logs/* + build/testclusters/leaderCluster-*/logs/* + build/testclusters/followCluster-*/logs/* + - name: Create Artifact Path + run: | + mkdir -p cross-cluster-replication-artifacts + cp ./build/distributions/*.zip cross-cluster-replication-artifacts + - name: Uploads coverage + with: + fetch-depth: 2 + uses: codecov/codecov-action@v1.2.1 \ No newline at end of file diff --git a/build.gradle b/build.gradle index 0b893358..cf501426 100644 --- a/build.gradle +++ b/build.gradle @@ -63,7 +63,9 @@ buildscript { security_plugin_path = "build/dependencies/security" security_plugin_download_url = 'https://ci.opensearch.org/ci/dbc/distribution-build-opensearch/' + opensearch_no_snapshot + '/latest/linux/x64/tar/builds/opensearch/plugins/opensearch-security-' + security_no_snapshot + '.zip' - + knn_plugin_path = "build/dependencies/knn" + knn_plugin_download_url = 'https://ci.opensearch.org/ci/dbc/distribution-build-opensearch/' + opensearch_no_snapshot + + '/latest/linux/x64/tar/builds/opensearch/plugins/opensearch-knn-' + security_no_snapshot + '.zip' } repositories { @@ -97,6 +99,8 @@ allprojects { jacoco.toolVersion = "0.8.7" } + + apply plugin: 'java' apply plugin: 'jacoco' apply plugin: 'idea' @@ -234,6 +238,28 @@ def securityPluginFile = new Callable() { } } +def knnEnabled = findProperty("knn") == "true" + +def knnPluginFile = new Callable() { + @Override + RegularFile call() throws Exception { + return new RegularFile() { + @Override + File getAsFile() { + if (new File("$project.rootDir/$knn_plugin_path").exists()) { + project.delete(files("$project.rootDir/$knn_plugin_path")) + } + project.mkdir knn_plugin_path + ant.get(src: knn_plugin_download_url, + dest: knn_plugin_path, + httpusecaches: false) + return 
fileTree(knn_plugin_path).getSingleFile() + } + } + } +} + + // Clone of WaitForHttpResource with updated code to support Cross cluster usecase class CrossClusterWaitForHttpResource { @@ -350,8 +376,13 @@ testClusters { if(securityEnabled) { plugin(provider(securityPluginFile)) } - int debugPort = 5005 testDistribution = "INTEG_TEST" + if(knnEnabled) { + plugin(provider(knnPluginFile)) + testDistribution = "ARCHIVE" + } + int debugPort = 5005 + if (_numNodes > 1) numberOfNodes = _numNodes //numberOfNodes = 3 setting 'path.repo', repo.absolutePath @@ -363,6 +394,10 @@ testClusters { if(securityEnabled) { plugin(provider(securityPluginFile)) } + if(knnEnabled) { + plugin(provider(knnPluginFile)) + testDistribution = "ARCHIVE" + } int debugPort = 5010 if (_numNodes > 1) numberOfNodes = _numNodes //numberOfNodes = 3 @@ -418,6 +453,10 @@ integTest { useCluster testClusters.leaderCluster useCluster testClusters.followCluster + if(knnEnabled){ + nonInputProperties.systemProperty('tests.knn_plugin_enabled', "true") + } + // We skip BWC test here as those get run as part of separate target `bwcTestSuite`. filter { excludeTestsMatching "org.opensearch.replication.bwc.*IT" diff --git a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt index 9b50bb2b..3ea64fe0 100644 --- a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt +++ b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt @@ -156,9 +156,10 @@ internal class ReplicationPlugin : Plugin(), ActionPlugin, PersistentTaskPlugin, private var followerClusterStats = FollowerClusterStats() companion object { + const val KNN_INDEX_SETTING = "index.knn" + const val KNN_PLUGIN_PRESENT_SETTING = "knn.plugin.enabled" const val REPLICATION_EXECUTOR_NAME_LEADER = "replication_leader" const val REPLICATION_EXECUTOR_NAME_FOLLOWER = "replication_follower" - const val KNN_INDEX_SETTING = "index.knn" val REPLICATED_INDEX_SETTING: Setting = Setting.simpleString("index.plugins.replication.follower.leader_index", Setting.Property.InternalIndex, Setting.Property.IndexScope) val REPLICATION_FOLLOWER_OPS_BATCH_SIZE: Setting = Setting.intSetting("plugins.replication.follower.index.ops_batch_size", 50000, 16, diff --git a/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt b/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt index 80b0e30b..becb1360 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt @@ -36,15 +36,16 @@ import org.opensearch.cluster.ClusterState import org.opensearch.cluster.metadata.MetadataCreateIndexService import org.opensearch.common.inject.Inject import org.opensearch.common.settings.Settings +import org.opensearch.cluster.service.ClusterService import org.opensearch.env.Environment import org.opensearch.index.IndexNotFoundException import org.opensearch.index.IndexSettings -import org.opensearch.replication.ReplicationPlugin.Companion.KNN_INDEX_SETTING import org.opensearch.tasks.Task import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportService class TransportReplicateIndexAction @Inject constructor(transportService: TransportService, + private val clusterService: ClusterService, val threadPool: ThreadPool, actionFilters: ActionFilters, private val client : Client, @@ -98,12 +99,8 @@ class 
TransportReplicateIndexAction @Inject constructor(transportService: Transp if (!leaderSettings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.key, false)) { throw IllegalArgumentException("Cannot Replicate an index where the setting ${IndexSettings.INDEX_SOFT_DELETES_SETTING.key} is disabled") } - - // For k-NN indices, k-NN loads its own engine and this conflicts with the replication follower engine - // Blocking k-NN indices for replication - if(leaderSettings.getAsBoolean(KNN_INDEX_SETTING, false)) { - throw IllegalArgumentException("Cannot replicate k-NN index - ${request.leaderIndex}") - } + //Not starting replication if leader index is knn as knn plugin is not installed on follower. + ValidationUtil.checkKNNEligibility(leaderSettings, clusterService, request.leaderIndex) ValidationUtil.validateIndexSettings( environment, diff --git a/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt b/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt index 7be95e67..55d87cf0 100644 --- a/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt @@ -49,11 +49,12 @@ import org.opensearch.cluster.metadata.IndexMetadata import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject +import org.opensearch.replication.ReplicationPlugin.Companion.KNN_INDEX_SETTING +import org.opensearch.replication.ReplicationPlugin.Companion.KNN_PLUGIN_PRESENT_SETTING import org.opensearch.common.io.stream.StreamInput import org.opensearch.env.Environment import org.opensearch.index.IndexNotFoundException import org.opensearch.index.shard.ShardId -import org.opensearch.replication.ReplicationPlugin.Companion.KNN_INDEX_SETTING import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportService import java.io.IOException @@ -102,11 +103,8 @@ class TransportResumeIndexReplicationAction @Inject constructor(transportService val leaderSettings = settingsResponse.indexToSettings.get(params.leaderIndex.name) ?: throw IndexNotFoundException(params.leaderIndex.name) - // k-NN Setting is a static setting. In case the setting is changed at the leader index before resume, - // block the resume. - if(leaderSettings.getAsBoolean(KNN_INDEX_SETTING, false)) { - throw IllegalStateException("Cannot resume replication for k-NN enabled index ${params.leaderIndex.name}.") - } + /// Not starting replication if leader index is knn as knn plugin is not installed on follower. 
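// A minimal sketch of the eligibility rule applied below, with the follower's plugin-presence check
// reduced to a hypothetical boolean parameter: a leader index with index.knn enabled is only accepted
// when the follower cluster also has the k-NN plugin installed (surfaced via the knn.plugin.enabled setting).
fun checkKnnEligibility(leaderSettings: org.opensearch.common.settings.Settings,
                        knnPluginInstalledOnFollower: Boolean,
                        leaderIndex: String) {
    if (leaderSettings.getAsBoolean("index.knn", false) && !knnPluginInstalledOnFollower) {
        throw IllegalStateException("Cannot proceed with replication for k-NN enabled index $leaderIndex as knn plugin is not installed.")
    }
}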
+ ValidationUtil.checkKNNEligibility(leaderSettings, clusterService, params.leaderIndex.name) ValidationUtil.validateAnalyzerSettings(environment, leaderSettings, replMetdata.settings) diff --git a/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt b/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt index 515c96ec..1cfa7444 100644 --- a/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt +++ b/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt @@ -23,6 +23,9 @@ import org.opensearch.common.settings.Settings import org.opensearch.env.Environment import org.opensearch.index.IndexNotFoundException import java.io.UnsupportedEncodingException +import org.opensearch.cluster.service.ClusterService +import org.opensearch.replication.ReplicationPlugin.Companion.KNN_INDEX_SETTING +import org.opensearch.replication.ReplicationPlugin.Companion.KNN_PLUGIN_PRESENT_SETTING import java.nio.file.Files import java.nio.file.Path import java.util.Locale @@ -138,4 +141,17 @@ object ValidationUtil { throw validationException } } + + /** + * Throw exception if leader index is knn a knn is not installed + */ + fun checkKNNEligibility(leaderSettings: Settings, clusterService: ClusterService, leaderIndex: String) { + if(leaderSettings.getAsBoolean(KNN_INDEX_SETTING, false)) { + if(clusterService.clusterSettings.get(KNN_PLUGIN_PRESENT_SETTING) == null){ + throw IllegalStateException("Cannot proceed with replication for k-NN enabled index ${leaderIndex} as knn plugin is not installed.") + } + } + + } + } diff --git a/src/test/kotlin/org/opensearch/replication/BasicReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/BasicReplicationIT.kt index ff3bfa83..c87fe4ec 100644 --- a/src/test/kotlin/org/opensearch/replication/BasicReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/BasicReplicationIT.kt @@ -23,8 +23,10 @@ import org.opensearch.action.get.GetRequest import org.opensearch.action.index.IndexRequest import org.opensearch.client.RequestOptions import org.opensearch.client.indices.CreateIndexRequest +import org.opensearch.common.xcontent.XContentType import org.opensearch.common.CheckedRunnable import org.opensearch.test.OpenSearchTestCase.assertBusy +import org.opensearch.client.indices.PutMappingRequest import org.junit.Assert import java.util.Locale import java.util.concurrent.TimeUnit @@ -83,6 +85,69 @@ class BasicReplicationIT : MultiClusterRestTestCase() { "blocked by: [FORBIDDEN/1000/index read-only(cross-cluster-replication)];]") } + fun `test knn index replication`() { + + + val followerClient = getClientForCluster(FOLL) + val leaderClient = getClientForCluster(LEADER) + createConnectionBetweenClusters(FOLL, LEADER) + val leaderIndexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT) + val followerIndexNameInitial = randomAlphaOfLength(10).toLowerCase(Locale.ROOT) + val followerIndexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT) + val KNN_INDEX_MAPPING = "{\"properties\":{\"my_vector1\":{\"type\":\"knn_vector\",\"dimension\":2},\"my_vector2\":{\"type\":\"knn_vector\",\"dimension\":4}}}" + // create knn-index on leader cluster + try { + val createIndexResponse = leaderClient.indices().create( + CreateIndexRequest(leaderIndexName) + .mapping(KNN_INDEX_MAPPING, XContentType.JSON), RequestOptions.DEFAULT + ) + assertThat(createIndexResponse.isAcknowledged).isTrue() + } catch (e: Exception){ + //index creation will fail if Knn plugin is not installed + assumeNoException("Could not create Knn index on leader cluster", 
e) + } + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName), waitForRestore=true) + // Create document + var source = mapOf("my_vector1" to listOf(2.5,3.5) , "price" to 7.1) + var response = leaderClient.index(IndexRequest(leaderIndexName).id("1").source(source), RequestOptions.DEFAULT) + assertThat(response.result).withFailMessage("Failed to create leader data").isEqualTo(Result.CREATED) + assertBusy({ + val getResponse = followerClient.get(GetRequest(followerIndexName, "1"), RequestOptions.DEFAULT) + assertThat(getResponse.isExists).isTrue() + assertThat(getResponse.sourceAsMap).isEqualTo(source) + }, 60L, TimeUnit.SECONDS) + + // Update document + source = mapOf("my_vector1" to listOf(3.5,4.5) , "price" to 12.9) + response = leaderClient.index(IndexRequest(leaderIndexName).id("1").source(source), RequestOptions.DEFAULT) + assertThat(response.result).withFailMessage("Failed to update leader data").isEqualTo(Result.UPDATED) + assertBusy({ + val getResponse = followerClient.get(GetRequest(followerIndexName, "1"), RequestOptions.DEFAULT) + assertThat(getResponse.isExists).isTrue() + assertThat(getResponse.sourceAsMap).isEqualTo(source) + },60L, TimeUnit.SECONDS) + val KNN_INDEX_MAPPING1 = "{\"properties\":{\"my_vector1\":{\"type\":\"knn_vector\",\"dimension\":2},\"my_vector2\":{\"type\":\"knn_vector\",\"dimension\":4},\"my_vector3\":{\"type\":\"knn_vector\",\"dimension\":4}}}" + val updateIndexResponse = leaderClient.indices().putMapping( + PutMappingRequest(leaderIndexName).source(KNN_INDEX_MAPPING1, XContentType.JSON) , RequestOptions.DEFAULT + ) + source = mapOf("my_vector3" to listOf(3.1,4.5,5.7,8.9) , "price" to 17.9) + response = leaderClient.index(IndexRequest(leaderIndexName).id("2").source(source), RequestOptions.DEFAULT) + assertThat(response.result).withFailMessage("Failed to update leader data").isEqualTo(Result.CREATED) + assertBusy({ + val getResponse = followerClient.get(GetRequest(followerIndexName, "2"), RequestOptions.DEFAULT) + assertThat(getResponse.isExists).isTrue() + assertThat(getResponse.sourceAsMap).isEqualTo(source) + },60L, TimeUnit.SECONDS) + assertThat(updateIndexResponse.isAcknowledged).isTrue() + // Delete document + val deleteResponse = leaderClient.delete(DeleteRequest(leaderIndexName).id("1"), RequestOptions.DEFAULT) + assertThat(deleteResponse.result).withFailMessage("Failed to delete leader data").isEqualTo(Result.DELETED) + assertBusy({ + val getResponse = followerClient.get(GetRequest(followerIndexName, "1"), RequestOptions.DEFAULT) + assertThat(getResponse.isExists).isFalse() + }, 60L, TimeUnit.SECONDS) + } + fun `test existing index replication`() { val follower = getClientForCluster(FOLL) val leader = getClientForCluster(LEADER) diff --git a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt index 744b7223..0e3e3208 100644 --- a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt +++ b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt @@ -641,4 +641,6 @@ abstract class MultiClusterRestTestCase : OpenSearchTestCase() { val integTestRemote = systemProperties.get("tests.integTestRemote") as String? 
return integTestRemote.equals("true") } + + } From c226dc4ca5476c4849f87e3f9f288c21810811a3 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 17 May 2023 16:44:01 +0530 Subject: [PATCH 43/84] Extract number of nodes (#826) (#828) * Extract number of nodes Extract number of nodes from the input to pass as PnumNodes Signed-off-by: Monu Singh Signed-off-by: Monu Singh * Update integtest.sh Added further logging Signed-off-by: Monu Singh Signed-off-by: Monu Singh --------- Signed-off-by: Monu Singh (cherry picked from commit 9408b3d2b21dc786af593e7e93ca43181ed5bba9) Co-authored-by: Monu Singh --- scripts/integtest.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/scripts/integtest.sh b/scripts/integtest.sh index ece01bc0..1a0498ae 100755 --- a/scripts/integtest.sh +++ b/scripts/integtest.sh @@ -97,10 +97,19 @@ then leader=$(echo $data | cut -d ',' -f1 | cut -d ':' -f1,2 ) follower=$(echo $data | cut -d ',' -f2 | cut -d ':' -f1,2 ) + echo "leader: $leader" + echo "follower: $follower" + + # Get number of nodes, assuming both leader and follower have same number of nodes + numNodes=$((${follower##*:} - ${leader##*:})) + echo "numNodes: $numNodes" LTRANSPORT_PORT=$(echo $data | cut -d ',' -f1 | cut -d ':' -f1,3 ) FTRANSPORT_PORT=$(echo $data | cut -d ',' -f2 | cut -d ':' -f1,3 ) - eval "./gradlew integTestRemote -Dleader.http_host=\"$leader\" -Dfollower.http_host=\"$follower\" -Dfollower.transport_host=\"$FTRANSPORT_PORT\" -Dleader.transport_host=\"$LTRANSPORT_PORT\" -Dsecurity_enabled=\"$SECURITY_ENABLED\" -Duser=\"$USERNAME\" -Dpassword=\"$PASSWORD\" --console=plain " + echo "LTRANSPORT_PORT: $LTRANSPORT_PORT" + echo "FTRANSPORT_PORT: $FTRANSPORT_PORT" + + eval "./gradlew integTestRemote -Dleader.http_host=\"$leader\" -Dfollower.http_host=\"$follower\" -Dfollower.transport_host=\"$FTRANSPORT_PORT\" -Dleader.transport_host=\"$LTRANSPORT_PORT\" -Dsecurity_enabled=\"$SECURITY_ENABLED\" -Duser=\"$USERNAME\" -Dpassword=\"$PASSWORD\" -PnumNodes=$numNodes --console=plain " else # Single cluster From 845edbde0d05ef0f84973d7e86c90a23a4d0e9eb Mon Sep 17 00:00:00 2001 From: opensearch-ci-bot Date: Thu, 18 May 2023 00:07:00 +0000 Subject: [PATCH 44/84] Increment version to 2.8.0-SNAPSHOT Signed-off-by: opensearch-ci-bot --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index cf501426..80776756 100644 --- a/build.gradle +++ b/build.gradle @@ -36,7 +36,7 @@ import org.opensearch.gradle.test.RestIntegTestTask buildscript { ext { isSnapshot = "true" == System.getProperty("build.snapshot", "true") - opensearch_version = System.getProperty("opensearch.version", "2.7.0-SNAPSHOT") + opensearch_version = System.getProperty("opensearch.version", "2.8.0-SNAPSHOT") buildVersionQualifier = System.getProperty("build.version_qualifier", "") // e.g. 
2.0.0-rc1-SNAPSHOT -> 2.0.0.0-rc1-SNAPSHOT version_tokens = opensearch_version.tokenize('-') From ac0242700c7c5569964d40905602ca4e3146f38e Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 23 May 2023 12:46:59 +0530 Subject: [PATCH 45/84] Update snakeyaml to version 2.0 (#861) Signed-off-by: Monu Singh (cherry picked from commit e3256ebf6d556c8f6c7612a30d8737cb443b564c) Co-authored-by: Monu Singh --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index cf501426..869e302c 100644 --- a/build.gradle +++ b/build.gradle @@ -128,7 +128,7 @@ configurations.all { force 'org.apache.httpcomponents.client5:httpclient5:5.0.3' force 'org.apache.httpcomponents.client5:httpclient5-osgi:5.0.3' force 'com.fasterxml.jackson.core:jackson-databind:2.13.4.2' - force 'org.yaml:snakeyaml:1.32' + force 'org.yaml:snakeyaml:2.0' force 'org.codehaus.plexus:plexus-utils:3.0.24' } } From 9d048647b7e5fe3e78ba5dd06f84f7a967a476ea Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 24 May 2023 09:56:10 +0530 Subject: [PATCH 46/84] Handle serialization issues with UpdateReplicationStateDetailsRequest (#866) (#872) Signed-off-by: Ankit Kala (cherry picked from commit 3e3787dddaa99a4085153024f064566730881fb9) Co-authored-by: Ankit Kala --- .../UpdateReplicationStateDetailsRequest.kt | 3 +- ...dateReplicationStateDetailsRequestTests.kt | 42 +++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 src/test/kotlin/org/opensearch/replication/action/replicationstatedetails/UpdateReplicationStateDetailsRequestTests.kt diff --git a/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/UpdateReplicationStateDetailsRequest.kt b/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/UpdateReplicationStateDetailsRequest.kt index bddcd528..04fedd0a 100644 --- a/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/UpdateReplicationStateDetailsRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/UpdateReplicationStateDetailsRequest.kt @@ -48,6 +48,7 @@ class UpdateReplicationStateDetailsRequest: AcknowledgedRequest Date: Wed, 24 May 2023 10:20:34 +0530 Subject: [PATCH 47/84] Update retention lease format (#850) Signed-off-by: Monu Singh --- .../TransportResumeIndexReplicationAction.kt | 14 ++-- .../TransportStopIndexReplicationAction.kt | 2 +- .../repository/RemoteClusterRepository.kt | 4 +- .../RemoteClusterRetentionLeaseHelper.kt | 82 ++++++++++++++++--- .../task/index/IndexReplicationTask.kt | 2 +- .../task/shard/ShardReplicationTask.kt | 2 +- .../bwc/BackwardsCompatibilityIT.kt | 32 ++++++++ 7 files changed, 117 insertions(+), 21 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt b/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt index 55d87cf0..f281d642 100644 --- a/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt @@ -49,16 +49,16 @@ import org.opensearch.cluster.metadata.IndexMetadata import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.service.ClusterService import 
org.opensearch.common.inject.Inject -import org.opensearch.replication.ReplicationPlugin.Companion.KNN_INDEX_SETTING -import org.opensearch.replication.ReplicationPlugin.Companion.KNN_PLUGIN_PRESENT_SETTING + import org.opensearch.common.io.stream.StreamInput import org.opensearch.env.Environment import org.opensearch.index.IndexNotFoundException import org.opensearch.index.shard.ShardId +import org.opensearch.replication.util.indicesService import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportService import java.io.IOException -import java.lang.IllegalStateException + class TransportResumeIndexReplicationAction @Inject constructor(transportService: TransportService, clusterService: ClusterService, @@ -132,10 +132,14 @@ class TransportResumeIndexReplicationAction @Inject constructor(transportService var isResumable = true val remoteClient = client.getRemoteClusterClient(params.leaderAlias) val shards = clusterService.state().routingTable.indicesRouting().get(params.followerIndexName).shards() - val retentionLeaseHelper = RemoteClusterRetentionLeaseHelper(clusterService.clusterName.value(), remoteClient) + val retentionLeaseHelper = RemoteClusterRetentionLeaseHelper(clusterService.clusterName.value(), clusterService.state().metadata.clusterUUID(), remoteClient) shards.forEach { val followerShardId = it.value.shardId - if (!retentionLeaseHelper.verifyRetentionLeaseExist(ShardId(params.leaderIndex, followerShardId.id), followerShardId)) { + + val followerIndexService = indicesService.indexServiceSafe(followerShardId.index) + val indexShard = followerIndexService.getShard(followerShardId.id) + + if (!retentionLeaseHelper.verifyRetentionLeaseExist(ShardId(params.leaderIndex, followerShardId.id), followerShardId, indexShard.lastSyncedGlobalCheckpoint+1)) { isResumable = false } } diff --git a/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt b/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt index fd82bba4..8f7ff425 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt @@ -115,7 +115,7 @@ class TransportStopIndexReplicationAction @Inject constructor(transportService: try { val replMetadata = replicationMetadataManager.getIndexReplicationMetadata(request.indexName) val remoteClient = client.getRemoteClusterClient(replMetadata.connectionName) - val retentionLeaseHelper = RemoteClusterRetentionLeaseHelper(clusterService.clusterName.value(), remoteClient) + val retentionLeaseHelper = RemoteClusterRetentionLeaseHelper(clusterService.clusterName.value(), clusterService.state().metadata.clusterUUID(), remoteClient) retentionLeaseHelper.attemptRemoveRetentionLease(clusterService, replMetadata, request.indexName) } catch(e: Exception) { log.error("Failed to remove retention lease from the leader cluster", e) diff --git a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt index 6cfbff0d..1e5fac38 100644 --- a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt +++ b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt @@ -57,6 +57,7 @@ import org.opensearch.index.store.Store import org.opensearch.indices.recovery.RecoverySettings import 
org.opensearch.indices.recovery.RecoveryState import org.opensearch.replication.ReplicationPlugin.Companion.REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING +import org.opensearch.replication.seqno.RemoteClusterRetentionLeaseHelper import org.opensearch.replication.util.stackTraceToString import org.opensearch.repositories.IndexId import org.opensearch.repositories.Repository @@ -285,7 +286,8 @@ class RemoteClusterRepository(private val repositoryMetadata: RepositoryMetadata snapshotShardId.id) restoreUUID = UUIDs.randomBase64UUID() val getStoreMetadataRequest = GetStoreMetadataRequest(restoreUUID, leaderShardNode, leaderShardId, - clusterService.clusterName.value(), followerShardId) + RemoteClusterRetentionLeaseHelper.getFollowerClusterNameWithUUID(clusterService.clusterName.value(), clusterService.state().metadata.clusterUUID()), + followerShardId) // Gets the remote store metadata val metadataResponse = executeActionOnRemote(GetStoreMetadataAction.INSTANCE, getStoreMetadataRequest, followerIndexName) diff --git a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt index 8771e25d..e755a7be 100644 --- a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt +++ b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt @@ -23,29 +23,42 @@ import org.opensearch.index.seqno.RetentionLeaseAlreadyExistsException import org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException import org.opensearch.index.seqno.RetentionLeaseNotFoundException import org.opensearch.index.shard.ShardId -import org.opensearch.replication.action.stop.TransportStopIndexReplicationAction import org.opensearch.replication.metadata.store.ReplicationMetadata +import org.opensearch.replication.repository.RemoteClusterRepository import org.opensearch.replication.task.index.IndexReplicationParams import org.opensearch.replication.util.stackTraceToString import org.opensearch.replication.util.suspending -class RemoteClusterRetentionLeaseHelper constructor(val followerClusterName: String, val client: Client) { +class RemoteClusterRetentionLeaseHelper constructor(var followerClusterNameWithUUID: String, val client: Client) { - private val retentionLeaseSource = retentionLeaseSource(followerClusterName) + private val retentionLeaseSource = retentionLeaseSource(followerClusterNameWithUUID) + private var followerClusterUUID : String = "" + private var followerClusterName : String = "" + + constructor(followerClusterName: String, followerClusterUUID: String, client: Client) :this(followerClusterName, client){ + this.followerClusterUUID = followerClusterUUID + this.followerClusterName = followerClusterName + this.followerClusterNameWithUUID = getFollowerClusterNameWithUUID(followerClusterName, followerClusterUUID) + } companion object { private val log = LogManager.getLogger(RemoteClusterRetentionLeaseHelper::class.java) const val RETENTION_LEASE_PREFIX = "replication:" - fun retentionLeaseSource(followerClusterName: String): String = "${RETENTION_LEASE_PREFIX}${followerClusterName}" + fun retentionLeaseSource(followerClusterName: String): String + = "${RETENTION_LEASE_PREFIX}${followerClusterName}" fun retentionLeaseIdForShard(followerClusterName: String, followerShardId: ShardId): String { val retentionLeaseSource = retentionLeaseSource(followerClusterName) return "$retentionLeaseSource:${followerShardId}" } + + fun 
getFollowerClusterNameWithUUID(followerClusterName: String, followerClusterUUID: String): String{ + return "$followerClusterName:$followerClusterUUID" + } } - public suspend fun verifyRetentionLeaseExist(leaderShardId: ShardId, followerShardId: ShardId): Boolean { - val retentionLeaseId = retentionLeaseIdForShard(followerClusterName, followerShardId) + public suspend fun verifyRetentionLeaseExist(leaderShardId: ShardId, followerShardId: ShardId, seqNo: Long): Boolean { + val retentionLeaseId = retentionLeaseIdForShard(followerClusterNameWithUUID, followerShardId) // Currently there is no API to describe/list the retention leases . // So we are verifying the existence of lease by trying to renew a lease by same name . // If retention lease doesn't exist, this will throw an RetentionLeaseNotFoundException exception @@ -60,15 +73,60 @@ class RemoteClusterRetentionLeaseHelper constructor(val followerClusterName: Str return true } catch (e: RetentionLeaseNotFoundException) { + return addNewRetentionLeaseIfOldExists(leaderShardId, followerShardId, seqNo) + }catch (e : Exception) { return false } return true } + private suspend fun addNewRetentionLeaseIfOldExists(leaderShardId: ShardId, followerShardId: ShardId, seqNo: Long): Boolean { + //Check for old retention lease id + val oldRetentionLeaseId = retentionLeaseIdForShard(followerClusterName, followerShardId) + val requestForOldId = RetentionLeaseActions.RenewRequest(leaderShardId, oldRetentionLeaseId, RetentionLeaseActions.RETAIN_ALL, retentionLeaseSource) + try { + client.suspendExecute(RetentionLeaseActions.Renew.INSTANCE, requestForOldId) + } catch (ex: RetentionLeaseInvalidRetainingSeqNoException) { + //old retention lease id present, will add new retention lease + log.info("Old retention lease Id ${oldRetentionLeaseId} present with invalid seq number, adding new retention lease with ID:" + + "${retentionLeaseIdForShard(followerClusterNameWithUUID, followerShardId)} ") + return addNewRetentionLease(leaderShardId, seqNo, followerShardId, RemoteClusterRepository.REMOTE_CLUSTER_REPO_REQ_TIMEOUT_IN_MILLI_SEC ) + }catch (ex: Exception){ + log.info("Encountered Exception while checking for old retention lease: ${ex.stackTraceToString()}") + return false + } + log.info("Old retention lease Id ${oldRetentionLeaseId}, adding new retention lease with ID:" + + "${retentionLeaseIdForShard(followerClusterNameWithUUID, followerShardId)} ") + return addNewRetentionLease(leaderShardId,seqNo, followerShardId, RemoteClusterRepository.REMOTE_CLUSTER_REPO_REQ_TIMEOUT_IN_MILLI_SEC ) + } + + + private suspend fun addNewRetentionLease(leaderShardId: ShardId, seqNo: Long, followerShardId: ShardId, timeout: Long): Boolean { + val retentionLeaseId = retentionLeaseIdForShard(followerClusterNameWithUUID, followerShardId) + val request = RetentionLeaseActions.AddRequest(leaderShardId, retentionLeaseId, seqNo, retentionLeaseSource) + try { + client.suspendExecute(RetentionLeaseActions.Add.INSTANCE, request) + return true + } catch (e: Exception) { + log.info("Exception while adding new retention lease with i: $retentionLeaseId") + return false + } + } + public suspend fun renewRetentionLease(leaderShardId: ShardId, seqNo: Long, followerShardId: ShardId) { - val retentionLeaseId = retentionLeaseIdForShard(followerClusterName, followerShardId) + val retentionLeaseId = retentionLeaseIdForShard(followerClusterNameWithUUID, followerShardId) val request = RetentionLeaseActions.RenewRequest(leaderShardId, retentionLeaseId, seqNo, retentionLeaseSource) - 
client.suspendExecute(RetentionLeaseActions.Renew.INSTANCE, request) + try { + client.suspendExecute(RetentionLeaseActions.Renew.INSTANCE, request) + }catch (e: RetentionLeaseNotFoundException){ + //New retention lease not found, checking presense of old retention lease + log.info("Retention lease with ID: ${retentionLeaseId} not found," + + " checking for old retention lease with ID: ${retentionLeaseIdForShard(followerClusterName, followerShardId)}") + if(!addNewRetentionLeaseIfOldExists(leaderShardId, followerShardId, seqNo)){ + log.info("Both new $retentionLeaseId and old ${retentionLeaseIdForShard(followerClusterNameWithUUID, followerShardId)} retention lease not found.") + throw e + } + } } public suspend fun attemptRemoveRetentionLease(clusterService: ClusterService, replMetadata: ReplicationMetadata, @@ -78,7 +136,7 @@ class RemoteClusterRetentionLeaseHelper constructor(val followerClusterName: Str val params = IndexReplicationParams(replMetadata.connectionName, remoteMetadata.index, followerIndexName) val remoteClient = client.getRemoteClusterClient(params.leaderAlias) val shards = clusterService.state().routingTable.indicesRouting().get(params.followerIndexName).shards() - val retentionLeaseHelper = RemoteClusterRetentionLeaseHelper(clusterService.clusterName.value(), remoteClient) + val retentionLeaseHelper = RemoteClusterRetentionLeaseHelper(clusterService.clusterName.value(), followerClusterUUID, remoteClient) shards.forEach { val followerShardId = it.value.shardId log.debug("Removing lease for $followerShardId.id ") @@ -102,7 +160,7 @@ class RemoteClusterRetentionLeaseHelper constructor(val followerClusterName: Str public suspend fun attemptRetentionLeaseRemoval(leaderShardId: ShardId, followerShardId: ShardId) { - val retentionLeaseId = retentionLeaseIdForShard(followerClusterName, followerShardId) + val retentionLeaseId = retentionLeaseIdForShard(followerClusterNameWithUUID, followerShardId) val request = RetentionLeaseActions.RemoveRequest(leaderShardId, retentionLeaseId) try { client.suspendExecute(RetentionLeaseActions.Remove.INSTANCE, request) @@ -123,7 +181,7 @@ class RemoteClusterRetentionLeaseHelper constructor(val followerClusterName: Str */ public fun addRetentionLease(leaderShardId: ShardId, seqNo: Long, followerShardId: ShardId, timeout: Long) { - val retentionLeaseId = retentionLeaseIdForShard(followerClusterName, followerShardId) + val retentionLeaseId = retentionLeaseIdForShard(followerClusterNameWithUUID, followerShardId) val request = RetentionLeaseActions.AddRequest(leaderShardId, retentionLeaseId, seqNo, retentionLeaseSource) try { client.execute(RetentionLeaseActions.Add.INSTANCE, request).actionGet(timeout) @@ -138,7 +196,7 @@ class RemoteClusterRetentionLeaseHelper constructor(val followerClusterName: Str public fun renewRetentionLease(leaderShardId: ShardId, seqNo: Long, followerShardId: ShardId, timeout: Long) { - val retentionLeaseId = retentionLeaseIdForShard(followerClusterName, followerShardId) + val retentionLeaseId = retentionLeaseIdForShard(followerClusterNameWithUUID, followerShardId) val request = RetentionLeaseActions.RenewRequest(leaderShardId, retentionLeaseId, seqNo, retentionLeaseSource) client.execute(RetentionLeaseActions.Renew.INSTANCE, request).actionGet(timeout) } diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt index 381c96d5..44385eff 100644 --- 
a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt @@ -132,7 +132,7 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript override val followerIndexName = params.followerIndexName override val log = Loggers.getLogger(javaClass, Index(params.followerIndexName, ClusterState.UNKNOWN_UUID)) - private val retentionLeaseHelper = RemoteClusterRetentionLeaseHelper(clusterService.clusterName.value(), remoteClient) + private val retentionLeaseHelper = RemoteClusterRetentionLeaseHelper(clusterService.clusterName.value(), clusterService.state().metadata.clusterUUID(), remoteClient) private var shouldCallEvalMonitoring = true private var updateSettingsContinuousFailCount = 0 private var updateAliasContinousFailCount = 0 diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt index a7418917..24128f8f 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt @@ -69,7 +69,7 @@ class ShardReplicationTask(id: Long, type: String, action: String, description: private val leaderShardId = params.leaderShardId private val followerShardId = params.followerShardId private val remoteClient = client.getRemoteClusterClient(leaderAlias) - private val retentionLeaseHelper = RemoteClusterRetentionLeaseHelper(clusterService.clusterName.value(), remoteClient) + private val retentionLeaseHelper = RemoteClusterRetentionLeaseHelper(clusterService.clusterName.value(), clusterService.state().metadata.clusterUUID(), remoteClient) private var lastLeaseRenewalMillis = System.currentTimeMillis() //Start backOff for exceptions with a second diff --git a/src/test/kotlin/org/opensearch/replication/bwc/BackwardsCompatibilityIT.kt b/src/test/kotlin/org/opensearch/replication/bwc/BackwardsCompatibilityIT.kt index 41b8d960..f6f2c893 100644 --- a/src/test/kotlin/org/opensearch/replication/bwc/BackwardsCompatibilityIT.kt +++ b/src/test/kotlin/org/opensearch/replication/bwc/BackwardsCompatibilityIT.kt @@ -1,5 +1,6 @@ package org.opensearch.replication.bwc; +import org.apache.http.util.EntityUtils import org.assertj.core.api.Assertions import org.junit.Assert import org.junit.BeforeClass @@ -8,13 +9,16 @@ import org.opensearch.action.admin.cluster.health.ClusterHealthRequest import org.opensearch.action.delete.DeleteRequest import org.opensearch.action.get.GetRequest import org.opensearch.action.index.IndexRequest +import org.opensearch.client.Request import org.opensearch.client.RequestOptions +import org.opensearch.client.RestHighLevelClient import org.opensearch.client.indices.CreateIndexRequest import org.opensearch.replication.MultiClusterAnnotations import org.opensearch.replication.MultiClusterRestTestCase import org.opensearch.replication.StartReplicationRequest import org.opensearch.replication.startReplication import org.opensearch.test.OpenSearchTestCase.assertBusy +import org.opensearch.test.rest.OpenSearchRestTestCase import java.util.Collections import java.util.concurrent.TimeUnit import java.util.stream.Collectors @@ -139,12 +143,40 @@ class BackwardsCompatibilityIT : MultiClusterRestTestCase() { Assertions.assertThat(getResponse.isExists).isTrue() Assertions.assertThat(getResponse.sourceAsMap).isEqualTo(source) }, 60, TimeUnit.SECONDS) + + //Check for 
latest retention lease when full cluster restart is done + if (ClusterStatus.from(System.getProperty("tests.bwcTask")) == ClusterStatus.FULL_CLUSTER_RESTART || ClusterStatus.from( + System.getProperty("tests.bwcTask")) == ClusterStatus.ROLLING_UPGRADED) { + validateNewRetentionLeaseId(follower, leader) + } + } catch (e: Exception) { logger.info("Exception while verifying the replication ${e.printStackTrace()}") throw e } } + private fun validateNewRetentionLeaseId( + follower: RestHighLevelClient, + leader: RestHighLevelClient + ) { + assertBusy({ + val followerClusterInfo: Map = + OpenSearchRestTestCase.entityAsMap(follower.lowLevelClient.performRequest(Request("GET", "/"))) + val clusterUUID = (followerClusterInfo["cluster_uuid"] as String) + val clusterName = (followerClusterInfo["cluster_name"] as String) + assert(clusterUUID.isNotEmpty()) + assert(clusterName.isNotEmpty()) + val expectedRetentionLeaseId = + "replication" + ":" + clusterName + ":" + clusterUUID + ":[" + LEADER_INDEX + "]" + + val retentionLeaseinfo = + leader.lowLevelClient.performRequest(Request("GET", "/$LEADER_INDEX/_stats/docs?level=shards")) + val retentionLeaseInfoString = EntityUtils.toString(retentionLeaseinfo.entity) + assertTrue(retentionLeaseInfoString.contains(expectedRetentionLeaseId)) + }, 60, TimeUnit.SECONDS) + } + // Verifies that replication plugin is installed on all the nodes og the cluster. @Throws(java.lang.Exception::class) private fun verifyReplicationPluginInstalled(clusterName: String) { From acc51d5eb262fb9d1c61f97d804bcf75e7cd2ae7 Mon Sep 17 00:00:00 2001 From: Monu Singh Date: Wed, 24 May 2023 11:03:35 +0530 Subject: [PATCH 48/84] Updated plugin code based on OpenSearch-3.0 upstream changes (#766) (#844) Signed-off-by: Sai Kumar Co-authored-by: Sai Kumar --- .../replication/repository/RemoteClusterRestoreLeaderService.kt | 2 +- .../index/translog/ReplicationTranslogDeletionPolicyTests.kt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRestoreLeaderService.kt b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRestoreLeaderService.kt index 7adfc8aa..5eea937b 100644 --- a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRestoreLeaderService.kt +++ b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRestoreLeaderService.kt @@ -21,7 +21,7 @@ import org.opensearch.common.component.AbstractLifecycleComponent import org.opensearch.common.inject.Inject import org.opensearch.common.inject.Singleton import org.opensearch.common.lucene.store.InputStreamIndexInput -import org.opensearch.core.internal.io.IOUtils +import org.opensearch.common.util.io.IOUtils import org.opensearch.index.seqno.RetentionLeaseActions import org.opensearch.index.store.Store import org.opensearch.indices.IndicesService diff --git a/src/test/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicyTests.kt b/src/test/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicyTests.kt index cebf7890..38387621 100644 --- a/src/test/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicyTests.kt +++ b/src/test/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicyTests.kt @@ -18,7 +18,7 @@ import org.opensearch.common.bytes.BytesArray import org.opensearch.common.bytes.ReleasableBytesReference import org.opensearch.common.collect.Tuple import org.opensearch.common.util.BigArrays -import org.opensearch.core.internal.io.IOUtils +import 
org.opensearch.common.util.io.IOUtils import org.opensearch.index.seqno.RetentionLease import org.opensearch.index.seqno.RetentionLeases import org.opensearch.index.shard.ShardId From ea56da7ae171cd91184589ab9445444b8ccc5433 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 24 May 2023 11:05:28 +0530 Subject: [PATCH 49/84] Update to set remoteTranslogEnabled=false (#834) (#836) Upstream change https://github.com/opensearch-project/OpenSearch/commit/613f4aa046912b583925a9a03cb2294efd2a002c#diff-73db07f833f37213626303b1b984703bdf4cfb539529aa72e2ad8f55cea0a5e7R168 started failing builds due to mandatory param remoteTranslogEnabled Signed-off-by: Monu Singh (cherry picked from commit dacfd78638b9682b80784c6c50d323379d202903) Co-authored-by: Monu Singh --- .../index/translog/ReplicationTranslogDeletionPolicyTests.kt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/test/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicyTests.kt b/src/test/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicyTests.kt index 38387621..8378bbbb 100644 --- a/src/test/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicyTests.kt +++ b/src/test/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicyTests.kt @@ -177,7 +177,8 @@ class ReplicationTranslogDeletionPolicyTests : OpenSearchTestCase() { randomNonNegativeLong(), TragicExceptionHolder(), { }, - BigArrays.NON_RECYCLING_INSTANCE + BigArrays.NON_RECYCLING_INSTANCE, + false ) writer = Mockito.spy(writer) Mockito.doReturn(now - (numberOfReaders - gen + 1) * 1000).`when`(writer).lastModifiedTime @@ -193,4 +194,4 @@ class ReplicationTranslogDeletionPolicyTests : OpenSearchTestCase() { } return Tuple(readers, writer) } -} \ No newline at end of file +} From 57755ecaa6330389b3fc47a06ab691caa020592d Mon Sep 17 00:00:00 2001 From: Monu Singh Date: Fri, 26 May 2023 12:24:34 +0530 Subject: [PATCH 50/84] Merge pull request #624 from priyatsh/main (#888) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Github-Issue-544:Replication auto pauses on follower cluster having w… (cherry picked from commit 2fba1f7dfa395580f3aef864d7047f56454f684f) Signed-off-by: Monu Singh Co-authored-by: Priyanka Sharma <114481135+priyatsh@users.noreply.github.com> --- .../task/index/IndexReplicationTask.kt | 3 +- .../integ/rest/StartReplicationIT.kt | 152 ++++++++++++++++++ 2 files changed, 154 insertions(+), 1 deletion(-) diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt index 44385eff..421699f8 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt @@ -149,7 +149,8 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript IndexMetadata.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING, EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING, EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING, - IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING + IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING, + IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS ) val blockListedSettings :Set = blSettings.stream().map { k -> k.key }.collect(Collectors.toSet()) diff --git 
a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt index 5b2595c5..cd3d849b 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt @@ -1106,6 +1106,158 @@ class StartReplicationIT: MultiClusterRestTestCase() { ) } + fun `test that wait_for_active_shards setting is set on leader and not on follower`() { + val followerClient = getClientForCluster(FOLLOWER) + val leaderClient = getClientForCluster(LEADER) + + createConnectionBetweenClusters(FOLLOWER, LEADER) + + val settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), Integer.toString(2)) + .build() + + val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName).settings(settings), RequestOptions.DEFAULT) + assertThat(createIndexResponse.isAcknowledged).isTrue() + try { + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName)) + assertBusy { + assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) + } + TimeUnit.SECONDS.sleep(SLEEP_TIME_BETWEEN_SYNC) + + // Verify the setting on leader + val getLeaderSettingsRequest = GetSettingsRequest() + getLeaderSettingsRequest.indices(leaderIndexName) + getLeaderSettingsRequest.includeDefaults(true) + + assertBusy ({ + Assert.assertEquals( + "2", + leaderClient.indices() + .getSettings(getLeaderSettingsRequest, RequestOptions.DEFAULT) + .indexToSettings[leaderIndexName][IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey()] + ) + }, 15, TimeUnit.SECONDS) + + // Verify that the setting is not updated on follower and follower has default value of the setting + val getSettingsRequest = GetSettingsRequest() + getSettingsRequest.indices(followerIndexName) + getSettingsRequest.includeDefaults(true) + + assertBusy ({ + Assert.assertEquals( + "1", + followerClient.indices() + .getSettings(getSettingsRequest, RequestOptions.DEFAULT) + .getSetting(followerIndexName, IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.key) + ) + }, 15, TimeUnit.SECONDS) + } finally { + followerClient.stopReplication(followerIndexName) + } + } + + fun `test that wait_for_active_shards setting is updated on leader and not on follower`() { + val followerClient = getClientForCluster(FOLLOWER) + val leaderClient = getClientForCluster(LEADER) + + createConnectionBetweenClusters(FOLLOWER, LEADER) + + val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) + assertThat(createIndexResponse.isAcknowledged).isTrue() + try { + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName)) + assertBusy { + assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) + } + TimeUnit.SECONDS.sleep(SLEEP_TIME_BETWEEN_SYNC) + + //Use Update API + val settingsBuilder = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), Integer.toString(2)) + + val settingsUpdateResponse = leaderClient.indices().putSettings(UpdateSettingsRequest(leaderIndexName) + .settings(settingsBuilder.build()), RequestOptions.DEFAULT) + 
Assert.assertEquals(settingsUpdateResponse.isAcknowledged, true) + + TimeUnit.SECONDS.sleep(SLEEP_TIME_BETWEEN_SYNC) + + // Verify the setting on leader + val getLeaderSettingsRequest = GetSettingsRequest() + getLeaderSettingsRequest.indices(leaderIndexName) + getLeaderSettingsRequest.includeDefaults(true) + + assertBusy ({ + Assert.assertEquals( + "2", + leaderClient.indices() + .getSettings(getLeaderSettingsRequest, RequestOptions.DEFAULT) + .indexToSettings[leaderIndexName][IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey()] + ) + }, 15, TimeUnit.SECONDS) + + + val getSettingsRequest = GetSettingsRequest() + getSettingsRequest.indices(followerIndexName) + getSettingsRequest.includeDefaults(true) + + assertBusy ({ + Assert.assertEquals( + "1", + followerClient.indices() + .getSettings(getSettingsRequest, RequestOptions.DEFAULT) + .getSetting(followerIndexName, IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.key) + ) + }, 15, TimeUnit.SECONDS) + } finally { + followerClient.stopReplication(followerIndexName) + } + } + + fun `test that wait_for_active_shards setting is updated on follower through start replication api`() { + val followerClient = getClientForCluster(FOLLOWER) + val leaderClient = getClientForCluster(LEADER) + + createConnectionBetweenClusters(FOLLOWER, LEADER) + + val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) + assertThat(createIndexResponse.isAcknowledged).isTrue() + + val settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), Integer.toString(2)) + .build() + try { + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, settings = settings)) + assertBusy { + assertThat(followerClient.indices() + .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) + .isEqualTo(true) + } + TimeUnit.SECONDS.sleep(SLEEP_TIME_BETWEEN_SYNC) + + val getSettingsRequest = GetSettingsRequest() + getSettingsRequest.indices(followerIndexName) + getSettingsRequest.includeDefaults(true) + assertBusy ({ + Assert.assertEquals( + "2", + followerClient.indices() + .getSettings(getSettingsRequest, RequestOptions.DEFAULT) + .indexToSettings[followerIndexName][IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey()] + ) + }, 15, TimeUnit.SECONDS) + } finally { + followerClient.stopReplication(followerIndexName) + } + } + private fun excludeAllClusterNodes(clusterName: String) { val transientSettingsRequest = Request("PUT", "_cluster/settings") // Get IPs directly from the cluster to handle all cases - single node cluster, multi node cluster and remote test cluster. 
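The tests above exercise the block-list behaviour end to end: `wait_for_active_shards` set on the leader must not leak onto the follower. As a rough sketch of the underlying idea — not the plugin's actual code; the helper name `filterBlockListedSettings` and the use of raw setting keys are illustrative only — stripping block-listed keys from the leader settings before they are applied to the follower looks roughly like this:

import org.opensearch.common.settings.Settings

// Illustrative helper (assumed for this sketch, not part of the plugin):
// drop block-listed keys from the leader's settings before applying them
// to the follower index.
fun filterBlockListedSettings(leaderSettings: Settings, blockListedKeys: Set<String>): Settings {
    val builder = Settings.builder().put(leaderSettings)
    blockListedKeys.forEach { builder.remove(it) }
    return builder.build()
}

fun main() {
    // Assumed leader-side settings, written with raw keys to keep the sketch self-contained.
    val leaderSettings = Settings.builder()
        .put("index.number_of_replicas", 1)
        .put("index.write.wait_for_active_shards", "2")
        .build()

    val followerSettings = filterBlockListedSettings(
        leaderSettings,
        setOf("index.write.wait_for_active_shards")
    )

    println(followerSettings.get("index.number_of_replicas"))           // prints 1
    println(followerSettings.get("index.write.wait_for_active_shards")) // prints null
}

With the block-listed key removed, the follower falls back to its own default for `wait_for_active_shards` while still inheriting the remaining leader settings, which is what the assertions in the tests above check.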
From 8ef64516f56d347e36f2a8d24f98925a9f87f442 Mon Sep 17 00:00:00 2001 From: Monu Singh Date: Mon, 29 May 2023 16:17:09 +0530 Subject: [PATCH 51/84] Add release notes for 2.8 release Signed-off-by: Monu Singh (cherry picked from commit 3e55af48249a6570e3a71376b59f97f92cf4279e) --- ...cross-cluster-replication.release-notes-2.8.0.0.md | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 release-notes/opensearch-cross-cluster-replication.release-notes-2.8.0.0.md diff --git a/release-notes/opensearch-cross-cluster-replication.release-notes-2.8.0.0.md b/release-notes/opensearch-cross-cluster-replication.release-notes-2.8.0.0.md new file mode 100644 index 00000000..47719aef --- /dev/null +++ b/release-notes/opensearch-cross-cluster-replication.release-notes-2.8.0.0.md @@ -0,0 +1,11 @@ +## Version 2.8.0.0 Release Notes + +Compatible with OpenSearch 2.8.0 + + +### Enhancements +* Support CCR for k-NN enabled indices ([#760](https://github.com/opensearch-project/cross-cluster-replication/pull/760)) + +### Bug Fixes +* Handle serialization issues with UpdateReplicationStateDetailsRequest ([#866](https://github.com/opensearch-project/cross-cluster-replication/pull/866)) +* Two followers using same remote alias can result in replication being auto-paused ([#833](https://github.com/opensearch-project/cross-cluster-replication/pull/833)) From 9e06f40522f8c0fa006ac2b565e76d2e303f700d Mon Sep 17 00:00:00 2001 From: Sai Kumar Date: Wed, 31 May 2023 14:15:09 +0530 Subject: [PATCH 52/84] [Backport] Handle clean-up of stale index task during cancellation (#645) (#909) Signed-off-by: Sai Kumar --- .../task/index/IndexReplicationTask.kt | 52 +++++++++++++---- .../replication/ReplicationHelpers.kt | 2 +- .../task/index/IndexReplicationTaskTests.kt | 57 +++++++++++++++++++ 3 files changed, 98 insertions(+), 13 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt index 421699f8..2796eb43 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt @@ -156,7 +156,8 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript val blockListedSettings :Set = blSettings.stream().map { k -> k.key }.collect(Collectors.toSet()) const val SLEEP_TIME_BETWEEN_POLL_MS = 5000L - const val TASK_CANCELLATION_REASON = "Index replication task was cancelled by user" + const val AUTOPAUSED_REASON_PREFIX = "AutoPaused: " + const val TASK_CANCELLATION_REASON = AUTOPAUSED_REASON_PREFIX + "Index replication task was cancelled by user" } @@ -263,13 +264,6 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript } } - override fun onCancelled() { - log.info("Cancelling the index replication task.") - client.execute(PauseIndexReplicationAction.INSTANCE, - PauseIndexReplicationRequest(followerIndexName, TASK_CANCELLATION_REASON)) - super.onCancelled() - } - private suspend fun failReplication(failedState: FailedState) { withContext(NonCancellable) { val reason = failedState.errorMsg @@ -313,6 +307,23 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript return MonitoringState } + fun isTrackingTaskForIndex(): Boolean { + val persistentTasks = clusterService.state().metadata.custom(PersistentTasksCustomMetadata.TYPE) + val runningTasksForIndex = 
persistentTasks.findTasks(IndexReplicationExecutor.TASK_NAME, Predicate { true }).stream() + .map { task -> task as PersistentTask } + .filter { task -> task.params!!.followerIndexName == followerIndexName} + .toArray() + assert(runningTasksForIndex.size <= 1) { "Found more than one running index task for index[$followerIndexName]" } + for (runningTask in runningTasksForIndex) { + val currentTask = runningTask as PersistentTask + log.info("Verifying task details - currentTask={isAssigned=${currentTask.isAssigned},executorNode=${currentTask.executorNode}}") + if(currentTask.isAssigned && currentTask.executorNode == clusterService.state().nodes.localNodeId) { + return true + } + } + return false + } + private fun isResumed(): Boolean { return clusterService.state().routingTable.hasIndex(followerIndexName) } @@ -651,7 +662,7 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript log.error("Going to initiate auto-pause of $followerIndexName due to shard failures - $state") val pauseReplicationResponse = client.suspendExecute( replicationMetadata, - PauseIndexReplicationAction.INSTANCE, PauseIndexReplicationRequest(followerIndexName, "AutoPaused: ${state.errorMsg}"), + PauseIndexReplicationAction.INSTANCE, PauseIndexReplicationRequest(followerIndexName, "$AUTOPAUSED_REASON_PREFIX + ${state.errorMsg}"), defaultContext = true ) if (!pauseReplicationResponse.isAcknowledged) { @@ -688,10 +699,27 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript } override suspend fun cleanup() { - if (currentTaskState.state == ReplicationState.RESTORING) { - log.info("Replication stopped before restore could finish, so removing partial restore..") - cancelRestore() + // If the task is already running on the other node, + // OpenSearch persistent task framework cancels any stale tasks on the old nodes. + // Currently, we don't have view on the cancellation reason. Before triggering + // any further actions on the index from this task, verify that, this is the actual task tracking the index. + // - stale task during cancellation shouldn't trigger further actions. + if(isTrackingTaskForIndex()) { + if (currentTaskState.state == ReplicationState.RESTORING) { + log.info("Replication stopped before restore could finish, so removing partial restore..") + cancelRestore() + } + + // if cancelled and not in paused state. + val replicationStateParams = getReplicationStateParamsForIndex(clusterService, followerIndexName) + if(isCancelled && replicationStateParams != null + && replicationStateParams[REPLICATION_LAST_KNOWN_OVERALL_STATE] == ReplicationOverallState.RUNNING.name) { + log.info("Task is cancelled. Moving the index to auto-pause state") + client.execute(PauseIndexReplicationAction.INSTANCE, + PauseIndexReplicationRequest(followerIndexName, TASK_CANCELLATION_REASON)) + } } + /* This is to minimise overhead of calling an additional listener as * it continues to be called even after the task is completed. 
*/ diff --git a/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt b/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt index 3fe4e11f..f5c9ff24 100644 --- a/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt +++ b/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt @@ -51,7 +51,7 @@ const val REST_REPLICATION_TASKS = "_tasks?actions=*replication*&detailed&pretty const val REST_LEADER_STATS = "${REST_REPLICATION_PREFIX}leader_stats" const val REST_FOLLOWER_STATS = "${REST_REPLICATION_PREFIX}follower_stats" const val REST_AUTO_FOLLOW_STATS = "${REST_REPLICATION_PREFIX}autofollow_stats" -const val INDEX_TASK_CANCELLATION_REASON = "Index replication task was cancelled by user" +const val INDEX_TASK_CANCELLATION_REASON = "AutoPaused: Index replication task was cancelled by user" const val STATUS_REASON_USER_INITIATED = "User initiated" const val STATUS_REASON_SHARD_TASK_CANCELLED = "Shard task killed or cancelled." const val STATUS_REASON_INDEX_NOT_FOUND = "no such index" diff --git a/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt b/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt index 083533d9..2032cc26 100644 --- a/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt +++ b/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt @@ -26,6 +26,8 @@ import org.opensearch.cluster.ClusterStateObserver import org.opensearch.cluster.RestoreInProgress import org.opensearch.cluster.metadata.IndexMetadata import org.opensearch.cluster.metadata.Metadata +import org.opensearch.cluster.node.DiscoveryNode +import org.opensearch.cluster.node.DiscoveryNodes import org.opensearch.cluster.routing.RoutingTable import org.opensearch.common.settings.Settings import org.opensearch.common.settings.SettingsModule @@ -209,6 +211,61 @@ class IndexReplicationTaskTests : OpenSearchTestCase() { assertThat(shardTasks.size == 2).isTrue } + fun testIsTrackingTaskForIndex() = runBlocking { + val replicationTask: IndexReplicationTask = spy(createIndexReplicationTask()) + var taskManager = Mockito.mock(TaskManager::class.java) + replicationTask.setPersistent(taskManager) + var rc = ReplicationContext(followerIndex) + var rm = ReplicationMetadata(connectionName, ReplicationStoreMetadataType.INDEX.name, ReplicationOverallState.RUNNING.name, "reason", rc, rc, Settings.EMPTY) + replicationTask.setReplicationMetadata(rm) + + // when index replication task is valid + var tasks = PersistentTasksCustomMetadata.builder() + var leaderIndex = Index(followerIndex, "_na_") + tasks.addTask( "replication:0", IndexReplicationExecutor.TASK_NAME, IndexReplicationParams("remoteCluster", leaderIndex, followerIndex), + PersistentTasksCustomMetadata.Assignment("same_node", "test assignment on other node")) + + var metadata = Metadata.builder() + .put(IndexMetadata.builder(REPLICATION_CONFIG_SYSTEM_INDEX).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) + .put(IndexMetadata.builder(followerIndex).settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0)) + .putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()) + .build() + var routingTableBuilder = RoutingTable.builder() + .addAsNew(metadata.index(REPLICATION_CONFIG_SYSTEM_INDEX)) + .addAsNew(metadata.index(followerIndex)) + var discoveryNodesBuilder = DiscoveryNodes.Builder() + .localNodeId("same_node") + var newClusterState = ClusterState.builder(clusterService.state()) 
+ .metadata(metadata) + .routingTable(routingTableBuilder.build()) + .nodes(discoveryNodesBuilder.build()).build() + setState(clusterService, newClusterState) + assertThat(replicationTask.isTrackingTaskForIndex()).isTrue + + // when index replication task is not valid + tasks = PersistentTasksCustomMetadata.builder() + leaderIndex = Index(followerIndex, "_na_") + tasks.addTask( "replication:0", IndexReplicationExecutor.TASK_NAME, IndexReplicationParams("remoteCluster", leaderIndex, followerIndex), + PersistentTasksCustomMetadata.Assignment("other_node", "test assignment on other node")) + + metadata = Metadata.builder() + .put(IndexMetadata.builder(REPLICATION_CONFIG_SYSTEM_INDEX).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) + .put(IndexMetadata.builder(followerIndex).settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0)) + .putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()) + .build() + routingTableBuilder = RoutingTable.builder() + .addAsNew(metadata.index(REPLICATION_CONFIG_SYSTEM_INDEX)) + .addAsNew(metadata.index(followerIndex)) + discoveryNodesBuilder = DiscoveryNodes.Builder() + .localNodeId("same_node") + newClusterState = ClusterState.builder(clusterService.state()) + .metadata(metadata) + .routingTable(routingTableBuilder.build()) + .nodes(discoveryNodesBuilder.build()).build() + setState(clusterService, newClusterState) + assertThat(replicationTask.isTrackingTaskForIndex()).isFalse + } + private fun createIndexReplicationTask() : IndexReplicationTask { var threadPool = TestThreadPool("IndexReplicationTask") //Hack Alert : Though it is meant to force rejection , this is to make overallTaskScope not null From d6ef8b3190c7010c941f7fb32e6c6589765fb178 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 31 May 2023 20:11:10 +0530 Subject: [PATCH 53/84] Handling exception in getAssignment method (#881) (#923) * Handling exception in getAssignment method Handling exception in getAssignment method Signed-off-by: Nishant Goel Signed-off-by: Nishant Goel <113011736+nisgoel-amazon@users.noreply.github.com> * Adding UT for getAssignment Method Signed-off-by: Nishant Goel --------- Signed-off-by: Nishant Goel <113011736+nisgoel-amazon@users.noreply.github.com> Signed-off-by: Nishant Goel (cherry picked from commit 14b9268249b53f3ec6f69cffa451dbe86c2c1a33) Co-authored-by: Nishant Goel <113011736+nisgoel-amazon@users.noreply.github.com> --- .../task/shard/ShardReplicationExecutor.kt | 11 +- .../shard/ShardReplicationExecutorTests.kt | 148 ++++++++++++++++++ 2 files changed, 156 insertions(+), 3 deletions(-) create mode 100644 src/test/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutorTests.kt diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutor.kt b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutor.kt index 9e509a79..11be6056 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutor.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutor.kt @@ -56,9 +56,14 @@ class ShardReplicationExecutor(executor: String, private val clusterService : Cl } override fun getAssignment(params: ShardReplicationParams, clusterState: ClusterState) : Assignment { - val primaryShard = clusterState.routingTable().shardRoutingTable(params.followerShardId).primaryShard() - if (!primaryShard.active()) return SHARD_NOT_ACTIVE - 
return Assignment(primaryShard.currentNodeId(), "node with primary shard") + try { + val primaryShard = clusterState.routingTable().shardRoutingTable(params.followerShardId).primaryShard() + if (!primaryShard.active()) return SHARD_NOT_ACTIVE + return Assignment(primaryShard.currentNodeId(), "node with primary shard") + } catch (e: Exception) { + log.error("Failed to assign shard replication task with id ${params.followerShardId}", e) + return SHARD_NOT_ACTIVE + } } override fun nodeOperation(task: AllocatedPersistentTask, params: ShardReplicationParams, state: PersistentTaskState?) { diff --git a/src/test/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutorTests.kt b/src/test/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutorTests.kt new file mode 100644 index 00000000..630234f8 --- /dev/null +++ b/src/test/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutorTests.kt @@ -0,0 +1,148 @@ +package org.opensearch.replication.task.shard + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope +import org.junit.Assert +import org.junit.Before +import org.junit.Test +import org.mockito.Mockito +import org.opensearch.Version +import org.opensearch.cluster.ClusterState +import org.opensearch.cluster.metadata.IndexMetadata +import org.opensearch.cluster.metadata.Metadata +import org.opensearch.cluster.routing.* +import org.opensearch.common.unit.TimeValue +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.index.Index +import org.opensearch.index.shard.ShardId +import org.opensearch.replication.ReplicationSettings +import org.opensearch.replication.metadata.ReplicationMetadataManager +import org.opensearch.replication.metadata.store.ReplicationMetadataStore +import org.opensearch.replication.task.index.* +import org.opensearch.test.ClusterServiceUtils +import org.opensearch.test.OpenSearchTestCase +import org.opensearch.threadpool.TestThreadPool +import java.util.ArrayList +import java.util.concurrent.TimeUnit + +@ThreadLeakScope(ThreadLeakScope.Scope.NONE) +class ShardReplicationExecutorTests: OpenSearchTestCase() { + + companion object { + var followerIndex = "follower-index" + var remoteCluster = "remote-cluster" + } + + private lateinit var shardReplicationExecutor: ShardReplicationExecutor + + private var threadPool = TestThreadPool("ShardExecutorTest") + private var clusterService = ClusterServiceUtils.createClusterService(threadPool) + + @Before + fun setup() { + val spyClient = Mockito.spy(NoOpClient("testName")) + val replicationMetadataManager = ReplicationMetadataManager(clusterService, spyClient, + ReplicationMetadataStore(spyClient, clusterService, NamedXContentRegistry.EMPTY) + ) + val followerStats = FollowerClusterStats() + val followerShardId = ShardId("follower", "follower_uuid", 0) + followerStats.stats[followerShardId] = FollowerShardMetric() + + val replicationSettings = Mockito.mock(ReplicationSettings::class.java) + replicationSettings.metadataSyncInterval = TimeValue(100, TimeUnit.MILLISECONDS) + shardReplicationExecutor = ShardReplicationExecutor( + "test_executor", + clusterService, + threadPool, + spyClient, + replicationMetadataManager, + replicationSettings, + followerStats + ) + } + + @Test + fun `getAssignment should not throw exception when no shard is present` () { + val sId = ShardId(Index(followerIndex, "_na_"), 0) + val params = ShardReplicationParams(remoteCluster, sId, sId) + val clusterState = createClusterState(null, null) + + try { + val assignment = 
shardReplicationExecutor.getAssignment(params, clusterState) + Assert.assertEquals(null, assignment.executorNode) + } catch (e: Exception) { + // Validation should not throw an exception, so the test should fail if it reaches this line + Assert.fail("Expected Exception should not be thrown") + } + } + + @Test + fun `getAssignment should return null if shard is present but is not active` () { + val sId = ShardId(Index(followerIndex, "_na_"), 0) + val params = ShardReplicationParams(remoteCluster, sId, sId) + val unassignedShard = ShardRouting.newUnassigned( + sId, + true, + RecoverySource.EmptyStoreRecoverySource.INSTANCE, + UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null) + ) + val clusterState = createClusterState(sId, unassignedShard) + + try { + val assignment = shardReplicationExecutor.getAssignment(params, clusterState) + Assert.assertEquals(null, assignment.executorNode) + } catch (e: Exception) { + // Validation should not throw an exception, so the test should fail if it reaches this line + Assert.fail("Expected Exception should not be thrown") + } + } + + @Test + fun `getAssignment should return node when shard is present` () { + val sId = ShardId(Index(followerIndex, "_na_"), 0) + val params = ShardReplicationParams(remoteCluster, sId, sId) + val initializingShard = TestShardRouting.newShardRouting( + followerIndex, + sId.id, + "1", + true, + ShardRoutingState.INITIALIZING + ) + val startedShard = initializingShard.moveToStarted() + val clusterState = createClusterState(sId, startedShard) + + try { + val assignment = shardReplicationExecutor.getAssignment(params, clusterState) + Assert.assertEquals(initializingShard.currentNodeId(), assignment.executorNode) + } catch (e: Exception) { + // Validation should not throw an exception, so the test should fail if it reaches this line + Assert.fail("Expected Exception should not be thrown") + } + } + + private fun createClusterState(shardId: ShardId?, shardRouting: ShardRouting?): ClusterState { + val indices: MutableList = ArrayList() + indices.add(followerIndex) + val metadata = Metadata.builder() + .put( + IndexMetadata.builder(ReplicationMetadataStore.REPLICATION_CONFIG_SYSTEM_INDEX).settings(settings( + Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) + .put( + IndexMetadata.builder(IndexReplicationTaskTests.followerIndex).settings(settings( + Version.CURRENT)).numberOfShards(2).numberOfReplicas(0)) + .build() + + val routingTableBuilder = RoutingTable.builder() + .addAsNew(metadata.index(ReplicationMetadataStore.REPLICATION_CONFIG_SYSTEM_INDEX)) + .addAsNew(metadata.index(followerIndex)) + + if (shardId != null) { + routingTableBuilder.add( + IndexRoutingTable.builder(shardId.index) + .addShard(shardRouting) + .build() + ) + } + + return ClusterState.builder(clusterService.state()).routingTable(routingTableBuilder.build()).build() + } +} \ No newline at end of file From 689ae33791610d69dada452a466ee8795fd3bc23 Mon Sep 17 00:00:00 2001 From: Sai Kumar Date: Fri, 2 Jun 2023 16:04:11 +0530 Subject: [PATCH 54/84] Modify autofollow retry scheduler logic check to account for completed runs (#839) (#897) Signed-off-by: Sai Kumar --- .../task/autofollow/AutoFollowTask.kt | 3 +- .../integ/rest/UpdateAutoFollowPatternIT.kt | 59 +++++++++++-------- 2 files changed, 38 insertions(+), 24 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt index da89580e..15c22922 100644 --- 
a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt @@ -45,6 +45,7 @@ import org.opensearch.tasks.TaskId import org.opensearch.threadpool.Scheduler import org.opensearch.threadpool.ThreadPool import java.util.concurrent.ConcurrentSkipListSet +import java.util.concurrent.TimeUnit class AutoFollowTask(id: Long, type: String, action: String, description: String, parentTask: TaskId, headers: Map, @@ -91,7 +92,7 @@ class AutoFollowTask(id: Long, type: String, action: String, description: String private fun addRetryScheduler() { log.debug("Adding retry scheduler") - if(retryScheduler != null && !retryScheduler!!.isCancelled) { + if(retryScheduler != null && retryScheduler!!.getDelay(TimeUnit.NANOSECONDS) > 0L) { return } try { diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt index b38d0dce..9393bc32 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt @@ -23,6 +23,7 @@ import org.opensearch.replication.task.index.IndexReplicationExecutor import org.apache.http.HttpStatus import org.apache.http.entity.ContentType import org.apache.http.nio.entity.NStringEntity +import org.apache.logging.log4j.LogManager import org.assertj.core.api.Assertions import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest @@ -41,6 +42,7 @@ import org.opensearch.cluster.metadata.IndexMetadata import org.opensearch.cluster.metadata.MetadataCreateIndexService import org.opensearch.replication.AutoFollowStats import org.opensearch.replication.ReplicationPlugin +import org.opensearch.replication.action.changes.TransportGetChangesAction import org.opensearch.replication.updateReplicationStartBlockSetting import org.opensearch.replication.updateAutofollowRetrySetting import org.opensearch.replication.updateAutoFollowConcurrentStartReplicationJobSetting @@ -63,6 +65,10 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { private val longIndexPatternName = "index_".repeat(43) private val waitForShardTask = TimeValue.timeValueSeconds(10) + companion object { + private val log = LogManager.getLogger(UpdateAutoFollowPatternIT::class.java) + } + fun `test auto follow pattern`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) @@ -316,36 +322,43 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(1) } - fun `test autofollow task with start replication block`() { + fun `test autofollow task with start replication block and retries`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) createConnectionBetweenClusters(FOLLOWER, LEADER, connectionAlias) - val leaderIndexName = createRandomIndex(leaderClient) try { //modify retry duration to account for autofollow trigger in next retry followerClient.updateAutofollowRetrySetting("1m") - // Add replication start block - followerClient.updateReplicationStartBlockSetting(true) - followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern) - sleep(30000) // Default poll for auto follow in worst case - // 
verify both index replication tasks and autofollow tasks - // Replication shouldn't have been started - 0 tasks - // Autofollow task should still be up - 1 task - Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(0) - Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) + for (repeat in 1..2) { + log.info("Current Iteration $repeat") + // Add replication start block + followerClient.updateReplicationStartBlockSetting(true) + createRandomIndex(leaderClient) + followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern) + sleep(95000) // wait for auto follow trigger in the worst case + // verify both index replication tasks and autofollow tasks + // Replication shouldn't have been started - (repeat-1) tasks as for current loop index shouldn't be + // created yet. + // Autofollow task should still be up - 1 task + Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(repeat-1) + Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) - var stats = followerClient.AutoFollowStats() - var failedIndices = stats["failed_indices"] as List<*> - assert(failedIndices.size == 1) - // Remove replication start block - followerClient.updateReplicationStartBlockSetting(false) - sleep(60000) // wait for auto follow trigger in the worst case - // Index should be replicated and autofollow task should be present - Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(1) - Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) - stats = followerClient.AutoFollowStats() - failedIndices = stats["failed_indices"] as List<*> - assert(failedIndices.isEmpty()) + var stats = followerClient.AutoFollowStats() + var failedIndices = stats["failed_indices"] as List<*> + // Every time failed replication task will be 1 as + // there are already running jobs in the previous iteration + log.info("Current failed indices $failedIndices") + assert(failedIndices.size == 1) + // Remove replication start block + followerClient.updateReplicationStartBlockSetting(false) + sleep(95000) // wait for auto follow trigger in the worst case + // Index should be replicated and autofollow task should be present + Assertions.assertThat(getIndexReplicationTasks(FOLLOWER).size).isEqualTo(repeat) + Assertions.assertThat(getAutoFollowTasks(FOLLOWER).size).isEqualTo(1) + stats = followerClient.AutoFollowStats() + failedIndices = stats["failed_indices"] as List<*> + assert(failedIndices.isEmpty()) + } } finally { followerClient.deleteAutoFollowPattern(connectionAlias, indexPatternName) } From a2bfc04ae89691dae9a1641f645095de4f7bc012 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Mon, 4 Jul 2022 12:22:33 +0530 Subject: [PATCH 55/84] Correctly updating the followerCheckpoint in stats api (#438) Summary : We need to update followerCheckpoint after writing to the follower index. 
Currently, we are not waiting for the writes and updating it with soon-to-be stale values Signed-off-by: Gaurav Bafna --- .../task/shard/ShardReplicationTask.kt | 2 +- .../task/shard/TranslogSequencer.kt | 5 ++++ .../task/shard/TranslogSequencerTests.kt | 28 +++++++++++++------ 3 files changed, 25 insertions(+), 10 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt index 24128f8f..7b58d419 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt @@ -217,6 +217,7 @@ class ShardReplicationTask(id: Long, type: String, action: String, description: TaskId(clusterService.nodeName, id), client, indexShard.localCheckpoint, followerClusterStats) val changeTracker = ShardReplicationChangesTracker(indexShard, replicationSettings) + followerClusterStats.stats[followerShardId]!!.followerCheckpoint = indexShard.localCheckpoint coroutineScope { while (isActive) { rateLimiter.acquire() @@ -273,7 +274,6 @@ class ShardReplicationTask(id: Long, type: String, action: String, description: //hence renew retention lease with lastSyncedGlobalCheckpoint + 1 so that any shard that picks up shard replication task has data until then. try { retentionLeaseHelper.renewRetentionLease(leaderShardId, indexShard.lastSyncedGlobalCheckpoint + 1, followerShardId) - followerClusterStats.stats[followerShardId]!!.followerCheckpoint = indexShard.lastSyncedGlobalCheckpoint lastLeaseRenewalMillis = System.currentTimeMillis() } catch (ex: Exception) { when (ex) { diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt b/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt index be5fe89c..38b625bf 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt @@ -28,6 +28,7 @@ import org.opensearch.client.Client import org.opensearch.common.logging.Loggers import org.opensearch.index.shard.ShardId import org.opensearch.index.translog.Translog +import org.opensearch.replication.util.indicesService import org.opensearch.tasks.TaskId import java.util.ArrayList import java.util.concurrent.ConcurrentHashMap @@ -55,6 +56,9 @@ class TranslogSequencer(scope: CoroutineScope, private val replicationMetadata: private val log = Loggers.getLogger(javaClass, followerShardId)!! private val completed = CompletableDeferred() + val followerIndexService = indicesService.indexServiceSafe(followerShardId.index) + val indexShard = followerIndexService.getShard(followerShardId.id) + private val sequencer = scope.actor(capacity = Channel.UNLIMITED) { // Exceptions thrown here will mark the channel as failed and the next attempt to send to the channel will // raise the same exception. See [SendChannel.close] method for details. 
@@ -88,6 +92,7 @@ class TranslogSequencer(scope: CoroutineScope, private val replicationMetadata: val tookInNanos = System.nanoTime() - relativeStartNanos followerClusterStats.stats[followerShardId]!!.totalWriteTime.addAndGet(TimeUnit.NANOSECONDS.toMillis(tookInNanos)) followerClusterStats.stats[followerShardId]!!.opsWritten.addAndGet(replayRequest.changes.size.toLong()) + followerClusterStats.stats[followerShardId]!!.followerCheckpoint = indexShard.localCheckpoint } highWatermark = next.changes.lastOrNull()?.seqNo() ?: highWatermark } diff --git a/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt b/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt index ed5afb06..ac377687 100644 --- a/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt +++ b/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt @@ -11,32 +11,37 @@ package org.opensearch.replication.task.shard -import org.opensearch.replication.action.changes.GetChangesResponse -import org.opensearch.replication.action.replay.ReplayChangesAction -import org.opensearch.replication.action.replay.ReplayChangesRequest -import org.opensearch.replication.action.replay.ReplayChangesResponse -import org.opensearch.replication.metadata.ReplicationOverallState -import org.opensearch.replication.metadata.store.ReplicationContext -import org.opensearch.replication.metadata.store.ReplicationMetadata -import org.opensearch.replication.metadata.store.ReplicationStoreMetadataType import kotlinx.coroutines.ExperimentalCoroutinesApi import kotlinx.coroutines.ObsoleteCoroutinesApi import kotlinx.coroutines.test.runBlockingTest import org.assertj.core.api.Assertions.assertThat +import org.mockito.Mockito import org.opensearch.action.ActionListener import org.opensearch.action.ActionRequest import org.opensearch.action.ActionResponse import org.opensearch.action.ActionType import org.opensearch.action.support.replication.ReplicationResponse.ShardInfo import org.opensearch.common.settings.Settings +import org.opensearch.index.IndexService +import org.opensearch.index.shard.IndexShard import org.opensearch.index.shard.ShardId import org.opensearch.index.translog.Translog +import org.opensearch.indices.IndicesService +import org.opensearch.replication.action.changes.GetChangesResponse +import org.opensearch.replication.action.replay.ReplayChangesAction +import org.opensearch.replication.action.replay.ReplayChangesRequest +import org.opensearch.replication.action.replay.ReplayChangesResponse +import org.opensearch.replication.metadata.ReplicationOverallState +import org.opensearch.replication.metadata.store.ReplicationContext +import org.opensearch.replication.metadata.store.ReplicationMetadata +import org.opensearch.replication.metadata.store.ReplicationStoreMetadataType +import org.opensearch.replication.util.indicesService import org.opensearch.tasks.TaskId.EMPTY_TASK_ID import org.opensearch.test.OpenSearchTestCase -import org.opensearch.test.OpenSearchTestCase.randomList import org.opensearch.test.client.NoOpClient import java.util.Locale + @ObsoleteCoroutinesApi class TranslogSequencerTests : OpenSearchTestCase() { @@ -83,6 +88,11 @@ class TranslogSequencerTests : OpenSearchTestCase() { val stats = FollowerClusterStats() stats.stats[followerShardId] = FollowerShardMetric() val startSeqNo = randomNonNegativeLong() + indicesService = Mockito.mock(IndicesService::class.java) + val followerIndexService = Mockito.mock(IndexService::class.java) + val 
indexShard = Mockito.mock(IndexShard::class.java) + Mockito.`when`(indicesService.indexServiceSafe(followerShardId.index)).thenReturn(followerIndexService) + Mockito.`when`(followerIndexService.getShard(followerShardId.id)).thenReturn(indexShard) val sequencer = TranslogSequencer(this, replicationMetadata, followerShardId, leaderAlias, leaderIndex, EMPTY_TASK_ID, client, startSeqNo, stats) From f9aca4727d466a77367af3515b14f604226b8599 Mon Sep 17 00:00:00 2001 From: Ankit Kala Date: Fri, 2 Jun 2023 13:00:44 +0530 Subject: [PATCH 56/84] Initialize the leaderCheckpoint with follower shard's localCheckpoint (#904) Signed-off-by: Ankit Kala --- .../RemoteClusterRestoreLeaderService.kt | 4 +- .../RemoteClusterRetentionLeaseHelper.kt | 44 ++++++++++--- .../task/index/IndexReplicationTask.kt | 2 +- .../task/shard/ShardReplicationTask.kt | 4 ++ .../replication/MultiClusterRestTestCase.kt | 35 ++++++++++- .../rest/ReplicationStopThenRestartIT.kt | 62 +++++++++++++++++++ 6 files changed, 138 insertions(+), 13 deletions(-) create mode 100644 src/test/kotlin/org/opensearch/replication/integ/rest/ReplicationStopThenRestartIT.kt diff --git a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRestoreLeaderService.kt b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRestoreLeaderService.kt index 5eea937b..5c06e4d4 100644 --- a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRestoreLeaderService.kt +++ b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRestoreLeaderService.kt @@ -114,8 +114,8 @@ class RemoteClusterRestoreLeaderService @Inject constructor(private val indicesS var fromSeqNo = RetentionLeaseActions.RETAIN_ALL // Adds the retention lease for fromSeqNo for the next stage of the replication. 
- retentionLeaseHelper.addRetentionLease(request.leaderShardId, fromSeqNo, - request.followerShardId, RemoteClusterRepository.REMOTE_CLUSTER_REPO_REQ_TIMEOUT_IN_MILLI_SEC) + retentionLeaseHelper.addRetentionLease(request.leaderShardId, fromSeqNo, request.followerShardId, + RemoteClusterRepository.REMOTE_CLUSTER_REPO_REQ_TIMEOUT_IN_MILLI_SEC) /** * At this point, it should be safe to release retention lock as the retention lease diff --git a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt index e755a7be..29116c51 100644 --- a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt +++ b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt @@ -22,6 +22,7 @@ import org.opensearch.index.seqno.RetentionLeaseActions import org.opensearch.index.seqno.RetentionLeaseAlreadyExistsException import org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException import org.opensearch.index.seqno.RetentionLeaseNotFoundException +import org.opensearch.index.shard.IndexShard import org.opensearch.index.shard.ShardId import org.opensearch.replication.metadata.store.ReplicationMetadata import org.opensearch.replication.repository.RemoteClusterRepository @@ -175,22 +176,47 @@ class RemoteClusterRetentionLeaseHelper constructor(var followerClusterNameWithU } } + public fun attemptRetentionLeaseRemoval(leaderShardId: ShardId, followerShardId: ShardId, timeout: Long) { + val retentionLeaseId = retentionLeaseIdForShard(followerClusterNameWithUUID, followerShardId) + val request = RetentionLeaseActions.RemoveRequest(leaderShardId, retentionLeaseId) + try { + client.execute(RetentionLeaseActions.Remove.INSTANCE, request).actionGet(timeout) + log.info("Removed retention lease with id - $retentionLeaseId") + } catch(e: RetentionLeaseNotFoundException) { + // log error and bail + log.error(e.stackTraceToString()) + } catch (e: Exception) { + // We are not bubbling up the exception as the stop action/ task cleanup should succeed + // even if we fail to remove the retention lease from leader cluster + log.error("Exception in removing retention lease", e) + } + } + /** * Remove these once the callers are moved to above APIs */ public fun addRetentionLease(leaderShardId: ShardId, seqNo: Long, - followerShardId: ShardId, timeout: Long) { + followerShardId: ShardId, timeout: Long) { val retentionLeaseId = retentionLeaseIdForShard(followerClusterNameWithUUID, followerShardId) val request = RetentionLeaseActions.AddRequest(leaderShardId, retentionLeaseId, seqNo, retentionLeaseSource) - try { - client.execute(RetentionLeaseActions.Add.INSTANCE, request).actionGet(timeout) - } catch (e: RetentionLeaseAlreadyExistsException) { - log.error(e.stackTraceToString()) - log.info("Renew retention lease as it already exists $retentionLeaseId with $seqNo") - // Only one retention lease should exists for the follower shard - // Ideally, this should have got cleaned-up - renewRetentionLease(leaderShardId, seqNo, followerShardId, timeout) + var canRetry = true + while (true) { + try { + log.info("Adding retention lease $retentionLeaseId") + client.execute(RetentionLeaseActions.Add.INSTANCE, request).actionGet(timeout) + break + } catch (e: RetentionLeaseAlreadyExistsException) { + log.info("Found a stale retention lease $retentionLeaseId on leader.") + if (canRetry) { + canRetry = false + attemptRetentionLeaseRemoval(leaderShardId, 
followerShardId, timeout) + log.info("Cleared stale retention lease $retentionLeaseId on leader. Retrying...") + } else { + log.error(e.stackTraceToString()) + throw e + } + } } } diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt index 2796eb43..4d2537ad 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt @@ -290,7 +290,7 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript private suspend fun pollShardTaskStatus(): IndexReplicationState { val failedShardTasks = findAllReplicationFailedShardTasks(followerIndexName, clusterService.state()) if (failedShardTasks.isNotEmpty()) { - log.info("Failed shard tasks - ", failedShardTasks) + log.info("Failed shard tasks - $failedShardTasks") var msg = "" for ((shard, task) in failedShardTasks) { val taskState = task.state diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt index 7b58d419..f08b2c6b 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt @@ -218,6 +218,10 @@ class ShardReplicationTask(id: Long, type: String, action: String, description: val changeTracker = ShardReplicationChangesTracker(indexShard, replicationSettings) followerClusterStats.stats[followerShardId]!!.followerCheckpoint = indexShard.localCheckpoint + // In case the shard task starts on a new node and there are no active writes on the leader shard, leader checkpoint + // never gets initialized and defaults to 0. To get around this, we set the leaderCheckpoint to follower shard's + // localCheckpoint as the leader shard is guaranteed to equal or more. 
+ followerClusterStats.stats[followerShardId]!!.leaderCheckpoint = indexShard.localCheckpoint coroutineScope { while (isActive) { rateLimiter.acquire() diff --git a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt index 0e3e3208..0dd38a22 100644 --- a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt +++ b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt @@ -11,7 +11,6 @@ package org.opensearch.replication -import com.nhaarman.mockitokotlin2.stub import org.opensearch.replication.MultiClusterAnnotations.ClusterConfiguration import org.opensearch.replication.MultiClusterAnnotations.ClusterConfigurations import org.opensearch.replication.MultiClusterAnnotations.getAnnotationsFromClass @@ -21,6 +20,7 @@ import org.apache.http.HttpHost import org.apache.http.HttpStatus import org.apache.http.client.config.RequestConfig import org.apache.http.entity.ContentType +import org.apache.http.entity.StringEntity import org.apache.http.impl.nio.client.HttpAsyncClientBuilder import org.apache.http.message.BasicHeader import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy @@ -512,6 +512,28 @@ abstract class MultiClusterRestTestCase : OpenSearchTestCase() { return OpenSearchRestTestCase.entityAsList(client.performRequest(Request("GET", endpoint))) } + protected fun deleteConnection(fromClusterName: String, connectionName: String="source") { + val fromCluster = getNamedCluster(fromClusterName) + val persistentConnectionRequest = Request("PUT", "_cluster/settings") + + val entityAsString = """ + { + "persistent": { + "cluster": { + "remote": { + "$connectionName": { + "seeds": null + } + } + } + } + }""".trimMargin() + + persistentConnectionRequest.entity = StringEntity(entityAsString, ContentType.APPLICATION_JSON) + val persistentConnectionResponse = fromCluster.lowLevelClient.performRequest(persistentConnectionRequest) + assertEquals(HttpStatus.SC_OK.toLong(), persistentConnectionResponse.statusLine.statusCode.toLong()) + } + protected fun createConnectionBetweenClusters(fromClusterName: String, toClusterName: String, connectionName: String="source") { val toCluster = getNamedCluster(toClusterName) val fromCluster = getNamedCluster(fromClusterName) @@ -642,5 +664,16 @@ abstract class MultiClusterRestTestCase : OpenSearchTestCase() { return integTestRemote.equals("true") } + protected fun docCount(cluster: RestHighLevelClient, indexName: String) : Int { + val persistentConnectionRequest = Request("GET", "/$indexName/_search?pretty&q=*") + + val persistentConnectionResponse = cluster.lowLevelClient.performRequest(persistentConnectionRequest) + val statusResponse: Map>> = OpenSearchRestTestCase.entityAsMap(persistentConnectionResponse) as Map>> + return statusResponse["hits"]?.get("total")?.get("value") as Int + } + + protected fun deleteIndex(testCluster: RestHighLevelClient, indexName: String) { + testCluster.lowLevelClient.performRequest(Request("DELETE", indexName)) + } } diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/ReplicationStopThenRestartIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/ReplicationStopThenRestartIT.kt new file mode 100644 index 00000000..33ebb790 --- /dev/null +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/ReplicationStopThenRestartIT.kt @@ -0,0 +1,62 @@ +package org.opensearch.replication.integ.rest + +import org.opensearch.replication.MultiClusterRestTestCase +import 
org.opensearch.replication.MultiClusterAnnotations +import org.opensearch.replication.StartReplicationRequest +import org.opensearch.replication.startReplication +import org.opensearch.replication.stopReplication +import org.assertj.core.api.Assertions +import org.opensearch.client.RequestOptions +import org.opensearch.client.indices.CreateIndexRequest +import org.junit.Assert +import java.util.concurrent.TimeUnit + + +@MultiClusterAnnotations.ClusterConfigurations( + MultiClusterAnnotations.ClusterConfiguration(clusterName = LEADER), + MultiClusterAnnotations.ClusterConfiguration(clusterName = FOLLOWER) +) + +class ReplicationStopThenRestartIT : MultiClusterRestTestCase() { + private val leaderIndexName = "leader_index" + private val followerIndexName = "follower_index" + + fun `test replication works after unclean stop and start`() { + val followerClient = getClientForCluster(FOLLOWER) + val leaderClient = getClientForCluster(LEADER) + changeTemplate(LEADER) + createConnectionBetweenClusters(FOLLOWER, LEADER) + val createIndexResponse = leaderClient.indices().create(CreateIndexRequest(leaderIndexName), RequestOptions.DEFAULT) + Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName)) + insertDocToIndex(LEADER, "1", "dummy data 1",leaderIndexName) + insertDocToIndex(LEADER, "2", "dummy data 1",leaderIndexName) + + assertBusy ({ + try { + Assert.assertEquals(2, docCount(followerClient, followerIndexName)) + } catch (ex: Exception) { + ex.printStackTrace(); + Assert.fail("Exception while querying follower cluster. Failing to retry again {}") + } + }, 1, TimeUnit.MINUTES) + + + deleteConnection(FOLLOWER) + followerClient.stopReplication(followerIndexName, shouldWait = true) + deleteIndex(followerClient, followerIndexName) + + createConnectionBetweenClusters(FOLLOWER, LEADER) + insertDocToIndex(LEADER, "3", "dummy data 1",leaderIndexName) + insertDocToIndex(LEADER, "4", "dummy data 1",leaderIndexName) + followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName)) + + assertBusy ({ + try { + Assert.assertEquals(4, docCount(followerClient, followerIndexName)) + } catch (ex: Exception) { + Assert.fail("Exception while querying follower cluster. 
Failing to retry again") + } + }, 1, TimeUnit.MINUTES) + } +} From 474eb6713f6b0fc86a40013a8c649f7c5f68c7f1 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 2 Jun 2023 18:05:17 +0530 Subject: [PATCH 57/84] Remove any stale replication tasks from cluster state (#905) (#973) Clear stale replication tasks in STOP API Signed-off-by: monusingh-1 (cherry picked from commit bc9b61a4807e93d660d7b34cc12409fa6de778c7) Co-authored-by: Monu Singh --- .../TransportStopIndexReplicationAction.kt | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt b/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt index 8f7ff425..8a6fdf71 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt @@ -54,6 +54,8 @@ import org.opensearch.common.inject.Inject import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.settings.Settings import org.opensearch.replication.util.stackTraceToString +import org.opensearch.persistent.PersistentTasksCustomMetadata +import org.opensearch.persistent.RemovePersistentTaskAction import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportService import java.io.IOException @@ -136,6 +138,7 @@ class TransportStopIndexReplicationAction @Inject constructor(transportService: } } replicationMetadataManager.deleteIndexReplicationMetadata(request.indexName) + removeStaleReplicationTasksFromClusterState(request) listener.onResponse(AcknowledgedResponse(true)) } catch (e: Exception) { log.error("Stop replication failed for index[${request.indexName}] with error ${e.stackTraceToString()}") @@ -144,6 +147,32 @@ class TransportStopIndexReplicationAction @Inject constructor(transportService: } } + private suspend fun removeStaleReplicationTasksFromClusterState(request: StopIndexReplicationRequest) { + try { + val allTasks: PersistentTasksCustomMetadata = + clusterService.state().metadata().custom(PersistentTasksCustomMetadata.TYPE) + for (singleTask in allTasks.tasks()) { + if (isReplicationTask(singleTask, request) && !singleTask.isAssigned){ + log.info("Removing task: ${singleTask.id} from cluster state") + val removeRequest: RemovePersistentTaskAction.Request = + RemovePersistentTaskAction.Request(singleTask.id) + client.suspendExecute(RemovePersistentTaskAction.INSTANCE, removeRequest) + } + } + } catch (e: Exception) { + log.info("Could not update cluster state") + } + } + + // Remove index replication task metadata, format replication:index:fruit-1 + // Remove shard replication task metadata, format replication:[fruit-1][0] + private fun isReplicationTask( + singleTask: PersistentTasksCustomMetadata.PersistentTask<*>, + request: StopIndexReplicationRequest + ) = singleTask.id.startsWith("replication:") && + (singleTask.id == "replication:index:${request.indexName}" || singleTask.id.split(":")[1].contains(request.indexName)) + + private fun validateReplicationStateOfIndex(request: StopIndexReplicationRequest) { // If replication blocks/settings are present, Stop action should proceed with the clean-up // This can happen during settings of follower index are carried over in the snapshot and the restore is @@ -153,6 +182,15 @@ class TransportStopIndexReplicationAction 
@Inject constructor(transportService: return } + //check for stale replication tasks + val allTasks: PersistentTasksCustomMetadata? = + clusterService.state()?.metadata()?.custom(PersistentTasksCustomMetadata.TYPE) + allTasks?.tasks()?.forEach{ + if (isReplicationTask(it, request) && !it.isAssigned){ + return + } + } + val replicationStateParams = getReplicationStateParamsForIndex(clusterService, request.indexName) ?: throw IllegalArgumentException("No replication in progress for index:${request.indexName}") From a09aab9ed2128b82861c09a56df7a6670f5cd1a9 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 7 Jun 2023 19:20:02 +0530 Subject: [PATCH 58/84] Use strong password in integ test (#987) (#990) Signed-off-by: Sooraj Sinha (cherry picked from commit 93205a1caba599d526f71cb807f7cb66de256cf2) Co-authored-by: Sooraj Sinha <81695996+soosinha@users.noreply.github.com> --- .../replication/integ/rest/SecurityBase.kt | 24 ++++++----- .../integ/rest/SecurityCustomRolesIT.kt | 40 +++++++++---------- .../integ/rest/SecurityCustomRolesLeaderIT.kt | 12 +++--- .../integ/rest/SecurityDlsFlsIT.kt | 22 +++++----- 4 files changed, 50 insertions(+), 48 deletions(-) diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityBase.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityBase.kt index cf12cb6c..2159bc80 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityBase.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityBase.kt @@ -20,6 +20,8 @@ import org.junit.AfterClass import org.junit.BeforeClass import javax.swing.text.StyledEditorKit +const val INTEG_TEST_PASSWORD = "ccr-integ-test@123" + abstract class SecurityBase : MultiClusterRestTestCase() { companion object { var initialized : Boolean = false @@ -284,17 +286,17 @@ abstract class SecurityBase : MultiClusterRestTestCase() { } private fun addUsers(){ - addUserToCluster("testUser1","password", FOLLOWER) - addUserToCluster("testUser1","password", LEADER) - addUserToCluster("testUser2","password", FOLLOWER) - addUserToCluster("testUser2","password", LEADER) - addUserToCluster("testUser3","password", FOLLOWER) - addUserToCluster("testUser4","password", FOLLOWER) - addUserToCluster("testUser5","password", FOLLOWER) - addUserToCluster("testUser6","password", LEADER) - addUserToCluster("testUser6","password", FOLLOWER) - addUserToCluster("testUser7","password", LEADER) - addUserToCluster("testUser7","password", FOLLOWER) + addUserToCluster("testUser1", INTEG_TEST_PASSWORD, FOLLOWER) + addUserToCluster("testUser1", INTEG_TEST_PASSWORD, LEADER) + addUserToCluster("testUser2", INTEG_TEST_PASSWORD, FOLLOWER) + addUserToCluster("testUser2", INTEG_TEST_PASSWORD, LEADER) + addUserToCluster("testUser3", INTEG_TEST_PASSWORD, FOLLOWER) + addUserToCluster("testUser4", INTEG_TEST_PASSWORD, FOLLOWER) + addUserToCluster("testUser5", INTEG_TEST_PASSWORD, FOLLOWER) + addUserToCluster("testUser6", INTEG_TEST_PASSWORD, LEADER) + addUserToCluster("testUser6", INTEG_TEST_PASSWORD, FOLLOWER) + addUserToCluster("testUser7", INTEG_TEST_PASSWORD, LEADER) + addUserToCluster("testUser7", INTEG_TEST_PASSWORD, FOLLOWER) } private fun addUserToCluster(userName: String, password: String, clusterName: String) { diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesIT.kt index 8c302953..94257594 100644 --- 
a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesIT.kt @@ -61,7 +61,7 @@ class SecurityCustomRolesIT: SecurityBase() { useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) followerClient.startReplication(startReplicationRequest, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"), waitForRestore = true) + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD), waitForRestore = true) assertBusy { Assertions.assertThat(followerClient.indices().exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)).isEqualTo(true) } @@ -80,7 +80,7 @@ class SecurityCustomRolesIT: SecurityBase() { useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleNoPerms")) Assertions.assertThatThrownBy { followerClient.startReplication(startReplicationRequest, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser2","password")) } + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser2",INTEG_TEST_PASSWORD)) } .isInstanceOf(ResponseException::class.java) .hasMessageContaining("403 Forbidden") } @@ -90,7 +90,7 @@ class SecurityCustomRolesIT: SecurityBase() { Assertions.assertThatThrownBy { followerClient.stopReplication("follower-index1", - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD)) }.isInstanceOf(ResponseException::class.java) .hasMessageContaining("No replication in progress for index:follower-index1") } @@ -100,7 +100,7 @@ class SecurityCustomRolesIT: SecurityBase() { Assertions.assertThatThrownBy { followerClient.stopReplication("follower-index1", - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser2","password")) + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser2",INTEG_TEST_PASSWORD)) }.isInstanceOf(ResponseException::class.java) .hasMessageContaining("403 Forbidden") } @@ -116,7 +116,7 @@ class SecurityCustomRolesIT: SecurityBase() { var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) - var requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password") + var requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD) followerClient.startReplication(startReplicationRequest, waitForRestore = true, requestOptions = requestOptions) @@ -146,11 +146,11 @@ class SecurityCustomRolesIT: SecurityBase() { useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) followerClient.startReplication(startReplicationRequest, waitForRestore = true, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD)) Assertions.assertThatThrownBy { followerClient.pauseReplication(followerIndexName, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser2","password")) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser2",INTEG_TEST_PASSWORD)) }.isInstanceOf(ResponseException::class.java) .hasMessageContaining("403 Forbidden") } @@ -168,11 +168,11 
@@ class SecurityCustomRolesIT: SecurityBase() { useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) followerClient.startReplication(startReplicationRequest, waitForRestore = true, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD)) assertBusy { `validate status syncing response`(followerClient.replicationStatus(followerIndexName, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"))) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD))) } } @@ -189,11 +189,11 @@ class SecurityCustomRolesIT: SecurityBase() { useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) followerClient.startReplication(startReplicationRequest, waitForRestore = true, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD)) Assertions.assertThatThrownBy { followerClient.replicationStatus(followerIndexName, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser2","password")) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser2",INTEG_TEST_PASSWORD)) }.isInstanceOf(ResponseException::class.java) .hasMessageContaining("403 Forbidden") } @@ -216,7 +216,7 @@ class SecurityCustomRolesIT: SecurityBase() { followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")), - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD)) assertBusy { Assertions.assertThat(followerClient.indices() .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) @@ -235,7 +235,7 @@ class SecurityCustomRolesIT: SecurityBase() { .put("index.shard.check_on_startup", "checksum") .build() followerClient.updateReplication(followerIndexName, settings, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD)) // Wait for the settings to get updated at follower cluster. 
assertBusy ({ @@ -261,7 +261,7 @@ class SecurityCustomRolesIT: SecurityBase() { Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")), - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"), waitForRestore = true) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD), waitForRestore = true) assertBusy { Assertions.assertThat(followerClient.indices() .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) @@ -280,7 +280,7 @@ class SecurityCustomRolesIT: SecurityBase() { .build() Assertions.assertThatThrownBy { followerClient.updateReplication(followerIndexName, settings, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser2","password")) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser2",INTEG_TEST_PASSWORD)) }.isInstanceOf(ResponseException::class.java) .hasMessageContaining("403 Forbidden") } @@ -298,7 +298,7 @@ class SecurityCustomRolesIT: SecurityBase() { try { followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms"), - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD)) // Verify that existing index matching the pattern are replicated. assertBusy ({ Assertions.assertThat(followerClient.indices() @@ -327,7 +327,7 @@ class SecurityCustomRolesIT: SecurityBase() { Assertions.assertThatThrownBy { followerClient.updateAutoFollowPattern(connectionAlias, indexPatternName, indexPattern, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleNoPerms"), - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser2","password")) + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser2",INTEG_TEST_PASSWORD)) }.isInstanceOf(ResponseException::class.java) .hasMessageContaining("403 Forbidden") } @@ -359,7 +359,7 @@ class SecurityCustomRolesIT: SecurityBase() { var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) followerClient.startReplication(startReplicationRequest, waitForRestore = true, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD)) insertDocToIndex(LEADER, "1", "dummy data 1",leaderIndexName) //Querying ES cluster throws random exceptions like ClusterManagerNotDiscovered or ShardsFailed etc, so catching them and retrying assertBusy ({ @@ -371,7 +371,7 @@ class SecurityCustomRolesIT: SecurityBase() { }, 1, TimeUnit.MINUTES) assertBusy { `validate status syncing response`(followerClient.replicationStatus(followerIndexName, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"))) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD))) } updateRole(followerIndexName,"followerRoleValidPerms", false) @@ -379,7 +379,7 @@ class SecurityCustomRolesIT: SecurityBase() { assertBusy 
({ validatePausedState(followerClient.replicationStatus(followerIndexName, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"))) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD))) }, 100, TimeUnit.SECONDS) } finally { updateRole(followerIndexName,"followerRoleValidPerms", true) diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesLeaderIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesLeaderIT.kt index 12df07a5..8d0d2904 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesLeaderIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesLeaderIT.kt @@ -47,7 +47,7 @@ class SecurityCustomRolesLeaderIT: SecurityBase() { var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleNoPerms",followerClusterRole = "followerRoleValidPerms")) Assertions.assertThatThrownBy { followerClient.startReplication(startReplicationRequest, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser6","password")) } + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser6",INTEG_TEST_PASSWORD)) } .isInstanceOf(ResponseException::class.java) .hasMessageContaining("403 Forbidden") .hasMessageContaining("no permissions for [indices:admin/plugins/replication/index/setup/validate]") @@ -64,7 +64,7 @@ class SecurityCustomRolesLeaderIT: SecurityBase() { var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) followerClient.startReplication(startReplicationRequest, waitForRestore = true, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD)) insertDocToIndex(LEADER, "1", "dummy data 1",leaderIndexName) //Querying ES cluster throws random exceptions like ClusterManagerNotDiscovered or ShardsFailed etc, so catching them and retrying assertBusy ({ @@ -76,13 +76,13 @@ class SecurityCustomRolesLeaderIT: SecurityBase() { }, 1, TimeUnit.MINUTES) assertBusy { `validate status syncing response`(followerClient.replicationStatus(followerIndexName, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"))) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD))) } updateRole(followerIndexName,"leaderRoleValidPerms", false) insertDocToIndex(LEADER, "2", "dummy data 2",leaderIndexName) assertBusy ({ validatePausedState(followerClient.replicationStatus(followerIndexName, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"))) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD))) }, 100, TimeUnit.SECONDS) } finally { updateRole(followerIndexName,"leaderRoleValidPerms", true) @@ -101,10 +101,10 @@ class SecurityCustomRolesLeaderIT: SecurityBase() { useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) updateFileChunkPermissions("","leaderRoleValidPerms", false) followerClient.startReplication(startReplicationRequest, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + requestOptions = 
RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD)) assertBusy ({ validateFailedState(followerClient.replicationStatus(followerIndexName, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"))) + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD))) }, 60, TimeUnit.SECONDS) } catch (ex : Exception) { logger.info("Exception is", ex) diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityDlsFlsIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityDlsFlsIT.kt index 82e7465d..5198d2d0 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityDlsFlsIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityDlsFlsIT.kt @@ -49,7 +49,7 @@ class SecurityDlsFlsIT: SecurityBase() { var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerDlsRole")) Assertions.assertThatThrownBy { followerClient.startReplication(startReplicationRequest, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3","password")) } + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3",INTEG_TEST_PASSWORD)) } .isInstanceOf(ResponseException::class.java) .hasMessageContaining(DLS_FLS_EXCEPTION_MESSAGE) .hasMessageContaining("403 Forbidden") @@ -59,7 +59,7 @@ class SecurityDlsFlsIT: SecurityBase() { val followerClient = getClientForCluster(FOLLOWER) Assertions.assertThatThrownBy { followerClient.stopReplication("follower-index1-stop-forbidden", - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3","password")) + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3",INTEG_TEST_PASSWORD)) }.isInstanceOf(ResponseException::class.java) .hasMessageContaining(DLS_FLS_EXCEPTION_MESSAGE) .hasMessageContaining("403 Forbidden") @@ -75,10 +75,10 @@ class SecurityDlsFlsIT: SecurityBase() { var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) followerClient.startReplication(startReplicationRequest, waitForRestore = true, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD)) Assertions.assertThatThrownBy { followerClient.pauseReplication(followerIndexName, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3","password")) + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3",INTEG_TEST_PASSWORD)) }.isInstanceOf(ResponseException::class.java) .hasMessageContaining(DLS_FLS_EXCEPTION_MESSAGE) .hasMessageContaining("403 Forbidden") @@ -94,10 +94,10 @@ class SecurityDlsFlsIT: SecurityBase() { var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")) followerClient.startReplication(startReplicationRequest, waitForRestore = true, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password")) + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD)) Assertions.assertThatThrownBy { followerClient.replicationStatus(followerIndexName, - requestOptions= 
RequestOptions.DEFAULT.addBasicAuthHeader("testUser3",INTEG_TEST_PASSWORD)) }.isInstanceOf(ResponseException::class.java) .hasMessageContaining(DLS_FLS_EXCEPTION_MESSAGE) .hasMessageContaining("403 Forbidden") @@ -116,7 +116,7 @@ class SecurityDlsFlsIT: SecurityBase() { Assertions.assertThat(createIndexResponse.isAcknowledged).isTrue() followerClient.startReplication(StartReplicationRequest("source", leaderIndexName, followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerRoleValidPerms")), - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1","password"), waitForRestore = true) + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser1",INTEG_TEST_PASSWORD), waitForRestore = true) assertBusy { Assertions.assertThat(followerClient.indices() .exists(GetIndexRequest(followerIndexName), RequestOptions.DEFAULT)) @@ -135,7 +135,7 @@ class SecurityDlsFlsIT: SecurityBase() { .build() Assertions.assertThatThrownBy { followerClient.updateReplication(followerIndexName, settings, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3","password")) + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser3",INTEG_TEST_PASSWORD)) }.isInstanceOf(ResponseException::class.java) .hasMessageContaining(DLS_FLS_EXCEPTION_MESSAGE) .hasMessageContaining("403 Forbidden") @@ -151,7 +151,7 @@ class SecurityDlsFlsIT: SecurityBase() { var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerFlsRole")) Assertions.assertThatThrownBy { followerClient.startReplication(startReplicationRequest, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser4","password")) } + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser4",INTEG_TEST_PASSWORD)) } .isInstanceOf(ResponseException::class.java) .hasMessageContaining(DLS_FLS_EXCEPTION_MESSAGE) .hasMessageContaining("403 Forbidden") @@ -167,7 +167,7 @@ class SecurityDlsFlsIT: SecurityBase() { var startReplicationRequest = StartReplicationRequest("source",leaderIndexName,followerIndexName, useRoles = UseRoles(leaderClusterRole = "leaderRoleValidPerms",followerClusterRole = "followerFieldMaskRole")) Assertions.assertThatThrownBy { followerClient.startReplication(startReplicationRequest, - requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser5","password")) } + requestOptions= RequestOptions.DEFAULT.addBasicAuthHeader("testUser5",INTEG_TEST_PASSWORD)) } .isInstanceOf(ResponseException::class.java) .hasMessageContaining(DLS_FLS_EXCEPTION_MESSAGE) .hasMessageContaining("403 Forbidden") @@ -190,7 +190,7 @@ class SecurityDlsFlsIT: SecurityBase() { ) followerClient.startReplication( startReplicationRequest, - requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser7", "password"), + requestOptions = RequestOptions.DEFAULT.addBasicAuthHeader("testUser7", INTEG_TEST_PASSWORD), waitForRestore = true ) OpenSearchTestCase.assertBusy { From b98d38438c35d770a65ff1c2e4d7ec8529896344 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 6 Jul 2023 12:09:02 +0530 Subject: [PATCH 59/84] Avoid use of indicesService in Resume replication flow (#1030) (#1035) Avoid use of indicesService in Resume replication flow.
Signed-off-by: monusingh-1 (cherry picked from commit 66d126e626b5ddbe84d1139655478e14fb9de0be) Co-authored-by: Monu Singh --- .../action/resume/TransportResumeIndexReplicationAction.kt | 6 +----- .../replication/seqno/RemoteClusterRetentionLeaseHelper.kt | 4 ++-- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt b/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt index f281d642..9ca85549 100644 --- a/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt @@ -54,7 +54,6 @@ import org.opensearch.common.io.stream.StreamInput import org.opensearch.env.Environment import org.opensearch.index.IndexNotFoundException import org.opensearch.index.shard.ShardId -import org.opensearch.replication.util.indicesService import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportService import java.io.IOException @@ -136,10 +135,7 @@ class TransportResumeIndexReplicationAction @Inject constructor(transportService shards.forEach { val followerShardId = it.value.shardId - val followerIndexService = indicesService.indexServiceSafe(followerShardId.index) - val indexShard = followerIndexService.getShard(followerShardId.id) - - if (!retentionLeaseHelper.verifyRetentionLeaseExist(ShardId(params.leaderIndex, followerShardId.id), followerShardId, indexShard.lastSyncedGlobalCheckpoint+1)) { + if (!retentionLeaseHelper.verifyRetentionLeaseExist(ShardId(params.leaderIndex, followerShardId.id), followerShardId)) { isResumable = false } } diff --git a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt index 29116c51..a1c1ee2f 100644 --- a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt +++ b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt @@ -58,7 +58,7 @@ class RemoteClusterRetentionLeaseHelper constructor(var followerClusterNameWithU } } - public suspend fun verifyRetentionLeaseExist(leaderShardId: ShardId, followerShardId: ShardId, seqNo: Long): Boolean { + public suspend fun verifyRetentionLeaseExist(leaderShardId: ShardId, followerShardId: ShardId): Boolean { val retentionLeaseId = retentionLeaseIdForShard(followerClusterNameWithUUID, followerShardId) // Currently there is no API to describe/list the retention leases . // So we are verifying the existence of lease by trying to renew a lease by same name . 
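The comment in the hunk above is the key to this change: because there is no API to list or describe retention leases on the leader, the plugin checks whether a lease exists simply by attempting to renew it under the same id. A minimal illustration of that idea follows, assuming the standard RetentionLeaseActions API; the helper name leaseExists and the "replication" source string are made up for the sketch, so this is not the plugin's exact code.

    import org.opensearch.client.Client
    import org.opensearch.index.seqno.RetentionLeaseActions
    import org.opensearch.index.seqno.RetentionLeaseNotFoundException
    import org.opensearch.index.shard.ShardId

    // Illustrative sketch: a successful renew proves the lease exists, while
    // RetentionLeaseNotFoundException proves it does not. RETAIN_ALL (-1) asks the
    // leader to retain all operations for this lease.
    fun leaseExists(leaderClient: Client, leaderShardId: ShardId, retentionLeaseId: String): Boolean {
        val request = RetentionLeaseActions.RenewRequest(
            leaderShardId, retentionLeaseId, RetentionLeaseActions.RETAIN_ALL, "replication") // source string is a placeholder
        return try {
            leaderClient.execute(RetentionLeaseActions.Renew.INSTANCE, request).actionGet()
            true
        } catch (e: RetentionLeaseNotFoundException) {
            false
        }
    }

In the plugin itself the not-found case additionally falls back to addNewRetentionLeaseIfOldExists, as the next hunk shows, which, going by its name, handles leases created under an older id format.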
@@ -74,7 +74,7 @@ class RemoteClusterRetentionLeaseHelper constructor(var followerClusterNameWithU return true } catch (e: RetentionLeaseNotFoundException) { - return addNewRetentionLeaseIfOldExists(leaderShardId, followerShardId, seqNo) + return addNewRetentionLeaseIfOldExists(leaderShardId, followerShardId, RetentionLeaseActions.RETAIN_ALL) }catch (e : Exception) { return false } From d2b49ffb7388e91944604566f526bc98b7c2bd5b Mon Sep 17 00:00:00 2001 From: sricharanvuppu <113983630+sricharanvuppu@users.noreply.github.com> Date: Wed, 12 Jul 2023 20:52:51 +0530 Subject: [PATCH 60/84] [Backport 2.x] Handling OpenSearchRejectExecuteException Exception (#1004) (#1027) * Handling OpenSearchRejectExecuteException Exception (#1004) * Handling OpenSearchRejectExecuteException Exception * introduced writersPerShard setting. Signed-off-by: sricharanvuppu (cherry picked from commit 448e7a7501e5ff8740dc2d7635c08ae62d19147e) Signed-off-by: sricharanvuppu * adding missing checkpoint and correcting follower stats test case Signed-off-by: sricharanvuppu --------- Signed-off-by: sricharanvuppu --- .../replication/ReplicationException.kt | 14 +++- .../replication/ReplicationPlugin.kt | 16 ++-- .../replication/ReplicationSettings.kt | 2 + .../task/shard/ShardReplicationTask.kt | 3 +- .../task/shard/TranslogSequencer.kt | 80 ++++++++++++++----- .../opensearch/replication/util/Extensions.kt | 25 ++++-- .../integ/rest/StartReplicationIT.kt | 18 ++--- .../task/shard/TranslogSequencerTests.kt | 2 +- 8 files changed, 111 insertions(+), 49 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/ReplicationException.kt b/src/main/kotlin/org/opensearch/replication/ReplicationException.kt index 89d2456c..891be0a3 100644 --- a/src/main/kotlin/org/opensearch/replication/ReplicationException.kt +++ b/src/main/kotlin/org/opensearch/replication/ReplicationException.kt @@ -12,22 +12,28 @@ package org.opensearch.replication import org.opensearch.OpenSearchException +import org.opensearch.OpenSearchStatusException import org.opensearch.action.ShardOperationFailedException import org.opensearch.cluster.metadata.IndexMetadata.INDEX_UUID_NA_VALUE import org.opensearch.index.shard.ShardId +import org.opensearch.rest.RestStatus /** * Base class replication exceptions. Note: Replication process may throw exceptions that do not derive from this such as * [org.opensearch.ResourceAlreadyExistsException], [org.opensearch.index.IndexNotFoundException] or * [org.opensearch.index.shard.ShardNotFoundException]. 
*/ -class ReplicationException: OpenSearchException { +class ReplicationException: OpenSearchStatusException { - constructor(message: String, vararg args: Any) : super(message, *args) + constructor(message: String, status : RestStatus, cause: Throwable, vararg args: Any) : super(message, status, cause, *args) - constructor(message: String, cause: Throwable, vararg args: Any) : super(message, cause, *args) + constructor(message: String, vararg args: Any) : super(message, RestStatus.INTERNAL_SERVER_ERROR, *args) - constructor(message: String, shardFailures: Array) : super(message) { + constructor(message: String, status: RestStatus, vararg args: Any) : super(message, status, *args) + + constructor(cause: Throwable, status: RestStatus, vararg args: Any) : super(cause.message, status, *args) + + constructor(message: String, shardFailures: Array): super(message, shardFailures.firstOrNull()?.status()?:RestStatus.INTERNAL_SERVER_ERROR) { shardFailures.firstOrNull()?.let { setShard(ShardId(it.index(), INDEX_UUID_NA_VALUE, it.shardId())) // Add first failure as cause and rest as suppressed... diff --git a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt index 3ea64fe0..4254412c 100644 --- a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt +++ b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt @@ -175,6 +175,8 @@ internal class ReplicationPlugin : Plugin(), ActionPlugin, PersistentTaskPlugin, Setting.Property.Dynamic, Setting.Property.NodeScope) val REPLICATION_FOLLOWER_CONCURRENT_READERS_PER_SHARD = Setting.intSetting("plugins.replication.follower.concurrent_readers_per_shard", 2, 1, Setting.Property.Dynamic, Setting.Property.NodeScope) + val REPLICATION_FOLLOWER_CONCURRENT_WRITERS_PER_SHARD = Setting.intSetting("plugins.replication.follower.concurrent_writers_per_shard", 2, 1, + Setting.Property.Dynamic, Setting.Property.NodeScope) val REPLICATION_PARALLEL_READ_POLL_INTERVAL = Setting.timeSetting ("plugins.replication.follower.poll_interval", TimeValue.timeValueMillis(50), TimeValue.timeValueMillis(1), TimeValue.timeValueSeconds(1), Setting.Property.Dynamic, Setting.Property.NodeScope) val REPLICATION_AUTOFOLLOW_REMOTE_INDICES_POLL_INTERVAL = Setting.timeSetting ("plugins.replication.autofollow.fetch_poll_interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(30), @@ -346,14 +348,14 @@ internal class ReplicationPlugin : Plugin(), ActionPlugin, PersistentTaskPlugin, override fun getSettings(): List> { return listOf(REPLICATED_INDEX_SETTING, REPLICATION_FOLLOWER_OPS_BATCH_SIZE, REPLICATION_LEADER_THREADPOOL_SIZE, - REPLICATION_LEADER_THREADPOOL_QUEUE_SIZE, REPLICATION_FOLLOWER_CONCURRENT_READERS_PER_SHARD, - REPLICATION_FOLLOWER_RECOVERY_CHUNK_SIZE, REPLICATION_FOLLOWER_RECOVERY_PARALLEL_CHUNKS, - REPLICATION_PARALLEL_READ_POLL_INTERVAL, REPLICATION_AUTOFOLLOW_REMOTE_INDICES_POLL_INTERVAL, - REPLICATION_AUTOFOLLOW_REMOTE_INDICES_RETRY_POLL_INTERVAL, REPLICATION_METADATA_SYNC_INTERVAL, - REPLICATION_RETENTION_LEASE_MAX_FAILURE_DURATION, REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING, - REPLICATION_INDEX_TRANSLOG_RETENTION_SIZE, REPLICATION_FOLLOWER_BLOCK_START, REPLICATION_AUTOFOLLOW_CONCURRENT_REPLICATION_JOBS_TRIGGER_SIZE) + REPLICATION_LEADER_THREADPOOL_QUEUE_SIZE, REPLICATION_FOLLOWER_CONCURRENT_READERS_PER_SHARD, + REPLICATION_FOLLOWER_RECOVERY_CHUNK_SIZE, REPLICATION_FOLLOWER_RECOVERY_PARALLEL_CHUNKS, + REPLICATION_PARALLEL_READ_POLL_INTERVAL, 
REPLICATION_AUTOFOLLOW_REMOTE_INDICES_POLL_INTERVAL, + REPLICATION_AUTOFOLLOW_REMOTE_INDICES_RETRY_POLL_INTERVAL, REPLICATION_METADATA_SYNC_INTERVAL, + REPLICATION_RETENTION_LEASE_MAX_FAILURE_DURATION, REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING, + REPLICATION_INDEX_TRANSLOG_RETENTION_SIZE, REPLICATION_FOLLOWER_BLOCK_START, REPLICATION_AUTOFOLLOW_CONCURRENT_REPLICATION_JOBS_TRIGGER_SIZE, + REPLICATION_FOLLOWER_CONCURRENT_WRITERS_PER_SHARD) } - override fun getInternalRepositories(env: Environment, namedXContentRegistry: NamedXContentRegistry, clusterService: ClusterService, recoverySettings: RecoverySettings): Map { val repoFactory = Repository.Factory { repoMetadata: RepositoryMetadata -> diff --git a/src/main/kotlin/org/opensearch/replication/ReplicationSettings.kt b/src/main/kotlin/org/opensearch/replication/ReplicationSettings.kt index a6d1bbd3..2b516f8e 100644 --- a/src/main/kotlin/org/opensearch/replication/ReplicationSettings.kt +++ b/src/main/kotlin/org/opensearch/replication/ReplicationSettings.kt @@ -24,6 +24,7 @@ open class ReplicationSettings(clusterService: ClusterService) { @Volatile var chunkSize = ReplicationPlugin.REPLICATION_FOLLOWER_RECOVERY_CHUNK_SIZE.get(clusterService.settings) @Volatile var concurrentFileChunks = ReplicationPlugin.REPLICATION_FOLLOWER_RECOVERY_PARALLEL_CHUNKS.get(clusterService.settings) @Volatile var readersPerShard = clusterService.clusterSettings.get(ReplicationPlugin.REPLICATION_FOLLOWER_CONCURRENT_READERS_PER_SHARD) + @Volatile var writersPerShard = clusterService.clusterSettings.get(ReplicationPlugin.REPLICATION_FOLLOWER_CONCURRENT_WRITERS_PER_SHARD) @Volatile var batchSize = clusterService.clusterSettings.get(ReplicationPlugin.REPLICATION_FOLLOWER_OPS_BATCH_SIZE) @Volatile var pollDuration: TimeValue = clusterService.clusterSettings.get(ReplicationPlugin.REPLICATION_PARALLEL_READ_POLL_INTERVAL) @Volatile var autofollowFetchPollDuration = clusterService.clusterSettings.get(ReplicationPlugin.REPLICATION_AUTOFOLLOW_REMOTE_INDICES_POLL_INTERVAL) @@ -41,6 +42,7 @@ open class ReplicationSettings(clusterService: ClusterService) { clusterSettings.addSettingsUpdateConsumer(ReplicationPlugin.REPLICATION_FOLLOWER_RECOVERY_CHUNK_SIZE) { value: ByteSizeValue -> this.chunkSize = value} clusterSettings.addSettingsUpdateConsumer(ReplicationPlugin.REPLICATION_FOLLOWER_RECOVERY_PARALLEL_CHUNKS) { value: Int -> this.concurrentFileChunks = value} clusterSettings.addSettingsUpdateConsumer(ReplicationPlugin.REPLICATION_FOLLOWER_CONCURRENT_READERS_PER_SHARD) { value: Int -> this.readersPerShard = value} + clusterSettings.addSettingsUpdateConsumer(ReplicationPlugin.REPLICATION_FOLLOWER_CONCURRENT_WRITERS_PER_SHARD) { value: Int -> this.writersPerShard = value} clusterSettings.addSettingsUpdateConsumer(ReplicationPlugin.REPLICATION_FOLLOWER_OPS_BATCH_SIZE) { batchSize = it } clusterSettings.addSettingsUpdateConsumer(ReplicationPlugin.REPLICATION_PARALLEL_READ_POLL_INTERVAL) { pollDuration = it } clusterSettings.addSettingsUpdateConsumer(ReplicationPlugin.REPLICATION_RETENTION_LEASE_MAX_FAILURE_DURATION) { leaseRenewalMaxFailureDuration = it } diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt index f08b2c6b..e393805e 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt @@ -214,7 +214,7 @@ class ShardReplicationTask(id: 
Long, type: String, action: String, description: // Since this setting is not dynamic, setting update would only reflect after pause-resume or on a new replication job. val rateLimiter = Semaphore(replicationSettings.readersPerShard) val sequencer = TranslogSequencer(scope, replicationMetadata, followerShardId, leaderAlias, leaderShardId.indexName, - TaskId(clusterService.nodeName, id), client, indexShard.localCheckpoint, followerClusterStats) + TaskId(clusterService.nodeName, id), client, indexShard.localCheckpoint, followerClusterStats, replicationSettings.writersPerShard) val changeTracker = ShardReplicationChangesTracker(indexShard, replicationSettings) followerClusterStats.stats[followerShardId]!!.followerCheckpoint = indexShard.localCheckpoint @@ -255,7 +255,6 @@ class ShardReplicationTask(id: Long, type: String, action: String, description: followerClusterStats.stats[followerShardId]!!.opsReadFailures.addAndGet(1) logInfo("Unable to get changes from seqNo: $fromSeqNo. ${e.stackTraceToString()}") changeTracker.updateBatchFetched(false, fromSeqNo, toSeqNo, fromSeqNo - 1,-1) - // Propagate 4xx exceptions up the chain and halt replication as they are irrecoverable val range4xx = 400.rangeTo(499) if (e is OpenSearchException && diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt b/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt index 38b625bf..d8c976dc 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt @@ -21,11 +21,14 @@ import org.opensearch.replication.util.suspendExecuteWithRetries import kotlinx.coroutines.CompletableDeferred import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.ObsoleteCoroutinesApi -import kotlinx.coroutines.channels.Channel import kotlinx.coroutines.channels.actor import kotlinx.coroutines.launch +import kotlinx.coroutines.sync.Semaphore import org.opensearch.client.Client +import org.opensearch.OpenSearchException +import org.opensearch.action.support.TransportActions import org.opensearch.common.logging.Loggers +import org.opensearch.index.IndexNotFoundException import org.opensearch.index.shard.ShardId import org.opensearch.index.translog.Translog import org.opensearch.replication.util.indicesService @@ -33,6 +36,8 @@ import org.opensearch.tasks.TaskId import java.util.ArrayList import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.TimeUnit +import org.opensearch.rest.RestStatus + /** * A TranslogSequencer allows multiple producers of [Translog.Operation]s to write them in sequence number order to an @@ -50,7 +55,7 @@ class TranslogSequencer(scope: CoroutineScope, private val replicationMetadata: private val followerShardId: ShardId, private val leaderAlias: String, private val leaderIndexName: String, private val parentTaskId: TaskId, private val client: Client, initialSeqNo: Long, - private val followerClusterStats: FollowerClusterStats) { + private val followerClusterStats: FollowerClusterStats, writersPerShard : Int) { private val unAppliedChanges = ConcurrentHashMap() private val log = Loggers.getLogger(javaClass, followerShardId)!! 
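The hunk that follows is the heart of this change: replay requests are now dispatched through a kotlinx-coroutines Semaphore sized by the new writersPerShard setting, so at most that many replay coroutines are in flight per shard. A minimal, self-contained sketch of the throttling pattern is shown below; the names are illustrative and this is not the plugin's code.

    import kotlinx.coroutines.CoroutineScope
    import kotlinx.coroutines.launch
    import kotlinx.coroutines.runBlocking
    import kotlinx.coroutines.sync.Semaphore

    // The dispatch loop acquires a permit before launching a writer coroutine, so it
    // suspends once writersPerShard replays are in flight; each permit is released in
    // finally so a failed replay still frees its slot.
    suspend fun CoroutineScope.replayWithBoundedWriters(writersPerShard: Int, batches: List<List<String>>) {
        val rateLimiter = Semaphore(writersPerShard)
        for (batch in batches) {
            rateLimiter.acquire()            // suspends while all writer slots are busy
            launch {
                try {
                    println("replaying ${batch.size} ops")   // replay the batch against the follower shard here
                } finally {
                    rateLimiter.release()    // free the slot for the next batch
                }
            }
        }
    }

    fun main() = runBlocking {
        replayWithBoundedWriters(writersPerShard = 2, batches = List(5) { listOf("op$it") })
    }

Acquiring the permit in the dispatch loop, rather than inside the launched coroutine, is what applies the back-pressure: the sequencer stops pulling the next batch until a writer slot frees up. The same patch also drops the actor capacity from Channel.UNLIMITED to 0, so send() now suspends until the sequencer is ready to receive, pushing that back-pressure further up to the reader side.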
@@ -59,40 +64,74 @@ class TranslogSequencer(scope: CoroutineScope, private val replicationMetadata: val followerIndexService = indicesService.indexServiceSafe(followerShardId.index) val indexShard = followerIndexService.getShard(followerShardId.id) - private val sequencer = scope.actor(capacity = Channel.UNLIMITED) { + private val sequencer = scope.actor(capacity = 0) { + // Exceptions thrown here will mark the channel as failed and the next attempt to send to the channel will // raise the same exception. See [SendChannel.close] method for details. + val rateLimiter = Semaphore(writersPerShard) var highWatermark = initialSeqNo for (m in channel) { + while (unAppliedChanges.containsKey(highWatermark + 1)) { val next = unAppliedChanges.remove(highWatermark + 1)!! val replayRequest = ReplayChangesRequest(followerShardId, next.changes, next.maxSeqNoOfUpdatesOrDeletes, leaderAlias, leaderIndexName) replayRequest.parentTask = parentTaskId + rateLimiter.acquire() launch { var relativeStartNanos = System.nanoTime() val retryOnExceptions = ArrayList>() retryOnExceptions.add(MappingNotAvailableException::class.java) + var tryReplay = true + try { + while (tryReplay) { + tryReplay = false + try { + val replayResponse = client.suspendExecuteWithRetries( + replicationMetadata, + ReplayChangesAction.INSTANCE, + replayRequest, + log = log, + retryOn = retryOnExceptions + ) + if (replayResponse.shardInfo.failed > 0) { + replayResponse.shardInfo.failures.forEachIndexed { i, failure -> + log.error("Failed replaying changes. Failure:$i:$failure}") + } + followerClusterStats.stats[followerShardId]!!.opsWriteFailures.addAndGet( + replayResponse.shardInfo.failed.toLong() + ) + throw ReplicationException( + "failed to replay changes", + replayResponse.shardInfo.failures + ) + } - val replayResponse = client.suspendExecuteWithRetries( - replicationMetadata, - ReplayChangesAction.INSTANCE, - replayRequest, - log = log, - retryOn = retryOnExceptions - ) - if (replayResponse.shardInfo.failed > 0) { - replayResponse.shardInfo.failures.forEachIndexed { i, failure -> - log.error("Failed replaying changes. Failure:$i:$failure}") + val tookInNanos = System.nanoTime() - relativeStartNanos + followerClusterStats.stats[followerShardId]!!.totalWriteTime.addAndGet( + TimeUnit.NANOSECONDS.toMillis(tookInNanos) + ) + followerClusterStats.stats[followerShardId]!!.opsWritten.addAndGet( + replayRequest.changes.size.toLong() + ) + followerClusterStats.stats[followerShardId]!!.followerCheckpoint = indexShard.localCheckpoint + } catch (e: OpenSearchException) { + if (e !is IndexNotFoundException && (retryOnExceptions.contains(e.javaClass) + || TransportActions.isShardNotAvailableException(e) + // This waits for the dependencies to load and retry. 
Helps during boot-up + || e.status().status >= 500 + || e.status() == RestStatus.TOO_MANY_REQUESTS)) { + tryReplay = true + } + else { + log.error("Got non-retriable Exception:${e.message} with status:${e.status()}") + throw e + } + } } - followerClusterStats.stats[followerShardId]!!.opsWriteFailures.addAndGet(replayResponse.shardInfo.failed.toLong()) - throw ReplicationException("failed to replay changes", replayResponse.shardInfo.failures) + } finally { + rateLimiter.release() } - - val tookInNanos = System.nanoTime() - relativeStartNanos - followerClusterStats.stats[followerShardId]!!.totalWriteTime.addAndGet(TimeUnit.NANOSECONDS.toMillis(tookInNanos)) - followerClusterStats.stats[followerShardId]!!.opsWritten.addAndGet(replayRequest.changes.size.toLong()) - followerClusterStats.stats[followerShardId]!!.followerCheckpoint = indexShard.localCheckpoint } highWatermark = next.changes.lastOrNull()?.seqNo() ?: highWatermark } @@ -105,6 +144,7 @@ class TranslogSequencer(scope: CoroutineScope, private val replicationMetadata: completed.await() } + suspend fun send(changes : GetChangesResponse) { unAppliedChanges[changes.fromSeqNo] = changes sequencer.send(Unit) diff --git a/src/main/kotlin/org/opensearch/replication/util/Extensions.kt b/src/main/kotlin/org/opensearch/replication/util/Extensions.kt index 96749f7b..643cc010 100644 --- a/src/main/kotlin/org/opensearch/replication/util/Extensions.kt +++ b/src/main/kotlin/org/opensearch/replication/util/Extensions.kt @@ -29,6 +29,7 @@ import org.opensearch.action.index.IndexResponse import org.opensearch.action.support.TransportActions import org.opensearch.client.Client import org.opensearch.common.util.concurrent.ThreadContext +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException import org.opensearch.index.IndexNotFoundException import org.opensearch.index.shard.ShardId import org.opensearch.index.store.Store @@ -43,6 +44,7 @@ import org.opensearch.transport.NodeDisconnectedException import org.opensearch.transport.NodeNotConnectedException import java.io.PrintWriter import java.io.StringWriter +import java.lang.Exception /* * Extension function to use the store object @@ -110,7 +112,8 @@ suspend fun Client.suspendExecuteWith defaultContext: Boolean = false): Resp { var currentBackoff = backoff retryOn.addAll(defaultRetryableExceptions()) - repeat(numberOfRetries - 1) { + var retryException: Exception + repeat(numberOfRetries - 1) { index -> try { return suspendExecute(replicationMetadata, action, req, injectSecurityContext = injectSecurityContext, defaultContext = defaultContext) @@ -122,19 +125,29 @@ suspend fun Client.suspendExecuteWith // This waits for the dependencies to load and retry. Helps during boot-up || e.status().status >= 500 || e.status() == RestStatus.TOO_MANY_REQUESTS)) { - log.warn("Encountered a failure while executing in $req. Retrying in ${currentBackoff/1000} seconds" + - ".", e) - delay(currentBackoff) - currentBackoff = (currentBackoff * factor).toLong().coerceAtMost(maxTimeOut) + retryException = e; } else { throw e } + } catch (e: OpenSearchRejectedExecutionException) { + if(index < numberOfRetries-2) { + retryException = e; + } + else { + throw ReplicationException(e, RestStatus.TOO_MANY_REQUESTS) + } } + log.warn( + "Encountered a failure while executing in $req. 
Retrying in ${currentBackoff / 1000} seconds" + + ".", retryException + ) + delay(currentBackoff) + currentBackoff = (currentBackoff * factor).toLong().coerceAtMost(maxTimeOut) + } return suspendExecute(replicationMetadata, action, req, injectSecurityContext = injectSecurityContext, defaultContext = defaultContext) // last attempt } - /** * Restore shard from leader cluster with retries. * Only specified error are retried diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt index cd3d849b..e6355d33 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt @@ -886,16 +886,12 @@ class StartReplicationIT: MultiClusterRestTestCase() { }, 60L, TimeUnit.SECONDS) } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/cross-cluster-replication/issues/176") + fun `test follower stats`() { val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) - val leaderIndexName2 = randomAlphaOfLength(10).toLowerCase(Locale.ROOT)+"leader" val followerIndexName2 = randomAlphaOfLength(10).toLowerCase(Locale.ROOT)+"follower" - val leaderIndexName3 = randomAlphaOfLength(10).toLowerCase(Locale.ROOT)+"leader" val followerIndexName3 = randomAlphaOfLength(10).toLowerCase(Locale.ROOT)+"follower" -// val followerIndex2 = "follower_index_2" -// val followerIndex3 = "follower_index_3" createConnectionBetweenClusters(FOLLOWER, LEADER) val createIndexResponse = leaderClient.indices().create( CreateIndexRequest(leaderIndexName), @@ -908,12 +904,12 @@ class StartReplicationIT: MultiClusterRestTestCase() { true ) followerClient.startReplication( - StartReplicationRequest("source", leaderIndexName2, followerIndexName2), + StartReplicationRequest("source", leaderIndexName, followerIndexName2), TimeValue.timeValueSeconds(10), true ) followerClient.startReplication( - StartReplicationRequest("source", leaderIndexName3, followerIndexName3), + StartReplicationRequest("source", leaderIndexName, followerIndexName3), TimeValue.timeValueSeconds(10), true ) @@ -923,12 +919,16 @@ class StartReplicationIT: MultiClusterRestTestCase() { leaderClient.index(IndexRequest(leaderIndexName).id(i.toString()).source(sourceMap), RequestOptions.DEFAULT) } followerClient.pauseReplication(followerIndexName2) - val stats = followerClient.followerStats() + followerClient.stopReplication(followerIndexName3) + var stats = followerClient.followerStats() assertThat(stats.getValue("num_syncing_indices").toString()).isEqualTo("1") assertThat(stats.getValue("num_paused_indices").toString()).isEqualTo("1") assertThat(stats.getValue("num_failed_indices").toString()).isEqualTo("0") assertThat(stats.getValue("num_shard_tasks").toString()).isEqualTo("1") - assertThat(stats.getValue("operations_written").toString()).isEqualTo("50") + assertBusy({ + stats = followerClient.followerStats() + assertThat(stats.getValue("operations_written").toString()).isEqualTo("50") + }, 60, TimeUnit.SECONDS) assertThat(stats.getValue("operations_read").toString()).isEqualTo("50") assertThat(stats.getValue("failed_read_requests").toString()).isEqualTo("0") assertThat(stats.getValue("failed_write_requests").toString()).isEqualTo("0") diff --git a/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt b/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt index 
ac377687..fe6ad1c8 100644 --- a/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt +++ b/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt @@ -94,7 +94,7 @@ class TranslogSequencerTests : OpenSearchTestCase() { Mockito.`when`(indicesService.indexServiceSafe(followerShardId.index)).thenReturn(followerIndexService) Mockito.`when`(followerIndexService.getShard(followerShardId.id)).thenReturn(indexShard) val sequencer = TranslogSequencer(this, replicationMetadata, followerShardId, leaderAlias, leaderIndex, EMPTY_TASK_ID, - client, startSeqNo, stats) + client, startSeqNo, stats, 2) // Send requests out of order (shuffled seqNo) and await for them to be processed. var batchSeqNo = startSeqNo From 51cf59320ef894ef31b973da50976cc582aa5f03 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 21 Jul 2023 13:46:31 -0400 Subject: [PATCH 61/84] Add release notes for 2.9.0 (#1063) (#1088) Add release notes for 2.9 release Signed-off-by: monusingh-1 (cherry picked from commit 96299efbe515571a454c260cb913e34917130d34) Co-authored-by: Monu Singh --- ...ross-cluster-replication.release-notes-2.9.0.0.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 release-notes/opensearch-cross-cluster-replication.release-notes-2.9.0.0.md diff --git a/release-notes/opensearch-cross-cluster-replication.release-notes-2.9.0.0.md b/release-notes/opensearch-cross-cluster-replication.release-notes-2.9.0.0.md new file mode 100644 index 00000000..45b2379d --- /dev/null +++ b/release-notes/opensearch-cross-cluster-replication.release-notes-2.9.0.0.md @@ -0,0 +1,12 @@ +## Version 2.9.0.0 Release Notes + +Compatible with OpenSearch 2.9.0 + + +### Bug Fixes +* Handle bug in Shard replication task assignment ([#881](https://github.com/opensearch-project/cross-cluster-replication/pull/881)) +* Update Leader checkpoint when shard replication task is reinitialized ([#904](https://github.com/opensearch-project/cross-cluster-replication/pull/904)) +* Correctly handle retention lease renewal (if the lease already exists) during bootstrap ([#904](https://github.com/opensearch-project/cross-cluster-replication/pull/904)) +* Clear persistent tasks from cluster state after STOP API is triggered ([#905](https://github.com/opensearch-project/cross-cluster-replication/pull/905)) +* Handle OpenSearchRejectExecuteException Exception during replay ([#1004](https://github.com/opensearch-project/cross-cluster-replication/pull/1004)) +* Fix Resume replication flow in dedicated master node configuration ([#1030](https://github.com/opensearch-project/cross-cluster-replication/pull/1030)) \ No newline at end of file From 46403eb9f7261dc492ba930edd7d54fe9a623dd5 Mon Sep 17 00:00:00 2001 From: Monu Singh Date: Tue, 25 Jul 2023 09:18:48 +0530 Subject: [PATCH 62/84] Backport Refactor changes to 2.x (#1092) * Open Upgrade gradle version and remove remaining use of ImmutableOpenMap (#814) Signed-off-by: Monu Singh * Move from common to core.common (#1087) Keep up with core changes opensearch-project/OpenSearch#8157 move from common to core.common Change kotlin.version "1.8.21" as Class 'org.opensearch.commons.utils.OpenForTesting' was compiled with "1.8.21" version of Kotlin. Ref Set following to use 1.6 as 1.8.21 is not available yet. Checked that other OpenSearch plugins are also doing the same. 
Set kotlinx-coroutines-core to 1.6.0 set kotlinx-coroutines-test to 1.6.0 Added else in when statements as from Kotlin 1.7 version onward exhaustive list is mandatory. 'when' expression must be exhaustive, add necessary 'else' branch --------- Signed-off-by: monusingh-1 --------- Signed-off-by: Monu Singh Signed-off-by: monusingh-1 --- build.gradle | 8 ++-- .../replication/ReplicationException.kt | 6 +-- .../replication/ReplicationPlugin.kt | 4 +- .../AutoFollowClusterManagerNodeRequest.kt | 4 +- ...sportAutoFollowClusterManagerNodeAction.kt | 2 +- .../UpdateAutoFollowPatternRequest.kt | 4 +- .../action/changes/GetChangesRequest.kt | 6 +-- .../action/changes/GetChangesResponse.kt | 4 +- .../changes/TransportGetChangesAction.kt | 6 +-- ...ReplicateIndexClusterManagerNodeRequest.kt | 4 +- .../action/index/ReplicateIndexRequest.kt | 4 +- .../action/index/ReplicateIndexResponse.kt | 4 +- ...tReplicateIndexClusterManagerNodeAction.kt | 4 +- .../block/TransportUpddateIndexBlockAction.kt | 2 +- .../index/block/UpdateIndexBlockRequest.kt | 4 +- .../pause/PauseIndexReplicationRequest.kt | 4 +- .../TransportPauseIndexReplicationAction.kt | 2 +- .../action/replay/ReplayChangesRequest.kt | 6 +-- .../action/replay/ReplayChangesResponse.kt | 2 +- .../replay/TransportReplayChangesAction.kt | 6 +-- .../TransportUpdateReplicationStateDetails.kt | 2 +- .../UpdateReplicationStateDetailsRequest.kt | 4 +- .../action/repository/GetFileChunkRequest.kt | 6 +-- .../action/repository/GetFileChunkResponse.kt | 6 +-- .../repository/GetStoreMetadataRequest.kt | 4 +- .../repository/GetStoreMetadataResponse.kt | 4 +- .../ReleaseLeaderResourcesRequest.kt | 4 +- .../RemoteClusterRepositoryRequest.kt | 6 +-- .../repository/TransportGetFileChunkAction.kt | 8 ++-- .../TransportGetStoreMetadataAction.kt | 6 +-- .../TransportReleaseLeaderResourcesAction.kt | 6 +-- .../resume/ResumeIndexReplicationRequest.kt | 4 +- .../TransportResumeIndexReplicationAction.kt | 10 ++--- .../action/setup/SetupChecksRequest.kt | 4 +- .../setup/TransportSetupChecksAction.kt | 2 +- .../setup/ValidatePermissionsRequest.kt | 4 +- .../action/stats/AutoFollowStatsAction.kt | 6 +-- .../action/stats/AutoFollowStatsRequest.kt | 4 +- .../action/stats/FollowerNodeStatsResponse.kt | 6 +-- .../action/stats/FollowerStatsAction.kt | 2 +- .../action/stats/FollowerStatsRequest.kt | 4 +- .../action/stats/FollowerStatsResponse.kt | 6 +-- .../action/stats/LeaderNodeStatsResponse.kt | 6 +-- .../action/stats/LeaderStatsAction.kt | 2 +- .../action/stats/LeaderStatsRequest.kt | 4 +- .../action/stats/LeaderStatsResponse.kt | 4 +- .../action/stats/NodeStatsRequest.kt | 4 +- .../stats/TransportFollowerStatsAction.kt | 2 +- .../stats/TransportLeaderStatsAction.kt | 4 +- .../action/status/ReplicationStatusAction.kt | 2 +- .../status/ReplicationStatusResponse.kt | 6 +-- .../action/status/ShardInfoRequest.kt | 4 +- .../action/status/ShardInfoResponse.kt | 6 +-- .../action/status/ShardsInfoAction.kt | 2 +- .../action/status/TranportShardsInfoAction.kt | 6 +-- .../stop/StopIndexReplicationRequest.kt | 4 +- .../TransportStopIndexReplicationAction.kt | 2 +- .../TransportUpdateIndexReplicationAction.kt | 2 +- .../update/UpdateIndexReplicationRequest.kt | 4 +- .../metadata/TransportUpdateMetadataAction.kt | 8 ++-- .../metadata/UpdateIndexBlockTask.kt | 10 ++--- .../metadata/UpdateMetadataRequest.kt | 4 +- .../state/ReplicationStateMetadata.kt | 4 +- .../metadata/store/ReplicationMetadata.kt | 6 +-- .../RemoteClusterMultiChunkTransfer.kt | 2 +- 
.../repository/RemoteClusterRepository.kt | 10 ++--- .../rest/AutoFollowStatsHandler.kt | 2 +- .../replication/rest/FollowerStatsHandler.kt | 2 +- .../replication/rest/LeaderStatsHandler.kt | 2 +- .../rest/UpdateAutoFollowPatternsHandler.kt | 2 +- .../replication/rest/UpdateIndexHandler.kt | 2 +- .../RemoteClusterRetentionLeaseHelper.kt | 6 +-- .../replication/seqno/RemoteClusterStats.kt | 6 +-- .../task/CrossClusterReplicationTask.kt | 6 +-- .../replication/task/IndexCloseListener.kt | 2 +- .../replication/task/ReplicationState.kt | 6 +-- .../task/autofollow/AutoFollowParams.kt | 4 +- .../task/autofollow/AutoFollowTask.kt | 6 +-- .../task/index/IndexReplicationParams.kt | 6 +-- .../task/index/IndexReplicationState.kt | 6 +-- .../task/index/IndexReplicationTask.kt | 44 +++++++++---------- .../task/shard/FollowerClusterStats.kt | 6 +-- .../task/shard/ShardReplicationParams.kt | 8 ++-- .../task/shard/ShardReplicationState.kt | 4 +- .../task/shard/ShardReplicationTask.kt | 4 +- .../task/shard/TranslogSequencer.kt | 4 +- .../opensearch/replication/util/Extensions.kt | 4 +- .../ReplicationTranslogDeletionPolicyTests.kt | 4 +- .../bwc/BackwardsCompatibilityIT.kt | 1 + .../integ/rest/SecurityCustomRolesIT.kt | 6 +-- .../integ/rest/SecurityDlsFlsIT.kt | 2 +- .../integ/rest/StartReplicationIT.kt | 28 ++++++------ .../integ/rest/UpdateAutoFollowPatternIT.kt | 2 +- .../singleCluster/SingleClusterSanityIT.kt | 1 + .../task/index/IndexReplicationTaskTests.kt | 4 +- .../replication/task/index/NoOpClient.kt | 14 +++--- .../shard/ShardReplicationExecutorTests.kt | 4 +- .../task/shard/TranslogSequencerTests.kt | 2 +- 98 files changed, 250 insertions(+), 252 deletions(-) diff --git a/build.gradle b/build.gradle index ccbfaf1e..ead3cae9 100644 --- a/build.gradle +++ b/build.gradle @@ -36,7 +36,7 @@ import org.opensearch.gradle.test.RestIntegTestTask buildscript { ext { isSnapshot = "true" == System.getProperty("build.snapshot", "true") - opensearch_version = System.getProperty("opensearch.version", "2.8.0-SNAPSHOT") + opensearch_version = System.getProperty("opensearch.version", "2.10.0-SNAPSHOT") buildVersionQualifier = System.getProperty("build.version_qualifier", "") // e.g. 2.0.0-rc1-SNAPSHOT -> 2.0.0.0-rc1-SNAPSHOT version_tokens = opensearch_version.tokenize('-') @@ -54,7 +54,7 @@ buildscript { plugin_previous_version = opensearch_previous_version.replaceAll(/(\.\d)([^\d]*)$/, '$1.0$2') common_utils_version = System.getProperty("common_utils.version", opensearch_build) - kotlin_version = System.getProperty("kotlin.version", "1.6.0") + kotlin_version = System.getProperty("kotlin.version", "1.8.21") // For fetching security zip from Maven. 
// https://ci.opensearch.org/ci/dbc/distribution-build-opensearch/2.1.0/latest/linux/x64/tar/builds/opensearch/plugins/opensearch-security-2.1.0.0.zip @@ -141,13 +141,13 @@ dependencies { implementation "org.jetbrains.kotlin:kotlin-stdlib-common:${kotlin_version}" implementation "org.jetbrains:annotations:13.0" implementation "com.github.seancfoley:ipaddress:5.3.3" - implementation "org.jetbrains.kotlinx:kotlinx-coroutines-core:${kotlin_version}" + implementation "org.jetbrains.kotlinx:kotlinx-coroutines-core:1.6.0" // Moving away from kotlin_version implementation "org.opensearch:common-utils:${common_utils_version}" testImplementation "org.opensearch.test:framework:${opensearch_version}" testImplementation "org.assertj:assertj-core:3.17.2" testImplementation "org.opensearch.client:opensearch-rest-high-level-client:${opensearch_version}" - testImplementation "org.jetbrains.kotlinx:kotlinx-coroutines-test:${kotlin_version}" + testImplementation "org.jetbrains.kotlinx:kotlinx-coroutines-test:1.6.0" // Moving away from kotlin_version testImplementation "org.jetbrains.kotlin:kotlin-test:${kotlin_version}" testImplementation "com.nhaarman.mockitokotlin2:mockito-kotlin:2.2.0" } diff --git a/src/main/kotlin/org/opensearch/replication/ReplicationException.kt b/src/main/kotlin/org/opensearch/replication/ReplicationException.kt index 891be0a3..83e59220 100644 --- a/src/main/kotlin/org/opensearch/replication/ReplicationException.kt +++ b/src/main/kotlin/org/opensearch/replication/ReplicationException.kt @@ -13,10 +13,10 @@ package org.opensearch.replication import org.opensearch.OpenSearchException import org.opensearch.OpenSearchStatusException -import org.opensearch.action.ShardOperationFailedException +import org.opensearch.core.action.ShardOperationFailedException import org.opensearch.cluster.metadata.IndexMetadata.INDEX_UUID_NA_VALUE -import org.opensearch.index.shard.ShardId -import org.opensearch.rest.RestStatus +import org.opensearch.core.index.shard.ShardId +import org.opensearch.core.rest.RestStatus /** * Base class replication exceptions. 
Note: Replication process may throw exceptions that do not derive from this such as diff --git a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt index 4254412c..7d66c976 100644 --- a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt +++ b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt @@ -84,8 +84,8 @@ import org.opensearch.cluster.service.ClusterService import org.opensearch.common.CheckedFunction import org.opensearch.core.ParseField import org.opensearch.common.component.LifecycleComponent -import org.opensearch.common.io.stream.NamedWriteableRegistry -import org.opensearch.common.io.stream.Writeable +import org.opensearch.core.common.io.stream.NamedWriteableRegistry +import org.opensearch.core.common.io.stream.Writeable import org.opensearch.common.settings.ClusterSettings import org.opensearch.common.settings.IndexScopedSettings import org.opensearch.common.settings.Setting diff --git a/src/main/kotlin/org/opensearch/replication/action/autofollow/AutoFollowClusterManagerNodeRequest.kt b/src/main/kotlin/org/opensearch/replication/action/autofollow/AutoFollowClusterManagerNodeRequest.kt index f9199ea0..216809d8 100644 --- a/src/main/kotlin/org/opensearch/replication/action/autofollow/AutoFollowClusterManagerNodeRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/autofollow/AutoFollowClusterManagerNodeRequest.kt @@ -14,8 +14,8 @@ package org.opensearch.replication.action.autofollow import org.opensearch.commons.authuser.User import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.support.master.MasterNodeRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.ToXContentObject import org.opensearch.core.xcontent.XContentBuilder diff --git a/src/main/kotlin/org/opensearch/replication/action/autofollow/TransportAutoFollowClusterManagerNodeAction.kt b/src/main/kotlin/org/opensearch/replication/action/autofollow/TransportAutoFollowClusterManagerNodeAction.kt index e3c59444..cc93d88a 100644 --- a/src/main/kotlin/org/opensearch/replication/action/autofollow/TransportAutoFollowClusterManagerNodeAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/autofollow/TransportAutoFollowClusterManagerNodeAction.kt @@ -39,7 +39,7 @@ import org.opensearch.cluster.block.ClusterBlockLevel import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.common.settings.IndexScopedSettings import org.opensearch.replication.ReplicationException import org.opensearch.threadpool.ThreadPool diff --git a/src/main/kotlin/org/opensearch/replication/action/autofollow/UpdateAutoFollowPatternRequest.kt b/src/main/kotlin/org/opensearch/replication/action/autofollow/UpdateAutoFollowPatternRequest.kt index a7077793..165ede8b 100644 --- a/src/main/kotlin/org/opensearch/replication/action/autofollow/UpdateAutoFollowPatternRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/autofollow/UpdateAutoFollowPatternRequest.kt @@ -17,8 +17,8 @@ import 
org.opensearch.replication.util.ValidationUtil.validateName import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.support.master.AcknowledgedRequest import org.opensearch.core.ParseField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.common.settings.Settings import org.opensearch.core.xcontent.ObjectParser import org.opensearch.core.xcontent.ToXContent diff --git a/src/main/kotlin/org/opensearch/replication/action/changes/GetChangesRequest.kt b/src/main/kotlin/org/opensearch/replication/action/changes/GetChangesRequest.kt index d3aeb8e7..43e1fa33 100644 --- a/src/main/kotlin/org/opensearch/replication/action/changes/GetChangesRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/changes/GetChangesRequest.kt @@ -14,9 +14,9 @@ package org.opensearch.replication.action.changes import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.support.single.shard.SingleShardRequest import org.opensearch.cluster.node.DiscoveryNode -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.index.shard.ShardId +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.index.shard.ShardId import org.opensearch.transport.RemoteClusterAwareRequest class GetChangesRequest : SingleShardRequest { diff --git a/src/main/kotlin/org/opensearch/replication/action/changes/GetChangesResponse.kt b/src/main/kotlin/org/opensearch/replication/action/changes/GetChangesResponse.kt index f3adaf2e..c71b4795 100644 --- a/src/main/kotlin/org/opensearch/replication/action/changes/GetChangesResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/changes/GetChangesResponse.kt @@ -12,8 +12,8 @@ package org.opensearch.replication.action.changes import org.opensearch.action.ActionResponse -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.index.translog.Translog class GetChangesResponse(val changes: List, diff --git a/src/main/kotlin/org/opensearch/replication/action/changes/TransportGetChangesAction.kt b/src/main/kotlin/org/opensearch/replication/action/changes/TransportGetChangesAction.kt index c483ebad..a3995999 100644 --- a/src/main/kotlin/org/opensearch/replication/action/changes/TransportGetChangesAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/changes/TransportGetChangesAction.kt @@ -23,10 +23,10 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.routing.ShardsIterator import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.Writeable +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.Writeable import org.opensearch.common.unit.TimeValue -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.index.translog.Translog import org.opensearch.indices.IndicesService import 
org.opensearch.replication.ReplicationPlugin.Companion.REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING diff --git a/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexClusterManagerNodeRequest.kt b/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexClusterManagerNodeRequest.kt index 63f77023..0b741d4e 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexClusterManagerNodeRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexClusterManagerNodeRequest.kt @@ -14,8 +14,8 @@ package org.opensearch.replication.action.index import org.opensearch.commons.authuser.User import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.support.master.MasterNodeRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.ToXContentObject import org.opensearch.core.xcontent.XContentBuilder diff --git a/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexRequest.kt b/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexRequest.kt index bb0f9aaf..6024798a 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexRequest.kt @@ -18,8 +18,8 @@ import org.opensearch.action.IndicesRequest import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedRequest import org.opensearch.core.ParseField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.common.settings.Settings import org.opensearch.core.xcontent.ObjectParser import org.opensearch.core.xcontent.ToXContent diff --git a/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexResponse.kt b/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexResponse.kt index 6ab3cc75..31963036 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexResponse.kt @@ -12,8 +12,8 @@ package org.opensearch.replication.action.index import org.opensearch.action.support.master.AcknowledgedResponse -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput class ReplicateIndexResponse(val ack: Boolean) : AcknowledgedResponse(ack) { constructor(inp: StreamInput) : this(inp.readBoolean()) diff --git a/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexClusterManagerNodeAction.kt b/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexClusterManagerNodeAction.kt index 1a926798..36fe81f2 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexClusterManagerNodeAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexClusterManagerNodeAction.kt @@ -40,14 +40,14 @@ import 
org.opensearch.cluster.metadata.IndexMetadata import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.common.settings.IndexScopedSettings import org.opensearch.index.IndexNotFoundException import org.opensearch.persistent.PersistentTasksService import org.opensearch.replication.ReplicationPlugin import org.opensearch.replication.util.stackTraceToString import org.opensearch.repositories.RepositoriesService -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportService import java.io.IOException diff --git a/src/main/kotlin/org/opensearch/replication/action/index/block/TransportUpddateIndexBlockAction.kt b/src/main/kotlin/org/opensearch/replication/action/index/block/TransportUpddateIndexBlockAction.kt index 189e7acc..55e569aa 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/block/TransportUpddateIndexBlockAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/index/block/TransportUpddateIndexBlockAction.kt @@ -31,7 +31,7 @@ import org.opensearch.cluster.block.ClusterBlockLevel import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportService import java.io.IOException diff --git a/src/main/kotlin/org/opensearch/replication/action/index/block/UpdateIndexBlockRequest.kt b/src/main/kotlin/org/opensearch/replication/action/index/block/UpdateIndexBlockRequest.kt index d9b51933..4c766468 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/block/UpdateIndexBlockRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/index/block/UpdateIndexBlockRequest.kt @@ -16,8 +16,8 @@ import org.opensearch.action.IndicesRequest import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedRequest import org.opensearch.core.ParseField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ObjectParser import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.ToXContentObject diff --git a/src/main/kotlin/org/opensearch/replication/action/pause/PauseIndexReplicationRequest.kt b/src/main/kotlin/org/opensearch/replication/action/pause/PauseIndexReplicationRequest.kt index e69dcb44..fce15d2e 100644 --- a/src/main/kotlin/org/opensearch/replication/action/pause/PauseIndexReplicationRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/pause/PauseIndexReplicationRequest.kt @@ -17,8 +17,8 @@ import org.opensearch.action.IndicesRequest import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedRequest import org.opensearch.core.ParseField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput 
+import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ObjectParser import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.ToXContentObject diff --git a/src/main/kotlin/org/opensearch/replication/action/pause/TransportPauseIndexReplicationAction.kt b/src/main/kotlin/org/opensearch/replication/action/pause/TransportPauseIndexReplicationAction.kt index abd54403..eaf828a1 100644 --- a/src/main/kotlin/org/opensearch/replication/action/pause/TransportPauseIndexReplicationAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/pause/TransportPauseIndexReplicationAction.kt @@ -37,7 +37,7 @@ import org.opensearch.cluster.block.ClusterBlockLevel import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportService import java.io.IOException diff --git a/src/main/kotlin/org/opensearch/replication/action/replay/ReplayChangesRequest.kt b/src/main/kotlin/org/opensearch/replication/action/replay/ReplayChangesRequest.kt index 7ac35526..a7366b1a 100644 --- a/src/main/kotlin/org/opensearch/replication/action/replay/ReplayChangesRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/replay/ReplayChangesRequest.kt @@ -12,9 +12,9 @@ package org.opensearch.replication.action.replay import org.opensearch.action.support.replication.ReplicatedWriteRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.index.shard.ShardId +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.index.shard.ShardId import org.opensearch.index.translog.Translog class ReplayChangesRequest : ReplicatedWriteRequest { diff --git a/src/main/kotlin/org/opensearch/replication/action/replay/ReplayChangesResponse.kt b/src/main/kotlin/org/opensearch/replication/action/replay/ReplayChangesResponse.kt index b0bab6fb..5fe155f4 100644 --- a/src/main/kotlin/org/opensearch/replication/action/replay/ReplayChangesResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/replay/ReplayChangesResponse.kt @@ -13,7 +13,7 @@ package org.opensearch.replication.action.replay import org.opensearch.action.support.WriteResponse import org.opensearch.action.support.replication.ReplicationResponse -import org.opensearch.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamInput class ReplayChangesResponse : ReplicationResponse, WriteResponse { diff --git a/src/main/kotlin/org/opensearch/replication/action/replay/TransportReplayChangesAction.kt b/src/main/kotlin/org/opensearch/replication/action/replay/TransportReplayChangesAction.kt index 9b3a78b6..c874e92f 100644 --- a/src/main/kotlin/org/opensearch/replication/action/replay/TransportReplayChangesAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/replay/TransportReplayChangesAction.kt @@ -39,10 +39,10 @@ import org.opensearch.cluster.action.index.MappingUpdatedAction import org.opensearch.cluster.action.shard.ShardStateAction import org.opensearch.cluster.block.ClusterBlockLevel import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.bytes.BytesReference +import 
org.opensearch.core.common.bytes.BytesReference import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.Writeable +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.Writeable import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.XContentType import org.opensearch.index.IndexingPressureService diff --git a/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/TransportUpdateReplicationStateDetails.kt b/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/TransportUpdateReplicationStateDetails.kt index ada0aefa..0b744482 100644 --- a/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/TransportUpdateReplicationStateDetails.kt +++ b/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/TransportUpdateReplicationStateDetails.kt @@ -30,7 +30,7 @@ import org.opensearch.cluster.block.ClusterBlockLevel import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportService diff --git a/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/UpdateReplicationStateDetailsRequest.kt b/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/UpdateReplicationStateDetailsRequest.kt index 04fedd0a..1b139495 100644 --- a/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/UpdateReplicationStateDetailsRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/UpdateReplicationStateDetailsRequest.kt @@ -14,8 +14,8 @@ package org.opensearch.replication.action.replicationstatedetails import org.opensearch.replication.metadata.state.ReplicationStateParams import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.support.master.AcknowledgedRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput class UpdateReplicationStateDetailsRequest: AcknowledgedRequest { diff --git a/src/main/kotlin/org/opensearch/replication/action/repository/GetFileChunkRequest.kt b/src/main/kotlin/org/opensearch/replication/action/repository/GetFileChunkRequest.kt index 8f9213b1..d959866e 100644 --- a/src/main/kotlin/org/opensearch/replication/action/repository/GetFileChunkRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/repository/GetFileChunkRequest.kt @@ -13,9 +13,9 @@ package org.opensearch.replication.action.repository import org.opensearch.action.ActionRequestValidationException import org.opensearch.cluster.node.DiscoveryNode -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.index.shard.ShardId +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.index.shard.ShardId import org.opensearch.index.store.StoreFileMetadata class GetFileChunkRequest : RemoteClusterRepositoryRequest { diff --git 
a/src/main/kotlin/org/opensearch/replication/action/repository/GetFileChunkResponse.kt b/src/main/kotlin/org/opensearch/replication/action/repository/GetFileChunkResponse.kt index 5e292138..325e20f0 100644 --- a/src/main/kotlin/org/opensearch/replication/action/repository/GetFileChunkResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/repository/GetFileChunkResponse.kt @@ -12,9 +12,9 @@ package org.opensearch.replication.action.repository import org.opensearch.action.ActionResponse -import org.opensearch.common.bytes.BytesReference -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.index.store.StoreFileMetadata class GetFileChunkResponse : ActionResponse { diff --git a/src/main/kotlin/org/opensearch/replication/action/repository/GetStoreMetadataRequest.kt b/src/main/kotlin/org/opensearch/replication/action/repository/GetStoreMetadataRequest.kt index f8940730..d26e37e1 100644 --- a/src/main/kotlin/org/opensearch/replication/action/repository/GetStoreMetadataRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/repository/GetStoreMetadataRequest.kt @@ -13,8 +13,8 @@ package org.opensearch.replication.action.repository import org.opensearch.action.ActionRequestValidationException import org.opensearch.cluster.node.DiscoveryNode -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.index.shard.ShardId +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.index.shard.ShardId class GetStoreMetadataRequest : RemoteClusterRepositoryRequest { diff --git a/src/main/kotlin/org/opensearch/replication/action/repository/GetStoreMetadataResponse.kt b/src/main/kotlin/org/opensearch/replication/action/repository/GetStoreMetadataResponse.kt index baeb62b5..9ef97259 100644 --- a/src/main/kotlin/org/opensearch/replication/action/repository/GetStoreMetadataResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/repository/GetStoreMetadataResponse.kt @@ -12,8 +12,8 @@ package org.opensearch.replication.action.repository import org.opensearch.action.ActionResponse -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.index.store.Store class GetStoreMetadataResponse : ActionResponse { diff --git a/src/main/kotlin/org/opensearch/replication/action/repository/ReleaseLeaderResourcesRequest.kt b/src/main/kotlin/org/opensearch/replication/action/repository/ReleaseLeaderResourcesRequest.kt index 2ea55a01..97908e9f 100644 --- a/src/main/kotlin/org/opensearch/replication/action/repository/ReleaseLeaderResourcesRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/repository/ReleaseLeaderResourcesRequest.kt @@ -13,8 +13,8 @@ package org.opensearch.replication.action.repository import org.opensearch.action.ActionRequestValidationException import org.opensearch.cluster.node.DiscoveryNode -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.index.shard.ShardId +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.index.shard.ShardId class ReleaseLeaderResourcesRequest: RemoteClusterRepositoryRequest { diff --git 
a/src/main/kotlin/org/opensearch/replication/action/repository/RemoteClusterRepositoryRequest.kt b/src/main/kotlin/org/opensearch/replication/action/repository/RemoteClusterRepositoryRequest.kt index 8a600a4e..8dd702d9 100644 --- a/src/main/kotlin/org/opensearch/replication/action/repository/RemoteClusterRepositoryRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/repository/RemoteClusterRepositoryRequest.kt @@ -13,9 +13,9 @@ package org.opensearch.replication.action.repository import org.opensearch.action.support.single.shard.SingleShardRequest import org.opensearch.cluster.node.DiscoveryNode -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.index.shard.ShardId +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.index.shard.ShardId import org.opensearch.transport.RemoteClusterAwareRequest abstract class RemoteClusterRepositoryRequest?>: diff --git a/src/main/kotlin/org/opensearch/replication/action/repository/TransportGetFileChunkAction.kt b/src/main/kotlin/org/opensearch/replication/action/repository/TransportGetFileChunkAction.kt index 2564a0e3..9b8b13e1 100644 --- a/src/main/kotlin/org/opensearch/replication/action/repository/TransportGetFileChunkAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/repository/TransportGetFileChunkAction.kt @@ -20,11 +20,11 @@ import org.opensearch.cluster.ClusterState import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.routing.ShardsIterator import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.bytes.BytesArray +import org.opensearch.core.common.bytes.BytesArray import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.index.shard.ShardId +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.index.shard.ShardId import org.opensearch.indices.IndicesService import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportActionProxy diff --git a/src/main/kotlin/org/opensearch/replication/action/repository/TransportGetStoreMetadataAction.kt b/src/main/kotlin/org/opensearch/replication/action/repository/TransportGetStoreMetadataAction.kt index e3c677d8..44ce8570 100644 --- a/src/main/kotlin/org/opensearch/replication/action/repository/TransportGetStoreMetadataAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/repository/TransportGetStoreMetadataAction.kt @@ -20,9 +20,9 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.routing.ShardsIterator import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.index.shard.ShardId +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.index.shard.ShardId import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportActionProxy import org.opensearch.transport.TransportService diff --git a/src/main/kotlin/org/opensearch/replication/action/repository/TransportReleaseLeaderResourcesAction.kt 
b/src/main/kotlin/org/opensearch/replication/action/repository/TransportReleaseLeaderResourcesAction.kt index 9818ebcc..62a615b3 100644 --- a/src/main/kotlin/org/opensearch/replication/action/repository/TransportReleaseLeaderResourcesAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/repository/TransportReleaseLeaderResourcesAction.kt @@ -21,9 +21,9 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.routing.ShardsIterator import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.index.shard.ShardId +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.index.shard.ShardId import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportActionProxy import org.opensearch.transport.TransportService diff --git a/src/main/kotlin/org/opensearch/replication/action/resume/ResumeIndexReplicationRequest.kt b/src/main/kotlin/org/opensearch/replication/action/resume/ResumeIndexReplicationRequest.kt index dd219272..10220a7f 100644 --- a/src/main/kotlin/org/opensearch/replication/action/resume/ResumeIndexReplicationRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/resume/ResumeIndexReplicationRequest.kt @@ -15,8 +15,8 @@ import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.IndicesRequest import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.* class ResumeIndexReplicationRequest : AcknowledgedRequest, IndicesRequest.Replaceable, ToXContentObject { diff --git a/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt b/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt index 9ca85549..a1128913 100644 --- a/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt @@ -50,10 +50,10 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.env.Environment import org.opensearch.index.IndexNotFoundException -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportService import java.io.IOException @@ -130,9 +130,9 @@ class TransportResumeIndexReplicationAction @Inject constructor(transportService private suspend fun isResumable(params :IndexReplicationParams): Boolean { var isResumable = true val remoteClient = client.getRemoteClusterClient(params.leaderAlias) - val shards = clusterService.state().routingTable.indicesRouting().get(params.followerIndexName).shards() + val shards = 
clusterService.state().routingTable.indicesRouting().get(params.followerIndexName)?.shards() val retentionLeaseHelper = RemoteClusterRetentionLeaseHelper(clusterService.clusterName.value(), clusterService.state().metadata.clusterUUID(), remoteClient) - shards.forEach { + shards?.forEach { val followerShardId = it.value.shardId if (!retentionLeaseHelper.verifyRetentionLeaseExist(ShardId(params.leaderIndex, followerShardId.id), followerShardId)) { @@ -146,7 +146,7 @@ class TransportResumeIndexReplicationAction @Inject constructor(transportService // clean up all retention leases we may have accidentally took while doing verifyRetentionLeaseExist . // Idempotent Op which does no harm - shards.forEach { + shards?.forEach { val followerShardId = it.value.shardId log.debug("Removing lease for $followerShardId.id ") retentionLeaseHelper.attemptRetentionLeaseRemoval(ShardId(params.leaderIndex, followerShardId.id), followerShardId) diff --git a/src/main/kotlin/org/opensearch/replication/action/setup/SetupChecksRequest.kt b/src/main/kotlin/org/opensearch/replication/action/setup/SetupChecksRequest.kt index 57b9fffb..44e53460 100644 --- a/src/main/kotlin/org/opensearch/replication/action/setup/SetupChecksRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/setup/SetupChecksRequest.kt @@ -14,8 +14,8 @@ package org.opensearch.replication.action.setup import org.opensearch.replication.metadata.store.ReplicationContext import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.support.master.AcknowledgedRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.ToXContentObject import org.opensearch.core.xcontent.XContentBuilder diff --git a/src/main/kotlin/org/opensearch/replication/action/setup/TransportSetupChecksAction.kt b/src/main/kotlin/org/opensearch/replication/action/setup/TransportSetupChecksAction.kt index 82315e37..0b1169b6 100644 --- a/src/main/kotlin/org/opensearch/replication/action/setup/TransportSetupChecksAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/setup/TransportSetupChecksAction.kt @@ -26,7 +26,7 @@ import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject import org.opensearch.common.util.concurrent.ThreadContext import org.opensearch.replication.util.stackTraceToString -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus import org.opensearch.tasks.Task import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.ActionNotFoundTransportException diff --git a/src/main/kotlin/org/opensearch/replication/action/setup/ValidatePermissionsRequest.kt b/src/main/kotlin/org/opensearch/replication/action/setup/ValidatePermissionsRequest.kt index 430fe9b8..7808fd85 100644 --- a/src/main/kotlin/org/opensearch/replication/action/setup/ValidatePermissionsRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/setup/ValidatePermissionsRequest.kt @@ -15,8 +15,8 @@ import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.IndicesRequest import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import 
org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.ToXContentObject import org.opensearch.core.xcontent.XContentBuilder diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/AutoFollowStatsAction.kt b/src/main/kotlin/org/opensearch/replication/action/stats/AutoFollowStatsAction.kt index 0878d377..3c7b60fe 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/AutoFollowStatsAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/AutoFollowStatsAction.kt @@ -15,9 +15,9 @@ import org.opensearch.action.ActionType import org.opensearch.action.FailedNodeException import org.opensearch.action.TaskOperationFailure import org.opensearch.action.support.tasks.BaseTasksResponse -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS import org.opensearch.core.xcontent.ToXContentObject diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/AutoFollowStatsRequest.kt b/src/main/kotlin/org/opensearch/replication/action/stats/AutoFollowStatsRequest.kt index 93102cf5..7bce9a0d 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/AutoFollowStatsRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/AutoFollowStatsRequest.kt @@ -13,8 +13,8 @@ package org.opensearch.replication.action.stats import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.support.tasks.BaseTasksRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.replication.task.autofollow.AutoFollowTask import org.opensearch.tasks.Task import java.io.IOException diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/FollowerNodeStatsResponse.kt b/src/main/kotlin/org/opensearch/replication/action/stats/FollowerNodeStatsResponse.kt index cdff07ab..a20ae942 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/FollowerNodeStatsResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/FollowerNodeStatsResponse.kt @@ -13,9 +13,9 @@ package org.opensearch.replication.action.stats import org.opensearch.action.support.nodes.BaseNodeResponse import org.opensearch.cluster.node.DiscoveryNode -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.index.shard.ShardId +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.index.shard.ShardId import org.opensearch.replication.task.shard.FollowerShardMetric import org.opensearch.replication.task.shard.FollowerShardMetric.FollowerStats import java.io.IOException diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsAction.kt b/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsAction.kt index 8ecafb98..9d07bea1 100644 --- 
a/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsAction.kt @@ -12,7 +12,7 @@ package org.opensearch.replication.action.stats import org.opensearch.action.ActionType -import org.opensearch.common.io.stream.Writeable +import org.opensearch.core.common.io.stream.Writeable class FollowerStatsAction : ActionType(NAME, reader) { companion object { diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsRequest.kt b/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsRequest.kt index e79e4d2e..9dfdf8f5 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsRequest.kt @@ -12,8 +12,8 @@ package org.opensearch.replication.action.stats import org.opensearch.action.support.nodes.BaseNodesRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import java.io.IOException /** diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsResponse.kt b/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsResponse.kt index 53271fe4..9d4ae15c 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsResponse.kt @@ -17,14 +17,14 @@ import org.opensearch.action.FailedNodeException import org.opensearch.action.support.nodes.BaseNodesResponse import org.opensearch.cluster.ClusterName import org.opensearch.common.Strings -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS import org.opensearch.core.xcontent.ToXContent.Params import org.opensearch.core.xcontent.ToXContentObject import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.replication.metadata.ReplicationOverallState import org.opensearch.replication.metadata.state.REPLICATION_LAST_KNOWN_OVERALL_STATE import org.opensearch.replication.metadata.state.ReplicationStateMetadata diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/LeaderNodeStatsResponse.kt b/src/main/kotlin/org/opensearch/replication/action/stats/LeaderNodeStatsResponse.kt index a326772b..cdc4b4d8 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/LeaderNodeStatsResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/LeaderNodeStatsResponse.kt @@ -13,9 +13,9 @@ package org.opensearch.replication.action.stats import org.opensearch.action.support.nodes.BaseNodeResponse import org.opensearch.cluster.node.DiscoveryNode -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.index.shard.ShardId +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.index.shard.ShardId import org.opensearch.replication.seqno.RemoteShardMetric import 
org.opensearch.replication.seqno.RemoteShardMetric.RemoteStats import java.io.IOException diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsAction.kt b/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsAction.kt index be7b3cd4..83c100d7 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsAction.kt @@ -12,7 +12,7 @@ package org.opensearch.replication.action.stats import org.opensearch.action.ActionType -import org.opensearch.common.io.stream.Writeable +import org.opensearch.core.common.io.stream.Writeable class LeaderStatsAction : ActionType(NAME, reader) { companion object { diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsRequest.kt b/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsRequest.kt index 5523a859..53ed027b 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsRequest.kt @@ -12,8 +12,8 @@ package org.opensearch.replication.action.stats import org.opensearch.action.support.nodes.BaseNodesRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import java.io.IOException /** diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsResponse.kt b/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsResponse.kt index 47333152..29abc78c 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsResponse.kt @@ -17,8 +17,8 @@ import org.opensearch.action.FailedNodeException import org.opensearch.action.support.nodes.BaseNodesResponse import org.opensearch.cluster.ClusterName import org.opensearch.common.Strings -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS import org.opensearch.core.xcontent.ToXContent.Params import org.opensearch.core.xcontent.ToXContentObject diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/NodeStatsRequest.kt b/src/main/kotlin/org/opensearch/replication/action/stats/NodeStatsRequest.kt index 19e180da..6e71b8e6 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/NodeStatsRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/NodeStatsRequest.kt @@ -12,8 +12,8 @@ package org.opensearch.replication.action.stats import org.opensearch.action.support.nodes.BaseNodeRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import java.io.IOException class NodeStatsRequest : BaseNodeRequest { diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/TransportFollowerStatsAction.kt b/src/main/kotlin/org/opensearch/replication/action/stats/TransportFollowerStatsAction.kt index e33d9a0c..7a258ef1 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/TransportFollowerStatsAction.kt 
+++ b/src/main/kotlin/org/opensearch/replication/action/stats/TransportFollowerStatsAction.kt @@ -20,7 +20,7 @@ import org.opensearch.action.support.nodes.TransportNodesAction import org.opensearch.client.node.NodeClient import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.replication.metadata.state.ReplicationStateMetadata import org.opensearch.replication.seqno.RemoteClusterStats import org.opensearch.replication.task.shard.FollowerClusterStats diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/TransportLeaderStatsAction.kt b/src/main/kotlin/org/opensearch/replication/action/stats/TransportLeaderStatsAction.kt index 7cc9ec11..f6a91f2c 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/TransportLeaderStatsAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/TransportLeaderStatsAction.kt @@ -20,8 +20,8 @@ import org.opensearch.action.support.nodes.TransportNodesAction import org.opensearch.client.node.NodeClient import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.index.shard.ShardId +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.index.shard.ShardId import org.opensearch.indices.IndicesService import org.opensearch.replication.seqno.RemoteClusterRetentionLeaseHelper.Companion.RETENTION_LEASE_PREFIX import org.opensearch.replication.seqno.RemoteClusterStats diff --git a/src/main/kotlin/org/opensearch/replication/action/status/ReplicationStatusAction.kt b/src/main/kotlin/org/opensearch/replication/action/status/ReplicationStatusAction.kt index f2416354..eb4de39f 100644 --- a/src/main/kotlin/org/opensearch/replication/action/status/ReplicationStatusAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/status/ReplicationStatusAction.kt @@ -12,7 +12,7 @@ package org.opensearch.replication.action.status import org.opensearch.action.ActionType -import org.opensearch.common.io.stream.Writeable +import org.opensearch.core.common.io.stream.Writeable class ReplicationStatusAction : ActionType(NAME, reader) { companion object { diff --git a/src/main/kotlin/org/opensearch/replication/action/status/ReplicationStatusResponse.kt b/src/main/kotlin/org/opensearch/replication/action/status/ReplicationStatusResponse.kt index a4832381..dc982cee 100644 --- a/src/main/kotlin/org/opensearch/replication/action/status/ReplicationStatusResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/status/ReplicationStatusResponse.kt @@ -12,10 +12,10 @@ package org.opensearch.replication.action.status -import org.opensearch.action.support.DefaultShardOperationFailedException +import org.opensearch.core.action.support.DefaultShardOperationFailedException import org.opensearch.action.support.broadcast.BroadcastResponse -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ToXContent.Params import org.opensearch.core.xcontent.ToXContentObject import org.opensearch.core.xcontent.XContentBuilder diff --git a/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoRequest.kt 
b/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoRequest.kt index 024dd976..67e09d6c 100644 --- a/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoRequest.kt @@ -14,8 +14,8 @@ package org.opensearch.replication.action.status import org.opensearch.action.ActionRequestValidationException import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.broadcast.BroadcastRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.ToXContentObject import org.opensearch.core.xcontent.XContentBuilder diff --git a/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoResponse.kt b/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoResponse.kt index af111889..528e86df 100644 --- a/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/status/ShardInfoResponse.kt @@ -14,12 +14,12 @@ package org.opensearch.replication.action.status import org.opensearch.action.support.broadcast.BroadcastResponse import org.opensearch.action.support.broadcast.BroadcastShardResponse import org.opensearch.core.ParseField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.ToXContentObject import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import java.io.IOException class ShardInfoResponse : BroadcastShardResponse, ToXContentObject { diff --git a/src/main/kotlin/org/opensearch/replication/action/status/ShardsInfoAction.kt b/src/main/kotlin/org/opensearch/replication/action/status/ShardsInfoAction.kt index 81f6ba4e..d77cd714 100644 --- a/src/main/kotlin/org/opensearch/replication/action/status/ShardsInfoAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/status/ShardsInfoAction.kt @@ -12,7 +12,7 @@ package org.opensearch.replication.action.status import org.opensearch.action.ActionType -import org.opensearch.common.io.stream.Writeable +import org.opensearch.core.common.io.stream.Writeable class ShardsInfoAction : ActionType(NAME, reader) { diff --git a/src/main/kotlin/org/opensearch/replication/action/status/TranportShardsInfoAction.kt b/src/main/kotlin/org/opensearch/replication/action/status/TranportShardsInfoAction.kt index d3f542d8..4fd73b2f 100644 --- a/src/main/kotlin/org/opensearch/replication/action/status/TranportShardsInfoAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/status/TranportShardsInfoAction.kt @@ -13,7 +13,7 @@ package org.opensearch.replication.action.status import org.apache.logging.log4j.LogManager import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.DefaultShardOperationFailedException +import org.opensearch.core.action.support.DefaultShardOperationFailedException import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction import org.opensearch.cluster.ClusterState import 
org.opensearch.cluster.block.ClusterBlockException @@ -21,8 +21,8 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.routing.* import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.Writeable +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.Writeable import org.opensearch.index.IndexService import org.opensearch.indices.IndicesService import org.opensearch.threadpool.ThreadPool diff --git a/src/main/kotlin/org/opensearch/replication/action/stop/StopIndexReplicationRequest.kt b/src/main/kotlin/org/opensearch/replication/action/stop/StopIndexReplicationRequest.kt index 2f447eb8..3ae5eff9 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stop/StopIndexReplicationRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stop/StopIndexReplicationRequest.kt @@ -16,8 +16,8 @@ import org.opensearch.action.IndicesRequest import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedRequest import org.opensearch.core.ParseField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.* class StopIndexReplicationRequest : AcknowledgedRequest, IndicesRequest.Replaceable, ToXContentObject { diff --git a/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt b/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt index 8a6fdf71..f18cbb30 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt @@ -51,7 +51,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.metadata.Metadata import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.common.settings.Settings import org.opensearch.replication.util.stackTraceToString import org.opensearch.persistent.PersistentTasksCustomMetadata diff --git a/src/main/kotlin/org/opensearch/replication/action/update/TransportUpdateIndexReplicationAction.kt b/src/main/kotlin/org/opensearch/replication/action/update/TransportUpdateIndexReplicationAction.kt index 7ca866d8..333ed9f7 100644 --- a/src/main/kotlin/org/opensearch/replication/action/update/TransportUpdateIndexReplicationAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/update/TransportUpdateIndexReplicationAction.kt @@ -33,7 +33,7 @@ import org.opensearch.cluster.block.ClusterBlockLevel import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.common.settings.IndexScopedSettings import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportService diff --git 
a/src/main/kotlin/org/opensearch/replication/action/update/UpdateIndexReplicationRequest.kt b/src/main/kotlin/org/opensearch/replication/action/update/UpdateIndexReplicationRequest.kt index 753e2f62..b498ed92 100644 --- a/src/main/kotlin/org/opensearch/replication/action/update/UpdateIndexReplicationRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/update/UpdateIndexReplicationRequest.kt @@ -16,8 +16,8 @@ import org.opensearch.action.IndicesRequest import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedRequest import org.opensearch.core.ParseField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.common.settings.Settings import org.opensearch.common.settings.Settings.readSettingsFromStream import org.opensearch.core.xcontent.* diff --git a/src/main/kotlin/org/opensearch/replication/metadata/TransportUpdateMetadataAction.kt b/src/main/kotlin/org/opensearch/replication/metadata/TransportUpdateMetadataAction.kt index 2fab74ab..21b1643f 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/TransportUpdateMetadataAction.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/TransportUpdateMetadataAction.kt @@ -40,8 +40,8 @@ import org.opensearch.cluster.metadata.* import org.opensearch.cluster.metadata.AliasAction.RemoveIndex import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.index.Index +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.index.Index import org.opensearch.index.IndexNotFoundException import org.opensearch.replication.util.stackTraceToString import org.opensearch.rest.action.admin.indices.AliasesNotFoundException @@ -274,8 +274,8 @@ class TransportUpdateMetadataAction @Inject constructor( val indexAsArray = arrayOf(concreteIndex) val aliasMetadata = metadata.findAliases(action, indexAsArray) val finalAliases: MutableList = ArrayList() - for (curAliases in aliasMetadata.values()) { - for (aliasMeta in curAliases.value) { + for (curAliases in aliasMetadata.values) { + for (aliasMeta in curAliases) { finalAliases.add(aliasMeta.alias()) } } diff --git a/src/main/kotlin/org/opensearch/replication/metadata/UpdateIndexBlockTask.kt b/src/main/kotlin/org/opensearch/replication/metadata/UpdateIndexBlockTask.kt index 8d9d385f..e67293b6 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/UpdateIndexBlockTask.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/UpdateIndexBlockTask.kt @@ -22,10 +22,10 @@ import org.opensearch.cluster.block.ClusterBlockException import org.opensearch.cluster.block.ClusterBlockLevel import org.opensearch.cluster.block.ClusterBlocks import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.collect.ImmutableOpenMap import org.opensearch.index.IndexNotFoundException -import org.opensearch.rest.RestStatus -import java.util.* +import org.opensearch.core.rest.RestStatus +import java.util.Collections +import java.util.EnumSet /* This is our custom index block to prevent changes to follower @@ -49,11 +49,11 @@ fun checkIfIndexBlockedWithLevel(clusterService: ClusterService, clusterBlockLevel: ClusterBlockLevel) { clusterService.state().routingTable.index(indexName) ?: throw 
IndexNotFoundException("Index with name:$indexName doesn't exist") - val writeIndexBlockMap : ImmutableOpenMap> = clusterService.state().blocks() + val writeIndexBlockMap : Map> = clusterService.state().blocks() .indices(clusterBlockLevel) if (!writeIndexBlockMap.containsKey(indexName)) return - val clusterBlocksSet : Set = writeIndexBlockMap.get(indexName) + val clusterBlocksSet : Set = writeIndexBlockMap.getOrDefault(indexName, Collections.emptySet()) if (clusterBlocksSet.contains(INDEX_REPLICATION_BLOCK) && clusterBlocksSet.size > 1) throw ClusterBlockException(clusterBlocksSet) diff --git a/src/main/kotlin/org/opensearch/replication/metadata/UpdateMetadataRequest.kt b/src/main/kotlin/org/opensearch/replication/metadata/UpdateMetadataRequest.kt index 25abe070..5f2e4488 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/UpdateMetadataRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/UpdateMetadataRequest.kt @@ -18,8 +18,8 @@ import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest import org.opensearch.action.admin.indices.open.OpenIndexRequest import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest import org.opensearch.action.support.master.AcknowledgedRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput class UpdateMetadataRequest : AcknowledgedRequest { var indexName: String diff --git a/src/main/kotlin/org/opensearch/replication/metadata/state/ReplicationStateMetadata.kt b/src/main/kotlin/org/opensearch/replication/metadata/state/ReplicationStateMetadata.kt index d6d5c6d9..b87ed1da 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/state/ReplicationStateMetadata.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/state/ReplicationStateMetadata.kt @@ -18,8 +18,8 @@ import org.opensearch.cluster.DiffableUtils.getStringKeySerializer import org.opensearch.cluster.NamedDiff import org.opensearch.cluster.metadata.Metadata import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.core.xcontent.XContentParser diff --git a/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadata.kt b/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadata.kt index 86cd80c2..891d9970 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadata.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadata.kt @@ -13,9 +13,9 @@ package org.opensearch.replication.metadata.store import org.opensearch.commons.authuser.User import org.opensearch.core.ParseField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable import org.opensearch.common.settings.Settings import org.opensearch.core.xcontent.ObjectParser import org.opensearch.core.xcontent.ToXContent diff 
--git a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterMultiChunkTransfer.kt b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterMultiChunkTransfer.kt index f0995346..b3097f13 100644 --- a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterMultiChunkTransfer.kt +++ b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterMultiChunkTransfer.kt @@ -28,7 +28,7 @@ import org.opensearch.client.Client import org.opensearch.cluster.node.DiscoveryNode import org.opensearch.common.unit.ByteSizeValue import org.opensearch.common.util.concurrent.ThreadContext -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.index.store.Store import org.opensearch.index.store.StoreFileMetadata import org.opensearch.indices.recovery.MultiChunkTransfer diff --git a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt index 1e5fac38..f5e7a149 100644 --- a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt +++ b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt @@ -51,7 +51,7 @@ import org.opensearch.common.component.AbstractLifecycleComponent import org.opensearch.common.metrics.CounterMetric import org.opensearch.common.settings.Settings import org.opensearch.index.mapper.MapperService -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.index.snapshots.IndexShardSnapshotStatus import org.opensearch.index.store.Store import org.opensearch.indices.recovery.RecoverySettings @@ -193,8 +193,8 @@ class RemoteClusterRepository(private val repositoryMetadata: RepositoryMetadata override fun getRepositoryData(listener: ActionListener) { val clusterState = getLeaderClusterState(false, false) val shardGenerations = ShardGenerations.builder() - clusterState.metadata.indices.values() - .map { it.value } + clusterState.metadata.indices.values + .map { it } .forEach { indexMetadata -> val indexId = IndexId(indexMetadata.index.name, indexMetadata.indexUUID) for (i in 0 until indexMetadata.numberOfShards) { @@ -215,7 +215,7 @@ class RemoteClusterRepository(private val repositoryMetadata: RepositoryMetadata override fun getSnapshotInfo(snapshotId: SnapshotId): SnapshotInfo { val leaderClusterState = getLeaderClusterState(false, false) assert(REMOTE_SNAPSHOT_NAME.equals(snapshotId.name), { "SnapshotName differs" }) - val indices = leaderClusterState.metadata().indices().keys().map { x -> x.value } + val indices = leaderClusterState.metadata().indices().keys.toList() return SnapshotInfo(snapshotId, indices, emptyList(), SnapshotState.SUCCESS, Version.CURRENT) } @@ -244,7 +244,7 @@ class RemoteClusterRepository(private val repositoryMetadata: RepositoryMetadata builder.remove(REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING.key) val indexMdBuilder = IndexMetadata.builder(indexMetadata).settings(builder) - indexMetadata.aliases.valuesIt().forEach { + indexMetadata.aliases.values.forEach { indexMdBuilder.putAlias(it) } return indexMdBuilder.build() diff --git a/src/main/kotlin/org/opensearch/replication/rest/AutoFollowStatsHandler.kt b/src/main/kotlin/org/opensearch/replication/rest/AutoFollowStatsHandler.kt index 42421eb4..f9055c70 100644 --- a/src/main/kotlin/org/opensearch/replication/rest/AutoFollowStatsHandler.kt +++ 
b/src/main/kotlin/org/opensearch/replication/rest/AutoFollowStatsHandler.kt @@ -15,7 +15,7 @@ import org.opensearch.rest.RestChannel import org.opensearch.rest.RestHandler import org.opensearch.rest.RestRequest import org.opensearch.rest.RestResponse -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus import org.opensearch.rest.action.RestResponseListener import java.io.IOException diff --git a/src/main/kotlin/org/opensearch/replication/rest/FollowerStatsHandler.kt b/src/main/kotlin/org/opensearch/replication/rest/FollowerStatsHandler.kt index ce5013f6..3dc0e4dc 100644 --- a/src/main/kotlin/org/opensearch/replication/rest/FollowerStatsHandler.kt +++ b/src/main/kotlin/org/opensearch/replication/rest/FollowerStatsHandler.kt @@ -15,7 +15,7 @@ import org.opensearch.rest.RestChannel import org.opensearch.rest.RestHandler import org.opensearch.rest.RestRequest import org.opensearch.rest.RestResponse -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus import org.opensearch.rest.action.RestResponseListener import java.io.IOException diff --git a/src/main/kotlin/org/opensearch/replication/rest/LeaderStatsHandler.kt b/src/main/kotlin/org/opensearch/replication/rest/LeaderStatsHandler.kt index d71379bf..17acc842 100644 --- a/src/main/kotlin/org/opensearch/replication/rest/LeaderStatsHandler.kt +++ b/src/main/kotlin/org/opensearch/replication/rest/LeaderStatsHandler.kt @@ -15,7 +15,7 @@ import org.opensearch.rest.RestChannel import org.opensearch.rest.RestHandler import org.opensearch.rest.RestRequest import org.opensearch.rest.RestResponse -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus import org.opensearch.rest.action.RestResponseListener import java.io.IOException diff --git a/src/main/kotlin/org/opensearch/replication/rest/UpdateAutoFollowPatternsHandler.kt b/src/main/kotlin/org/opensearch/replication/rest/UpdateAutoFollowPatternsHandler.kt index ccb9463a..a8a738e2 100644 --- a/src/main/kotlin/org/opensearch/replication/rest/UpdateAutoFollowPatternsHandler.kt +++ b/src/main/kotlin/org/opensearch/replication/rest/UpdateAutoFollowPatternsHandler.kt @@ -19,7 +19,7 @@ import org.opensearch.rest.BaseRestHandler import org.opensearch.rest.BaseRestHandler.RestChannelConsumer import org.opensearch.rest.RestHandler import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus import org.opensearch.rest.action.RestToXContentListener class UpdateAutoFollowPatternsHandler : BaseRestHandler() { diff --git a/src/main/kotlin/org/opensearch/replication/rest/UpdateIndexHandler.kt b/src/main/kotlin/org/opensearch/replication/rest/UpdateIndexHandler.kt index 22e4a6e1..9a5eb086 100644 --- a/src/main/kotlin/org/opensearch/replication/rest/UpdateIndexHandler.kt +++ b/src/main/kotlin/org/opensearch/replication/rest/UpdateIndexHandler.kt @@ -17,7 +17,7 @@ import org.opensearch.replication.task.index.IndexReplicationExecutor.Companion. 
import org.opensearch.action.support.IndicesOptions import org.opensearch.client.Requests import org.opensearch.client.node.NodeClient -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.rest.BaseRestHandler import org.opensearch.rest.BaseRestHandler.RestChannelConsumer import org.opensearch.rest.RestChannel diff --git a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt index a1c1ee2f..823f61c0 100644 --- a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt +++ b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterRetentionLeaseHelper.kt @@ -23,7 +23,7 @@ import org.opensearch.index.seqno.RetentionLeaseAlreadyExistsException import org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException import org.opensearch.index.seqno.RetentionLeaseNotFoundException import org.opensearch.index.shard.IndexShard -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.replication.metadata.store.ReplicationMetadata import org.opensearch.replication.repository.RemoteClusterRepository import org.opensearch.replication.task.index.IndexReplicationParams @@ -136,9 +136,9 @@ class RemoteClusterRetentionLeaseHelper constructor(var followerClusterNameWithU val remoteMetadata = getLeaderIndexMetadata(replMetadata.connectionName, replMetadata.leaderContext.resource) val params = IndexReplicationParams(replMetadata.connectionName, remoteMetadata.index, followerIndexName) val remoteClient = client.getRemoteClusterClient(params.leaderAlias) - val shards = clusterService.state().routingTable.indicesRouting().get(params.followerIndexName).shards() + val shards = clusterService.state().routingTable.indicesRouting().get(params.followerIndexName)?.shards() val retentionLeaseHelper = RemoteClusterRetentionLeaseHelper(clusterService.clusterName.value(), followerClusterUUID, remoteClient) - shards.forEach { + shards?.forEach { val followerShardId = it.value.shardId log.debug("Removing lease for $followerShardId.id ") retentionLeaseHelper.attemptRetentionLeaseRemoval(ShardId(params.leaderIndex, followerShardId.id), followerShardId) diff --git a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterStats.kt b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterStats.kt index cc565d03..80a9d6d2 100644 --- a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterStats.kt +++ b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterStats.kt @@ -13,13 +13,13 @@ package org.opensearch.replication.seqno import org.opensearch.common.component.AbstractLifecycleComponent import org.opensearch.common.inject.Singleton -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.ToXContentFragment import org.opensearch.core.xcontent.ToXContentObject import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import java.util.concurrent.atomic.AtomicLong class RemoteShardMetric { diff --git a/src/main/kotlin/org/opensearch/replication/task/CrossClusterReplicationTask.kt 
b/src/main/kotlin/org/opensearch/replication/task/CrossClusterReplicationTask.kt index 75477625..7f03dd79 100644 --- a/src/main/kotlin/org/opensearch/replication/task/CrossClusterReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/CrossClusterReplicationTask.kt @@ -34,20 +34,20 @@ import org.opensearch.action.ActionListener import org.opensearch.action.ActionResponse import org.opensearch.client.Client import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.common.settings.Settings import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.ToXContentObject import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.index.IndexService import org.opensearch.index.shard.IndexShard -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.indices.cluster.IndicesClusterStateService import org.opensearch.persistent.AllocatedPersistentTask import org.opensearch.persistent.PersistentTaskState import org.opensearch.persistent.PersistentTasksService import org.opensearch.replication.util.stackTraceToString -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus import org.opensearch.tasks.TaskId import org.opensearch.tasks.TaskManager import org.opensearch.threadpool.ThreadPool diff --git a/src/main/kotlin/org/opensearch/replication/task/IndexCloseListener.kt b/src/main/kotlin/org/opensearch/replication/task/IndexCloseListener.kt index ecd87698..cbc4f676 100644 --- a/src/main/kotlin/org/opensearch/replication/task/IndexCloseListener.kt +++ b/src/main/kotlin/org/opensearch/replication/task/IndexCloseListener.kt @@ -15,7 +15,7 @@ import org.opensearch.common.settings.Settings import org.opensearch.index.IndexService import org.opensearch.index.shard.IndexEventListener import org.opensearch.index.shard.IndexShard -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.indices.cluster.IndicesClusterStateService import java.util.Collections import java.util.concurrent.ConcurrentHashMap diff --git a/src/main/kotlin/org/opensearch/replication/task/ReplicationState.kt b/src/main/kotlin/org/opensearch/replication/task/ReplicationState.kt index 3a81f74e..1f0763b8 100644 --- a/src/main/kotlin/org/opensearch/replication/task/ReplicationState.kt +++ b/src/main/kotlin/org/opensearch/replication/task/ReplicationState.kt @@ -11,9 +11,9 @@ package org.opensearch.replication.task -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.ToXContentFragment import org.opensearch.core.xcontent.XContentBuilder diff --git a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowParams.kt b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowParams.kt index 9bcecf64..2de3ad8e 100644 --- a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowParams.kt +++ b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowParams.kt @@ -13,8 +13,8 @@ package org.opensearch.replication.task.autofollow import 
org.opensearch.Version import org.opensearch.core.ParseField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ObjectParser import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.XContentBuilder diff --git a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt index 15c22922..d47843d6 100644 --- a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt @@ -29,8 +29,8 @@ import org.opensearch.action.admin.indices.get.GetIndexRequest import org.opensearch.action.support.IndicesOptions import org.opensearch.client.Client import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.common.logging.Loggers import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.XContentBuilder @@ -39,7 +39,7 @@ import org.opensearch.replication.ReplicationException import org.opensearch.replication.action.status.ReplicationStatusAction import org.opensearch.replication.action.status.ShardInfoRequest import org.opensearch.replication.action.status.ShardInfoResponse -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus import org.opensearch.tasks.Task import org.opensearch.tasks.TaskId import org.opensearch.threadpool.Scheduler diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt index efaf2af6..6a758b8f 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt @@ -14,14 +14,14 @@ package org.opensearch.replication.task.index import org.opensearch.Version import org.opensearch.core.ParseField import org.opensearch.common.Strings -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ObjectParser import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.core.xcontent.XContentParser import org.opensearch.common.xcontent.XContentType -import org.opensearch.index.Index +import org.opensearch.core.index.Index import org.opensearch.persistent.PersistentTaskParams import java.io.IOException diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationState.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationState.kt index 010d1447..d55accd7 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationState.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationState.kt @@ -14,13 +14,13 @@ package org.opensearch.replication.task.index import org.opensearch.replication.task.ReplicationState import 
org.opensearch.replication.task.shard.ShardReplicationParams import org.opensearch.core.ParseField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ObjectParser import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.core.xcontent.XContentParser -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.persistent.PersistentTaskState import org.opensearch.persistent.PersistentTasksCustomMetadata.PersistentTask import java.io.IOException diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt index 4d2537ad..f520926b 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt @@ -68,7 +68,7 @@ import org.opensearch.cluster.RestoreInProgress import org.opensearch.cluster.metadata.IndexMetadata import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.common.logging.Loggers import org.opensearch.common.settings.Setting import org.opensearch.common.settings.Settings @@ -79,11 +79,11 @@ import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.ToXContentObject import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentType -import org.opensearch.index.Index +import org.opensearch.core.index.Index import org.opensearch.index.IndexService import org.opensearch.index.IndexSettings import org.opensearch.index.shard.IndexShard -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.indices.cluster.IndicesClusterStateService import org.opensearch.indices.recovery.RecoveryState import org.opensearch.persistent.PersistentTaskState @@ -94,7 +94,7 @@ import org.opensearch.persistent.PersistentTasksService import org.opensearch.replication.ReplicationException import org.opensearch.replication.MappingNotAvailableException import org.opensearch.replication.ReplicationPlugin.Companion.REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus import org.opensearch.tasks.TaskId import org.opensearch.tasks.TaskManager import org.opensearch.threadpool.ThreadPool @@ -346,9 +346,9 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript val clusterState = clusterService.state() val persistentTasks = clusterState.metadata.custom(PersistentTasksCustomMetadata.TYPE) - val followerShardIds = clusterService.state().routingTable.indicesRouting().get(followerIndexName).shards() - .map { shard -> shard.value.shardId } - .stream().collect(Collectors.toSet()) + val followerShardIds = clusterService.state().routingTable.indicesRouting().get(followerIndexName)?.shards() + ?.map { shard -> shard.value.shardId } + ?.stream()?.collect(Collectors.toSet()).orEmpty() val runningShardTasksForIndex = persistentTasks.findTasks(ShardReplicationExecutor.TASK_NAME, 
Predicate { true }).stream() .map { task -> task.params as ShardReplicationParams } .filter {taskParam -> followerShardIds.contains(taskParam.followerShardId) } @@ -434,16 +434,16 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript // If we we want to retrieve just the version of settings and alias versions, there are two options // 1. Include this in GetChanges and communicate it to IndexTask via Metadata // 2. Add another API to retrieve version of settings & aliases. Persist current version in Metadata - var leaderSettings = settingsResponse.indexToSettings.get(this.leaderIndex.name) - leaderSettings = leaderSettings.filter { k: String? -> + var leaderSettings = settingsResponse.indexToSettings.getOrDefault(this.leaderIndex.name, Settings.EMPTY) + leaderSettings = leaderSettings.filter { k: String -> !blockListedSettings.contains(k) } gsr = GetSettingsRequest().includeDefaults(false).indices(this.followerIndexName) settingsResponse = client.suspending(client.admin().indices()::getSettings, injectSecurityContext = true)(gsr) - var followerSettings = settingsResponse.indexToSettings.get(this.followerIndexName) + var followerSettings = settingsResponse.indexToSettings.getOrDefault(this.followerIndexName, Settings.EMPTY) - followerSettings = followerSettings.filter { k: String? -> + followerSettings = followerSettings.filter { k: String -> k != REPLICATED_INDEX_SETTING.key } @@ -516,11 +516,11 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript //Alias var getAliasesRequest = GetAliasesRequest().indices(this.leaderIndex.name) var getAliasesRes = remoteClient.suspending(remoteClient.admin().indices()::getAliases, injectSecurityContext = true)(getAliasesRequest) - var leaderAliases = getAliasesRes.aliases.get(this.leaderIndex.name) + var leaderAliases = getAliasesRes.aliases.getOrDefault(this.leaderIndex.name, Collections.emptyList()) getAliasesRequest = GetAliasesRequest().indices(followerIndexName) getAliasesRes = client.suspending(client.admin().indices()::getAliases, injectSecurityContext = true)(getAliasesRequest) - var followerAliases = getAliasesRes.aliases.get(followerIndexName) + var followerAliases = getAliasesRes.aliases.getOrDefault(followerIndexName, Collections.emptyList()) var request :IndicesAliasesRequest? 
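The hunks above replace direct map indexing on the settings and alias responses (indexToSettings.get(...), aliases.get(...)) with getOrDefault(..., Settings.EMPTY) and getOrDefault(..., Collections.emptyList()), apparently because the 2.x core now exposes these as plain java.util.Map rather than ImmutableOpenMap, so a missing index returns null instead of a value. A minimal, self-contained sketch of this null-safe lookup pattern follows (not part of the patch itself); plain Kotlin maps stand in for the OpenSearch response objects, and the index names are hypothetical:

    fun main() {
        // Stand-in for GetSettingsResponse.indexToSettings: index name -> settings key/value pairs.
        val indexToSettings: Map<String, Map<String, String>> = mapOf(
            "leader-index" to mapOf("index.number_of_replicas" to "1")
        )

        // Old pattern: indexToSettings["follower-index"]!! throws when the index is absent.
        // New pattern: fall back to an empty map so callers can treat "absent" as "no settings".
        val followerSettings = indexToSettings.getOrDefault("follower-index", emptyMap())
        println(followerSettings["index.number_of_replicas"] ?: "<not set>")

        // Same idea for alias lookups: default to an empty list instead of dereferencing null.
        val aliases: Map<String, List<String>> = mapOf("leader-index" to listOf("logs-alias"))
        val followerAliases = aliases.getOrDefault("follower-index", emptyList())
        println("follower aliases: $followerAliases")
    }
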
@@ -606,8 +606,8 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript try { //Step 1 : Remove the tasks - val shards = clusterService.state().routingTable.indicesRouting().get(followerIndexName).shards() - shards.forEach { + val shards = clusterService.state().routingTable.indicesRouting().get(followerIndexName)?.shards() + shards?.forEach { persistentTasksService.removeTask(ShardReplicationTask.taskIdForShard(it.value.shardId)) } @@ -748,7 +748,7 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript suspend fun startNewOrMissingShardTasks(): Map> { assert(clusterService.state().routingTable.hasIndex(followerIndexName)) { "Can't find index $followerIndexName" } - val shards = clusterService.state().routingTable.indicesRouting().get(followerIndexName).shards() + val shards = clusterService.state().routingTable.indicesRouting().get(followerIndexName)?.shards() val persistentTasks = clusterService.state().metadata.custom(PersistentTasksCustomMetadata.TYPE) val runningShardTasks = persistentTasks.findTasks(ShardReplicationExecutor.TASK_NAME, Predicate { true }).stream() .map { task -> task as PersistentTask } @@ -757,14 +757,14 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript {t: PersistentTask -> t.params!!.followerShardId}, {t: PersistentTask -> t})) - val tasks = shards.map { + val tasks = shards?.map { it.value.shardId - }.associate { shardId -> + }?.associate { shardId -> val task = runningShardTasks.getOrElse(shardId) { startReplicationTask(ShardReplicationParams(leaderAlias, ShardId(leaderIndex, shardId.id), shardId)) } return@associate shardId to task - } + }.orEmpty() return tasks } @@ -865,9 +865,9 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript This can happen if there was a badly timed cluster manager node failure.""".trimIndent()) } } else if (restore.state() == RestoreInProgress.State.FAILURE) { - val failureReason = restore.shards().values().find { - it.value.state() == RestoreInProgress.State.FAILURE - }!!.value.reason() + val failureReason = restore.shards().values.find { + it.state() == RestoreInProgress.State.FAILURE + }!!.reason() return FailedState(Collections.emptyMap(), failureReason) } else { return InitFollowState diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/FollowerClusterStats.kt b/src/main/kotlin/org/opensearch/replication/task/shard/FollowerClusterStats.kt index db112a1f..77cfb672 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/FollowerClusterStats.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/FollowerClusterStats.kt @@ -13,13 +13,13 @@ package org.opensearch.replication.task.shard import org.apache.logging.log4j.LogManager import org.opensearch.common.inject.Singleton -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.ToXContentFragment import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentType -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import java.util.concurrent.atomic.AtomicLong class FollowerShardMetric { diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt 
b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt index d8f790a1..c1981de1 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt @@ -14,17 +14,17 @@ package org.opensearch.replication.task.shard import org.opensearch.Version import org.opensearch.core.ParseField import org.opensearch.common.Strings -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ObjectParser import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.core.xcontent.XContentParser import org.opensearch.common.xcontent.XContentType -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.persistent.PersistentTaskParams import java.io.IOException -import org.opensearch.index.Index +import org.opensearch.core.index.Index class ShardReplicationParams : PersistentTaskParams { diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationState.kt b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationState.kt index f0c3fc88..33efd3ca 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationState.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationState.kt @@ -14,8 +14,8 @@ package org.opensearch.replication.task.shard import org.opensearch.replication.task.ReplicationState import org.opensearch.OpenSearchException import org.opensearch.core.ParseField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ObjectParser import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.XContentBuilder diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt index e393805e..e165c916 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt @@ -47,10 +47,10 @@ import org.opensearch.common.logging.Loggers import org.opensearch.index.seqno.RetentionLeaseActions import org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException import org.opensearch.index.seqno.RetentionLeaseNotFoundException -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.persistent.PersistentTaskState import org.opensearch.persistent.PersistentTasksNodeService -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus import org.opensearch.tasks.TaskId import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.NodeNotConnectedException diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt b/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt index d8c976dc..bbb6837a 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt +++ 
b/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt @@ -29,14 +29,14 @@ import org.opensearch.OpenSearchException import org.opensearch.action.support.TransportActions import org.opensearch.common.logging.Loggers import org.opensearch.index.IndexNotFoundException -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.index.translog.Translog import org.opensearch.replication.util.indicesService import org.opensearch.tasks.TaskId import java.util.ArrayList import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.TimeUnit -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus /** diff --git a/src/main/kotlin/org/opensearch/replication/util/Extensions.kt b/src/main/kotlin/org/opensearch/replication/util/Extensions.kt index 643cc010..1928ad6b 100644 --- a/src/main/kotlin/org/opensearch/replication/util/Extensions.kt +++ b/src/main/kotlin/org/opensearch/replication/util/Extensions.kt @@ -31,13 +31,13 @@ import org.opensearch.client.Client import org.opensearch.common.util.concurrent.ThreadContext import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException import org.opensearch.index.IndexNotFoundException -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.index.store.Store import org.opensearch.indices.recovery.RecoveryState import org.opensearch.replication.ReplicationException import org.opensearch.replication.util.stackTraceToString import org.opensearch.repositories.IndexId -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus import org.opensearch.snapshots.SnapshotId import org.opensearch.transport.ConnectTransportException import org.opensearch.transport.NodeDisconnectedException diff --git a/src/test/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicyTests.kt b/src/test/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicyTests.kt index 8378bbbb..a59e75fe 100644 --- a/src/test/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicyTests.kt +++ b/src/test/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicyTests.kt @@ -14,14 +14,14 @@ import org.apache.lucene.store.ByteArrayDataOutput import org.hamcrest.Matchers.equalTo import org.mockito.Mockito import org.opensearch.common.UUIDs -import org.opensearch.common.bytes.BytesArray +import org.opensearch.core.common.bytes.BytesArray import org.opensearch.common.bytes.ReleasableBytesReference import org.opensearch.common.collect.Tuple import org.opensearch.common.util.BigArrays import org.opensearch.common.util.io.IOUtils import org.opensearch.index.seqno.RetentionLease import org.opensearch.index.seqno.RetentionLeases -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.test.OpenSearchTestCase import java.io.IOException import java.nio.channels.FileChannel diff --git a/src/test/kotlin/org/opensearch/replication/bwc/BackwardsCompatibilityIT.kt b/src/test/kotlin/org/opensearch/replication/bwc/BackwardsCompatibilityIT.kt index f6f2c893..e8e803a8 100644 --- a/src/test/kotlin/org/opensearch/replication/bwc/BackwardsCompatibilityIT.kt +++ b/src/test/kotlin/org/opensearch/replication/bwc/BackwardsCompatibilityIT.kt @@ -82,6 +82,7 @@ class BackwardsCompatibilityIT : MultiClusterRestTestCase() { ClusterStatus.ONE_THIRD_UPGRADED, ClusterStatus.TWO_THIRD_UPGRADED, ClusterStatus.ROLLING_UPGRADED, 
ClusterStatus.FULL_CLUSTER_RESTART -> verifyReplication() ClusterStatus.COMPLETE_SUITE -> {} // Do nothing as all tests have run already + else -> {throw AssertionError("${ClusterStatus.from(System.getProperty("tests.bwcTask"))} is not a valid option for ClusterStatus")} } } diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesIT.kt index 94257594..99a18171 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityCustomRolesIT.kt @@ -228,7 +228,7 @@ class SecurityCustomRolesIT: SecurityBase() { "1", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + .indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY)[IndexMetadata.SETTING_NUMBER_OF_REPLICAS] ) settings = Settings.builder() @@ -243,7 +243,7 @@ class SecurityCustomRolesIT: SecurityBase() { "checksum", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName]["index.shard.check_on_startup"] + .indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY)["index.shard.check_on_startup"] ) }, 30L, TimeUnit.SECONDS) } @@ -273,7 +273,7 @@ class SecurityCustomRolesIT: SecurityBase() { "1", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + .indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY)[IndexMetadata.SETTING_NUMBER_OF_REPLICAS] ) settings = Settings.builder() .put("index.shard.check_on_startup", "checksum") diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityDlsFlsIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityDlsFlsIT.kt index 5198d2d0..b8ababe8 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityDlsFlsIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/SecurityDlsFlsIT.kt @@ -128,7 +128,7 @@ class SecurityDlsFlsIT: SecurityBase() { "1", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + .indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY)[IndexMetadata.SETTING_NUMBER_OF_REPLICAS] ) settings = Settings.builder() .put("index.shard.check_on_startup", "checksum") diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt index e6355d33..da783f6b 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt @@ -130,7 +130,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { "3", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + .indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY)[IndexMetadata.SETTING_NUMBER_OF_REPLICAS] ) }, 15, TimeUnit.SECONDS) } @@ -289,7 +289,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { "0", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - 
.indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + .indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY)[IndexMetadata.SETTING_NUMBER_OF_REPLICAS] ) }, 30L, TimeUnit.SECONDS) } @@ -448,7 +448,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { "0", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + .indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY)[IndexMetadata.SETTING_NUMBER_OF_REPLICAS] ) settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) @@ -469,14 +469,14 @@ class StartReplicationIT: MultiClusterRestTestCase() { "2", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + .indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY)[IndexMetadata.SETTING_NUMBER_OF_REPLICAS] ) assertEqualAliases() }, 30L, TimeUnit.SECONDS) // Case 2 : Blocklisted setting are not copied Assert.assertNull(followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName].get("index.routing.allocation.enable")) + .indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY).get("index.routing.allocation.enable")) //Alias test case 2: Update existing alias aliasAction = IndicesAliasesRequest.AliasActions.add() .index(leaderIndexName) @@ -500,19 +500,19 @@ class StartReplicationIT: MultiClusterRestTestCase() { "3", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + .indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY)[IndexMetadata.SETTING_NUMBER_OF_REPLICAS] ) Assert.assertEquals( "10s", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName]["index.search.idle.after"] + .indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY)["index.search.idle.after"] ) Assert.assertEquals( "none", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName]["index.routing.allocation.enable"] + .indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY)["index.routing.allocation.enable"] ) assertEqualAliases() }, 30L, TimeUnit.SECONDS) @@ -539,7 +539,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { null, followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName]["index.search.idle.after"] + .indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY)["index.search.idle.after"] ) assertEqualAliases() }, 30L, TimeUnit.SECONDS) @@ -568,7 +568,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { "1", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + .indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY)[IndexMetadata.SETTING_NUMBER_OF_REPLICAS] ) settings = Settings.builder() .put("index.shard.check_on_startup", "checksum") @@ -579,7 +579,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { "checksum", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName]["index.shard.check_on_startup"] + 
.indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY)["index.shard.check_on_startup"] ) } @@ -1138,7 +1138,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { "2", leaderClient.indices() .getSettings(getLeaderSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[leaderIndexName][IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey()] + .indexToSettings.getOrDefault(leaderIndexName, Settings.EMPTY)[IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey()] ) }, 15, TimeUnit.SECONDS) @@ -1198,7 +1198,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { "2", leaderClient.indices() .getSettings(getLeaderSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[leaderIndexName][IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey()] + .indexToSettings.getOrDefault(leaderIndexName, Settings.EMPTY)[IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey()] ) }, 15, TimeUnit.SECONDS) @@ -1250,7 +1250,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { "2", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[followerIndexName][IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey()] + .indexToSettings.getOrDefault(followerIndexName, Settings.EMPTY)[IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey()] ) }, 15, TimeUnit.SECONDS) } finally { diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt index 9393bc32..2234bd86 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/UpdateAutoFollowPatternIT.kt @@ -177,7 +177,7 @@ class UpdateAutoFollowPatternIT: MultiClusterRestTestCase() { "3", followerClient.indices() .getSettings(getSettingsRequest, RequestOptions.DEFAULT) - .indexToSettings[leaderIndexName][IndexMetadata.SETTING_NUMBER_OF_REPLICAS] + .indexToSettings.getOrDefault(leaderIndexName, Settings.EMPTY)[IndexMetadata.SETTING_NUMBER_OF_REPLICAS] ) followerClient.waitForShardTaskStart(leaderIndexName, waitForShardTask) }, 15, TimeUnit.SECONDS) diff --git a/src/test/kotlin/org/opensearch/replication/singleCluster/SingleClusterSanityIT.kt b/src/test/kotlin/org/opensearch/replication/singleCluster/SingleClusterSanityIT.kt index 9760e7c2..24b9bb15 100644 --- a/src/test/kotlin/org/opensearch/replication/singleCluster/SingleClusterSanityIT.kt +++ b/src/test/kotlin/org/opensearch/replication/singleCluster/SingleClusterSanityIT.kt @@ -45,6 +45,7 @@ class SingleClusterSanityIT : MultiClusterRestTestCase() { fun testReplicationPluginWithSingleCluster() { when(ClusterState.from(System.getProperty("tests.sanitySingleCluster"))) { ClusterState.SINGLE_CLUSTER_SANITY_SUITE -> basicReplicationSanityWithSingleCluster() + else -> {throw AssertionError("${System.getProperty("tests.sanitySingleCluster")} is not a valid option for ClusterState")} } } diff --git a/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt b/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt index 2032cc26..00bde557 100644 --- a/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt +++ b/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt @@ -33,8 +33,8 @@ import org.opensearch.common.settings.Settings import org.opensearch.common.settings.SettingsModule import org.opensearch.common.unit.TimeValue import 
org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.index.Index -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.Index +import org.opensearch.core.index.shard.ShardId import org.opensearch.persistent.PersistentTaskParams import org.opensearch.persistent.PersistentTasksCustomMetadata import org.opensearch.persistent.PersistentTasksService diff --git a/src/test/kotlin/org/opensearch/replication/task/index/NoOpClient.kt b/src/test/kotlin/org/opensearch/replication/task/index/NoOpClient.kt index 35af7cb4..606d7a18 100644 --- a/src/test/kotlin/org/opensearch/replication/task/index/NoOpClient.kt +++ b/src/test/kotlin/org/opensearch/replication/task/index/NoOpClient.kt @@ -29,14 +29,13 @@ import org.opensearch.action.get.GetAction import org.opensearch.action.get.GetResponse import org.opensearch.action.support.master.AcknowledgedResponse import org.opensearch.common.UUIDs -import org.opensearch.common.bytes.BytesReference -import org.opensearch.common.collect.ImmutableOpenMap +import org.opensearch.core.common.bytes.BytesReference import org.opensearch.common.settings.Settings import org.opensearch.core.xcontent.ToXContent import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.index.Index +import org.opensearch.core.index.Index import org.opensearch.index.get.GetResult -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.indices.recovery.RecoveryState import org.opensearch.persistent.PersistentTaskResponse import org.opensearch.persistent.PersistentTasksCustomMetadata @@ -55,8 +54,7 @@ import org.opensearch.snapshots.RestoreInfo import org.opensearch.test.OpenSearchTestCase import org.opensearch.test.client.NoOpNodeClient import java.lang.reflect.Field -import java.util.ArrayList -import java.util.HashMap +import java.util.* open class NoOpClient(testName :String) : NoOpNodeClient(testName) { @Override @@ -109,9 +107,7 @@ open class NoOpClient(testName :String) : NoOpNodeClient(testName) { val indexToSettings = HashMap() indexToSettings[IndexReplicationTaskTests.followerIndex] = desiredSettingsBuilder.build() - - val settingsMap = ImmutableOpenMap.builder().putAll(indexToSettings).build() - var settingResponse = GetSettingsResponse(settingsMap, settingsMap) + var settingResponse = GetSettingsResponse(indexToSettings, indexToSettings) listener.onResponse(settingResponse as Response) } else if (action == RecoveryAction.INSTANCE) { val shardRecoveryStates: MutableMap> = HashMap() diff --git a/src/test/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutorTests.kt b/src/test/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutorTests.kt index 630234f8..39275542 100644 --- a/src/test/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutorTests.kt +++ b/src/test/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutorTests.kt @@ -12,8 +12,8 @@ import org.opensearch.cluster.metadata.Metadata import org.opensearch.cluster.routing.* import org.opensearch.common.unit.TimeValue import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.index.Index -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.Index +import org.opensearch.core.index.shard.ShardId import org.opensearch.replication.ReplicationSettings import org.opensearch.replication.metadata.ReplicationMetadataManager import org.opensearch.replication.metadata.store.ReplicationMetadataStore diff --git 
a/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt b/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt index fe6ad1c8..730289ed 100644 --- a/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt +++ b/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt @@ -24,7 +24,7 @@ import org.opensearch.action.support.replication.ReplicationResponse.ShardInfo import org.opensearch.common.settings.Settings import org.opensearch.index.IndexService import org.opensearch.index.shard.IndexShard -import org.opensearch.index.shard.ShardId +import org.opensearch.core.index.shard.ShardId import org.opensearch.index.translog.Translog import org.opensearch.indices.IndicesService import org.opensearch.replication.action.changes.GetChangesResponse From 1bdcdec3e12d57efceb31ce2e611ae61d67479e9 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Mon, 7 Aug 2023 12:20:45 +0530 Subject: [PATCH 63/84] [Backport 2.x] [Refactor] for CircuitBreaker and Lifecycle (#1097) * Refactor for CircuitBreaker and Lifecycle (#1096) Refactor for CircuitBreaker and Lifecycle (#1096) Signed-off-by: monusingh-1 (cherry picked from commit 0e6ad7a9722a82ad4c6dba5cbfbcb8a96b914a3a) * Refactor to core.common.Strings and MediaType Signed-off-by: monusingh-1 --------- Signed-off-by: monusingh-1 Co-authored-by: Monu Singh --- .../index/translog/ReplicationTranslogDeletionPolicy.kt | 2 +- .../kotlin/org/opensearch/replication/ReplicationPlugin.kt | 6 +++--- .../org/opensearch/replication/ReplicationSettings.kt | 2 +- .../replication/action/stats/FollowerStatsResponse.kt | 3 +-- .../replication/action/stats/LeaderStatsResponse.kt | 3 +-- .../replication/metadata/store/ReplicationMetadataStore.kt | 2 +- .../repository/RemoteClusterMultiChunkTransfer.kt | 2 +- .../replication/repository/RemoteClusterRepository.kt | 2 +- .../repository/RemoteClusterRestoreLeaderService.kt | 2 +- .../org/opensearch/replication/seqno/RemoteClusterStats.kt | 2 +- .../replication/seqno/RemoteClusterTranslogService.kt | 2 +- .../replication/task/index/IndexReplicationParams.kt | 2 +- .../replication/task/index/IndexReplicationTask.kt | 4 ++-- .../replication/task/shard/ShardReplicationParams.kt | 2 +- .../kotlin/org/opensearch/replication/util/Injectables.kt | 2 +- .../org/opensearch/replication/util/ValidationUtil.kt | 2 +- .../org/opensearch/replication/MultiClusterRestTestCase.kt | 3 +-- .../kotlin/org/opensearch/replication/ReplicationHelpers.kt | 4 ++-- 18 files changed, 22 insertions(+), 25 deletions(-) diff --git a/src/main/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicy.kt b/src/main/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicy.kt index ec7a36d0..963044dd 100644 --- a/src/main/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicy.kt +++ b/src/main/kotlin/org/opensearch/index/translog/ReplicationTranslogDeletionPolicy.kt @@ -1,6 +1,6 @@ package org.opensearch.index.translog -import org.opensearch.common.unit.ByteSizeValue +import org.opensearch.core.common.unit.ByteSizeValue import org.opensearch.index.IndexSettings import org.opensearch.index.seqno.RetentionLease import org.opensearch.index.seqno.RetentionLeases diff --git a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt index 7d66c976..8a17fc51 100644 --- 
a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt +++ b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt @@ -83,7 +83,7 @@ import org.opensearch.cluster.node.DiscoveryNodes import org.opensearch.cluster.service.ClusterService import org.opensearch.common.CheckedFunction import org.opensearch.core.ParseField -import org.opensearch.common.component.LifecycleComponent +import org.opensearch.common.lifecycle.LifecycleComponent import org.opensearch.core.common.io.stream.NamedWriteableRegistry import org.opensearch.core.common.io.stream.Writeable import org.opensearch.common.settings.ClusterSettings @@ -92,8 +92,8 @@ import org.opensearch.common.settings.Setting import org.opensearch.common.settings.Settings import org.opensearch.common.settings.SettingsFilter import org.opensearch.common.settings.SettingsModule -import org.opensearch.common.unit.ByteSizeUnit -import org.opensearch.common.unit.ByteSizeValue +import org.opensearch.core.common.unit.ByteSizeUnit +import org.opensearch.core.common.unit.ByteSizeValue import org.opensearch.common.unit.TimeValue import org.opensearch.common.util.concurrent.OpenSearchExecutors import org.opensearch.core.xcontent.NamedXContentRegistry diff --git a/src/main/kotlin/org/opensearch/replication/ReplicationSettings.kt b/src/main/kotlin/org/opensearch/replication/ReplicationSettings.kt index 2b516f8e..a5065c6d 100644 --- a/src/main/kotlin/org/opensearch/replication/ReplicationSettings.kt +++ b/src/main/kotlin/org/opensearch/replication/ReplicationSettings.kt @@ -13,7 +13,7 @@ package org.opensearch.replication import org.opensearch.cluster.service.ClusterService import org.opensearch.common.settings.ClusterSettings -import org.opensearch.common.unit.ByteSizeValue +import org.opensearch.core.common.unit.ByteSizeValue import org.opensearch.common.unit.TimeValue import org.opensearch.commons.utils.OpenForTesting diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsResponse.kt b/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsResponse.kt index 9d4ae15c..3b4c4503 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/FollowerStatsResponse.kt @@ -16,7 +16,6 @@ import org.apache.logging.log4j.LogManager import org.opensearch.action.FailedNodeException import org.opensearch.action.support.nodes.BaseNodesResponse import org.opensearch.cluster.ClusterName -import org.opensearch.common.Strings import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS @@ -117,7 +116,7 @@ class FollowerStatsResponse : BaseNodesResponse, ToX override fun toString(): String { val builder: XContentBuilder = XContentFactory.jsonBuilder().prettyPrint() toXContent(builder, EMPTY_PARAMS) - return Strings.toString(builder) + return builder.toString() } } diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsResponse.kt b/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsResponse.kt index 29abc78c..d3a54454 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/LeaderStatsResponse.kt @@ -16,7 +16,6 @@ import org.apache.logging.log4j.LogManager import org.opensearch.action.FailedNodeException import org.opensearch.action.support.nodes.BaseNodesResponse import 
org.opensearch.cluster.ClusterName -import org.opensearch.common.Strings import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS @@ -76,7 +75,7 @@ class LeaderStatsResponse : BaseNodesResponse, ToXCont override fun toString(): String { val builder: XContentBuilder = XContentFactory.jsonBuilder().prettyPrint() toXContent(builder, EMPTY_PARAMS) - return Strings.toString(builder) + return builder.toString() } } diff --git a/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt b/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt index 18a0cd6c..d5cbe751 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt @@ -30,7 +30,7 @@ import org.opensearch.client.Client import org.opensearch.cluster.health.ClusterHealthStatus import org.opensearch.cluster.metadata.IndexMetadata import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.component.AbstractLifecycleComponent +import org.opensearch.common.lifecycle.AbstractLifecycleComponent import org.opensearch.common.settings.Settings import org.opensearch.common.util.concurrent.ThreadContext import org.opensearch.common.xcontent.XContentType diff --git a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterMultiChunkTransfer.kt b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterMultiChunkTransfer.kt index b3097f13..344fbcbb 100644 --- a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterMultiChunkTransfer.kt +++ b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterMultiChunkTransfer.kt @@ -26,7 +26,7 @@ import org.apache.logging.log4j.Logger import org.opensearch.action.ActionListener import org.opensearch.client.Client import org.opensearch.cluster.node.DiscoveryNode -import org.opensearch.common.unit.ByteSizeValue +import org.opensearch.core.common.unit.ByteSizeValue import org.opensearch.common.util.concurrent.ThreadContext import org.opensearch.core.index.shard.ShardId import org.opensearch.index.store.Store diff --git a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt index f5e7a149..7272c006 100644 --- a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt +++ b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt @@ -47,7 +47,7 @@ import org.opensearch.cluster.node.DiscoveryNode import org.opensearch.cluster.service.ClusterService import org.opensearch.common.Nullable import org.opensearch.common.UUIDs -import org.opensearch.common.component.AbstractLifecycleComponent +import org.opensearch.common.lifecycle.AbstractLifecycleComponent import org.opensearch.common.metrics.CounterMetric import org.opensearch.common.settings.Settings import org.opensearch.index.mapper.MapperService diff --git a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRestoreLeaderService.kt b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRestoreLeaderService.kt index 5c06e4d4..22c279a2 100644 --- a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRestoreLeaderService.kt +++ 
b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRestoreLeaderService.kt @@ -17,7 +17,7 @@ import org.opensearch.replication.util.performOp import org.opensearch.OpenSearchException import org.opensearch.action.support.single.shard.SingleShardRequest import org.opensearch.client.node.NodeClient -import org.opensearch.common.component.AbstractLifecycleComponent +import org.opensearch.common.lifecycle.AbstractLifecycleComponent import org.opensearch.common.inject.Inject import org.opensearch.common.inject.Singleton import org.opensearch.common.lucene.store.InputStreamIndexInput diff --git a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterStats.kt b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterStats.kt index 80a9d6d2..9481543a 100644 --- a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterStats.kt +++ b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterStats.kt @@ -11,7 +11,7 @@ package org.opensearch.replication.seqno -import org.opensearch.common.component.AbstractLifecycleComponent +import org.opensearch.common.lifecycle.AbstractLifecycleComponent import org.opensearch.common.inject.Singleton import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.core.common.io.stream.StreamOutput diff --git a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterTranslogService.kt b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterTranslogService.kt index 21edfea3..bc210c72 100644 --- a/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterTranslogService.kt +++ b/src/main/kotlin/org/opensearch/replication/seqno/RemoteClusterTranslogService.kt @@ -13,7 +13,7 @@ package org.opensearch.replication.seqno import org.apache.logging.log4j.LogManager import org.opensearch.ResourceNotFoundException -import org.opensearch.common.component.AbstractLifecycleComponent +import org.opensearch.common.lifecycle.AbstractLifecycleComponent import org.opensearch.common.inject.Singleton import org.opensearch.index.engine.Engine import org.opensearch.index.shard.IndexShard diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt index 6a758b8f..5d045b3d 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationParams.kt @@ -13,7 +13,7 @@ package org.opensearch.replication.task.index import org.opensearch.Version import org.opensearch.core.ParseField -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ObjectParser diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt index f520926b..ce3cb43a 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt @@ -73,8 +73,8 @@ import org.opensearch.common.logging.Loggers import org.opensearch.common.settings.Setting import org.opensearch.common.settings.Settings import org.opensearch.common.settings.SettingsModule -import org.opensearch.common.unit.ByteSizeUnit -import org.opensearch.common.unit.ByteSizeValue +import 
org.opensearch.core.common.unit.ByteSizeUnit +import org.opensearch.core.common.unit.ByteSizeValue import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.ToXContentObject import org.opensearch.core.xcontent.XContentBuilder diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt index c1981de1..95b805b1 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationParams.kt @@ -13,7 +13,7 @@ package org.opensearch.replication.task.shard import org.opensearch.Version import org.opensearch.core.ParseField -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.core.xcontent.ObjectParser diff --git a/src/main/kotlin/org/opensearch/replication/util/Injectables.kt b/src/main/kotlin/org/opensearch/replication/util/Injectables.kt index 55f45690..88e4cd48 100644 --- a/src/main/kotlin/org/opensearch/replication/util/Injectables.kt +++ b/src/main/kotlin/org/opensearch/replication/util/Injectables.kt @@ -11,7 +11,7 @@ package org.opensearch.replication.util -import org.opensearch.common.component.AbstractLifecycleComponent +import org.opensearch.common.lifecycle.AbstractLifecycleComponent import org.opensearch.common.inject.Inject import org.opensearch.indices.IndicesService import org.opensearch.persistent.PersistentTasksService diff --git a/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt b/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt index 1cfa7444..1d6b8c2e 100644 --- a/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt +++ b/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt @@ -17,7 +17,7 @@ import org.opensearch.Version import org.opensearch.cluster.ClusterState import org.opensearch.cluster.metadata.IndexMetadata import org.opensearch.cluster.metadata.MetadataCreateIndexService -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.common.ValidationException import org.opensearch.common.settings.Settings import org.opensearch.env.Environment diff --git a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt index 0dd38a22..523ff913 100644 --- a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt +++ b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt @@ -37,7 +37,6 @@ import org.opensearch.client.ResponseException import org.opensearch.client.RestClient import org.opensearch.client.RestClientBuilder import org.opensearch.client.RestHighLevelClient -import org.opensearch.common.Strings import org.opensearch.common.io.PathUtils import org.opensearch.common.settings.Settings import org.opensearch.common.unit.TimeValue @@ -416,7 +415,7 @@ abstract class MultiClusterRestTestCase : OpenSearchTestCase() { clearCommand.endObject() if (mustClear) { val request = Request("PUT", "/_cluster/settings") - request.setJsonEntity(Strings.toString(clearCommand)) + request.setJsonEntity(clearCommand.toString()) testCluster.lowLevelClient.performRequest(request) } } diff --git a/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt 
b/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt index f5c9ff24..4a0c6a3a 100644 --- a/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt +++ b/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt @@ -25,7 +25,7 @@ import org.opensearch.common.settings.Settings import org.opensearch.common.unit.TimeValue import org.opensearch.core.xcontent.DeprecationHandler import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.XContentType +import org.opensearch.core.xcontent.MediaType import org.opensearch.test.OpenSearchTestCase.assertBusy import org.opensearch.test.rest.OpenSearchRestTestCase import org.junit.Assert @@ -96,7 +96,7 @@ fun RestHighLevelClient.startReplication(request: StartReplicationRequest, waitForNoInitializingShards() } fun getAckResponse(lowLevelResponse: Response): AcknowledgedResponse { - val xContentType = XContentType.fromMediaType(lowLevelResponse.entity.contentType.value) + val xContentType = MediaType.fromMediaType(lowLevelResponse.entity.contentType.value) val xcp = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.IGNORE_DEPRECATIONS, lowLevelResponse.entity.content) return AcknowledgedResponse.fromXContent(xcp) From 9e2587c8722b6c9f6fc3ff62f039523e25c5c6f8 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 16 Aug 2023 15:29:48 +0530 Subject: [PATCH 64/84] Refactor for upstream changes from PR import 9082 (#1102) (#1103) Signed-off-by: monusingh-1 (cherry picked from commit 505e242e59023ea84ae20723299530654c398c23) Co-authored-by: Monu Singh --- .../kotlin/org/opensearch/replication/ReplicationPlugin.kt | 2 +- .../TransportAutoFollowClusterManagerNodeAction.kt | 2 +- .../autofollow/TransportUpdateAutoFollowPatternAction.kt | 2 +- .../replication/action/changes/GetChangesResponse.kt | 2 +- .../replication/action/changes/TransportGetChangesAction.kt | 2 +- .../action/index/TransportReplicateIndexAction.kt | 2 +- .../TransportReplicateIndexClusterManagerNodeAction.kt | 2 +- .../action/index/block/TransportUpddateIndexBlockAction.kt | 2 +- .../action/pause/TransportPauseIndexReplicationAction.kt | 2 +- .../action/replay/TransportReplayChangesAction.kt | 2 +- .../TransportUpdateReplicationStateDetails.kt | 2 +- .../replication/action/repository/GetFileChunkResponse.kt | 2 +- .../action/repository/GetStoreMetadataResponse.kt | 2 +- .../action/resume/TransportResumeIndexReplicationAction.kt | 2 +- .../replication/action/setup/TransportSetupChecksAction.kt | 2 +- .../action/setup/TransportValidatePermissionsAction.kt | 2 +- .../action/stats/TransportAutoFollowStatsAction.kt | 2 +- .../action/status/TransportReplicationStatusAction.kt | 2 +- .../action/stop/TransportStopIndexReplicationAction.kt | 2 +- .../action/update/TransportUpdateIndexReplicationAction.kt | 2 +- .../replication/metadata/TransportUpdateMetadataAction.kt | 2 +- .../opensearch/replication/metadata/UpdateIndexBlockTask.kt | 2 +- .../replication/metadata/UpdateReplicationMetadata.kt | 2 +- .../repository/RemoteClusterMultiChunkTransfer.kt | 2 +- .../replication/repository/RemoteClusterRepository.kt | 4 ++-- .../replication/task/CrossClusterReplicationTask.kt | 6 +++--- .../replication/task/autofollow/AutoFollowExecutor.kt | 2 +- .../replication/task/autofollow/AutoFollowTask.kt | 2 +- .../replication/task/index/IndexReplicationExecutor.kt | 2 +- .../replication/task/index/IndexReplicationTask.kt | 4 ++-- 
.../replication/task/shard/ShardReplicationExecutor.kt | 2 +- .../replication/task/shard/ShardReplicationTask.kt | 2 +- .../opensearch/replication/task/shard/TranslogSequencer.kt | 2 +- .../kotlin/org/opensearch/replication/util/Coroutines.kt | 4 ++-- .../kotlin/org/opensearch/replication/util/Extensions.kt | 4 ++-- .../org/opensearch/replication/util/SecurityContext.kt | 2 +- .../replication/task/index/IndexReplicationTaskTests.kt | 2 +- .../org/opensearch/replication/task/index/NoOpClient.kt | 4 ++-- .../replication/task/shard/TranslogSequencerTests.kt | 6 +++--- 39 files changed, 48 insertions(+), 48 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt index 8a17fc51..ecc1f888 100644 --- a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt +++ b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt @@ -73,7 +73,7 @@ import org.opensearch.replication.task.shard.ShardReplicationParams import org.opensearch.replication.task.shard.ShardReplicationState import org.opensearch.replication.util.Injectables import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionResponse +import org.opensearch.core.action.ActionResponse import org.opensearch.client.Client import org.opensearch.cluster.NamedDiff import org.opensearch.cluster.metadata.IndexNameExpressionResolver diff --git a/src/main/kotlin/org/opensearch/replication/action/autofollow/TransportAutoFollowClusterManagerNodeAction.kt b/src/main/kotlin/org/opensearch/replication/action/autofollow/TransportAutoFollowClusterManagerNodeAction.kt index cc93d88a..140a708d 100644 --- a/src/main/kotlin/org/opensearch/replication/action/autofollow/TransportAutoFollowClusterManagerNodeAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/autofollow/TransportAutoFollowClusterManagerNodeAction.kt @@ -28,7 +28,7 @@ import org.apache.logging.log4j.LogManager import org.opensearch.OpenSearchException import org.opensearch.ResourceAlreadyExistsException import org.opensearch.ResourceNotFoundException -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.master.AcknowledgedResponse import org.opensearch.action.support.master.TransportMasterNodeAction diff --git a/src/main/kotlin/org/opensearch/replication/action/autofollow/TransportUpdateAutoFollowPatternAction.kt b/src/main/kotlin/org/opensearch/replication/action/autofollow/TransportUpdateAutoFollowPatternAction.kt index 4cb32b1a..7b0f8c67 100644 --- a/src/main/kotlin/org/opensearch/replication/action/autofollow/TransportUpdateAutoFollowPatternAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/autofollow/TransportUpdateAutoFollowPatternAction.kt @@ -24,7 +24,7 @@ import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.GlobalScope import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction import org.opensearch.action.support.master.AcknowledgedResponse diff --git a/src/main/kotlin/org/opensearch/replication/action/changes/GetChangesResponse.kt b/src/main/kotlin/org/opensearch/replication/action/changes/GetChangesResponse.kt index c71b4795..8de61312 100644 --- 
a/src/main/kotlin/org/opensearch/replication/action/changes/GetChangesResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/changes/GetChangesResponse.kt @@ -11,7 +11,7 @@ package org.opensearch.replication.action.changes -import org.opensearch.action.ActionResponse +import org.opensearch.core.action.ActionResponse import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.index.translog.Translog diff --git a/src/main/kotlin/org/opensearch/replication/action/changes/TransportGetChangesAction.kt b/src/main/kotlin/org/opensearch/replication/action/changes/TransportGetChangesAction.kt index a3995999..dbee183d 100644 --- a/src/main/kotlin/org/opensearch/replication/action/changes/TransportGetChangesAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/changes/TransportGetChangesAction.kt @@ -15,7 +15,7 @@ import kotlinx.coroutines.GlobalScope import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager import org.opensearch.OpenSearchTimeoutException -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.single.shard.TransportSingleShardAction import org.opensearch.cluster.ClusterState diff --git a/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt b/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt index becb1360..8b2de1ea 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexAction.kt @@ -26,7 +26,7 @@ import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.GlobalScope import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction diff --git a/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexClusterManagerNodeAction.kt b/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexClusterManagerNodeAction.kt index 36fe81f2..042509f2 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexClusterManagerNodeAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/index/TransportReplicateIndexClusterManagerNodeAction.kt @@ -27,7 +27,7 @@ import kotlinx.coroutines.GlobalScope import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager import org.opensearch.OpenSearchStatusException -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.IndicesOptions import org.opensearch.action.support.master.AcknowledgedResponse diff --git a/src/main/kotlin/org/opensearch/replication/action/index/block/TransportUpddateIndexBlockAction.kt b/src/main/kotlin/org/opensearch/replication/action/index/block/TransportUpddateIndexBlockAction.kt index 55e569aa..c2e58bdd 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/block/TransportUpddateIndexBlockAction.kt +++ 
b/src/main/kotlin/org/opensearch/replication/action/index/block/TransportUpddateIndexBlockAction.kt @@ -20,7 +20,7 @@ import kotlinx.coroutines.GlobalScope import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager import org.opensearch.OpenSearchException -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.master.AcknowledgedResponse import org.opensearch.action.support.master.TransportMasterNodeAction diff --git a/src/main/kotlin/org/opensearch/replication/action/pause/TransportPauseIndexReplicationAction.kt b/src/main/kotlin/org/opensearch/replication/action/pause/TransportPauseIndexReplicationAction.kt index eaf828a1..c4663685 100644 --- a/src/main/kotlin/org/opensearch/replication/action/pause/TransportPauseIndexReplicationAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/pause/TransportPauseIndexReplicationAction.kt @@ -22,7 +22,7 @@ import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager import org.opensearch.OpenSearchException import org.opensearch.ResourceAlreadyExistsException -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.master.AcknowledgedRequest import org.opensearch.action.support.master.AcknowledgedResponse diff --git a/src/main/kotlin/org/opensearch/replication/action/replay/TransportReplayChangesAction.kt b/src/main/kotlin/org/opensearch/replication/action/replay/TransportReplayChangesAction.kt index c874e92f..fbf9e181 100644 --- a/src/main/kotlin/org/opensearch/replication/action/replay/TransportReplayChangesAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/replay/TransportReplayChangesAction.kt @@ -25,7 +25,7 @@ import kotlinx.coroutines.SupervisorJob import kotlinx.coroutines.asCoroutineDispatcher import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.admin.indices.mapping.get.GetMappingsRequest import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest import org.opensearch.action.index.IndexRequest diff --git a/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/TransportUpdateReplicationStateDetails.kt b/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/TransportUpdateReplicationStateDetails.kt index 0b744482..bc629db7 100644 --- a/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/TransportUpdateReplicationStateDetails.kt +++ b/src/main/kotlin/org/opensearch/replication/action/replicationstatedetails/TransportUpdateReplicationStateDetails.kt @@ -18,7 +18,7 @@ import org.opensearch.replication.util.submitClusterStateUpdateTask import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.GlobalScope import kotlinx.coroutines.launch -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.master.AcknowledgedRequest import org.opensearch.action.support.master.AcknowledgedResponse diff --git a/src/main/kotlin/org/opensearch/replication/action/repository/GetFileChunkResponse.kt b/src/main/kotlin/org/opensearch/replication/action/repository/GetFileChunkResponse.kt index 325e20f0..6f432c4b 
100644 --- a/src/main/kotlin/org/opensearch/replication/action/repository/GetFileChunkResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/repository/GetFileChunkResponse.kt @@ -11,7 +11,7 @@ package org.opensearch.replication.action.repository -import org.opensearch.action.ActionResponse +import org.opensearch.core.action.ActionResponse import org.opensearch.core.common.bytes.BytesReference import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.core.common.io.stream.StreamOutput diff --git a/src/main/kotlin/org/opensearch/replication/action/repository/GetStoreMetadataResponse.kt b/src/main/kotlin/org/opensearch/replication/action/repository/GetStoreMetadataResponse.kt index 9ef97259..ad3d66ab 100644 --- a/src/main/kotlin/org/opensearch/replication/action/repository/GetStoreMetadataResponse.kt +++ b/src/main/kotlin/org/opensearch/replication/action/repository/GetStoreMetadataResponse.kt @@ -11,7 +11,7 @@ package org.opensearch.replication.action.repository -import org.opensearch.action.ActionResponse +import org.opensearch.core.action.ActionResponse import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.index.store.Store diff --git a/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt b/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt index a1128913..d748d479 100644 --- a/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/resume/TransportResumeIndexReplicationAction.kt @@ -35,7 +35,7 @@ import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager import org.opensearch.ResourceAlreadyExistsException import org.opensearch.ResourceNotFoundException -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.IndicesOptions diff --git a/src/main/kotlin/org/opensearch/replication/action/setup/TransportSetupChecksAction.kt b/src/main/kotlin/org/opensearch/replication/action/setup/TransportSetupChecksAction.kt index 0b1169b6..ca495da5 100644 --- a/src/main/kotlin/org/opensearch/replication/action/setup/TransportSetupChecksAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/setup/TransportSetupChecksAction.kt @@ -16,7 +16,7 @@ import org.opensearch.replication.util.SecurityContext import org.apache.logging.log4j.LogManager import org.opensearch.OpenSearchSecurityException import org.opensearch.ExceptionsHelper -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.StepListener import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction diff --git a/src/main/kotlin/org/opensearch/replication/action/setup/TransportValidatePermissionsAction.kt b/src/main/kotlin/org/opensearch/replication/action/setup/TransportValidatePermissionsAction.kt index 2b746c1a..a592bc55 100644 --- a/src/main/kotlin/org/opensearch/replication/action/setup/TransportValidatePermissionsAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/setup/TransportValidatePermissionsAction.kt @@ -13,7 +13,7 @@ package org.opensearch.replication.action.setup import 
org.opensearch.replication.util.completeWith import org.apache.logging.log4j.LogManager -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction import org.opensearch.action.support.master.AcknowledgedResponse diff --git a/src/main/kotlin/org/opensearch/replication/action/stats/TransportAutoFollowStatsAction.kt b/src/main/kotlin/org/opensearch/replication/action/stats/TransportAutoFollowStatsAction.kt index 8176a478..677039ca 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stats/TransportAutoFollowStatsAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stats/TransportAutoFollowStatsAction.kt @@ -14,7 +14,7 @@ package org.opensearch.replication.action.stats import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.GlobalScope import org.apache.logging.log4j.LogManager -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.FailedNodeException import org.opensearch.action.TaskOperationFailure import org.opensearch.action.support.ActionFilters diff --git a/src/main/kotlin/org/opensearch/replication/action/status/TransportReplicationStatusAction.kt b/src/main/kotlin/org/opensearch/replication/action/status/TransportReplicationStatusAction.kt index 317fb621..76cc1153 100644 --- a/src/main/kotlin/org/opensearch/replication/action/status/TransportReplicationStatusAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/status/TransportReplicationStatusAction.kt @@ -20,7 +20,7 @@ import kotlinx.coroutines.GlobalScope import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager import org.opensearch.ResourceNotFoundException -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction import org.opensearch.client.Client diff --git a/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt b/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt index f18cbb30..d9af050a 100644 --- a/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/stop/TransportStopIndexReplicationAction.kt @@ -33,7 +33,7 @@ import kotlinx.coroutines.GlobalScope import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager import org.opensearch.OpenSearchException -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.admin.indices.open.OpenIndexRequest import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.master.AcknowledgedResponse diff --git a/src/main/kotlin/org/opensearch/replication/action/update/TransportUpdateIndexReplicationAction.kt b/src/main/kotlin/org/opensearch/replication/action/update/TransportUpdateIndexReplicationAction.kt index 333ed9f7..4baf7a1d 100644 --- a/src/main/kotlin/org/opensearch/replication/action/update/TransportUpdateIndexReplicationAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/update/TransportUpdateIndexReplicationAction.kt @@ -22,7 +22,7 @@ import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.GlobalScope import kotlinx.coroutines.launch import 
org.apache.logging.log4j.LogManager -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.master.AcknowledgedResponse import org.opensearch.action.support.master.TransportMasterNodeAction diff --git a/src/main/kotlin/org/opensearch/replication/metadata/TransportUpdateMetadataAction.kt b/src/main/kotlin/org/opensearch/replication/metadata/TransportUpdateMetadataAction.kt index 21b1643f..6f2ecfa7 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/TransportUpdateMetadataAction.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/TransportUpdateMetadataAction.kt @@ -13,7 +13,7 @@ package org.opensearch.replication.metadata import org.apache.logging.log4j.LogManager import org.apache.logging.log4j.message.ParameterizedMessage -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.IndicesRequest import org.opensearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest diff --git a/src/main/kotlin/org/opensearch/replication/metadata/UpdateIndexBlockTask.kt b/src/main/kotlin/org/opensearch/replication/metadata/UpdateIndexBlockTask.kt index e67293b6..38f2d85a 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/UpdateIndexBlockTask.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/UpdateIndexBlockTask.kt @@ -13,7 +13,7 @@ package org.opensearch.replication.metadata import org.opensearch.replication.action.index.block.IndexBlockUpdateType import org.opensearch.replication.action.index.block.UpdateIndexBlockRequest -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.support.master.AcknowledgedResponse import org.opensearch.cluster.AckedClusterStateUpdateTask import org.opensearch.cluster.ClusterState diff --git a/src/main/kotlin/org/opensearch/replication/metadata/UpdateReplicationMetadata.kt b/src/main/kotlin/org/opensearch/replication/metadata/UpdateReplicationMetadata.kt index 54d4663e..5c69a10a 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/UpdateReplicationMetadata.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/UpdateReplicationMetadata.kt @@ -14,7 +14,7 @@ package org.opensearch.replication.metadata import org.opensearch.replication.action.replicationstatedetails.UpdateReplicationStateDetailsRequest import org.opensearch.replication.metadata.state.ReplicationStateMetadata import org.apache.logging.log4j.LogManager -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.cluster.AckedClusterStateUpdateTask import org.opensearch.cluster.ClusterState import org.opensearch.cluster.ClusterStateTaskExecutor diff --git a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterMultiChunkTransfer.kt b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterMultiChunkTransfer.kt index 344fbcbb..e1460e0b 100644 --- a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterMultiChunkTransfer.kt +++ b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterMultiChunkTransfer.kt @@ -23,7 +23,7 @@ import kotlinx.coroutines.launch import kotlinx.coroutines.sync.Mutex import kotlinx.coroutines.sync.withLock import org.apache.logging.log4j.Logger -import 
org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.client.Client import org.opensearch.cluster.node.DiscoveryNode import org.opensearch.core.common.unit.ByteSizeValue diff --git a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt index 7272c006..4493c9bb 100644 --- a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt +++ b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt @@ -30,9 +30,9 @@ import kotlinx.coroutines.Dispatchers import org.apache.logging.log4j.LogManager import org.apache.lucene.index.IndexCommit import org.opensearch.Version -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionResponse +import org.opensearch.core.action.ActionResponse import org.opensearch.action.ActionType import org.opensearch.action.admin.indices.stats.IndicesStatsAction import org.opensearch.action.admin.indices.stats.IndicesStatsRequest diff --git a/src/main/kotlin/org/opensearch/replication/task/CrossClusterReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/CrossClusterReplicationTask.kt index 7f03dd79..9f39da74 100644 --- a/src/main/kotlin/org/opensearch/replication/task/CrossClusterReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/CrossClusterReplicationTask.kt @@ -30,8 +30,8 @@ import kotlinx.coroutines.withTimeoutOrNull import kotlinx.coroutines.ObsoleteCoroutinesApi import org.apache.logging.log4j.Logger import org.opensearch.OpenSearchException -import org.opensearch.action.ActionListener -import org.opensearch.action.ActionResponse +import org.opensearch.core.action.ActionListener +import org.opensearch.core.action.ActionResponse import org.opensearch.client.Client import org.opensearch.cluster.service.ClusterService import org.opensearch.core.common.io.stream.StreamOutput @@ -48,7 +48,7 @@ import org.opensearch.persistent.PersistentTaskState import org.opensearch.persistent.PersistentTasksService import org.opensearch.replication.util.stackTraceToString import org.opensearch.core.rest.RestStatus -import org.opensearch.tasks.TaskId +import org.opensearch.core.tasks.TaskId import org.opensearch.tasks.TaskManager import org.opensearch.threadpool.ThreadPool diff --git a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowExecutor.kt b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowExecutor.kt index afa8a30e..4faebf45 100644 --- a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowExecutor.kt +++ b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowExecutor.kt @@ -19,7 +19,7 @@ import org.opensearch.persistent.AllocatedPersistentTask import org.opensearch.persistent.PersistentTaskState import org.opensearch.persistent.PersistentTasksCustomMetadata.PersistentTask import org.opensearch.persistent.PersistentTasksExecutor -import org.opensearch.tasks.TaskId +import org.opensearch.core.tasks.TaskId import org.opensearch.threadpool.ThreadPool class AutoFollowExecutor(executor: String, private val clusterService: ClusterService, diff --git a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt index d47843d6..ca376e59 100644 --- 
a/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/autofollow/AutoFollowTask.kt @@ -41,7 +41,7 @@ import org.opensearch.replication.action.status.ShardInfoRequest import org.opensearch.replication.action.status.ShardInfoResponse import org.opensearch.core.rest.RestStatus import org.opensearch.tasks.Task -import org.opensearch.tasks.TaskId +import org.opensearch.core.tasks.TaskId import org.opensearch.threadpool.Scheduler import org.opensearch.threadpool.ThreadPool import java.util.concurrent.ConcurrentSkipListSet diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationExecutor.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationExecutor.kt index 8c2e88ca..72b1484f 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationExecutor.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationExecutor.kt @@ -27,7 +27,7 @@ import org.opensearch.persistent.AllocatedPersistentTask import org.opensearch.persistent.PersistentTaskState import org.opensearch.persistent.PersistentTasksCustomMetadata.PersistentTask import org.opensearch.persistent.PersistentTasksExecutor -import org.opensearch.tasks.TaskId +import org.opensearch.core.tasks.TaskId import org.opensearch.threadpool.ThreadPool class IndexReplicationExecutor(executor: String, private val clusterService: ClusterService, diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt index ce3cb43a..d0c812a7 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt @@ -48,7 +48,7 @@ import kotlinx.coroutines.launch import kotlinx.coroutines.withContext import org.opensearch.OpenSearchException import org.opensearch.OpenSearchTimeoutException -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest @@ -95,7 +95,7 @@ import org.opensearch.replication.ReplicationException import org.opensearch.replication.MappingNotAvailableException import org.opensearch.replication.ReplicationPlugin.Companion.REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING import org.opensearch.core.rest.RestStatus -import org.opensearch.tasks.TaskId +import org.opensearch.core.tasks.TaskId import org.opensearch.tasks.TaskManager import org.opensearch.threadpool.ThreadPool import java.util.Collections diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutor.kt b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutor.kt index 11be6056..526935ef 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutor.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationExecutor.kt @@ -26,7 +26,7 @@ import org.opensearch.persistent.PersistentTaskState import org.opensearch.persistent.PersistentTasksCustomMetadata.Assignment import org.opensearch.persistent.PersistentTasksCustomMetadata.PersistentTask import org.opensearch.persistent.PersistentTasksExecutor -import org.opensearch.tasks.TaskId +import 
org.opensearch.core.tasks.TaskId import org.opensearch.threadpool.ThreadPool class ShardReplicationExecutor(executor: String, private val clusterService : ClusterService, diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt index e165c916..c41ee084 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/ShardReplicationTask.kt @@ -51,7 +51,7 @@ import org.opensearch.core.index.shard.ShardId import org.opensearch.persistent.PersistentTaskState import org.opensearch.persistent.PersistentTasksNodeService import org.opensearch.core.rest.RestStatus -import org.opensearch.tasks.TaskId +import org.opensearch.core.tasks.TaskId import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.NodeNotConnectedException import java.time.Duration diff --git a/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt b/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt index bbb6837a..fcbfc33c 100644 --- a/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt +++ b/src/main/kotlin/org/opensearch/replication/task/shard/TranslogSequencer.kt @@ -32,7 +32,7 @@ import org.opensearch.index.IndexNotFoundException import org.opensearch.core.index.shard.ShardId import org.opensearch.index.translog.Translog import org.opensearch.replication.util.indicesService -import org.opensearch.tasks.TaskId +import org.opensearch.core.tasks.TaskId import java.util.ArrayList import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.TimeUnit diff --git a/src/main/kotlin/org/opensearch/replication/util/Coroutines.kt b/src/main/kotlin/org/opensearch/replication/util/Coroutines.kt index 47e7b723..b561c795 100644 --- a/src/main/kotlin/org/opensearch/replication/util/Coroutines.kt +++ b/src/main/kotlin/org/opensearch/replication/util/Coroutines.kt @@ -15,9 +15,9 @@ import org.opensearch.replication.metadata.store.ReplicationMetadata import kotlinx.coroutines.* import org.opensearch.OpenSearchTimeoutException import org.opensearch.ExceptionsHelper -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionResponse +import org.opensearch.core.action.ActionResponse import org.opensearch.action.ActionType import org.opensearch.action.support.master.AcknowledgedRequest import org.opensearch.action.support.master.MasterNodeRequest diff --git a/src/main/kotlin/org/opensearch/replication/util/Extensions.kt b/src/main/kotlin/org/opensearch/replication/util/Extensions.kt index 1928ad6b..7a73fa5f 100644 --- a/src/main/kotlin/org/opensearch/replication/util/Extensions.kt +++ b/src/main/kotlin/org/opensearch/replication/util/Extensions.kt @@ -20,9 +20,9 @@ import org.apache.logging.log4j.Logger import org.opensearch.OpenSearchException import org.opensearch.OpenSearchSecurityException import org.opensearch.ResourceNotFoundException -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionResponse +import org.opensearch.core.action.ActionResponse import org.opensearch.action.ActionType import org.opensearch.action.index.IndexRequestBuilder import org.opensearch.action.index.IndexResponse diff --git 
a/src/main/kotlin/org/opensearch/replication/util/SecurityContext.kt b/src/main/kotlin/org/opensearch/replication/util/SecurityContext.kt index 2490dd0c..f811324a 100644 --- a/src/main/kotlin/org/opensearch/replication/util/SecurityContext.kt +++ b/src/main/kotlin/org/opensearch/replication/util/SecurityContext.kt @@ -29,7 +29,7 @@ import org.opensearch.commons.ConfigConstants import org.opensearch.commons.authuser.User import org.apache.logging.log4j.LogManager import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionResponse +import org.opensearch.core.action.ActionResponse import org.opensearch.action.ActionType import org.opensearch.common.util.concurrent.ThreadContext import org.opensearch.transport.RemoteClusterAwareRequest diff --git a/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt b/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt index 00bde557..b1e44f39 100644 --- a/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt +++ b/src/test/kotlin/org/opensearch/replication/task/index/IndexReplicationTaskTests.kt @@ -52,7 +52,7 @@ import org.opensearch.replication.task.shard.ShardReplicationExecutor import org.opensearch.replication.task.shard.ShardReplicationParams import org.opensearch.snapshots.Snapshot import org.opensearch.snapshots.SnapshotId -import org.opensearch.tasks.TaskId.EMPTY_TASK_ID +import org.opensearch.core.tasks.TaskId.EMPTY_TASK_ID import org.opensearch.tasks.TaskManager import org.opensearch.test.ClusterServiceUtils import org.opensearch.test.ClusterServiceUtils.setState diff --git a/src/test/kotlin/org/opensearch/replication/task/index/NoOpClient.kt b/src/test/kotlin/org/opensearch/replication/task/index/NoOpClient.kt index 606d7a18..6f1904fd 100644 --- a/src/test/kotlin/org/opensearch/replication/task/index/NoOpClient.kt +++ b/src/test/kotlin/org/opensearch/replication/task/index/NoOpClient.kt @@ -12,9 +12,9 @@ package org.opensearch.replication.task.index import com.nhaarman.mockitokotlin2.doReturn import org.mockito.Mockito -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionResponse +import org.opensearch.core.action.ActionResponse import org.opensearch.action.ActionType import org.opensearch.action.admin.cluster.health.ClusterHealthAction import org.opensearch.action.admin.cluster.health.ClusterHealthResponse diff --git a/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt b/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt index 730289ed..e33e0f87 100644 --- a/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt +++ b/src/test/kotlin/org/opensearch/replication/task/shard/TranslogSequencerTests.kt @@ -16,9 +16,9 @@ import kotlinx.coroutines.ObsoleteCoroutinesApi import kotlinx.coroutines.test.runBlockingTest import org.assertj.core.api.Assertions.assertThat import org.mockito.Mockito -import org.opensearch.action.ActionListener +import org.opensearch.core.action.ActionListener import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionResponse +import org.opensearch.core.action.ActionResponse import org.opensearch.action.ActionType import org.opensearch.action.support.replication.ReplicationResponse.ShardInfo import org.opensearch.common.settings.Settings @@ -36,7 +36,7 @@ import 
org.opensearch.replication.metadata.store.ReplicationContext import org.opensearch.replication.metadata.store.ReplicationMetadata import org.opensearch.replication.metadata.store.ReplicationStoreMetadataType import org.opensearch.replication.util.indicesService -import org.opensearch.tasks.TaskId.EMPTY_TASK_ID +import org.opensearch.core.tasks.TaskId.EMPTY_TASK_ID import org.opensearch.test.OpenSearchTestCase import org.opensearch.test.client.NoOpClient import java.util.Locale From 63db8263324d38faf209b99e7048c1e6c21acd0a Mon Sep 17 00:00:00 2001 From: Monu Singh Date: Fri, 18 Aug 2023 11:55:44 +0530 Subject: [PATCH 65/84] Fix test for single node and consume numNodes (#1091) (#1105) Signed-off-by: monusingh-1 --- build.gradle | 3 +++ .../replication/MultiClusterRestTestCase.kt | 18 ++++++++++---- .../replication/ReplicationHelpers.kt | 3 +++ .../integ/rest/ClusterRerouteFollowerIT.kt | 3 ++- .../integ/rest/ClusterRerouteLeaderIT.kt | 3 ++- .../integ/rest/ResumeReplicationIT.kt | 15 ++++-------- .../integ/rest/StartReplicationIT.kt | 24 +++++++++---------- .../integ/rest/StopReplicationIT.kt | 6 ++--- 8 files changed, 42 insertions(+), 33 deletions(-) diff --git a/build.gradle b/build.gradle index ead3cae9..70f582ee 100644 --- a/build.gradle +++ b/build.gradle @@ -930,6 +930,9 @@ task integTestRemote (type: RestIntegTestTask) { systemProperty "tests.cluster.leaderCluster.security_enabled", System.getProperty("security_enabled") nonInputProperties.systemProperty('tests.integTestRemote', "true") + var numberOfNodes = findProperty('numNodes') as Integer + systemProperty "tests.cluster.followCluster.total_nodes", "${-> numberOfNodes.toString()}" + systemProperty "tests.cluster.leaderCluster.total_nodes", "${-> numberOfNodes.toString()}" systemProperty "build.dir", "${buildDir}" } diff --git a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt index 523ff913..af7e997e 100644 --- a/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt +++ b/src/test/kotlin/org/opensearch/replication/MultiClusterRestTestCase.kt @@ -117,7 +117,6 @@ abstract class MultiClusterRestTestCase : OpenSearchTestCase() { lateinit var testClusters : Map var isSecurityPropertyEnabled = false var forceInitSecurityConfiguration = false - var isMultiNodeClusterConfiguration = true internal fun createTestCluster(configuration: ClusterConfiguration) : TestCluster { return createTestCluster(configuration.clusterName, configuration.preserveSnapshots, configuration.preserveIndices, @@ -130,7 +129,6 @@ abstract class MultiClusterRestTestCase : OpenSearchTestCase() { val httpHostsProp = systemProperties.get("tests.cluster.${cluster}.http_hosts") as String? val transportHostsProp = systemProperties.get("tests.cluster.${cluster}.transport_hosts") as String? val securityEnabled = systemProperties.get("tests.cluster.${cluster}.security_enabled") as String? - val totalNodes = systemProperties.get("tests.cluster.${cluster}.total_nodes") as String? 
requireNotNull(httpHostsProp) { "Missing http hosts property for cluster: $cluster."} requireNotNull(transportHostsProp) { "Missing transport hosts property for cluster: $cluster."} @@ -142,9 +140,6 @@ abstract class MultiClusterRestTestCase : OpenSearchTestCase() { isSecurityPropertyEnabled = true } - if(totalNodes != null && totalNodes < "2") { - isMultiNodeClusterConfiguration = false - } forceInitSecurityConfiguration = isSecurityPropertyEnabled && initSecurityConfiguration @@ -663,6 +658,19 @@ abstract class MultiClusterRestTestCase : OpenSearchTestCase() { return integTestRemote.equals("true") } + protected fun isMultiNodeClusterConfiguration(leaderCluster: String, followerCluster: String): Boolean{ + val systemProperties = BootstrapInfo.getSystemProperties() + val totalLeaderNodes = systemProperties.get("tests.cluster.${leaderCluster}.total_nodes") as String + val totalFollowerNodes = systemProperties.get("tests.cluster.${followerCluster}.total_nodes") as String + + assertNotNull(totalLeaderNodes) + assertNotNull(totalFollowerNodes) + if(totalLeaderNodes < "2" || totalFollowerNodes < "2" ) { + return false + } + return true + } + protected fun docCount(cluster: RestHighLevelClient, indexName: String) : Int { val persistentConnectionRequest = Request("GET", "/$indexName/_search?pretty&q=*") diff --git a/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt b/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt index 4a0c6a3a..6640f911 100644 --- a/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt +++ b/src/test/kotlin/org/opensearch/replication/ReplicationHelpers.kt @@ -55,6 +55,9 @@ const val INDEX_TASK_CANCELLATION_REASON = "AutoPaused: Index replication task w const val STATUS_REASON_USER_INITIATED = "User initiated" const val STATUS_REASON_SHARD_TASK_CANCELLED = "Shard task killed or cancelled." const val STATUS_REASON_INDEX_NOT_FOUND = "no such index" +const val ANALYZERS_NOT_ACCESSIBLE_FOR_REMOTE_CLUSTERS = "Analysers are not accessible when run on remote clusters." +const val SNAPSHOTS_NOT_ACCESSIBLE_FOR_REMOTE_CLUSTERS = "Snapshots are not accessible when run on remote clusters." 
+const val REROUTE_TESTS_NOT_ELIGIBLE_FOR_SINGLE_NODE_CLUSTER = "Reroute not eligible for single node clusters" fun RestHighLevelClient.startReplication(request: StartReplicationRequest, diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteFollowerIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteFollowerIT.kt index 3f0a8627..3f6b16ab 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteFollowerIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteFollowerIT.kt @@ -7,6 +7,7 @@ import org.opensearch.replication.startReplication import org.opensearch.replication.stopReplication import org.apache.http.entity.ContentType import org.apache.http.nio.entity.NStringEntity +import org.opensearch.replication.REROUTE_TESTS_NOT_ELIGIBLE_FOR_SINGLE_NODE_CLUSTER import org.assertj.core.api.Assertions import org.opensearch.client.Request import org.opensearch.client.RequestOptions @@ -30,7 +31,7 @@ class ClusterRerouteFollowerIT : MultiClusterRestTestCase() { @Before fun beforeTest() { - Assume.assumeTrue(isMultiNodeClusterConfiguration) + Assume.assumeTrue(REROUTE_TESTS_NOT_ELIGIBLE_FOR_SINGLE_NODE_CLUSTER, isMultiNodeClusterConfiguration(LEADER, FOLLOWER)) } fun `test replication works after rerouting a shard from one node to another in follower cluster`() { diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteLeaderIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteLeaderIT.kt index 6c50f782..ff5ad172 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteLeaderIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/ClusterRerouteLeaderIT.kt @@ -17,6 +17,7 @@ import org.junit.Assert import org.junit.Assume import org.junit.Before import org.junit.Ignore +import org.opensearch.replication.REROUTE_TESTS_NOT_ELIGIBLE_FOR_SINGLE_NODE_CLUSTER import java.util.concurrent.TimeUnit @MultiClusterAnnotations.ClusterConfigurations( @@ -30,7 +31,7 @@ class ClusterRerouteLeaderIT : MultiClusterRestTestCase() { @Before fun beforeTest() { - Assume.assumeTrue(isMultiNodeClusterConfiguration) + Assume.assumeTrue(REROUTE_TESTS_NOT_ELIGIBLE_FOR_SINGLE_NODE_CLUSTER, isMultiNodeClusterConfiguration(LEADER, FOLLOWER),) } fun `test replication works after rerouting a shard from one node to another in leader cluster`() { diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/ResumeReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/ResumeReplicationIT.kt index 2a3b3bae..deca986c 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/ResumeReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/ResumeReplicationIT.kt @@ -40,9 +40,10 @@ import org.opensearch.client.indices.GetMappingsRequest import org.opensearch.common.io.PathUtils import org.opensearch.common.settings.Settings import org.junit.Assert +import org.junit.Assume import java.nio.file.Files import java.util.concurrent.TimeUnit -import org.opensearch.bootstrap.BootstrapInfo +import org.opensearch.replication.ANALYZERS_NOT_ACCESSIBLE_FOR_REMOTE_CLUSTERS @MultiClusterAnnotations.ClusterConfigurations( MultiClusterAnnotations.ClusterConfiguration(clusterName = LEADER), @@ -165,9 +166,7 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { fun `test that replication fails to resume when custom analyser is not present in follower`() { - if(checkifIntegTestRemote()){ - return; - } + 
Assume.assumeFalse(ANALYZERS_NOT_ACCESSIBLE_FOR_REMOTE_CLUSTERS, checkifIntegTestRemote()) val synonyms = javaClass.getResourceAsStream("/analyzers/synonyms.txt") val config = PathUtils.get(buildDir, leaderClusterPath, "config") @@ -202,9 +201,7 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { fun `test that replication resumes when custom analyser is present in follower`() { - if(checkifIntegTestRemote()){ - return; - } + Assume.assumeFalse(ANALYZERS_NOT_ACCESSIBLE_FOR_REMOTE_CLUSTERS, checkifIntegTestRemote()) val synonyms = javaClass.getResourceAsStream("/analyzers/synonyms.txt") val config = PathUtils.get(buildDir, leaderClusterPath, "config") @@ -246,9 +243,7 @@ class ResumeReplicationIT: MultiClusterRestTestCase() { fun `test that replication resumes when custom analyser is overridden and present in follower`() { - if(checkifIntegTestRemote()){ - return; - } + Assume.assumeFalse(ANALYZERS_NOT_ACCESSIBLE_FOR_REMOTE_CLUSTERS, checkifIntegTestRemote()) val synonyms = javaClass.getResourceAsStream("/analyzers/synonyms.txt") val config = PathUtils.get(buildDir, leaderClusterPath, "config") diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt index da783f6b..5d35c62e 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/StartReplicationIT.kt @@ -25,6 +25,8 @@ import org.opensearch.replication.resumeReplication import org.opensearch.replication.`validate paused status response due to leader index deleted` import org.opensearch.replication.`validate status syncing response` import org.opensearch.replication.startReplication +import org.opensearch.replication.ANALYZERS_NOT_ACCESSIBLE_FOR_REMOTE_CLUSTERS +import org.opensearch.replication.SNAPSHOTS_NOT_ACCESSIBLE_FOR_REMOTE_CLUSTERS import org.opensearch.replication.stopReplication import org.opensearch.replication.updateReplication import org.apache.http.HttpStatus @@ -66,7 +68,7 @@ import org.opensearch.index.mapper.MapperService import org.opensearch.repositories.fs.FsRepository import org.opensearch.test.OpenSearchTestCase.assertBusy import org.junit.Assert -import org.opensearch.cluster.metadata.AliasMetadata +import org.junit.Assume import org.opensearch.core.xcontent.DeprecationHandler import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.replication.ReplicationPlugin.Companion.REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING @@ -585,9 +587,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { fun `test that replication fails to start when custom analyser is not present in follower`() { - if(checkifIntegTestRemote()){ - return; - } + Assume.assumeFalse(ANALYZERS_NOT_ACCESSIBLE_FOR_REMOTE_CLUSTERS, checkifIntegTestRemote()) val synonyms = javaClass.getResourceAsStream("/analyzers/synonyms.txt") val config = PathUtils.get(buildDir, leaderClusterPath, "config") @@ -620,9 +620,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { fun `test that replication starts successfully when custom analyser is present in follower`() { - if(checkifIntegTestRemote()){ - return; - } + Assume.assumeFalse(ANALYZERS_NOT_ACCESSIBLE_FOR_REMOTE_CLUSTERS, checkifIntegTestRemote()) val synonyms = javaClass.getResourceAsStream("/analyzers/synonyms.txt") val leaderConfig = PathUtils.get(buildDir, leaderClusterPath, "config") @@ -662,9 +660,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { fun `test 
that replication starts successfully when custom analyser is overridden and present in follower`() { - if(checkifIntegTestRemote()){ - return; - } + Assume.assumeFalse(ANALYZERS_NOT_ACCESSIBLE_FOR_REMOTE_CLUSTERS, checkifIntegTestRemote()) val synonyms = javaClass.getResourceAsStream("/analyzers/synonyms.txt") val leaderConfig = PathUtils.get(buildDir, leaderClusterPath, "config") @@ -801,9 +797,7 @@ class StartReplicationIT: MultiClusterRestTestCase() { fun `test that snapshot on leader does not affect replication during bootstrap`() { - if(checkifIntegTestRemote()){ - return; - } + Assume.assumeFalse(SNAPSHOTS_NOT_ACCESSIBLE_FOR_REMOTE_CLUSTERS,checkifIntegTestRemote()) val settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 20) @@ -1221,6 +1215,10 @@ class StartReplicationIT: MultiClusterRestTestCase() { } fun `test that wait_for_active_shards setting is updated on follower through start replication api`() { + + Assume.assumeTrue("Ignore this test if clusters dont have multiple nodes as this test reles on wait_for_active_shards", + isMultiNodeClusterConfiguration(LEADER, FOLLOWER)) + val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) diff --git a/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt b/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt index 22e780ec..54caa11f 100644 --- a/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt +++ b/src/test/kotlin/org/opensearch/replication/integ/rest/StopReplicationIT.kt @@ -24,6 +24,7 @@ import org.apache.http.util.EntityUtils import org.assertj.core.api.Assertions.assertThat import org.assertj.core.api.Assertions.assertThatThrownBy import org.junit.Assert +import org.junit.Assume import org.opensearch.OpenSearchStatusException import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest @@ -40,6 +41,7 @@ import org.opensearch.cluster.metadata.IndexMetadata import org.opensearch.common.settings.Settings import org.opensearch.common.unit.TimeValue import org.opensearch.index.mapper.MapperService +import org.opensearch.replication.SNAPSHOTS_NOT_ACCESSIBLE_FOR_REMOTE_CLUSTERS import java.util.Random import java.util.concurrent.TimeUnit @@ -243,9 +245,7 @@ class StopReplicationIT: MultiClusterRestTestCase() { fun `test stop replication with stale replication settings at leader cluster`() { - if(checkifIntegTestRemote()){ - return; - } + Assume.assumeFalse(SNAPSHOTS_NOT_ACCESSIBLE_FOR_REMOTE_CLUSTERS, checkifIntegTestRemote()) val followerClient = getClientForCluster(FOLLOWER) val leaderClient = getClientForCluster(LEADER) From 22f41f9422284c64a64b7fac5626098b984ab341 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 22 Aug 2023 20:28:52 +0530 Subject: [PATCH 66/84] Revert "Add setting to use document replication for system indices. (#802)" (#1107) (#1108) This reverts commit 55b6968af90d739d8448c5d88ed5204a240d0f86. 
Signed-off-by: monusingh-1 (cherry picked from commit 9750b7800a208b9f2bea2646813dc59857cfed73) Co-authored-by: Monu Singh --- .../replication/metadata/store/ReplicationMetadataStore.kt | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt b/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt index d5cbe751..d838a21c 100644 --- a/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt +++ b/src/main/kotlin/org/opensearch/replication/metadata/store/ReplicationMetadataStore.kt @@ -40,7 +40,6 @@ import org.opensearch.common.xcontent.LoggingDeprecationHandler import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.core.xcontent.ToXContent import org.opensearch.core.xcontent.XContentParser -import org.opensearch.indices.replication.common.ReplicationType import org.opensearch.replication.util.suspendExecuteWithRetries class ReplicationMetadataStore constructor(val client: Client, val clusterService: ClusterService, @@ -266,7 +265,6 @@ class ReplicationMetadataStore constructor(val client: Client, val clusterServic .put(IndexMetadata.INDEX_AUTO_EXPAND_REPLICAS_SETTING.key, "0-1") .put(IndexMetadata.INDEX_PRIORITY_SETTING.key, Int.MAX_VALUE) .put(IndexMetadata.INDEX_HIDDEN_SETTING.key, true) - .put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.key, ReplicationType.DOCUMENT) // System Indices should use Document Replication strategy .build() } From 022ca320b2e5f728f3458e5439d3927525da17b8 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 1 Sep 2023 18:32:36 +0530 Subject: [PATCH 67/84] Implement abstract methods of Repository (#1111) (#1113) * Implement methods Signed-off-by: monusingh-1 * Implement methods Signed-off-by: monusingh-1 --------- Signed-off-by: monusingh-1 (cherry picked from commit b6d1b56654a83220a33db162b592a9835ec132ec) Co-authored-by: Monu Singh --- .../replication/repository/RemoteClusterRepository.kt | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt index 4493c9bb..bb7aefdb 100644 --- a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt +++ b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt @@ -107,6 +107,14 @@ class RemoteClusterRepository(private val repositoryMetadata: RepositoryMetadata return restoreRateLimitingTimeInNanos.count() } + override fun getRemoteUploadThrottleTimeInNanos(): Long { + throw UnsupportedOperationException("Operation not permitted") + } + + override fun getRemoteDownloadThrottleTimeInNanos(): Long { + throw UnsupportedOperationException("Operation not permitted") + } + override fun finalizeSnapshot(shardGenerations: ShardGenerations?, repositoryStateId: Long, clusterMetadata: Metadata?, snapshotInfo: SnapshotInfo?, repositoryMetaVersion: Version?, stateTransformer: Function?, From fefbcf8ce1cd12369ab9c9b0bf5da84a60c23e8f Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Mon, 4 Sep 2023 15:43:57 +0530 Subject: [PATCH 68/84] Removed timeout excessive logging in case of index is idle in replication (#1114) (#1115) * Removed timeout excessive logging in case of index is idle in replication 
Signed-off-by: Mohit Kumar * Removed timeout excessive logging in case of index is idle in replication Signed-off-by: Mohit Kumar * Removed timeout excessive stack trace logging in case of index is idle in replication Signed-off-by: Mohit Kumar * Removed timeout excessive stack trace logging in case of index is idle in replication Signed-off-by: Mohit Kumar * Changed the log statement Signed-off-by: Mohit Kumar --------- Signed-off-by: Mohit Kumar (cherry picked from commit 426a2deb85bc3c58a3439e829e6902e68e6f658d) Co-authored-by: Mohit Kumar <113413713+mohitamg@users.noreply.github.com> --- src/main/kotlin/org/opensearch/replication/util/Extensions.kt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/util/Extensions.kt b/src/main/kotlin/org/opensearch/replication/util/Extensions.kt index 7a73fa5f..81aaa446 100644 --- a/src/main/kotlin/org/opensearch/replication/util/Extensions.kt +++ b/src/main/kotlin/org/opensearch/replication/util/Extensions.kt @@ -138,8 +138,8 @@ suspend fun Client.suspendExecuteWith } } log.warn( - "Encountered a failure while executing in $req. Retrying in ${currentBackoff / 1000} seconds" + - ".", retryException + "Encountered a failure(can be ignored) while getting changes: OpenSearchTimeoutException. Retrying in ${currentBackoff / 1000} seconds" + + "." ) delay(currentBackoff) currentBackoff = (currentBackoff * factor).toLong().coerceAtMost(maxTimeOut) From 4bfc65732e4930acf2f46aeed68b40da91237819 Mon Sep 17 00:00:00 2001 From: Mohit Kumar <113413713+mohitamg@users.noreply.github.com> Date: Wed, 6 Sep 2023 14:03:26 +0530 Subject: [PATCH 69/84] Create opensearch-cross-cluster-replication.release-notes-2.10.0.0.md (#1117) Signed-off-by: Mohit Kumar <113413713+mohitamg@users.noreply.github.com> (cherry picked from commit 4273a1953714debfbfbe395007e3d7c4d3f00888) --- ...cross-cluster-replication.release-notes-2.10.0.0.md | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 release-notes/opensearch-cross-cluster-replication.release-notes-2.10.0.0.md diff --git a/release-notes/opensearch-cross-cluster-replication.release-notes-2.10.0.0.md b/release-notes/opensearch-cross-cluster-replication.release-notes-2.10.0.0.md new file mode 100644 index 00000000..be0d8015 --- /dev/null +++ b/release-notes/opensearch-cross-cluster-replication.release-notes-2.10.0.0.md @@ -0,0 +1,10 @@ +## Version 2.10.0.0 Release Notes + +Compatible with OpenSearch 2.10.0 + + +### Bug Fixes +* Settings are synced before syncing mapping ([#994](https://github.com/opensearch-project/cross-cluster-replication/pull/994)) +* Handled OpenSearchRejectExecuteException, introduced new setting ```plugins.replication.follower.concurrent_writers_per_shard```. 
([#1004](https://github.com/opensearch-project/cross-cluster-replication/pull/1004)) +* Fixed tests relying on wait_for_active_shards, fixed test for single Node and consume numNodes ([#1091](https://github.com/opensearch-project/cross-cluster-replication/pull/1091)) +* Excessive logging avoided during certain exception types such as OpensearchTimeoutException ([#1114](https://github.com/opensearch-project/cross-cluster-replication/pull/1114)) From 752b80576117a74d08c18e5c22f42fc23a3e9fea Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 7 Sep 2023 20:08:58 +0530 Subject: [PATCH 70/84] Implement abstract methods (#1121) (#1122) Signed-off-by: monusingh-1 (cherry picked from commit 0fbe5a7aeb1be4c3dccd657332c2f41c3513028d) Co-authored-by: Monu Singh --- .../replication/repository/RemoteClusterRepository.kt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt index bb7aefdb..832977b2 100644 --- a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt +++ b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt @@ -364,6 +364,10 @@ class RemoteClusterRepository(private val repositoryMetadata: RepositoryMetadata return true } + override fun isSystemRepository(): Boolean { + throw UnsupportedOperationException("Operation not permitted") + } + /* * This method makes a blocking call to the leader cluster From 1fc3c6c0cf8c02a179110006256c5da7b5b57cb3 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 8 Sep 2023 17:16:52 +0530 Subject: [PATCH 71/84] update requirements to use latest version of request (#1106) (#1112) Signed-off-by: monusingh-1 (cherry picked from commit f00ac43decea640e6a8a497f5c1d1b4be97aed5f) Co-authored-by: Monu Singh --- perf_workflow/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/perf_workflow/requirements.txt b/perf_workflow/requirements.txt index a0e09e61..1a1ea282 100644 --- a/perf_workflow/requirements.txt +++ b/perf_workflow/requirements.txt @@ -3,7 +3,7 @@ validators yamlfix cerberus pipenv -requests +requests~=2.31.0 retry ndg-httpsclient pyopenssl From 294a993f960b8b207ec4aa4c135bcd660e14a5d5 Mon Sep 17 00:00:00 2001 From: Sooraj Sinha <81695996+soosinha@users.noreply.github.com> Date: Mon, 11 Sep 2023 12:41:24 +0530 Subject: [PATCH 72/84] Remove numNodes parameter in security tests (#991) (#1135) Some of the integ tests like those for wait_for_active_shards need atleast 2 nodes in the cluster to run successfully. 
Removing the numNodes=1 parameter so that by default 2 node cluster is created Signed-off-by: Sooraj Sinha (cherry picked from commit c19dcba791a8a60f660e1367925bd08d01a57b9e) --- .github/workflows/security-knn-tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/security-knn-tests.yml b/.github/workflows/security-knn-tests.yml index de049f0c..867df6c8 100644 --- a/.github/workflows/security-knn-tests.yml +++ b/.github/workflows/security-knn-tests.yml @@ -63,7 +63,7 @@ jobs: - name: Build and run Replication tests run: | ls -al src/test/resources/security/plugin - ./gradlew clean release -Dbuild.snapshot=true -PnumNodes=1 -Psecurity=true + ./gradlew clean release -Dbuild.snapshot=true -Psecurity=true - name: Upload failed logs uses: actions/upload-artifact@v2 if: failure() @@ -116,4 +116,4 @@ jobs: - name: Uploads coverage with: fetch-depth: 2 - uses: codecov/codecov-action@v1.2.1 \ No newline at end of file + uses: codecov/codecov-action@v1.2.1 From ab5375bd02276dd1055f0e7d49d7ce9bb2a7001b Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 26 Sep 2023 09:56:46 +0530 Subject: [PATCH 73/84] Increment version to 2.11.0-SNAPSHOT (#1120) Signed-off-by: opensearch-ci-bot Co-authored-by: opensearch-ci-bot --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 70f582ee..70bfc2aa 100644 --- a/build.gradle +++ b/build.gradle @@ -36,7 +36,7 @@ import org.opensearch.gradle.test.RestIntegTestTask buildscript { ext { isSnapshot = "true" == System.getProperty("build.snapshot", "true") - opensearch_version = System.getProperty("opensearch.version", "2.10.0-SNAPSHOT") + opensearch_version = System.getProperty("opensearch.version", "2.11.0-SNAPSHOT") buildVersionQualifier = System.getProperty("build.version_qualifier", "") // e.g. 
2.0.0-rc1-SNAPSHOT -> 2.0.0.0-rc1-SNAPSHOT version_tokens = opensearch_version.tokenize('-') From 5c3a3c2fab950abc368a2ab69f483f825c2284d2 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 20:20:15 +0530 Subject: [PATCH 74/84] Create opensearch-cross-cluster-replication.release-notes-2.11.0.md (#1218) --- ...pensearch-cross-cluster-replication.release-notes-2.11.0.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 release-notes/opensearch-cross-cluster-replication.release-notes-2.11.0.md diff --git a/release-notes/opensearch-cross-cluster-replication.release-notes-2.11.0.md b/release-notes/opensearch-cross-cluster-replication.release-notes-2.11.0.md new file mode 100644 index 00000000..2a075f7f --- /dev/null +++ b/release-notes/opensearch-cross-cluster-replication.release-notes-2.11.0.md @@ -0,0 +1,3 @@ +## Version 2.11.0 Release Notes + +Compatible with OpenSearch 2.11.0 From 1f623c94730450769ea17d095067f7e605600865 Mon Sep 17 00:00:00 2001 From: Monu Singh Date: Wed, 18 Oct 2023 13:44:56 +0530 Subject: [PATCH 75/84] Container image (#1249) (#1273) --- .github/workflows/build-and-test.yml | 45 +++++++++++++++--------- .github/workflows/build.yml | 2 +- .github/workflows/security-knn-tests.yml | 33 +++++++++++++---- 3 files changed, 56 insertions(+), 24 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index e8a6ace5..125d13db 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -10,28 +10,39 @@ on: # We build for all combinations but run tests only on one combination (linux & latest java) jobs: - build: - continue-on-error: true + Get-CI-Image-Tag: + uses: opensearch-project/opensearch-build/.github/workflows/get-ci-image-tag.yml@main + with: + product: opensearch + + build-test-linux: strategy: matrix: - java: - - 11 - - 17 - # Job name - name: Run integration tests on linux with Java ${{ matrix.java }} + java: [11, 17] + + name: Build CCR Plugin on Linux using Container Image runs-on: ubuntu-latest + needs: Get-CI-Image-Tag + container: + # using the same image which is used by opensearch-build team to build the OpenSearch Distribution + # this image tag is subject to change as more dependencies and updates will arrive over time + image: ${{ needs.Get-CI-Image-Tag.outputs.ci-image-version-linux }} + # need to switch to root so that github actions can install runner binary on container without permission issues. + options: --user root + steps: - # This step uses the setup-java Github action: https://github.com/actions/setup-java - - name: Set Up JDK ${{ matrix.java }} + - name: Checkout CCR + uses: actions/checkout@v2 + - name: Setup Java ${{ matrix.java }} uses: actions/setup-java@v1 with: java-version: ${{ matrix.java }} - # This step uses the checkout Github action: https://github.com/actions/checkout - - name: Checkout Branch - uses: actions/checkout@v2 - - name: Build and run Replication tests + + - name: Run build + # switching the user, as OpenSearch cluster can only be started as root/Administrator on linux-deb/linux-rpm/windows-zip. 
run: | - ./gradlew clean release -D"build.snapshot=true" + chown -R 1000:1000 `pwd` + su `id -un 1000` -c 'whoami && java -version && ./gradlew --refresh-dependencies clean release -D"build.snapshot=true"' - name: Upload failed logs uses: actions/upload-artifact@v2 if: failure() @@ -45,7 +56,7 @@ jobs: run: | mkdir -p cross-cluster-replication-artifacts cp ./build/distributions/*.zip cross-cluster-replication-artifacts - - name: Uploads coverage + - name: Upload Coverage Report + uses: codecov/codecov-action@v1 with: - fetch-depth: 2 - uses: codecov/codecov-action@v1.2.1 + token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1faf6511..ca44eebf 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -34,4 +34,4 @@ jobs: uses: actions/checkout@v2 - name: Build and run Replication tests run: | - ./gradlew clean release -D"build.snapshot=true" -x test -x IntegTest \ No newline at end of file + ./gradlew --refresh-dependencies clean release -D"build.snapshot=true" -x test -x IntegTest diff --git a/.github/workflows/security-knn-tests.yml b/.github/workflows/security-knn-tests.yml index 867df6c8..c3259a50 100644 --- a/.github/workflows/security-knn-tests.yml +++ b/.github/workflows/security-knn-tests.yml @@ -9,6 +9,11 @@ on: - '*' jobs: + Get-CI-Image-Tag: + uses: opensearch-project/opensearch-build/.github/workflows/get-ci-image-tag.yml@main + with: + product: opensearch + req: # Job name name: plugin check @@ -45,12 +50,19 @@ jobs: cat $GITHUB_OUTPUT fi - build: - needs: req + build-linux: + needs: [req, Get-CI-Image-Tag] if: ${{ 'True' == needs.req.outputs.isSecurityPluginAvailable }} # Job name name: Build and Run Security tests runs-on: ubuntu-latest + container: + # using the same image which is used by opensearch-build team to build the OpenSearch Distribution + # this image tag is subject to change as more dependencies and updates will arrive over time + image: ${{ needs.Get-CI-Image-Tag.outputs.ci-image-version-linux }} + # need to switch to root so that github actions can install runner binary on container without permission issues. + options: --user root + steps: # This step uses the setup-java Github action: https://github.com/actions/setup-java - name: Set Up JDK 17 @@ -62,8 +74,9 @@ jobs: uses: actions/checkout@v2 - name: Build and run Replication tests run: | + chown -R 1000:1000 `pwd` ls -al src/test/resources/security/plugin - ./gradlew clean release -Dbuild.snapshot=true -Psecurity=true + su `id -un 1000` -c "whoami && java -version && ./gradlew --refresh-dependencies clean release -Dbuild.snapshot=true -Psecurity=true" - name: Upload failed logs uses: actions/upload-artifact@v2 if: failure() @@ -82,12 +95,19 @@ jobs: fetch-depth: 2 uses: codecov/codecov-action@v1.2.1 - knn-build: - needs: req + knn-build-linux: + needs: [req, Get-CI-Image-Tag] if: ${{ 'True' == needs.req.outputs.isKnnPluginAvailable }} # Job name name: Build and Run Knn tests runs-on: ubuntu-latest + container: + # using the same image which is used by opensearch-build team to build the OpenSearch Distribution + # this image tag is subject to change as more dependencies and updates will arrive over time + image: ${{ needs.Get-CI-Image-Tag.outputs.ci-image-version-linux }} + # need to switch to root so that github actions can install runner binary on container without permission issues. 
+ options: --user root + steps: # This step uses the setup-java Github action: https://github.com/actions/setup-java - name: Set Up JDK 17 @@ -99,7 +119,8 @@ jobs: uses: actions/checkout@v2 - name: Build and run Replication tests run: | - ./gradlew clean release -Dbuild.snapshot=true -PnumNodes=1 -Dtests.class=org.opensearch.replication.BasicReplicationIT -Dtests.method="test knn index replication" -Pknn=true + chown -R 1000:1000 `pwd` + su `id -un 1000` -c 'whoami && java -version && ./gradlew --refresh-dependencies clean release -Dbuild.snapshot=true -PnumNodes=1 -Dtests.class=org.opensearch.replication.BasicReplicationIT -Dtests.method="test knn index replication" -Pknn=true' - name: Upload failed logs uses: actions/upload-artifact@v2 if: failure() From ee577a1729dec1b0bb05ddc3b767a54e6e905401 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 18 Oct 2023 14:39:33 +0530 Subject: [PATCH 76/84] CCR: Route requests to primary for remote store enabled leader clusters (#1275) Signed-off-by: Ankit Kala (cherry picked from commit e1d25969b823e7149f3ab5cff269edec3f839773) Co-authored-by: Ankit Kala --- .../replication/ReplicationPlugin.kt | 15 ++++++-- .../changes/TransportGetChangesAction.kt | 35 ++++++++++++------- .../repository/RemoteClusterRepository.kt | 3 ++ .../replication/util/ValidationUtil.kt | 7 ++++ 4 files changed, 45 insertions(+), 15 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt index ecc1f888..39ba84eb 100644 --- a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt +++ b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt @@ -144,12 +144,14 @@ import java.util.Optional import java.util.function.Supplier import org.opensearch.index.engine.NRTReplicationEngine +import org.opensearch.replication.util.ValidationUtil @OpenForTesting internal class ReplicationPlugin : Plugin(), ActionPlugin, PersistentTaskPlugin, RepositoryPlugin, EnginePlugin { private lateinit var client: Client + private lateinit var clusterService: ClusterService private lateinit var threadPool: ThreadPool private lateinit var replicationMetadataManager: ReplicationMetadataManager private lateinit var replicationSettings: ReplicationSettings @@ -207,6 +209,7 @@ internal class ReplicationPlugin : Plugin(), ActionPlugin, PersistentTaskPlugin, repositoriesService: Supplier): Collection { this.client = client this.threadPool = threadPool + this.clusterService = clusterService this.replicationMetadataManager = ReplicationMetadataManager(clusterService, client, ReplicationMetadataStore(client, clusterService, xContentRegistry)) this.replicationSettings = ReplicationSettings(clusterService) @@ -379,9 +382,15 @@ internal class ReplicationPlugin : Plugin(), ActionPlugin, PersistentTaskPlugin, } override fun getCustomTranslogDeletionPolicyFactory(): Optional { - return Optional.of(TranslogDeletionPolicyFactory{ - indexSettings, retentionLeasesSupplier -> ReplicationTranslogDeletionPolicy(indexSettings, retentionLeasesSupplier) - }) + // We don't need a retention lease translog deletion policy for remote store enabled clusters as + // we fetch the operations directly from lucene in such cases. 
+ return if (ValidationUtil.isRemoteStoreEnabledCluster(clusterService) == false) { + Optional.of(TranslogDeletionPolicyFactory { indexSettings, retentionLeasesSupplier -> + ReplicationTranslogDeletionPolicy(indexSettings, retentionLeasesSupplier) + }) + } else { + Optional.empty() + } } override fun onIndexModule(indexModule: IndexModule) { diff --git a/src/main/kotlin/org/opensearch/replication/action/changes/TransportGetChangesAction.kt b/src/main/kotlin/org/opensearch/replication/action/changes/TransportGetChangesAction.kt index dbee183d..392555b5 100644 --- a/src/main/kotlin/org/opensearch/replication/action/changes/TransportGetChangesAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/changes/TransportGetChangesAction.kt @@ -27,6 +27,7 @@ import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.core.common.io.stream.Writeable import org.opensearch.common.unit.TimeValue import org.opensearch.core.index.shard.ShardId +import org.opensearch.index.shard.IndexShard import org.opensearch.index.translog.Translog import org.opensearch.indices.IndicesService import org.opensearch.replication.ReplicationPlugin.Companion.REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING @@ -34,10 +35,7 @@ import org.opensearch.replication.ReplicationPlugin.Companion.REPLICATION_EXECUT import org.opensearch.replication.seqno.RemoteClusterStats import org.opensearch.replication.seqno.RemoteClusterTranslogService import org.opensearch.replication.seqno.RemoteShardMetric -import org.opensearch.replication.util.completeWith -import org.opensearch.replication.util.coroutineContext -import org.opensearch.replication.util.stackTraceToString -import org.opensearch.replication.util.waitForGlobalCheckpoint +import org.opensearch.replication.util.* import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportActionProxy import org.opensearch.transport.TransportService @@ -79,7 +77,8 @@ class TransportGetChangesAction @Inject constructor(threadPool: ThreadPool, clus indexMetric.lastFetchTime.set(relativeStartNanos) val indexShard = indicesService.indexServiceSafe(shardId.index).getShard(shardId.id) - if (indexShard.lastSyncedGlobalCheckpoint < request.fromSeqNo) { + val isRemoteStoreEnabled = ValidationUtil.isRemoteStoreEnabledCluster(clusterService) + if (lastGlobalCheckpoint(indexShard, isRemoteStoreEnabled) < request.fromSeqNo) { // There are no new operations to sync. Do a long poll and wait for GlobalCheckpoint to advance. If // the checkpoint doesn't advance by the timeout this throws an ESTimeoutException which the caller // should catch and start a new poll. @@ -88,18 +87,18 @@ class TransportGetChangesAction @Inject constructor(threadPool: ThreadPool, clus // At this point indexShard.lastKnownGlobalCheckpoint has advanced but it may not yet have been synced // to the translog, which means we can't return those changes. Return to the caller to retry. // TODO: Figure out a better way to wait for the global checkpoint to be synced to the translog - if (indexShard.lastSyncedGlobalCheckpoint < request.fromSeqNo) { - assert(gcp > indexShard.lastSyncedGlobalCheckpoint) { "Checkpoint didn't advance at all" } + if (lastGlobalCheckpoint(indexShard, isRemoteStoreEnabled) < request.fromSeqNo) { + assert(gcp > lastGlobalCheckpoint(indexShard, isRemoteStoreEnabled)) { "Checkpoint didn't advance at all $gcp ${lastGlobalCheckpoint(indexShard, isRemoteStoreEnabled)}" } throw OpenSearchTimeoutException("global checkpoint not synced. 
Retry after a few miliseconds...") } } relativeStartNanos = System.nanoTime() // At this point lastSyncedGlobalCheckpoint is at least fromSeqNo - val toSeqNo = min(indexShard.lastSyncedGlobalCheckpoint, request.toSeqNo) + val toSeqNo = min(lastGlobalCheckpoint(indexShard, isRemoteStoreEnabled), request.toSeqNo) var ops: List = listOf() - var fetchFromTranslog = isTranslogPruningByRetentionLeaseEnabled(shardId) + var fetchFromTranslog = isTranslogPruningByRetentionLeaseEnabled(shardId) && isRemoteStoreEnabled == false if(fetchFromTranslog) { try { ops = translogService.getHistoryOfOperations(indexShard, request.fromSeqNo, toSeqNo) @@ -137,12 +136,22 @@ class TransportGetChangesAction @Inject constructor(threadPool: ThreadPool, clus indexMetric.ops.addAndGet(ops.size.toLong()) ops.stream().forEach{op -> indexMetric.bytesRead.addAndGet(op.estimateSize()) } - - GetChangesResponse(ops, request.fromSeqNo, indexShard.maxSeqNoOfUpdatesOrDeletes, indexShard.lastSyncedGlobalCheckpoint) + GetChangesResponse(ops, request.fromSeqNo, indexShard.maxSeqNoOfUpdatesOrDeletes, lastGlobalCheckpoint(indexShard, isRemoteStoreEnabled)) } } } + private fun lastGlobalCheckpoint(indexShard: IndexShard, isRemoteStoreEnabled: Boolean): Long { + // We rely on lastSyncedGlobalCheckpoint as it has been durably written to disk. In case of remote store + // enabled clusters, the semantics are slightly different, and we can't use lastSyncedGlobalCheckpoint. Falling back to + // lastKnownGlobalCheckpoint in such cases. + return if (isRemoteStoreEnabled) { + indexShard.lastKnownGlobalCheckpoint + } else { + indexShard.lastSyncedGlobalCheckpoint + } + } + private fun isTranslogPruningByRetentionLeaseEnabled(shardId: ShardId): Boolean { val enabled = clusterService.state().metadata.indices.get(shardId.indexName) @@ -162,7 +171,9 @@ class TransportGetChangesAction @Inject constructor(threadPool: ThreadPool, clus } override fun shards(state: ClusterState, request: InternalRequest): ShardsIterator { + val shardIt = state.routingTable().shardRoutingTable(request.request().shardId) // Random active shards - return state.routingTable().shardRoutingTable(request.request().shardId).activeInitializingShardsRandomIt() + return if (ValidationUtil.isRemoteStoreEnabledCluster(clusterService)) shardIt.primaryShardIt() + else shardIt.activeInitializingShardsRandomIt() } } \ No newline at end of file diff --git a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt index 832977b2..8ea986c7 100644 --- a/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt +++ b/src/main/kotlin/org/opensearch/replication/repository/RemoteClusterRepository.kt @@ -250,6 +250,9 @@ class RemoteClusterRepository(private val repositoryMetadata: RepositoryMetadata // Remove translog pruning for the follower index builder.remove(REPLICATION_INDEX_TRANSLOG_PRUNING_ENABLED_SETTING.key) + builder.remove(IndexMetadata.SETTING_REMOTE_STORE_ENABLED) + builder.remove(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY) + builder.remove(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY) val indexMdBuilder = IndexMetadata.builder(indexMetadata).settings(builder) indexMetadata.aliases.values.forEach { diff --git a/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt b/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt index 1d6b8c2e..3aad9665 100644 --- 
a/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt +++ b/src/main/kotlin/org/opensearch/replication/util/ValidationUtil.kt @@ -24,8 +24,11 @@ import org.opensearch.env.Environment import org.opensearch.index.IndexNotFoundException import java.io.UnsupportedEncodingException import org.opensearch.cluster.service.ClusterService +import org.opensearch.node.Node +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute import org.opensearch.replication.ReplicationPlugin.Companion.KNN_INDEX_SETTING import org.opensearch.replication.ReplicationPlugin.Companion.KNN_PLUGIN_PRESENT_SETTING +import org.opensearch.replication.action.changes.TransportGetChangesAction import java.nio.file.Files import java.nio.file.Path import java.util.Locale @@ -154,4 +157,8 @@ object ValidationUtil { } + fun isRemoteStoreEnabledCluster(clusterService: ClusterService): Boolean { + return clusterService.settings.getByPrefix(Node.NODE_ATTRIBUTES.key + RemoteStoreNodeAttribute.REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX).isEmpty == false + } + } From 6c2205a2af02b4cc208bea0f53b634d8aca63405 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 22 Nov 2023 21:26:46 +0530 Subject: [PATCH 77/84] Add 2.11.1.0 release notes (#1285) (#1286) Signed-off-by: monusingh-1 (cherry picked from commit abb41a93f492b927db0d6880e81a85a978454e73) Co-authored-by: Monu Singh --- ...rch-cross-cluster-replication.release-notes-2.11.1.0.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 release-notes/opensearch-cross-cluster-replication.release-notes-2.11.1.0.md diff --git a/release-notes/opensearch-cross-cluster-replication.release-notes-2.11.1.0.md b/release-notes/opensearch-cross-cluster-replication.release-notes-2.11.1.0.md new file mode 100644 index 00000000..0df3f9e6 --- /dev/null +++ b/release-notes/opensearch-cross-cluster-replication.release-notes-2.11.1.0.md @@ -0,0 +1,7 @@ +## Version 2.11.1.0 Release Notes + +Compatible with OpenSearch 2.11.1 + + +### Bug Fixes +* Fix CCR compatibility with remote translogs ([#1276](https://github.com/opensearch-project/cross-cluster-replication/pull/1276)) \ No newline at end of file From 712e21ee8566ea002f3a4d69d5b2dc9adb4df22c Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 7 Dec 2023 11:14:31 +0530 Subject: [PATCH 78/84] Add support for SystemIndex (#1290) (#1291) Signed-off-by: monusingh-1 (cherry picked from commit 577fd6275085d3b50ef816949c3ab178dc805b4c) Co-authored-by: Monu Singh --- .../org/opensearch/replication/ReplicationPlugin.kt | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt index 39ba84eb..792ebe27 100644 --- a/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt +++ b/src/main/kotlin/org/opensearch/replication/ReplicationPlugin.kt @@ -114,6 +114,7 @@ import org.opensearch.plugins.ActionPlugin import org.opensearch.plugins.ActionPlugin.ActionHandler import org.opensearch.plugins.EnginePlugin import org.opensearch.plugins.PersistentTaskPlugin +import org.opensearch.plugins.SystemIndexPlugin import org.opensearch.plugins.Plugin import org.opensearch.plugins.RepositoryPlugin import org.opensearch.replication.action.autofollow.* @@ -144,11 +145,13 @@ import java.util.Optional import java.util.function.Supplier 
import org.opensearch.index.engine.NRTReplicationEngine +import org.opensearch.indices.SystemIndexDescriptor import org.opensearch.replication.util.ValidationUtil @OpenForTesting -internal class ReplicationPlugin : Plugin(), ActionPlugin, PersistentTaskPlugin, RepositoryPlugin, EnginePlugin { +internal class ReplicationPlugin : Plugin(), ActionPlugin, PersistentTaskPlugin, + RepositoryPlugin, EnginePlugin, SystemIndexPlugin { private lateinit var client: Client private lateinit var clusterService: ClusterService @@ -399,4 +402,7 @@ internal class ReplicationPlugin : Plugin(), ActionPlugin, PersistentTaskPlugin, indexModule.addIndexEventListener(IndexCloseListener) } } + override fun getSystemIndexDescriptors(settings: Settings): Collection { + return listOf(SystemIndexDescriptor(ReplicationMetadataStore.REPLICATION_CONFIG_SYSTEM_INDEX, "System Index for storing cross cluster replication configuration.")) + } } From 95028c8c5d362f3affd31998f8fd9bd31bcbf2a3 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 7 Dec 2023 20:23:31 +0530 Subject: [PATCH 79/84] Correct error message when field are not passed when starting replication (#1292) (#1293) --- .../action/index/ReplicateIndexRequest.kt | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexRequest.kt b/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexRequest.kt index 6024798a..3d2bfb1b 100644 --- a/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexRequest.kt +++ b/src/main/kotlin/org/opensearch/replication/action/index/ReplicateIndexRequest.kt @@ -92,10 +92,19 @@ class ReplicateIndexRequest : AcknowledgedRequest, Indice override fun validate(): ActionRequestValidationException? 
{ var validationException = ActionRequestValidationException() - if (!this::leaderAlias.isInitialized || - !this::leaderIndex.isInitialized || - !this::followerIndex.isInitialized) { - validationException.addValidationError("Mandatory params are missing for the request") + val missingFields: MutableList = mutableListOf() + if (!this::leaderAlias.isInitialized){ + missingFields.add("leader_alias") + } + if(!this::leaderIndex.isInitialized){ + missingFields.add("leader_index") + } + if (!this::followerIndex.isInitialized){ + missingFields.add("follower_index") + } + if(missingFields.isNotEmpty()){ + validationException.addValidationError("Mandatory params $missingFields are missing for the request") + return validationException } validateName(leaderIndex, validationException) From cb5af605867b44ab846980b22b62ad7350250930 Mon Sep 17 00:00:00 2001 From: Monu Singh Date: Tue, 12 Dec 2023 11:26:24 +0530 Subject: [PATCH 80/84] Increment version to 2.12 (#1296) Increment version to 2.12 and fix build failure (#1296) Signed-off-by: monusingh-1 --- build.gradle | 2 +- .../replication/action/replay/TransportReplayChangesAction.kt | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/build.gradle b/build.gradle index 70bfc2aa..81ecb1f3 100644 --- a/build.gradle +++ b/build.gradle @@ -36,7 +36,7 @@ import org.opensearch.gradle.test.RestIntegTestTask buildscript { ext { isSnapshot = "true" == System.getProperty("build.snapshot", "true") - opensearch_version = System.getProperty("opensearch.version", "2.11.0-SNAPSHOT") + opensearch_version = System.getProperty("opensearch.version", "2.12.0-SNAPSHOT") buildVersionQualifier = System.getProperty("build.version_qualifier", "") // e.g. 2.0.0-rc1-SNAPSHOT -> 2.0.0.0-rc1-SNAPSHOT version_tokens = opensearch_version.tokenize('-') diff --git a/src/main/kotlin/org/opensearch/replication/action/replay/TransportReplayChangesAction.kt b/src/main/kotlin/org/opensearch/replication/action/replay/TransportReplayChangesAction.kt index fbf9e181..e4a23720 100644 --- a/src/main/kotlin/org/opensearch/replication/action/replay/TransportReplayChangesAction.kt +++ b/src/main/kotlin/org/opensearch/replication/action/replay/TransportReplayChangesAction.kt @@ -52,6 +52,7 @@ import org.opensearch.index.shard.IndexShard import org.opensearch.index.translog.Translog import org.opensearch.indices.IndicesService import org.opensearch.indices.SystemIndices +import org.opensearch.telemetry.tracing.noop.NoopTracer import org.opensearch.threadpool.ThreadPool import org.opensearch.transport.TransportService import java.util.function.Function @@ -72,7 +73,7 @@ class TransportReplayChangesAction @Inject constructor(settings: Settings, trans TransportWriteAction( settings, ReplayChangesAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, Writeable.Reader { inp -> ReplayChangesRequest(inp) }, Writeable.Reader { inp -> ReplayChangesRequest(inp) }, - EXECUTOR_NAME_FUNCTION, false, indexingPressureService, systemIndices) { + EXECUTOR_NAME_FUNCTION, false, indexingPressureService, systemIndices, NoopTracer.INSTANCE) { companion object { private val log = LogManager.getLogger(TransportReplayChangesAction::class.java)!! 
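For illustration, a minimal self-contained Kotlin sketch of the behaviour introduced by the validation change above (PATCH 79): every missing mandatory start-replication field is reported in a single error instead of a generic message. The helper and main() below are illustrative assumptions, not plugin code; only the field names and the message format are taken from ReplicateIndexRequest.validate().

```kotlin
// Standalone sketch (not part of the plugin): report all missing mandatory fields at once.
fun missingFieldsError(leaderAlias: String?, leaderIndex: String?, followerIndex: String?): String? {
    val missing = mutableListOf<String>()
    if (leaderAlias.isNullOrBlank()) missing.add("leader_alias")
    if (leaderIndex.isNullOrBlank()) missing.add("leader_index")
    if (followerIndex.isNullOrBlank()) missing.add("follower_index")
    return if (missing.isEmpty()) null else "Mandatory params $missing are missing for the request"
}

fun main() {
    // Only the leader alias is supplied, so both remaining fields are reported together.
    println(missingFieldsError("my-leader-connection", null, null))
    // -> Mandatory params [leader_index, follower_index] are missing for the request
}
```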
From 34a1d6e76c1123147cff1c5c06e0801a554a4f2c Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 2 Jan 2024 14:36:06 +0530 Subject: [PATCH 81/84] Use latest version of ipaddress Library (#1302) (#1305) Use latest version of ipaddress Library, fixes https://nvd.nist.gov/vuln/detail/CVE-2023-50570 Signed-off-by: Monu Singh (cherry picked from commit 8258ac5ded83398be701721c7199b3d2ea714c70) Co-authored-by: Monu Singh --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 81ecb1f3..5b671ff1 100644 --- a/build.gradle +++ b/build.gradle @@ -140,7 +140,7 @@ dependencies { implementation "org.jetbrains.kotlin:kotlin-stdlib:${kotlin_version}" implementation "org.jetbrains.kotlin:kotlin-stdlib-common:${kotlin_version}" implementation "org.jetbrains:annotations:13.0" - implementation "com.github.seancfoley:ipaddress:5.3.3" + implementation "com.github.seancfoley:ipaddress:5.4.0" implementation "org.jetbrains.kotlinx:kotlinx-coroutines-core:1.6.0" // Moving away from kotlin_version implementation "org.opensearch:common-utils:${common_utils_version}" From 86d3f679b5460b95f898c85174e6a22d8a547221 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 2 Jan 2024 21:20:22 +0530 Subject: [PATCH 82/84] Ignoring update of final type settings in updateSettings function (#1304) (#1306) Signed-off-by: Nishant Goel (cherry picked from commit e509da4dcced4842c556d7166ebc778af697c6cb) Co-authored-by: Nishant Goel <113011736+nisgoel-amazon@users.noreply.github.com> --- .../replication/task/index/IndexReplicationTask.kt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt index d0c812a7..48d0f331 100644 --- a/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt +++ b/src/main/kotlin/org/opensearch/replication/task/index/IndexReplicationTask.kt @@ -461,7 +461,7 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript continue } val setting = indexScopedSettings[key] - if (!setting.isPrivateIndex) { + if (!setting.isPrivateIndex && !setting.isFinal) { desiredSettingsBuilder.copy(key, settings); } } @@ -473,7 +473,7 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript if (desiredSettings.get(key) != followerSettings.get(key)) { //Not intended setting on follower side. 
val setting = indexScopedSettings[key] - if (indexScopedSettings.isPrivateSetting(key)) { + if (indexScopedSettings.isPrivateSetting(key) || setting.isFinal) { continue } if (!setting.isDynamic()) { @@ -486,7 +486,7 @@ open class IndexReplicationTask(id: Long, type: String, action: String, descript for (key in followerSettings.keySet()) { val setting = indexScopedSettings[key] - if (setting == null || setting.isPrivateIndex) { + if (setting == null || setting.isPrivateIndex || setting.isFinal) { continue } From 7e2902c7d3dfc4768b21c9be0f34aecdcc587e31 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 2 Feb 2024 21:05:45 +0530 Subject: [PATCH 83/84] Fix build failures (#1315) (#1316) * Fix build failures Signed-off-by: monusingh-1 * Enable xml report Signed-off-by: monusingh-1 --------- Signed-off-by: monusingh-1 (cherry picked from commit 043a7882a369399b07fead3b1d146aefc359d03a) Co-authored-by: Monu Singh --- build.gradle | 9 ++++----- gradle/wrapper/gradle-wrapper.properties | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/build.gradle b/build.gradle index 5b671ff1..5c701433 100644 --- a/build.gradle +++ b/build.gradle @@ -86,7 +86,7 @@ buildscript { } plugins { - id 'nebula.ospackage' version "8.3.0" + id 'com.netflix.nebula.ospackage' version "11.6.0" id "com.dorongold.task-tree" version "1.5" id "jacoco" } @@ -162,15 +162,14 @@ repositories { compileKotlin { kotlinOptions { - // This should be 11, but the OpenSearch logger usage checker tool doesn't like classes > 1.8 - jvmTarget = "1.8" + jvmTarget = "11" freeCompilerArgs = ['-Xjsr305=strict'] // Handle OpenSearch @Nullable annotation correctly } } compileTestKotlin { kotlinOptions { - jvmTarget = "1.8" + jvmTarget = "11" freeCompilerArgs = ['-Xjsr305=strict'] } } @@ -536,7 +535,7 @@ jacocoTestReport { dependsOn test dependsOn integTest reports { - xml.enabled true + xml.required.set(true) } // We're combining the coverage data for both test and integ tests. 
getExecutionData().setFrom(fileTree(buildDir).include("/jacoco/*.exec")) diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 68efe1de..8d687878 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,6 +11,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.6.1-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.4-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists From 315e60044cef6d41634c11b7f37ec8c980bf7633 Mon Sep 17 00:00:00 2001 From: Mohit Kumar <113413713+mohitamg@users.noreply.github.com> Date: Tue, 6 Feb 2024 20:18:01 +0530 Subject: [PATCH 84/84] Create opensearch-cross-cluster-replication.release-notes-2.12.0.0.md Release notes for OS 2.12.0 Signed-off-by: Mohit Kumar <113413713+mohitamg@users.noreply.github.com> --- ...cross-cluster-replication.release-notes-2.12.0.0.md | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 release-notes/opensearch-cross-cluster-replication.release-notes-2.12.0.0.md diff --git a/release-notes/opensearch-cross-cluster-replication.release-notes-2.12.0.0.md b/release-notes/opensearch-cross-cluster-replication.release-notes-2.12.0.0.md new file mode 100644 index 00000000..8b9224f0 --- /dev/null +++ b/release-notes/opensearch-cross-cluster-replication.release-notes-2.12.0.0.md @@ -0,0 +1,10 @@ +## Version 2.12.0 Release Notes + +Compatible with OpenSearch 2.12.0 + +## Bug Fixes + +* Implement getSystemIndexDescriptors to support SystemIndex for replication plugin ([#1290](https://github.com/opensearch-project/cross-cluster-replication/pull/1290)) +* Correct error message including what fields are missing when field are not passed when starting replication ([#1292](https://github.com/opensearch-project/cross-cluster-replication/pull/1292)) +* Ignoring all the final settings to copy from leader to follower as those settings won't be able to apply as those are not updatable ([#1304](https://github.com/opensearch-project/cross-cluster-replication/pull/1304)) +
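For illustration, a simplified Kotlin sketch of the settings-sync rule behind the final-settings fix noted above (#1304 / PATCH 82): private and final index settings are never copied from the leader to the follower, because they are not updatable on an existing follower index. The SettingMeta type and the example keys are illustrative stand-ins, not the plugin's Setting/IndexScopedSettings classes.

```kotlin
// Simplified model of the skip-private/skip-final filter applied when syncing leader settings.
data class SettingMeta(val key: String, val isPrivateIndex: Boolean = false, val isFinal: Boolean = false)

fun copyableSettings(leaderSettings: Map<SettingMeta, String>): Map<String, String> =
    leaderSettings.filterKeys { !it.isPrivateIndex && !it.isFinal }
        .mapKeys { (meta, _) -> meta.key }

fun main() {
    val leader = mapOf(
        SettingMeta("index.number_of_replicas") to "1",                      // dynamic: copied
        SettingMeta("index.soft_deletes.enabled", isFinal = true) to "true", // final: skipped
        SettingMeta("index.creation_date", isPrivateIndex = true) to "0"     // private: skipped
    )
    println(copyableSettings(leader))  // -> {index.number_of_replicas=1}
}
```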