Use write block on archive indices (#85102)
Ensure archive indices have a write block and the write block can't be removed.

Relates #81210
ywelsch authored Mar 18, 2022
1 parent b826516 commit 9fdd0dc
Showing 5 changed files with 171 additions and 95 deletions.
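The change has two parts, both visible in the diffs below: convertLegacyIndex now stamps index.blocks.write=true onto the converted index metadata, and the BWC plugin registers a settings-update validator that rejects any attempt to set that block back to false. As a quick orientation, here is a minimal, self-contained sketch of that validate-before-apply pattern in plain Java. The class and method names (WriteBlockSketch, registerValidator, updateSetting) are made up for illustration and are not the Elasticsearch API; the real hook is the IndexModule#addSettingsUpdateConsumer call shown in the last diff.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

public class WriteBlockSketch {

    // Hypothetical stand-in for an index's settings.
    private final Map<String, Boolean> settings = new HashMap<>();
    // Validators consulted before a setting update is applied.
    private final Map<String, Consumer<Boolean>> validators = new HashMap<>();

    void registerValidator(String key, Consumer<Boolean> validator) {
        validators.put(key, validator);
    }

    void updateSetting(String key, boolean value) {
        Consumer<Boolean> validator = validators.get(key);
        if (validator != null) {
            validator.accept(value); // throws if the new value is rejected
        }
        settings.put(key, value);
    }

    public static void main(String[] args) {
        WriteBlockSketch index = new WriteBlockSketch();
        // Restoring an archive index applies the write block up front ...
        index.updateSetting("index.blocks.write", true);
        // ... and a validator refuses to ever lift it again.
        index.registerValidator("index.blocks.write", write -> {
            if (write == false) {
                throw new IllegalArgumentException("Cannot remove write block from archive index");
            }
        });
        index.updateSetting("index.blocks.write", true);      // allowed: the block stays in place
        try {
            index.updateSetting("index.blocks.write", false); // rejected by the validator
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}

The integration tests below exercise exactly this behavior against a real cluster: setting the block to true again is a no-op, setting it to false fails with "Cannot remove write block from archive index".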
@@ -1609,6 +1609,7 @@ private IndexMetadata convertLegacyIndex(IndexMetadata snapshotIndexMetadata, Cl
Settings.builder()
.put(snapshotIndexMetadata.getSettings())
.put(IndexMetadata.SETTING_INDEX_VERSION_COMPATIBILITY.getKey(), clusterState.getNodes().getSmallestNonClientNodeVersion())
.put(IndexMetadata.SETTING_BLOCKS_WRITE, true)
);
// TODO: _routing? Perhaps we don't need to obey any routing here as stuff is read-only anyway and get API will be disabled
return convertedIndexMetadata.build();
@@ -0,0 +1,113 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/

package org.elasticsearch.xpack.lucene.bwc;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.RepositoryMetadata;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.env.Environment;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.license.License;
import org.elasticsearch.license.PostStartTrialAction;
import org.elasticsearch.license.PostStartTrialRequest;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.RepositoryPlugin;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.RepositoryData;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.mockstore.MockRepository;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.xcontent.NamedXContentRegistry;
import org.junit.Before;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;

@ESIntegTestCase.ClusterScope(supportsDedicatedMasters = false, numClientNodes = 0, scope = ESIntegTestCase.Scope.TEST)
public abstract class AbstractArchiveTestCase extends AbstractSnapshotIntegTestCase {

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(LocalStateOldLuceneVersions.class, TestRepositoryPlugin.class, MockRepository.Plugin.class);
}

public static class TestRepositoryPlugin extends Plugin implements RepositoryPlugin {
public static final String FAKE_VERSIONS_TYPE = "fakeversionsrepo";

@Override
public Map<String, Repository.Factory> getRepositories(
Environment env,
NamedXContentRegistry namedXContentRegistry,
ClusterService clusterService,
BigArrays bigArrays,
RecoverySettings recoverySettings
) {
return Map.of(
FAKE_VERSIONS_TYPE,
metadata -> new FakeVersionsRepo(metadata, env, namedXContentRegistry, clusterService, bigArrays, recoverySettings)
);
}

// fakes an old index version format to activate license checks
private static class FakeVersionsRepo extends FsRepository {
FakeVersionsRepo(
RepositoryMetadata metadata,
Environment env,
NamedXContentRegistry namedXContentRegistry,
ClusterService clusterService,
BigArrays bigArrays,
RecoverySettings recoverySettings
) {
super(metadata, env, namedXContentRegistry, clusterService, bigArrays, recoverySettings);
}

@Override
public IndexMetadata getSnapshotIndexMetaData(RepositoryData repositoryData, SnapshotId snapshotId, IndexId index)
throws IOException {
final IndexMetadata original = super.getSnapshotIndexMetaData(repositoryData, snapshotId, index);
return IndexMetadata.builder(original)
.settings(
Settings.builder()
.put(original.getSettings())
.put(
IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(),
metadata.settings()
.getAsVersion("version", randomBoolean() ? Version.fromString("5.0.0") : Version.fromString("6.0.0"))
)
)
.build();
}
}
}

protected static final String repoName = "test-repo";
protected static final String indexName = "test-index";
protected static final String snapshotName = "test-snapshot";

@Before
public void createAndRestoreArchive() throws Exception {
createRepository(repoName, TestRepositoryPlugin.FAKE_VERSIONS_TYPE);
createIndex(indexName);
createFullSnapshot(repoName, snapshotName);

assertAcked(client().admin().indices().prepareDelete(indexName));

PostStartTrialRequest request = new PostStartTrialRequest().setType(License.LicenseType.TRIAL.getTypeName()).acknowledge(true);
client().execute(PostStartTrialAction.INSTANCE, request).get();
}
}
@@ -13,14 +13,8 @@
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.metadata.RepositoryMetadata;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.env.Environment;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.license.DeleteLicenseAction;
import org.elasticsearch.license.License;
import org.elasticsearch.license.LicensesMetadata;
@@ -29,108 +23,20 @@
import org.elasticsearch.license.PostStartTrialAction;
import org.elasticsearch.license.PostStartTrialRequest;
import org.elasticsearch.license.PostStartTrialResponse;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.RepositoryPlugin;
import org.elasticsearch.protocol.xpack.XPackUsageRequest;
import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.RepositoryData;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotRestoreException;
import org.elasticsearch.snapshots.mockstore.MockRepository;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.xcontent.NamedXContentRegistry;
import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction;
import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse;
import org.elasticsearch.xpack.core.archive.ArchiveFeatureSetUsage;
import org.junit.Before;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.oneOf;

@ESIntegTestCase.ClusterScope(supportsDedicatedMasters = false, numClientNodes = 0, scope = ESIntegTestCase.Scope.TEST)
public class ArchiveLicenseIntegTests extends AbstractSnapshotIntegTestCase {

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(LocalStateOldLuceneVersions.class, TestRepositoryPlugin.class, MockRepository.Plugin.class);
}

public static class TestRepositoryPlugin extends Plugin implements RepositoryPlugin {
public static final String FAKE_VERSIONS_TYPE = "fakeversionsrepo";

@Override
public Map<String, Repository.Factory> getRepositories(
Environment env,
NamedXContentRegistry namedXContentRegistry,
ClusterService clusterService,
BigArrays bigArrays,
RecoverySettings recoverySettings
) {
return Map.of(
FAKE_VERSIONS_TYPE,
metadata -> new FakeVersionsRepo(metadata, env, namedXContentRegistry, clusterService, bigArrays, recoverySettings)
);
}

// fakes an old index version format to activate license checks
private static class FakeVersionsRepo extends FsRepository {
FakeVersionsRepo(
RepositoryMetadata metadata,
Environment env,
NamedXContentRegistry namedXContentRegistry,
ClusterService clusterService,
BigArrays bigArrays,
RecoverySettings recoverySettings
) {
super(metadata, env, namedXContentRegistry, clusterService, bigArrays, recoverySettings);
}

@Override
public IndexMetadata getSnapshotIndexMetaData(RepositoryData repositoryData, SnapshotId snapshotId, IndexId index)
throws IOException {
final IndexMetadata original = super.getSnapshotIndexMetaData(repositoryData, snapshotId, index);
return IndexMetadata.builder(original)
.settings(
Settings.builder()
.put(original.getSettings())
.put(
IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(),
metadata.settings()
.getAsVersion("version", randomBoolean() ? Version.fromString("5.0.0") : Version.fromString("6.0.0"))
)
)
.build();
}
}
}

private static final String repoName = "test-repo";
private static final String indexName = "test-index";
private static final String snapshotName = "test-snapshot";

@Before
public void createAndRestoreArchive() throws Exception {
createRepository(repoName, TestRepositoryPlugin.FAKE_VERSIONS_TYPE);
createIndex(indexName);
createFullSnapshot(repoName, snapshotName);

assertAcked(client().admin().indices().prepareDelete(indexName));

PostStartTrialRequest request = new PostStartTrialRequest().setType(License.LicenseType.TRIAL.getTypeName()).acknowledge(true);
client().execute(PostStartTrialAction.INSTANCE, request).get();
}
public class ArchiveLicenseIntegTests extends AbstractArchiveTestCase {

public void testFeatureUsage() throws Exception {
XPackUsageFeatureResponse usage = client().execute(XPackUsageFeatureAction.ARCHIVE, new XPackUsageRequest()).get();
@@ -0,0 +1,49 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/

package org.elasticsearch.xpack.lucene.bwc;

import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;

import java.util.concurrent.ExecutionException;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;

public class ArchiveSettingValidationIntegTests extends AbstractArchiveTestCase {
public void testCannotRemoveWriteBlock() throws ExecutionException, InterruptedException {
final RestoreSnapshotRequest req = new RestoreSnapshotRequest(repoName, snapshotName).indices(indexName).waitForCompletion(true);

final RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().restoreSnapshot(req).get();
assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
ensureGreen(indexName);

final IllegalArgumentException iae = expectThrows(
IllegalArgumentException.class,
() -> client().admin()
.indices()
.prepareUpdateSettings(indexName)
.setSettings(Settings.builder().put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), false))
.get()
);
assertThat(
iae.getMessage(),
containsString("illegal value can't update [" + IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey() + "] from [true] to [false]")
);
assertNotNull(iae.getCause());
assertThat(iae.getCause().getMessage(), containsString("Cannot remove write block from archive index"));

client().admin()
.indices()
.prepareUpdateSettings(indexName)
.setSettings(Settings.builder().put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), true))
.get();
}
}
@@ -15,6 +15,7 @@
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
@@ -128,6 +129,12 @@ public void afterFilesRestoredFromRepository(IndexShard indexShard) {
});

indexModule.addIndexEventListener(failShardsListener.get());

indexModule.addSettingsUpdateConsumer(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING, s -> {}, write -> {
if (write == false) {
throw new IllegalArgumentException("Cannot remove write block from archive index");
}
});
}
}
