diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java
index 3bd8ba11a57ec..3ce3f8918a190 100644
--- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java
+++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java
@@ -21,6 +21,7 @@
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.elasticsearch.cluster.metadata.IndexGraveyard;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.Manifest;
 import org.elasticsearch.cluster.metadata.MetaData;
@@ -115,12 +116,15 @@ private Tuple<Manifest, MetaData> loadFullStateBWC() throws IOException {
         MetaData globalMetaData = metaDataAndGeneration.v1();
         long globalStateGeneration = metaDataAndGeneration.v2();
 
+        final IndexGraveyard indexGraveyard;
         if (globalMetaData != null) {
             metaDataBuilder = MetaData.builder(globalMetaData);
+            indexGraveyard = globalMetaData.custom(IndexGraveyard.TYPE);
             // TODO https://github.com/elastic/elasticsearch/issues/38556
             // assert Version.CURRENT.major < 8 : "failed to find manifest file, which is mandatory staring with Elasticsearch version 8.0";
         } else {
             metaDataBuilder = MetaData.builder();
+            indexGraveyard = IndexGraveyard.builder().build();
         }
 
         for (String indexFolderName : nodeEnv.availableIndexFolders()) {
@@ -132,8 +136,13 @@
             IndexMetaData indexMetaData = indexMetaDataAndGeneration.v1();
             long generation = indexMetaDataAndGeneration.v2();
             if (indexMetaData != null) {
-                indices.put(indexMetaData.getIndex(), generation);
-                metaDataBuilder.put(indexMetaData, false);
+                if (indexGraveyard.containsIndex(indexMetaData.getIndex())) {
+                    logger.debug("[{}] found metadata for deleted index [{}]", indexFolderName, indexMetaData.getIndex());
+                    // this index folder is cleared up when state is recovered
+                } else {
+                    indices.put(indexMetaData.getIndex(), generation);
+                    metaDataBuilder.put(indexMetaData, false);
+                }
             } else {
                 logger.debug("[{}] failed to find metadata for existing index location", indexFolderName);
             }
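For readers skimming the patch, the behavioural change in loadFullStateBWC() above amounts to one extra guard: on-disk index metadata is imported only if the index is not already listed in the graveyard. The following standalone sketch is not part of the patch; it uses plain JDK types (no Elasticsearch classes, all names illustrative) to show the same filtering idea.

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class GraveyardFilterSketch {

    // Only keep on-disk index metadata whose index name has no tombstone in the graveyard.
    static Map<String, Long> importableIndices(Map<String, Long> onDiskIndices, Set<String> tombstones) {
        final Map<String, Long> indices = new HashMap<>();
        for (Map.Entry<String, Long> entry : onDiskIndices.entrySet()) {
            if (tombstones.contains(entry.getKey())) {
                // half-deleted index: tombstoned, but its metadata was never removed from disk;
                // skip it here, the folder is cleared up once cluster state is recovered
                continue;
            }
            indices.put(entry.getKey(), entry.getValue());
        }
        return indices;
    }

    public static void main(String[] args) {
        // the "test" index was deleted (tombstoned) but its folder is still on disk
        System.out.println(importableIndices(Map.of("test", 3L, "kept", 7L), Set.of("test"))); // prints {kept=7}
    }
}

Skipping rather than deleting at load time mirrors the comment in the patch: the stale index folder is cleaned up later, once cluster state has been recovered.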
diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java
index e9cbd912202a0..1df326cb1857e 100644
--- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java
+++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java
@@ -30,8 +30,10 @@
 import org.elasticsearch.client.Client;
 import org.elasticsearch.client.Requests;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.coordination.CoordinationMetaData;
 import org.elasticsearch.cluster.metadata.IndexGraveyard;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.Manifest;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
@@ -39,11 +41,13 @@
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.CheckedConsumer;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.indices.IndexClosedException;
@@ -54,6 +58,8 @@
 import org.elasticsearch.test.InternalTestCluster.RestartCallback;
 
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
@@ -66,6 +72,7 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.notNullValue;
@@ -508,6 +515,42 @@ public void testArchiveBrokenClusterSettings() throws Exception {
         assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L);
     }
 
+    public void testHalfDeletedIndexImport() throws Exception {
+        // It's possible for a 6.x node to add a tombstone for an index but not actually delete the index metadata from disk since that
+        // deletion is slightly deferred and may race against the node being shut down; if you upgrade to 7.x when in this state then the
+        // node won't start.
+
+        internalCluster().startNode();
+        createIndex("test", Settings.builder()
+            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+            .build());
+        ensureGreen("test");
+
+        final MetaData metaData = internalCluster().getInstance(ClusterService.class).state().metaData();
+        final Path[] paths = internalCluster().getInstance(NodeEnvironment.class).nodeDataPaths();
+        writeBrokenMeta(metaStateService -> {
+            metaStateService.writeGlobalState("test", MetaData.builder(metaData)
+                // we remove the manifest file, resetting the term and making this look like an upgrade from 6.x, so must also reset the
+                // term in the coordination metadata
+                .coordinationMetaData(CoordinationMetaData.builder(metaData.coordinationMetaData()).term(0L).build())
+                // add a tombstone but do not delete the index metadata from disk
+                .putCustom(IndexGraveyard.TYPE, IndexGraveyard.builder().addTombstone(metaData.index("test").getIndex()).build()).build());
+            for (final Path path : paths) {
+                try (Stream<Path> stateFiles = Files.list(path.resolve(MetaDataStateFormat.STATE_DIR_NAME))) {
+                    for (final Path manifestPath : stateFiles
+                        .filter(p -> p.getFileName().toString().startsWith(Manifest.FORMAT.getPrefix())).collect(Collectors.toList())) {
+                        IOUtils.rm(manifestPath);
+                    }
+                }
+            }
+        });
+
+        ensureGreen();
+
+        assertBusy(() -> assertThat(internalCluster().getInstance(NodeEnvironment.class).availableIndexFolders(), empty()));
+    }
+
     private void writeBrokenMeta(CheckedConsumer<MetaStateService, IOException> writer) throws Exception {
         Map<String, MetaStateService> metaStateServices = Stream.of(internalCluster().getNodeNames())
             .collect(Collectors.toMap(Function.identity(), nodeName -> internalCluster().getInstance(MetaStateService.class, nodeName)));
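The trickiest step in the new test is making a running 7.x node look like it has just been upgraded from 6.x: after writing the doctored global state, it deletes every manifest file from each data path's _state directory. The sketch below is not part of the patch; it is a rough standalone equivalent of that deletion loop using only java.nio, with the directory and file prefix as placeholders (the test derives them from NodeEnvironment and Manifest.FORMAT.getPrefix(), and removes the files with IOUtils.rm).

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class RemoveManifestSketch {
    public static void main(String[] args) throws IOException {
        final Path stateDir = Paths.get("/tmp/node0/_state"); // placeholder for dataPath.resolve(MetaDataStateFormat.STATE_DIR_NAME)
        final String manifestPrefix = "manifest-";            // placeholder for Manifest.FORMAT.getPrefix()
        try (Stream<Path> stateFiles = Files.list(stateDir)) {
            for (final Path manifestPath : stateFiles
                    .filter(p -> p.getFileName().toString().startsWith(manifestPrefix))
                    .collect(Collectors.toList())) {
                Files.delete(manifestPath); // the test uses IOUtils.rm(manifestPath)
            }
        }
    }
}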