Avoid cleaning up metadata files in case of DML operation timeout
When a `CommitFailedException` is thrown while
committing an Iceberg transaction, the Iceberg framework
retries the operation up to `COMMIT_NUM_RETRIES` times
and, if it still fails, cleans up the metadata file
written for the transaction.
After a metastore client operation timeout, the
Iceberg library can therefore delete metadata files
which eventually end up referenced by the table
configuration persisted in the metastore, leaving
the table in a corrupt state.

Throw `CommitStateUnknownException` instead, to ensure
that the table is not left in a corrupt state after a
DML operation whose outcome could not be determined.
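For context, here is a minimal, hedged sketch of the commit handling that motivates the change. It is not the Iceberg library's actual source; `attemptCommit` and `cleanupNewMetadataFile` are hypothetical stand-ins. The point is the asymmetry: a `CommitFailedException` is a definite failure, so it is retried and its metadata file is eventually cleaned up, while a `CommitStateUnknownException` may hide a commit that actually succeeded, so the file must survive.

import org.apache.iceberg.exceptions.CommitFailedException;
import org.apache.iceberg.exceptions.CommitStateUnknownException;

final class CommitRetrySketch
{
    // 4 is the Iceberg default for commit.retry.num-retries
    private static final int COMMIT_NUM_RETRIES = 4;

    static void commit(Runnable attemptCommit, Runnable cleanupNewMetadataFile)
    {
        for (int attempt = 1; attempt <= COMMIT_NUM_RETRIES; attempt++) {
            try {
                attemptCommit.run();
                return; // committed
            }
            catch (CommitFailedException e) {
                // definite failure: safe to retry against refreshed table metadata
            }
            catch (CommitStateUnknownException e) {
                // outcome unknown (e.g. metastore client timeout): the new metadata
                // file may already be referenced by the table, so it must NOT be deleted
                throw e;
            }
        }
        // only definite failures reach this point: the metadata file written for
        // this transaction is known to be unreferenced and can be removed
        cleanupNewMetadataFile.run();
        throw new CommitFailedException("Commit failed after %s retries", COMMIT_NUM_RETRIES);
    }
}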
findinpath authored and findepi committed Sep 16, 2022
1 parent b88d183 commit 15dd728
Showing 4 changed files with 138 additions and 4 deletions.
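The first hunk changes the error code used by the file-based Hive metastore's `replaceTable` (the `FileHiveMetastore` subclassed in the new test) when the Iceberg metadata location check fails, replacing the generic `HIVE_METASTORE_ERROR` with a dedicated code: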
@@ -100,6 +100,7 @@
 import static com.google.common.collect.ImmutableList.toImmutableList;
 import static com.google.common.collect.ImmutableMap.toImmutableMap;
 import static com.google.common.collect.ImmutableSet.toImmutableSet;
+import static io.trino.plugin.hive.HiveErrorCode.HIVE_CONCURRENT_MODIFICATION_DETECTED;
 import static io.trino.plugin.hive.HiveErrorCode.HIVE_METASTORE_ERROR;
 import static io.trino.plugin.hive.HiveMetadata.TABLE_COMMENT;
 import static io.trino.plugin.hive.HivePartitionManager.extractPartitionValues;
@@ -582,7 +583,7 @@ public synchronized void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges)
         }

         if (isIcebergTable(table) && !Objects.equals(table.getParameters().get("metadata_location"), newTable.getParameters().get("previous_metadata_location"))) {
-            throw new TrinoException(HIVE_METASTORE_ERROR, "Cannot update Iceberg table: supplied previous location does not match current location");
+            throw new TrinoException(HIVE_CONCURRENT_MODIFICATION_DETECTED, "Cannot update Iceberg table: supplied previous location does not match current location");
         }

         Path tableMetadataDirectory = getTableMetadataDirectory(table);
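The mismatch check above is effectively an optimistic-locking compare-and-swap on the table's metadata pointer. A self-contained sketch of the idea (illustrative only; `MetadataPointer` is not a Trino class, and the real check compares table parameters persisted in the metastore rather than an in-memory field):

import java.util.Objects;

final class MetadataPointer
{
    // the current metadata location is the single source of truth for the table state
    private String metadataLocation;

    MetadataPointer(String initialLocation)
    {
        this.metadataLocation = initialLocation;
    }

    synchronized void replace(String previousMetadataLocation, String newMetadataLocation)
    {
        // a commit only wins if the table still points at the location it was based on
        if (!Objects.equals(metadataLocation, previousMetadataLocation)) {
            throw new IllegalStateException("Cannot update Iceberg table: supplied previous location does not match current location");
        }
        metadataLocation = newMetadataLocation;
    }
}

With the dedicated error code in place, the file-metastore table operations can separate this definite conflict from every other failure: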
@@ -18,16 +18,19 @@
 import io.trino.plugin.hive.metastore.Table;
 import io.trino.plugin.hive.metastore.cache.CachingHiveMetastore;
 import io.trino.plugin.iceberg.catalog.hms.AbstractMetastoreTableOperations;
+import io.trino.spi.TrinoException;
 import io.trino.spi.connector.ConnectorSession;
 import org.apache.iceberg.TableMetadata;
 import org.apache.iceberg.exceptions.CommitFailedException;
+import org.apache.iceberg.exceptions.CommitStateUnknownException;
 import org.apache.iceberg.io.FileIO;

 import javax.annotation.concurrent.NotThreadSafe;

 import java.util.Optional;

 import static com.google.common.base.Preconditions.checkState;
+import static io.trino.plugin.hive.HiveErrorCode.HIVE_CONCURRENT_MODIFICATION_DETECTED;
 import static io.trino.plugin.hive.metastore.PrincipalPrivileges.NO_PRIVILEGES;
 import static org.apache.iceberg.BaseMetastoreTableOperations.METADATA_LOCATION_PROP;
 import static org.apache.iceberg.BaseMetastoreTableOperations.PREVIOUS_METADATA_LOCATION_PROP;
@@ -76,7 +79,12 @@ protected void commitToExistingTable(TableMetadata base, TableMetadata metadata)
             metastore.replaceTable(database, tableName, table, privileges);
         }
         catch (RuntimeException e) {
-            throw new CommitFailedException(e, "Failed to commit transaction to FileHiveMetastore");
+            if (e instanceof TrinoException trinoException &&
+                    trinoException.getErrorCode() == HIVE_CONCURRENT_MODIFICATION_DETECTED.toErrorCode()) {
+                // CommitFailedException is handled as a special case in the Iceberg library. This commit will automatically retry
+                throw new CommitFailedException(e, "Failed to replace table due to concurrent updates: %s.%s", database, tableName);
+            }
+            throw new CommitStateUnknownException(e);
         }
     }
 }
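The decision here: a `TrinoException` carrying `HIVE_CONCURRENT_MODIFICATION_DETECTED` proves the metastore rejected the location swap, so it is safe to surface a `CommitFailedException` and let Iceberg retry; any other `RuntimeException` may mask a `replaceTable` call that actually went through, so it is wrapped in `CommitStateUnknownException`, which suppresses both the retries and the metadata cleanup. The Hive-metastore-backed table operations below go further and treat every failure as an unknown outcome: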
@@ -23,6 +23,7 @@
 import io.trino.spi.connector.TableNotFoundException;
 import org.apache.iceberg.TableMetadata;
 import org.apache.iceberg.exceptions.CommitFailedException;
+import org.apache.iceberg.exceptions.CommitStateUnknownException;
 import org.apache.iceberg.io.FileIO;

 import javax.annotation.concurrent.NotThreadSafe;
@@ -90,8 +91,9 @@ protected void commitToExistingTable(TableMetadata base, TableMetadata metadata)
                 metastore.replaceTable(database, tableName, table, privileges);
             }
             catch (RuntimeException e) {
-                // CommitFailedException is handled as a special case in the Iceberg library. This commit will automatically retry
-                throw new CommitFailedException(e, "Failed to commit to table %s.%s", database, tableName);
+                // Cannot determine whether the `replaceTable` operation was successful:
+                // the exception thrown (e.g. a timeout exception) does not tell us whether it actually failed
+                throw new CommitStateUnknownException(e);
             }
         }
         finally {
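The new test drives this failure mode end to end. It subclasses `FileHiveMetastore` so that `replaceTable` completes the update and then throws, the way a client would experience a request that succeeded server-side but timed out on the wire: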
@@ -0,0 +1,123 @@
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.trino.plugin.iceberg.catalog.file;

import com.google.common.collect.ImmutableMap;
import io.trino.Session;
import io.trino.plugin.hive.NodeVersion;
import io.trino.plugin.hive.metastore.Database;
import io.trino.plugin.hive.metastore.HiveMetastore;
import io.trino.plugin.hive.metastore.HiveMetastoreConfig;
import io.trino.plugin.hive.metastore.PrincipalPrivileges;
import io.trino.plugin.hive.metastore.Table;
import io.trino.plugin.hive.metastore.file.FileHiveMetastore;
import io.trino.plugin.hive.metastore.file.FileHiveMetastoreConfig;
import io.trino.plugin.iceberg.TestingIcebergConnectorFactory;
import io.trino.spi.security.PrincipalType;
import io.trino.testing.AbstractTestQueryFramework;
import io.trino.testing.LocalQueryRunner;
import org.apache.iceberg.exceptions.CommitStateUnknownException;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;

import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.util.Optional;

import static com.google.common.io.MoreFiles.deleteRecursively;
import static com.google.common.io.RecursiveDeleteOption.ALLOW_INSECURE;
import static com.google.inject.util.Modules.EMPTY_MODULE;
import static io.trino.plugin.hive.HiveTestUtils.HDFS_ENVIRONMENT;
import static io.trino.testing.TestingSession.testSessionBuilder;
import static java.lang.String.format;
import static org.assertj.core.api.Assertions.assertThatThrownBy;

@Test(singleThreaded = true)
public class TestIcebergFileMetastoreTableOperationsInsertFailure
        extends AbstractTestQueryFramework
{
    private static final String ICEBERG_CATALOG = "iceberg";
    private static final String SCHEMA_NAME = "test_schema";
    private File baseDir;

    @Override
    protected LocalQueryRunner createQueryRunner()
    {
        Session session = testSessionBuilder()
                .setCatalog(ICEBERG_CATALOG)
                .setSchema(SCHEMA_NAME)
                .build();

        try {
            baseDir = Files.createTempDirectory(null).toFile();
        }
        catch (IOException e) {
            throw new UncheckedIOException(e);
        }

        HiveMetastore metastore = new FileHiveMetastore(
                new NodeVersion("testversion"),
                HDFS_ENVIRONMENT,
                new HiveMetastoreConfig().isHideDeltaLakeTables(),
                new FileHiveMetastoreConfig()
                        .setCatalogDirectory(baseDir.toURI().toString())
                        .setMetastoreUser("test"))
        {
            @Override
            public synchronized void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges)
            {
                super.replaceTable(databaseName, tableName, newTable, principalPrivileges);
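                // the update has already been applied at this point; the exception below
                // reaches the caller as if the (successful) request had timed out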
                throw new RuntimeException("Test-simulated metastore timeout exception");
            }
        };
        LocalQueryRunner queryRunner = LocalQueryRunner.create(session);

        queryRunner.createCatalog(
                ICEBERG_CATALOG,
                new TestingIcebergConnectorFactory(Optional.of(metastore), Optional.empty(), EMPTY_MODULE),
                ImmutableMap.of());

        Database database = Database.builder()
                .setDatabaseName(SCHEMA_NAME)
                .setOwnerName(Optional.of("public"))
                .setOwnerType(Optional.of(PrincipalType.ROLE))
                .build();
        metastore.createDatabase(database);

        return queryRunner;
    }

    @AfterClass(alwaysRun = true)
    public void cleanup()
            throws Exception
    {
        if (baseDir != null) {
            deleteRecursively(baseDir.toPath(), ALLOW_INSECURE);
        }
    }

    @Test
    public void testInsertFailureDoesNotCorruptTheTableMetadata()
    {
        String tableName = "test_insert_failure";

        getQueryRunner().execute(format("CREATE TABLE %s (a_varchar) AS VALUES ('Trino')", tableName));
        assertThatThrownBy(() -> getQueryRunner().execute("INSERT INTO " + tableName + " VALUES 'rocks'"))
                .isInstanceOf(CommitStateUnknownException.class)
                .hasMessageContaining("Test-simulated metastore timeout exception");
        assertQuery("SELECT * FROM " + tableName, "VALUES 'Trino', 'rocks'");
    }
}
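Because the simulated exception is thrown only after `super.replaceTable()` has completed, the `INSERT` is actually committed even though the query fails with `CommitStateUnknownException`. The closing `assertQuery` therefore expects both rows: the metadata file written by the failed-looking commit was preserved and is now the table's current state.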
