Rename 'insert.batch-size' configuration property to 'write.batch-size'

Sergey Melnychuk authored and hashhar committed Jul 23, 2021
1 parent e2ca8f5 commit a71e032
Showing 5 changed files with 37 additions and 37 deletions.
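The renamed property flows from a catalog properties file into JdbcMetadataConfig through Airlift's config binder, which matches property names against @Config annotations; that is why the rename touches the annotation, the accessors, the session property, and the tests together. A minimal sketch of that wiring, assuming a standard Airlift/Guice module (the module class below is illustrative and is not one of the files changed in this commit):

```java
import com.google.inject.Binder;
import com.google.inject.Module;

import static io.airlift.configuration.ConfigBinder.configBinder;

public class ExampleJdbcModule
        implements Module
{
    @Override
    public void configure(Binder binder)
    {
        // Binding the config class lets a catalog properties entry such as
        // `write.batch-size=10000` (formerly `insert.batch-size`) be injected
        // through the @Config("write.batch-size") setter changed in this commit.
        configBinder(binder).bindConfig(JdbcMetadataConfig.class);
    }
}
```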
File 1 of 5: io/trino/plugin/jdbc/JdbcMetadataConfig.java

```diff
@@ -22,7 +22,7 @@
 
 public class JdbcMetadataConfig
 {
-    static final int MAX_ALLOWED_INSERT_BATCH_SIZE = 1_000_000;
+    static final int MAX_ALLOWED_WRITE_BATCH_SIZE = 1_000_000;
 
     private boolean allowDropTable;
     /*
@@ -43,7 +43,7 @@ public class JdbcMetadataConfig
     // between performance and pushdown capabilities
     private int domainCompactionThreshold = 32;
 
-    private int insertBatchSize = 1000;
+    private int writeBatchSize = 1000;
 
     // Do not create temporary table during insert.
     // This means that the write operation can fail and leave the table in an inconsistent state.
@@ -118,17 +118,17 @@ public JdbcMetadataConfig setDomainCompactionThreshold(int domainCompactionThreshold)
     }
 
     @Min(1)
-    @Max(MAX_ALLOWED_INSERT_BATCH_SIZE)
-    public int getInsertBatchSize()
+    @Max(MAX_ALLOWED_WRITE_BATCH_SIZE)
+    public int getWriteBatchSize()
     {
-        return insertBatchSize;
+        return writeBatchSize;
     }
 
-    @Config("insert.batch-size")
-    @ConfigDescription("Maximum number of rows to insert in a single batch")
-    public JdbcMetadataConfig setInsertBatchSize(int insertBatchSize)
+    @Config("write.batch-size")
+    @ConfigDescription("Maximum number of rows to write in a single batch")
+    public JdbcMetadataConfig setWriteBatchSize(int writeBatchSize)
     {
-        this.insertBatchSize = insertBatchSize;
+        this.writeBatchSize = writeBatchSize;
         return this;
     }
```
File 2 of 5: io/trino/plugin/jdbc/JdbcMetadataSessionProperties.java

```diff
@@ -24,7 +24,7 @@
 import java.util.List;
 import java.util.Optional;
 
-import static io.trino.plugin.jdbc.JdbcMetadataConfig.MAX_ALLOWED_INSERT_BATCH_SIZE;
+import static io.trino.plugin.jdbc.JdbcMetadataConfig.MAX_ALLOWED_WRITE_BATCH_SIZE;
 import static io.trino.spi.StandardErrorCode.INVALID_SESSION_PROPERTY;
 import static io.trino.spi.session.PropertyMetadata.booleanProperty;
 import static io.trino.spi.session.PropertyMetadata.integerProperty;
@@ -37,7 +37,7 @@ public class JdbcMetadataSessionProperties
     public static final String AGGREGATION_PUSHDOWN_ENABLED = "aggregation_pushdown_enabled";
     public static final String TOPN_PUSHDOWN_ENABLED = "topn_pushdown_enabled";
     public static final String DOMAIN_COMPACTION_THRESHOLD = "domain_compaction_threshold";
-    public static final String INSERT_BATCH_SIZE = "insert_batch_size";
+    public static final String WRITE_BATCH_SIZE = "write_batch_size";
     public static final String NON_TRANSACTIONAL_INSERT = "non_transactional_insert";
 
     private final List<PropertyMetadata<?>> properties;
@@ -69,10 +69,10 @@ public JdbcMetadataSessionProperties(JdbcMetadataConfig jdbcMetadataConfig, @Max
                         jdbcMetadataConfig.isTopNPushdownEnabled(),
                         false))
                 .add(integerProperty(
-                        INSERT_BATCH_SIZE,
-                        "Insert batch size",
-                        jdbcMetadataConfig.getInsertBatchSize(),
-                        value -> validateInsertBatchSize(value, MAX_ALLOWED_INSERT_BATCH_SIZE),
+                        WRITE_BATCH_SIZE,
+                        "Maximum number of rows to write in a single batch",
+                        jdbcMetadataConfig.getWriteBatchSize(),
+                        JdbcMetadataSessionProperties::validateWriteBatchSize,
                         false))
                 .add(booleanProperty(
                         NON_TRANSACTIONAL_INSERT,
@@ -108,9 +108,9 @@ public static int getDomainCompactionThreshold(ConnectorSession session)
         return session.getProperty(DOMAIN_COMPACTION_THRESHOLD, Integer.class);
     }
 
-    public static int getInsertBatchSize(ConnectorSession session)
+    public static int getWriteBatchSize(ConnectorSession session)
     {
-        return session.getProperty(INSERT_BATCH_SIZE, Integer.class);
+        return session.getProperty(WRITE_BATCH_SIZE, Integer.class);
     }
 
     public static boolean isNonTransactionalInsert(ConnectorSession session)
@@ -131,13 +131,13 @@ private static void validateDomainCompactionThreshold(int domainCompactionThreshold)
         });
     }
 
-    private static void validateInsertBatchSize(int maxBatchSize, int maxAllowedBatchSize)
+    private static void validateWriteBatchSize(int maxBatchSize)
     {
         if (maxBatchSize < 1) {
-            throw new TrinoException(INVALID_SESSION_PROPERTY, format("%s must be greater than 0: %s", INSERT_BATCH_SIZE, maxBatchSize));
+            throw new TrinoException(INVALID_SESSION_PROPERTY, format("%s must be greater than 0: %s", WRITE_BATCH_SIZE, maxBatchSize));
         }
-        if (maxBatchSize > maxAllowedBatchSize) {
-            throw new TrinoException(INVALID_SESSION_PROPERTY, format("%s cannot exceed %s: %s", INSERT_BATCH_SIZE, maxAllowedBatchSize, maxBatchSize));
+        if (maxBatchSize > MAX_ALLOWED_WRITE_BATCH_SIZE) {
+            throw new TrinoException(INVALID_SESSION_PROPERTY, format("%s cannot exceed %s: %s", WRITE_BATCH_SIZE, MAX_ALLOWED_WRITE_BATCH_SIZE, maxBatchSize));
         }
     }
 }
```
File 3 of 5: io/trino/plugin/jdbc/JdbcPageSink.java

```diff
@@ -35,7 +35,7 @@
 import static com.google.common.collect.ImmutableList.toImmutableList;
 import static io.trino.plugin.jdbc.JdbcErrorCode.JDBC_ERROR;
 import static io.trino.plugin.jdbc.JdbcErrorCode.JDBC_NON_TRANSIENT_ERROR;
-import static io.trino.plugin.jdbc.JdbcMetadataSessionProperties.getInsertBatchSize;
+import static io.trino.plugin.jdbc.JdbcMetadataSessionProperties.getWriteBatchSize;
 import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED;
 import static java.util.concurrent.CompletableFuture.completedFuture;
 
@@ -106,7 +106,7 @@ public JdbcPageSink(ConnectorSession session, JdbcOutputTableHandle handle, Jdbc
             throw new TrinoException(JDBC_ERROR, e);
         }
 
-        this.maxBatchSize = getInsertBatchSize(session);
+        this.maxBatchSize = getWriteBatchSize(session);
     }
 
     @Override
```
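The `maxBatchSize` read in the constructor controls how many rows the sink buffers before issuing one JDBC batch. The body of `appendPage` is outside this diff; the following is a rough sketch of the mechanism, assuming a `PreparedStatement statement` field and a `batchSize` row counter along the lines of what the real class keeps:

```java
@Override
public CompletableFuture<?> appendPage(Page page)
{
    try {
        for (int position = 0; position < page.getPositionCount(); position++) {
            // bind each column value of this row to the prepared statement here
            statement.addBatch();
            batchSize++;
            // flush one full batch per round trip once the configured size is reached
            if (batchSize >= maxBatchSize) {
                statement.executeBatch();
                batchSize = 0;
            }
        }
    }
    catch (SQLException e) {
        throw new TrinoException(JDBC_ERROR, e);
    }
    return completedFuture(null);
}
```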
File 4 of 5: base JDBC connector test

```diff
@@ -1281,18 +1281,18 @@ public void testInsertWithoutTemporaryTable()
     }
 
     @Test(dataProvider = "batchSizeAndTotalNumberOfRowsToInsertDataProvider")
-    public void testInsertBatchSizeSessionProperty(Integer batchSize, Integer numberOfRows)
+    public void testWriteBatchSizeSessionProperty(Integer batchSize, Integer numberOfRows)
     {
         if (!hasBehavior(SUPPORTS_CREATE_TABLE)) {
-            throw new SkipException("CREATE TABLE is required for insert_batch_size test but is not supported");
+            throw new SkipException("CREATE TABLE is required for write_batch_size test but is not supported");
         }
         Session session = Session.builder(getSession())
-                .setCatalogSessionProperty(getSession().getCatalog().orElseThrow(), "insert_batch_size", batchSize.toString())
+                .setCatalogSessionProperty(getSession().getCatalog().orElseThrow(), "write_batch_size", batchSize.toString())
                 .build();
 
         try (TestTable table = new TestTable(
                 getQueryRunner()::execute,
-                "test_insert_batch_size",
+                "test_write_batch_size",
                 "(a varchar(36), b bigint)")) {
             String values = String.join(",", buildRowsForInsert(numberOfRows));
             assertUpdate(session, "INSERT INTO " + table.getName() + " (a, b) VALUES " + values, numberOfRows);
```
File 5 of 5: JdbcMetadataConfig mapping and validation test

```diff
@@ -36,7 +36,7 @@ public void testDefaults()
                 .setAggregationPushdownEnabled(true)
                 .setTopNPushdownEnabled(true)
                 .setDomainCompactionThreshold(32)
-                .setInsertBatchSize(1000)
+                .setWriteBatchSize(1000)
                 .setNonTransactionalInsert(false));
     }
 
@@ -49,7 +49,7 @@ public void testExplicitPropertyMappings()
                 .put("aggregation-pushdown.enabled", "false")
                 .put("domain-compaction-threshold", "42")
                 .put("topn-pushdown.enabled", "false")
-                .put("insert.batch-size", "24")
+                .put("write.batch-size", "24")
                 .put("insert.non-transactional-insert.enabled", "true")
                 .build();
 
@@ -59,25 +59,25 @@ public void testExplicitPropertyMappings()
                 .setAggregationPushdownEnabled(false)
                 .setTopNPushdownEnabled(false)
                 .setDomainCompactionThreshold(42)
-                .setInsertBatchSize(24)
+                .setWriteBatchSize(24)
                 .setNonTransactionalInsert(true);
 
         assertFullMapping(properties, expected);
     }
 
     @Test
-    public void testInsertBatchSizeValidation()
+    public void testWriteBatchSizeValidation()
     {
-        assertThatThrownBy(() -> makeConfig(ImmutableMap.of("insert.batch-size", "-42")))
-                .hasMessageContaining("insert.batch-size: must be greater than or equal to 1");
+        assertThatThrownBy(() -> makeConfig(ImmutableMap.of("write.batch-size", "-42")))
+                .hasMessageContaining("write.batch-size: must be greater than or equal to 1");
 
-        assertThatThrownBy(() -> makeConfig(ImmutableMap.of("insert.batch-size", "0")))
-                .hasMessageContaining("insert.batch-size: must be greater than or equal to 1");
+        assertThatThrownBy(() -> makeConfig(ImmutableMap.of("write.batch-size", "0")))
+                .hasMessageContaining("write.batch-size: must be greater than or equal to 1");
 
-        assertThatCode(() -> makeConfig(ImmutableMap.of("insert.batch-size", "1")))
+        assertThatCode(() -> makeConfig(ImmutableMap.of("write.batch-size", "1")))
                 .doesNotThrowAnyException();
 
-        assertThatCode(() -> makeConfig(ImmutableMap.of("insert.batch-size", "42")))
+        assertThatCode(() -> makeConfig(ImmutableMap.of("write.batch-size", "42")))
                 .doesNotThrowAnyException();
     }
```
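With the rename in place, clients override the batch size per session under the new name. A hypothetical usage through the Trino JDBC driver (server URL, user, catalog, and table names are all made up for the example):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class WriteBatchSizeSessionExample
{
    public static void main(String[] args)
            throws SQLException
    {
        try (Connection connection = DriverManager.getConnection("jdbc:trino://localhost:8080", "admin", null);
                Statement statement = connection.createStatement()) {
            // Session-scoped override of the renamed property (was insert_batch_size)
            statement.execute("SET SESSION example.write_batch_size = 10000");
            statement.executeUpdate("INSERT INTO example.public.target SELECT * FROM example.public.source");
        }
    }
}
```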
