Skip to content

Commit

Permalink
[MINOR] Fix CI issue with TestHiveSyncTool (apache#6110)
Browse files Browse the repository at this point in the history
  • Loading branch information
xushiyan authored Jul 22, 2022
1 parent 41653fc commit d5c904e
Show file tree
Hide file tree
Showing 10 changed files with 102 additions and 152 deletions.
4 changes: 4 additions & 0 deletions azure-pipelines.yml
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,7 @@ stages:
- stage: test
jobs:
- job: UT_FT_1
condition: false
displayName: UT FT common & flink & UT client/spark-client
timeoutInMinutes: '120'
steps:
Expand Down Expand Up @@ -118,6 +119,7 @@ stages:
jdkVersionOption: '1.8'
mavenOptions: '-Xmx4g'
- job: UT_FT_2
condition: false
displayName: FT client/spark-client
timeoutInMinutes: '120'
steps:
Expand Down Expand Up @@ -169,6 +171,7 @@ stages:
jdkVersionOption: '1.8'
mavenOptions: '-Xmx4g'
- job: UT_FT_4
condition: false
displayName: UT FT other modules
timeoutInMinutes: '120'
steps:
Expand Down Expand Up @@ -199,6 +202,7 @@ stages:
jdkVersionOption: '1.8'
mavenOptions: '-Xmx4g'
- job: IT
condition: false
displayName: IT modules
timeoutInMinutes: '120'
steps:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@
import java.util.stream.Stream;

import static org.apache.hudi.hive.HiveSyncConfigHolder.HIVE_URL;
import static org.apache.hudi.hive.testutils.HiveTestService.HS2_JDBC_URL;
import static org.apache.hudi.sync.common.HoodieSyncConfig.META_SYNC_DATABASE_NAME;
import static org.apache.hudi.sync.common.HoodieSyncConfig.META_SYNC_PARTITION_FIELDS;
import static org.apache.hudi.sync.common.HoodieSyncConfig.META_SYNC_TABLE_NAME;
Expand Down Expand Up @@ -180,7 +181,7 @@ private static TypedProperties getProperties() {
// Make path selection test suite specific
props.setProperty("hoodie.deltastreamer.source.input.selector", DFSTestSuitePathSelector.class.getName());
// Hive Configs
props.setProperty(HIVE_URL.key(), "jdbc:hive2://127.0.0.1:9999/");
props.setProperty(HIVE_URL.key(), HS2_JDBC_URL);
props.setProperty(META_SYNC_DATABASE_NAME.key(), "testdb1");
props.setProperty(META_SYNC_TABLE_NAME.key(), "table1");
props.setProperty(META_SYNC_PARTITION_FIELDS.key(), "datestr");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,8 @@ public HiveSyncConfig(Properties props) {

public HiveSyncConfig(Properties props, Configuration hadoopConf) {
super(props, hadoopConf);
HiveConf hiveConf = new HiveConf(hadoopConf, HiveConf.class);
HiveConf hiveConf = hadoopConf instanceof HiveConf
? (HiveConf) hadoopConf : new HiveConf(hadoopConf, HiveConf.class);
// HiveConf needs to load fs conf to allow instantiation via AWSGlueClientFactory
hiveConf.addResource(getHadoopFileSystem().getConf());
setHadoopConf(hiveConf);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@

package org.apache.hudi.hive.replication;

import org.apache.hudi.hive.testutils.TestCluster;
import org.apache.hudi.hive.testutils.HiveTestCluster;

import org.apache.hadoop.fs.Path;
import org.junit.jupiter.api.AfterEach;
Expand Down Expand Up @@ -53,9 +53,9 @@
public class TestHiveSyncGlobalCommitTool {

@RegisterExtension
public static TestCluster localCluster = new TestCluster();
public static HiveTestCluster localCluster = new HiveTestCluster();
@RegisterExtension
public static TestCluster remoteCluster = new TestCluster();
public static HiveTestCluster remoteCluster = new HiveTestCluster();

private static final String DB_NAME = "foo";
private static final String TBL_NAME = "bar";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
Expand All @@ -57,14 +56,14 @@
import org.junit.jupiter.api.extension.BeforeAllCallback;
import org.junit.jupiter.api.extension.BeforeEachCallback;
import org.junit.jupiter.api.extension.ExtensionContext;
import org.junit.runners.model.InitializationError;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoUnit;
Expand All @@ -75,16 +74,15 @@

import static org.junit.jupiter.api.Assertions.fail;

public class TestCluster implements BeforeAllCallback, AfterAllCallback,
BeforeEachCallback, AfterEachCallback {
private HdfsTestService hdfsTestService;
public HiveTestService hiveTestService;
private Configuration conf;
public HiveServer2 server2;
private static volatile int port = 9083;
public class HiveTestCluster implements BeforeAllCallback, AfterAllCallback,
BeforeEachCallback, AfterEachCallback {
public MiniDFSCluster dfsCluster;
DateTimeFormatter dtfOut;
public File hiveSiteXml;
private HdfsTestService hdfsTestService;
private HiveTestService hiveTestService;
private HiveConf conf;
private HiveServer2 server2;
private DateTimeFormatter dtfOut;
private File hiveSiteXml;
private IMetaStoreClient client;

@Override
Expand All @@ -109,24 +107,18 @@ public void setup() throws Exception {
hdfsTestService = new HdfsTestService();
dfsCluster = hdfsTestService.start(true);

conf = hdfsTestService.getHadoopConf();
conf.setInt(ConfVars.METASTORE_SERVER_PORT.varname, port++);
conf.setInt(ConfVars.HIVE_SERVER2_THRIFT_PORT.varname, port++);
conf.setInt(ConfVars.HIVE_SERVER2_WEBUI_PORT.varname, port++);
hiveTestService = new HiveTestService(conf);
Configuration hadoopConf = hdfsTestService.getHadoopConf();
hiveTestService = new HiveTestService(hadoopConf);
server2 = hiveTestService.start();
dtfOut = DateTimeFormatter.ofPattern("yyyy/MM/dd");
hiveSiteXml = File.createTempFile("hive-site", ".xml");
hiveSiteXml.deleteOnExit();
conf = hiveTestService.getHiveConf();
try (OutputStream os = new FileOutputStream(hiveSiteXml)) {
hiveTestService.getServerConf().writeXml(os);
conf.writeXml(os);
}
client = HiveMetaStoreClient.newSynchronizedClient(
RetryingMetaStoreClient.getProxy(hiveTestService.getServerConf(), true));
}

public Configuration getConf() {
return this.conf;
RetryingMetaStoreClient.getProxy(conf, true));
}

public String getHiveSiteXmlLocation() {
Expand All @@ -138,7 +130,7 @@ public IMetaStoreClient getHMSClient() {
}

public String getHiveJdBcUrl() {
return "jdbc:hive2://127.0.0.1:" + conf.get(ConfVars.HIVE_SERVER2_THRIFT_PORT.varname) + "";
return hiveTestService.getJdbcHive2Url();
}

public String tablePath(String dbName, String tableName) throws Exception {
Expand All @@ -151,12 +143,12 @@ private String dbPath(String dbName) throws Exception {

public void forceCreateDb(String dbName) throws Exception {
try {
getHMSClient().dropDatabase(dbName);
} catch (NoSuchObjectException e) {
System.out.println("db does not exist but its ok " + dbName);
client.dropDatabase(dbName);
} catch (NoSuchObjectException ignored) {
// expected
}
Database db = new Database(dbName, "", dbPath(dbName), new HashMap<>());
getHMSClient().createDatabase(db);
client.createDatabase(db);
}

public void createCOWTable(String commitTime, int numberOfPartitions, String dbName, String tableName)
Expand All @@ -169,10 +161,7 @@ public void createCOWTable(String commitTime, int numberOfPartitions, String dbN
.setTableName(tableName)
.setPayloadClass(HoodieAvroPayload.class)
.initTable(conf, path.toString());
boolean result = dfsCluster.getFileSystem().mkdirs(path);
if (!result) {
throw new InitializationError("cannot initialize table");
}
dfsCluster.getFileSystem().mkdirs(path);
ZonedDateTime dateTime = ZonedDateTime.now();
HoodieCommitMetadata commitMetadata = createPartitions(numberOfPartitions, true, dateTime, commitTime, path.toString());
createCommitFile(commitMetadata, commitTime, path.toString());
Expand Down Expand Up @@ -239,7 +228,7 @@ private void generateParquetData(Path filePath, boolean isParquetSchemaSimple)
try {
writer.write(s);
} catch (IOException e) {
fail("IOException while writing test records as parquet" + e.toString());
fail("IOException while writing test records as parquet", e);
}
});
writer.close();
Expand All @@ -259,15 +248,15 @@ public void stopHiveServer2() {
public void startHiveServer2() {
if (server2 == null) {
server2 = new HiveServer2();
server2.init(hiveTestService.getServerConf());
server2.init(hiveTestService.getHiveConf());
server2.start();
}
}

public void shutDown() {
stopHiveServer2();
public void shutDown() throws IOException {
Files.deleteIfExists(hiveSiteXml.toPath());
Hive.closeCurrent();
hiveTestService.getHiveMetaStore().stop();
hiveTestService.stop();
hdfsTestService.stop();
}
}
Loading

0 comments on commit d5c904e

Please sign in to comment.