From a7e14d39fbf76c90dd701bb23cf9066c5beaae86 Mon Sep 17 00:00:00 2001 From: Y Ethan Guo Date: Thu, 18 Aug 2022 11:40:15 -0700 Subject: [PATCH] [MINOR] Improve code style of CLI Command classes (#6427) --- .../cli/commands/ArchivedCommitsCommand.java | 4 +- .../hudi/cli/commands/BootstrapCommand.java | 5 +- .../hudi/cli/commands/CleansCommand.java | 24 ++-- .../hudi/cli/commands/ClusteringCommand.java | 2 + .../hudi/cli/commands/CommitsCommand.java | 117 +++++++++--------- .../hudi/cli/commands/CompactionCommand.java | 99 +++++++-------- .../hudi/cli/commands/ExportCommand.java | 13 +- .../cli/commands/FileSystemViewCommand.java | 12 +- .../commands/HDFSParquetImportCommand.java | 6 +- .../hudi/cli/commands/MarkersCommand.java | 4 +- .../hudi/cli/commands/MetadataCommand.java | 2 +- .../hudi/cli/commands/RepairsCommand.java | 11 +- .../hudi/cli/commands/RollbacksCommand.java | 22 ++-- .../hudi/cli/commands/SavepointsCommand.java | 7 +- .../hudi/cli/commands/SparkEnvCommand.java | 6 +- .../apache/hudi/cli/commands/SparkMain.java | 38 +++--- .../hudi/cli/commands/TableCommand.java | 27 ++-- .../hudi/cli/commands/TempViewCommand.java | 6 +- .../hudi/cli/commands/UtilsCommand.java | 1 + .../commands/TestArchivedCommitsCommand.java | 2 +- .../hudi/cli/commands/TestCommitsCommand.java | 2 +- .../cli/commands/TestCompactionCommand.java | 2 +- .../commands/TestFileSystemViewCommand.java | 11 +- .../commands/TestHoodieLogFileCommand.java | 6 +- .../cli/commands/TestSavepointsCommand.java | 2 +- 25 files changed, 219 insertions(+), 212 deletions(-) diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/ArchivedCommitsCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/ArchivedCommitsCommand.java index fcb273f0a73bd..337d6e2a305c6 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/ArchivedCommitsCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/ArchivedCommitsCommand.java @@ -200,12 +200,12 @@ private Comparable[] readCommit(GenericRecord record, boolean skipMetadata) { case HoodieTimeline.COMPACTION_ACTION: return commitDetail(record, "hoodieCompactionMetadata", skipMetadata); default: { - return new Comparable[]{}; + return new Comparable[] {}; } } } catch (Exception e) { e.printStackTrace(); - return new Comparable[]{}; + return new Comparable[] {}; } } } diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/BootstrapCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/BootstrapCommand.java index 015743d2f299f..f4ef55943cdf4 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/BootstrapCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/BootstrapCommand.java @@ -40,8 +40,8 @@ import java.io.IOException; import java.net.URISyntaxException; -import java.util.Arrays; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.stream.Collectors; @@ -113,8 +113,7 @@ public String showBootstrapIndexMapping( @CliOption(key = {"limit"}, unspecifiedDefaultValue = "-1", help = "Limit rows to be displayed") Integer limit, @CliOption(key = {"sortBy"}, unspecifiedDefaultValue = "", help = "Sorting Field") final String sortByField, @CliOption(key = {"desc"}, unspecifiedDefaultValue = "false", help = "Ordering") final boolean descending, - @CliOption(key = {"headeronly"}, unspecifiedDefaultValue = "false", help = "Print Header Only") - final boolean headerOnly) { + @CliOption(key = {"headeronly"}, unspecifiedDefaultValue = "false", help = 
"Print Header Only") final boolean headerOnly) { if (partitionPath.isEmpty() && !fileIds.isEmpty()) { throw new IllegalStateException("PartitionPath is mandatory when passing fileIds."); diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CleansCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CleansCommand.java index 4924eaacbf46b..4e827dc562c4a 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CleansCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CleansCommand.java @@ -71,9 +71,9 @@ public String showCleans( List rows = new ArrayList<>(); for (HoodieInstant clean : cleans) { HoodieCleanMetadata cleanMetadata = - TimelineMetadataUtils.deserializeHoodieCleanMetadata(timeline.getInstantDetails(clean).get()); - rows.add(new Comparable[]{clean.getTimestamp(), cleanMetadata.getEarliestCommitToRetain(), - cleanMetadata.getTotalFilesDeleted(), cleanMetadata.getTimeTakenInMillis()}); + TimelineMetadataUtils.deserializeHoodieCleanMetadata(timeline.getInstantDetails(clean).get()); + rows.add(new Comparable[] {clean.getTimestamp(), cleanMetadata.getEarliestCommitToRetain(), + cleanMetadata.getTotalFilesDeleted(), cleanMetadata.getTimeTakenInMillis()}); } TableHeader header = @@ -85,7 +85,8 @@ public String showCleans( } @CliCommand(value = "clean showpartitions", help = "Show partition level details of a clean") - public String showCleanPartitions(@CliOption(key = {"clean"}, help = "clean to show") final String instantTime, + public String showCleanPartitions( + @CliOption(key = {"clean"}, help = "clean to show") final String instantTime, @CliOption(key = {"limit"}, help = "Limit commits", unspecifiedDefaultValue = "-1") final Integer limit, @CliOption(key = {"sortBy"}, help = "Sorting Field", unspecifiedDefaultValue = "") final String sortByField, @CliOption(key = {"desc"}, help = "Ordering", unspecifiedDefaultValue = "false") final boolean descending, @@ -122,13 +123,14 @@ public String showCleanPartitions(@CliOption(key = {"clean"}, help = "clean to s } @CliCommand(value = "cleans run", help = "run clean") - public String runClean(@CliOption(key = "sparkMemory", unspecifiedDefaultValue = "4G", - help = "Spark executor memory") final String sparkMemory, - @CliOption(key = "propsFilePath", help = "path to properties file on localfs or dfs with configurations for hoodie client for cleaning", - unspecifiedDefaultValue = "") final String propsFilePath, - @CliOption(key = "hoodieConfigs", help = "Any configuration that can be set in the properties file can be passed here in the form of an array", - unspecifiedDefaultValue = "") final String[] configs, - @CliOption(key = "sparkMaster", unspecifiedDefaultValue = "", help = "Spark Master ") String master) throws IOException, InterruptedException, URISyntaxException { + public String runClean( + @CliOption(key = "sparkMemory", unspecifiedDefaultValue = "4G", + help = "Spark executor memory") final String sparkMemory, + @CliOption(key = "propsFilePath", help = "path to properties file on localfs or dfs with configurations for hoodie client for cleaning", + unspecifiedDefaultValue = "") final String propsFilePath, + @CliOption(key = "hoodieConfigs", help = "Any configuration that can be set in the properties file can be passed here in the form of an array", + unspecifiedDefaultValue = "") final String[] configs, + @CliOption(key = "sparkMaster", unspecifiedDefaultValue = "", help = "Spark Master ") String master) throws IOException, InterruptedException, URISyntaxException { boolean 
initialized = HoodieCLI.initConf(); HoodieCLI.initFS(initialized); HoodieTableMetaClient metaClient = HoodieCLI.getTableMetaClient(); diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/ClusteringCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/ClusteringCommand.java index 4163f0cb5a6a4..8b2a95b55729e 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/ClusteringCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/ClusteringCommand.java @@ -25,6 +25,7 @@ import org.apache.hudi.common.table.HoodieTableMetaClient; import org.apache.hudi.common.table.timeline.HoodieActiveTimeline; import org.apache.hudi.utilities.UtilHelpers; + import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import org.apache.spark.launcher.SparkLauncher; @@ -33,6 +34,7 @@ import org.springframework.shell.core.annotation.CliCommand; import org.springframework.shell.core.annotation.CliOption; import org.springframework.stereotype.Component; + import scala.collection.JavaConverters; @Component diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CommitsCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CommitsCommand.java index b2b6940bef3cd..7c74110b36a24 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CommitsCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CommitsCommand.java @@ -68,23 +68,23 @@ private String printCommits(HoodieDefaultTimeline timeline, final List rows = new ArrayList<>(); final List commits = timeline.getCommitsTimeline().filterCompletedInstants() - .getInstants().collect(Collectors.toList()); + .getInstants().collect(Collectors.toList()); // timeline can be read from multiple files. So sort is needed instead of reversing the collection Collections.sort(commits, HoodieInstant.COMPARATOR.reversed()); for (int i = 0; i < commits.size(); i++) { final HoodieInstant commit = commits.get(i); final HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes( - timeline.getInstantDetails(commit).get(), - HoodieCommitMetadata.class); - rows.add(new Comparable[]{commit.getTimestamp(), - commitMetadata.fetchTotalBytesWritten(), - commitMetadata.fetchTotalFilesInsert(), - commitMetadata.fetchTotalFilesUpdated(), - commitMetadata.fetchTotalPartitionsWritten(), - commitMetadata.fetchTotalRecordsWritten(), - commitMetadata.fetchTotalUpdateRecordsWritten(), - commitMetadata.fetchTotalWriteErrors()}); + timeline.getInstantDetails(commit).get(), + HoodieCommitMetadata.class); + rows.add(new Comparable[] {commit.getTimestamp(), + commitMetadata.fetchTotalBytesWritten(), + commitMetadata.fetchTotalFilesInsert(), + commitMetadata.fetchTotalFilesUpdated(), + commitMetadata.fetchTotalPartitionsWritten(), + commitMetadata.fetchTotalRecordsWritten(), + commitMetadata.fetchTotalUpdateRecordsWritten(), + commitMetadata.fetchTotalWriteErrors()}); } final Map> fieldNameToConverterMap = new HashMap<>(); @@ -93,47 +93,47 @@ private String printCommits(HoodieDefaultTimeline timeline, }); final TableHeader header = new TableHeader() - .addTableHeaderField(HoodieTableHeaderFields.HEADER_COMMIT_TIME) - .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_BYTES_WRITTEN) - .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_ADDED) - .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_UPDATED) - .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_PARTITIONS_WRITTEN) - .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_RECORDS_WRITTEN) - 
.addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_UPDATE_RECORDS_WRITTEN) - .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_ERRORS); + .addTableHeaderField(HoodieTableHeaderFields.HEADER_COMMIT_TIME) + .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_BYTES_WRITTEN) + .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_ADDED) + .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_UPDATED) + .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_PARTITIONS_WRITTEN) + .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_RECORDS_WRITTEN) + .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_UPDATE_RECORDS_WRITTEN) + .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_ERRORS); return HoodiePrintHelper.print(header, fieldNameToConverterMap, sortByField, descending, - limit, headerOnly, rows, tempTableName); + limit, headerOnly, rows, tempTableName); } private String printCommitsWithMetadata(HoodieDefaultTimeline timeline, - final Integer limit, final String sortByField, - final boolean descending, - final boolean headerOnly, - final String tempTableName) throws IOException { + final Integer limit, final String sortByField, + final boolean descending, + final boolean headerOnly, + final String tempTableName) throws IOException { final List rows = new ArrayList<>(); final List commits = timeline.getCommitsTimeline().filterCompletedInstants() - .getInstants().collect(Collectors.toList()); + .getInstants().collect(Collectors.toList()); // timeline can be read from multiple files. So sort is needed instead of reversing the collection Collections.sort(commits, HoodieInstant.COMPARATOR.reversed()); for (int i = 0; i < commits.size(); i++) { final HoodieInstant commit = commits.get(i); final HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes( - timeline.getInstantDetails(commit).get(), - HoodieCommitMetadata.class); + timeline.getInstantDetails(commit).get(), + HoodieCommitMetadata.class); for (Map.Entry> partitionWriteStat : - commitMetadata.getPartitionToWriteStats().entrySet()) { + commitMetadata.getPartitionToWriteStats().entrySet()) { for (HoodieWriteStat hoodieWriteStat : partitionWriteStat.getValue()) { - rows.add(new Comparable[]{ commit.getAction(), commit.getTimestamp(), hoodieWriteStat.getPartitionPath(), - hoodieWriteStat.getFileId(), hoodieWriteStat.getPrevCommit(), hoodieWriteStat.getNumWrites(), - hoodieWriteStat.getNumInserts(), hoodieWriteStat.getNumDeletes(), - hoodieWriteStat.getNumUpdateWrites(), hoodieWriteStat.getTotalWriteErrors(), - hoodieWriteStat.getTotalLogBlocks(), hoodieWriteStat.getTotalCorruptLogBlock(), - hoodieWriteStat.getTotalRollbackBlocks(), hoodieWriteStat.getTotalLogRecords(), - hoodieWriteStat.getTotalUpdatedRecordsCompacted(), hoodieWriteStat.getTotalWriteBytes() + rows.add(new Comparable[] {commit.getAction(), commit.getTimestamp(), hoodieWriteStat.getPartitionPath(), + hoodieWriteStat.getFileId(), hoodieWriteStat.getPrevCommit(), hoodieWriteStat.getNumWrites(), + hoodieWriteStat.getNumInserts(), hoodieWriteStat.getNumDeletes(), + hoodieWriteStat.getNumUpdateWrites(), hoodieWriteStat.getTotalWriteErrors(), + hoodieWriteStat.getTotalLogBlocks(), hoodieWriteStat.getTotalCorruptLogBlock(), + hoodieWriteStat.getTotalRollbackBlocks(), hoodieWriteStat.getTotalLogRecords(), + hoodieWriteStat.getTotalUpdatedRecordsCompacted(), hoodieWriteStat.getTotalWriteBytes() }); } } @@ -162,7 +162,7 @@ private String printCommitsWithMetadata(HoodieDefaultTimeline timeline, 
.addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_BYTES_WRITTEN); return HoodiePrintHelper.print(header, new HashMap<>(), sortByField, descending, - limit, headerOnly, rows, tempTableName); + limit, headerOnly, rows, tempTableName); } @CliCommand(value = "commits show", help = "Show the commits") @@ -182,30 +182,26 @@ public String showCommits( HoodieActiveTimeline activeTimeline = HoodieCLI.getTableMetaClient().getActiveTimeline(); if (includeExtraMetadata) { return printCommitsWithMetadata(activeTimeline, limit, sortByField, descending, headerOnly, exportTableName); - } else { + } else { return printCommits(activeTimeline, limit, sortByField, descending, headerOnly, exportTableName); } } @CliCommand(value = "commits showarchived", help = "Show the archived commits") public String showArchivedCommits( - @CliOption(key = {"includeExtraMetadata"}, help = "Include extra metadata", - unspecifiedDefaultValue = "false") final boolean includeExtraMetadata, - @CliOption(key = {"createView"}, mandatory = false, help = "view name to store output table", - unspecifiedDefaultValue = "") final String exportTableName, - @CliOption(key = {"startTs"}, mandatory = false, help = "start time for commits, default: now - 10 days") + @CliOption(key = {"includeExtraMetadata"}, help = "Include extra metadata", + unspecifiedDefaultValue = "false") final boolean includeExtraMetadata, + @CliOption(key = {"createView"}, mandatory = false, help = "view name to store output table", + unspecifiedDefaultValue = "") final String exportTableName, + @CliOption(key = {"startTs"}, mandatory = false, help = "start time for commits, default: now - 10 days") String startTs, - @CliOption(key = {"endTs"}, mandatory = false, help = "end time for commits, default: now - 1 day") + @CliOption(key = {"endTs"}, mandatory = false, help = "end time for commits, default: now - 1 day") String endTs, - @CliOption(key = {"limit"}, mandatory = false, help = "Limit commits", unspecifiedDefaultValue = "-1") - final Integer limit, - @CliOption(key = {"sortBy"}, help = "Sorting Field", unspecifiedDefaultValue = "") - final String sortByField, - @CliOption(key = {"desc"}, help = "Ordering", unspecifiedDefaultValue = "false") - final boolean descending, - @CliOption(key = {"headeronly"}, help = "Print Header Only", unspecifiedDefaultValue = "false") - final boolean headerOnly) - throws IOException { + @CliOption(key = {"limit"}, mandatory = false, help = "Limit commits", unspecifiedDefaultValue = "-1") final Integer limit, + @CliOption(key = {"sortBy"}, help = "Sorting Field", unspecifiedDefaultValue = "") final String sortByField, + @CliOption(key = {"desc"}, help = "Ordering", unspecifiedDefaultValue = "false") final boolean descending, + @CliOption(key = {"headeronly"}, help = "Print Header Only", unspecifiedDefaultValue = "false") final boolean headerOnly) + throws IOException { if (StringUtils.isNullOrEmpty(startTs)) { startTs = CommitUtil.getTimeDaysAgo(10); } @@ -218,7 +214,7 @@ public String showArchivedCommits( HoodieDefaultTimeline timelineRange = archivedTimeline.findInstantsInRange(startTs, endTs); if (includeExtraMetadata) { return printCommitsWithMetadata(timelineRange, limit, sortByField, descending, headerOnly, exportTableName); - } else { + } else { return printCommits(timelineRange, limit, sortByField, descending, headerOnly, exportTableName); } } finally { @@ -228,13 +224,14 @@ public String showArchivedCommits( } @CliCommand(value = "commit rollback", help = "Rollback a commit") - public String 
rollbackCommit(@CliOption(key = {"commit"}, help = "Commit to rollback") final String instantTime, + public String rollbackCommit( + @CliOption(key = {"commit"}, help = "Commit to rollback") final String instantTime, @CliOption(key = {"sparkProperties"}, help = "Spark Properties File Path") final String sparkPropertiesPath, @CliOption(key = "sparkMaster", unspecifiedDefaultValue = "", help = "Spark Master") String master, @CliOption(key = "sparkMemory", unspecifiedDefaultValue = "4G", - help = "Spark executor memory") final String sparkMemory, + help = "Spark executor memory") final String sparkMemory, @CliOption(key = "rollbackUsingMarkers", unspecifiedDefaultValue = "false", - help = "Enabling marker based rollback") final String rollbackUsingMarkers) + help = "Enabling marker based rollback") final String rollbackUsingMarkers) throws Exception { HoodieActiveTimeline activeTimeline = HoodieCLI.getTableMetaClient().getActiveTimeline(); HoodieTimeline completedTimeline = activeTimeline.getCommitsTimeline().filterCompletedInstants(); @@ -455,9 +452,9 @@ public String syncCommits(@CliOption(key = {"path"}, help = "Path of the table t * */ private Option getCommitForInstant(HoodieTimeline timeline, String instantTime) throws IOException { List instants = Arrays.asList( - new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, instantTime), - new HoodieInstant(false, HoodieTimeline.REPLACE_COMMIT_ACTION, instantTime), - new HoodieInstant(false, HoodieTimeline.DELTA_COMMIT_ACTION, instantTime)); + new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, instantTime), + new HoodieInstant(false, HoodieTimeline.REPLACE_COMMIT_ACTION, instantTime), + new HoodieInstant(false, HoodieTimeline.DELTA_COMMIT_ACTION, instantTime)); Option hoodieInstant = Option.fromJavaOptional(instants.stream().filter(timeline::containsInstant).findAny()); @@ -471,7 +468,7 @@ private Option getHoodieCommitMetadata(HoodieTimeline time HoodieReplaceCommitMetadata.class)); } return Option.of(HoodieCommitMetadata.fromBytes(timeline.getInstantDetails(hoodieInstant.get()).get(), - HoodieCommitMetadata.class)); + HoodieCommitMetadata.class)); } return Option.empty(); diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java index d3845137c8e23..c979e1840f28b 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java @@ -102,8 +102,8 @@ public String compactionsAll( HoodieTableMetaClient client = checkAndGetMetaClient(); HoodieActiveTimeline activeTimeline = client.getActiveTimeline(); return printAllCompactions(activeTimeline, - compactionPlanReader(this::readCompactionPlanForActiveTimeline, activeTimeline), - includeExtraMetadata, sortByField, descending, limit, headerOnly); + compactionPlanReader(this::readCompactionPlanForActiveTimeline, activeTimeline), + includeExtraMetadata, sortByField, descending, limit, headerOnly); } @CliCommand(value = "compaction show", help = "Shows compaction details for a specific compaction instant") @@ -128,18 +128,18 @@ public String compactionShow( @CliCommand(value = "compactions showarchived", help = "Shows compaction details for specified time window") public String compactionsShowArchived( - @CliOption(key = {"includeExtraMetadata"}, help = "Include extra metadata", - unspecifiedDefaultValue = "false") final boolean includeExtraMetadata, - @CliOption(key = {"startTs"}, mandatory 
= false, help = "start time for compactions, default: now - 10 days") - String startTs, - @CliOption(key = {"endTs"}, mandatory = false, help = "end time for compactions, default: now - 1 day") - String endTs, - @CliOption(key = {"limit"}, help = "Limit compactions", - unspecifiedDefaultValue = "-1") final Integer limit, - @CliOption(key = {"sortBy"}, help = "Sorting Field", unspecifiedDefaultValue = "") final String sortByField, - @CliOption(key = {"desc"}, help = "Ordering", unspecifiedDefaultValue = "false") final boolean descending, - @CliOption(key = {"headeronly"}, help = "Print Header Only", - unspecifiedDefaultValue = "false") final boolean headerOnly) { + @CliOption(key = {"includeExtraMetadata"}, help = "Include extra metadata", + unspecifiedDefaultValue = "false") final boolean includeExtraMetadata, + @CliOption(key = {"startTs"}, mandatory = false, help = "start time for compactions, default: now - 10 days") + String startTs, + @CliOption(key = {"endTs"}, mandatory = false, help = "end time for compactions, default: now - 1 day") + String endTs, + @CliOption(key = {"limit"}, help = "Limit compactions", + unspecifiedDefaultValue = "-1") final Integer limit, + @CliOption(key = {"sortBy"}, help = "Sorting Field", unspecifiedDefaultValue = "") final String sortByField, + @CliOption(key = {"desc"}, help = "Ordering", unspecifiedDefaultValue = "false") final boolean descending, + @CliOption(key = {"headeronly"}, help = "Print Header Only", + unspecifiedDefaultValue = "false") final boolean headerOnly) { if (StringUtils.isNullOrEmpty(startTs)) { startTs = CommitUtil.getTimeDaysAgo(10); } @@ -152,8 +152,8 @@ public String compactionsShowArchived( archivedTimeline.loadCompactionDetailsInMemory(startTs, endTs); try { return printAllCompactions(archivedTimeline, - compactionPlanReader(this::readCompactionPlanForArchivedTimeline, archivedTimeline), - includeExtraMetadata, sortByField, descending, limit, headerOnly); + compactionPlanReader(this::readCompactionPlanForArchivedTimeline, archivedTimeline), + includeExtraMetadata, sortByField, descending, limit, headerOnly); } finally { archivedTimeline.clearInstantDetailsFromMemory(startTs, endTs); } @@ -161,23 +161,23 @@ public String compactionsShowArchived( @CliCommand(value = "compaction showarchived", help = "Shows compaction details for a specific compaction instant") public String compactionShowArchived( - @CliOption(key = "instant", mandatory = true, - help = "instant time") final String compactionInstantTime, - @CliOption(key = {"limit"}, help = "Limit commits", - unspecifiedDefaultValue = "-1") final Integer limit, - @CliOption(key = {"sortBy"}, help = "Sorting Field", unspecifiedDefaultValue = "") final String sortByField, - @CliOption(key = {"desc"}, help = "Ordering", unspecifiedDefaultValue = "false") final boolean descending, - @CliOption(key = {"headeronly"}, help = "Print Header Only", - unspecifiedDefaultValue = "false") final boolean headerOnly) - throws Exception { + @CliOption(key = "instant", mandatory = true, + help = "instant time") final String compactionInstantTime, + @CliOption(key = {"limit"}, help = "Limit commits", + unspecifiedDefaultValue = "-1") final Integer limit, + @CliOption(key = {"sortBy"}, help = "Sorting Field", unspecifiedDefaultValue = "") final String sortByField, + @CliOption(key = {"desc"}, help = "Ordering", unspecifiedDefaultValue = "false") final boolean descending, + @CliOption(key = {"headeronly"}, help = "Print Header Only", + unspecifiedDefaultValue = "false") final boolean headerOnly) + 
throws Exception { HoodieTableMetaClient client = checkAndGetMetaClient(); HoodieArchivedTimeline archivedTimeline = client.getArchivedTimeline(); HoodieInstant instant = new HoodieInstant(HoodieInstant.State.COMPLETED, - HoodieTimeline.COMPACTION_ACTION, compactionInstantTime); + HoodieTimeline.COMPACTION_ACTION, compactionInstantTime); try { archivedTimeline.loadCompactionDetailsInMemory(compactionInstantTime); HoodieCompactionPlan compactionPlan = TimelineMetadataUtils.deserializeAvroRecordMetadata( - archivedTimeline.getInstantDetails(instant).get(), HoodieCompactionPlan.getClassSchema()); + archivedTimeline.getInstantDetails(instant).get(), HoodieCompactionPlan.getClassSchema()); return printCompaction(compactionPlan, sortByField, descending, limit, headerOnly); } finally { archivedTimeline.clearInstantDetailsFromMemory(compactionInstantTime); @@ -185,8 +185,9 @@ public String compactionShowArchived( } @CliCommand(value = "compaction schedule", help = "Schedule Compaction") - public String scheduleCompact(@CliOption(key = "sparkMemory", unspecifiedDefaultValue = "1G", - help = "Spark executor memory") final String sparkMemory, + public String scheduleCompact( + @CliOption(key = "sparkMemory", unspecifiedDefaultValue = "1G", + help = "Spark executor memory") final String sparkMemory, @CliOption(key = "propsFilePath", help = "path to properties file on localfs or dfs with configurations for hoodie client for compacting", unspecifiedDefaultValue = "") final String propsFilePath, @CliOption(key = "hoodieConfigs", help = "Any configuration that can be set in the properties file can be passed here in the form of an array", @@ -229,9 +230,9 @@ public String compact( @CliOption(key = "retry", unspecifiedDefaultValue = "1", help = "Number of retries") final String retry, @CliOption(key = "compactionInstant", help = "Base path for the target hoodie table") String compactionInstantTime, @CliOption(key = "propsFilePath", help = "path to properties file on localfs or dfs with configurations for hoodie client for compacting", - unspecifiedDefaultValue = "") final String propsFilePath, + unspecifiedDefaultValue = "") final String propsFilePath, @CliOption(key = "hoodieConfigs", help = "Any configuration that can be set in the properties file can be passed here in the form of an array", - unspecifiedDefaultValue = "") final String[] configs) + unspecifiedDefaultValue = "") final String[] configs) throws Exception { HoodieTableMetaClient client = checkAndGetMetaClient(); boolean initialized = HoodieCLI.initConf(); @@ -312,12 +313,12 @@ private String printAllCompactions(HoodieDefaultTimeline timeline, Stream instantsStream = timeline.getWriteTimeline().getReverseOrderedInstants(); List> compactionPlans = instantsStream - .map(instant -> Pair.of(instant, compactionPlanReader.apply(instant))) - .filter(pair -> pair.getRight() != null) - .collect(Collectors.toList()); + .map(instant -> Pair.of(instant, compactionPlanReader.apply(instant))) + .filter(pair -> pair.getRight() != null) + .collect(Collectors.toList()); Set committedInstants = timeline.getCommitTimeline().filterCompletedInstants() - .getInstants().map(HoodieInstant::getTimestamp).collect(Collectors.toSet()); + .getInstants().map(HoodieInstant::getTimestamp).collect(Collectors.toSet()); List rows = new ArrayList<>(); for (Pair compactionPlan : compactionPlans) { @@ -332,11 +333,11 @@ private String printAllCompactions(HoodieDefaultTimeline timeline, if (includeExtraMetadata) { rows.add(new Comparable[] {instant.getTimestamp(), state.toString(), 
- plan.getOperations() == null ? 0 : plan.getOperations().size(), - plan.getExtraMetadata().toString()}); + plan.getOperations() == null ? 0 : plan.getOperations().size(), + plan.getExtraMetadata().toString()}); } else { rows.add(new Comparable[] {instant.getTimestamp(), state.toString(), - plan.getOperations() == null ? 0 : plan.getOperations().size()}); + plan.getOperations() == null ? 0 : plan.getOperations().size()}); } } @@ -358,7 +359,7 @@ private String printAllCompactions(HoodieDefaultTimeline timeline, */ private Function compactionPlanReader( - BiFunction f, T timeline) { + BiFunction f, T timeline) { return (y) -> f.apply(timeline, y); } @@ -389,15 +390,15 @@ private HoodieCompactionPlan readCompactionPlanForActiveTimeline(HoodieActiveTim try { // This could be a completed compaction. Assume a compaction request file is present but skip if fails return TimelineMetadataUtils.deserializeCompactionPlan( - activeTimeline.readCompactionPlanAsBytes( - HoodieTimeline.getCompactionRequestedInstant(instant.getTimestamp())).get()); + activeTimeline.readCompactionPlanAsBytes( + HoodieTimeline.getCompactionRequestedInstant(instant.getTimestamp())).get()); } catch (HoodieIOException ioe) { // SKIP return null; } } else { return TimelineMetadataUtils.deserializeCompactionPlan(activeTimeline.readCompactionPlanAsBytes( - HoodieTimeline.getCompactionRequestedInstant(instant.getTimestamp())).get()); + HoodieTimeline.getCompactionRequestedInstant(instant.getTimestamp())).get()); } } catch (IOException e) { throw new HoodieIOException(e.getMessage(), e); @@ -405,15 +406,15 @@ private HoodieCompactionPlan readCompactionPlanForActiveTimeline(HoodieActiveTim } protected String printCompaction(HoodieCompactionPlan compactionPlan, - String sortByField, - boolean descending, - int limit, - boolean headerOnly) { + String sortByField, + boolean descending, + int limit, + boolean headerOnly) { List rows = new ArrayList<>(); if ((null != compactionPlan) && (null != compactionPlan.getOperations())) { for (HoodieCompactionOperation op : compactionPlan.getOperations()) { - rows.add(new Comparable[]{op.getPartitionPath(), op.getFileId(), op.getBaseInstantTime(), op.getDataFilePath(), - op.getDeltaFilePaths().size(), op.getMetrics() == null ? "" : op.getMetrics().toString()}); + rows.add(new Comparable[] {op.getPartitionPath(), op.getFileId(), op.getBaseInstantTime(), op.getDataFilePath(), + op.getDeltaFilePaths().size(), op.getMetrics() == null ? 
"" : op.getMetrics().toString()}); } } @@ -645,7 +646,7 @@ public String repairCompaction( } private String getRenamesToBePrinted(List res, Integer limit, String sortByField, boolean descending, - boolean headerOnly, String operation) { + boolean headerOnly, String operation) { Option result = Option.fromJavaOptional(res.stream().map(r -> r.isExecuted() && r.isSuccess()).reduce(Boolean::logicalAnd)); diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/ExportCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/ExportCommand.java index 95e7caa8a9ed0..91d13bcd17967 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/ExportCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/ExportCommand.java @@ -18,12 +18,6 @@ package org.apache.hudi.cli.commands; -import org.apache.avro.generic.GenericRecord; -import org.apache.avro.generic.IndexedRecord; -import org.apache.avro.specific.SpecificData; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; import org.apache.hudi.avro.HoodieAvroUtils; import org.apache.hudi.avro.model.HoodieArchivedMetaEntry; import org.apache.hudi.avro.model.HoodieCleanMetadata; @@ -42,6 +36,13 @@ import org.apache.hudi.common.table.timeline.TimelineMetadataUtils; import org.apache.hudi.common.util.ClosableIterator; import org.apache.hudi.exception.HoodieException; + +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.generic.IndexedRecord; +import org.apache.avro.specific.SpecificData; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.springframework.shell.core.CommandMarker; import org.springframework.shell.core.annotation.CliCommand; import org.springframework.shell.core.annotation.CliOption; diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/FileSystemViewCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/FileSystemViewCommand.java index 27598b5f5197f..d5647d860ddff 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/FileSystemViewCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/FileSystemViewCommand.java @@ -229,18 +229,18 @@ public String showLatestFileSlices( /** * Build File System View. 
- * - * @param globRegex Path Regex - * @param maxInstant Max Instants to be used for displaying file-instants - * @param basefileOnly Include only base file view + * + * @param globRegex Path Regex + * @param maxInstant Max Instants to be used for displaying file-instants + * @param basefileOnly Include only base file view * @param includeMaxInstant Include Max instant - * @param includeInflight Include inflight instants + * @param includeInflight Include inflight instants * @param excludeCompaction Exclude Compaction instants * @return * @throws IOException */ private HoodieTableFileSystemView buildFileSystemView(String globRegex, String maxInstant, boolean basefileOnly, - boolean includeMaxInstant, boolean includeInflight, boolean excludeCompaction) throws IOException { + boolean includeMaxInstant, boolean includeInflight, boolean excludeCompaction) throws IOException { HoodieTableMetaClient client = HoodieCLI.getTableMetaClient(); HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(client.getHadoopConf()).setBasePath(client.getBasePath()).setLoadActiveTimelineOnLoad(true).build(); diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/HDFSParquetImportCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/HDFSParquetImportCommand.java index 5c6407cea1443..dc59f8a650fa7 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/HDFSParquetImportCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/HDFSParquetImportCommand.java @@ -37,8 +37,8 @@ /** * CLI command for importing parquet table to hudi table. * - * @deprecated This utility is deprecated in 0.10.0 and will be removed in 0.11.0. Use {@link HoodieDeltaStreamer.Config#runBootstrap} instead. * @see HoodieDeltaStreamer + * @deprecated This utility is deprecated in 0.10.0 and will be removed in 0.11.0. Use {@link HoodieDeltaStreamer.Config#runBootstrap} instead. 
*/ @Component public class HDFSParquetImportCommand implements CommandMarker { @@ -64,9 +64,9 @@ public String convert( @CliOption(key = "sparkMemory", mandatory = true, help = "Spark executor memory") final String sparkMemory, @CliOption(key = "retry", mandatory = true, help = "Number of retries") final String retry, @CliOption(key = "propsFilePath", help = "path to properties file on localfs or dfs with configurations for hoodie client for importing", - unspecifiedDefaultValue = "") final String propsFilePath, + unspecifiedDefaultValue = "") final String propsFilePath, @CliOption(key = "hoodieConfigs", help = "Any configuration that can be set in the properties file can be passed here in the form of an array", - unspecifiedDefaultValue = "") final String[] configs) throws Exception { + unspecifiedDefaultValue = "") final String[] configs) throws Exception { (new FormatValidator()).validate("format", format); diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MarkersCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MarkersCommand.java index 57a4ee1879855..d229fe1a71f03 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MarkersCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MarkersCommand.java @@ -22,6 +22,7 @@ import org.apache.hudi.cli.utils.InputStreamConsumer; import org.apache.hudi.cli.utils.SparkUtil; import org.apache.hudi.common.table.HoodieTableMetaClient; + import org.apache.spark.launcher.SparkLauncher; import org.springframework.shell.core.CommandMarker; import org.springframework.shell.core.annotation.CliCommand; @@ -35,7 +36,8 @@ public class MarkersCommand implements CommandMarker { @CliCommand(value = "marker delete", help = "Delete the marker") - public String deleteMarker(@CliOption(key = {"commit"}, help = "Delete a marker") final String instantTime, + public String deleteMarker( + @CliOption(key = {"commit"}, help = "Delete a marker") final String instantTime, @CliOption(key = {"sparkProperties"}, help = "Spark Properties File Path") final String sparkPropertiesPath, @CliOption(key = "sparkMaster", unspecifiedDefaultValue = "", help = "Spark Master") String master, @CliOption(key = "sparkMemory", unspecifiedDefaultValue = "1G", diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MetadataCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MetadataCommand.java index e3d25e06b8860..d9ef1d04cee98 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MetadataCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MetadataCommand.java @@ -150,7 +150,7 @@ public String delete() throws Exception { @CliCommand(value = "metadata init", help = "Update the metadata table from commits since the creation") public String init(@CliOption(key = "sparkMaster", unspecifiedDefaultValue = SparkUtil.DEFAULT_SPARK_MASTER, help = "Spark master") final String master, @CliOption(key = {"readonly"}, unspecifiedDefaultValue = "false", - help = "Open in read-only mode") final boolean readOnly) throws Exception { + help = "Open in read-only mode") final boolean readOnly) throws Exception { HoodieCLI.getTableMetaClient(); Path metadataPath = new Path(getMetadataTableBasePath(HoodieCLI.basePath)); try { diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RepairsCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RepairsCommand.java index ac1701915773c..611d937b8c69d 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RepairsCommand.java +++ 
b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RepairsCommand.java @@ -33,12 +33,11 @@ import org.apache.hudi.common.table.timeline.HoodieTimeline; import org.apache.hudi.common.util.CleanerUtils; import org.apache.hudi.common.util.Option; +import org.apache.hudi.common.util.StringUtils; import org.apache.hudi.exception.HoodieIOException; import org.apache.avro.AvroRuntimeException; import org.apache.hadoop.fs.Path; -import org.apache.hudi.common.util.StringUtils; - import org.apache.log4j.Logger; import org.apache.spark.launcher.SparkLauncher; import org.apache.spark.util.Utils; @@ -46,7 +45,6 @@ import org.springframework.shell.core.annotation.CliCommand; import org.springframework.shell.core.annotation.CliOption; import org.springframework.stereotype.Component; -import scala.collection.JavaConverters; import java.io.FileInputStream; import java.io.IOException; @@ -56,6 +54,8 @@ import java.util.TreeSet; import java.util.stream.Collectors; +import scala.collection.JavaConverters; + import static org.apache.hudi.common.table.HoodieTableMetaClient.METAFOLDER_NAME; /** @@ -151,8 +151,7 @@ public String addPartitionMeta( @CliCommand(value = "repair overwrite-hoodie-props", help = "Overwrite hoodie.properties with provided file. Risky operation. Proceed with caution!") public String overwriteHoodieProperties( - @CliOption(key = {"new-props-file"}, help = "Path to a properties file on local filesystem to overwrite the table's hoodie.properties with") - final String overwriteFilePath) throws IOException { + @CliOption(key = {"new-props-file"}, help = "Path to a properties file on local filesystem to overwrite the table's hoodie.properties with") final String overwriteFilePath) throws IOException { HoodieTableMetaClient client = HoodieCLI.getTableMetaClient(); Properties newProps = new Properties(); @@ -170,7 +169,7 @@ public String overwriteHoodieProperties( String[][] rows = new String[allPropKeys.size()][]; int ind = 0; for (String propKey : allPropKeys) { - String[] row = new String[]{ + String[] row = new String[] { propKey, oldProps.getOrDefault(propKey, "null"), newProps.getOrDefault(propKey, "null").toString() diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RollbacksCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RollbacksCommand.java index faa778943c9b0..8d1d21ac903ea 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RollbacksCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RollbacksCommand.java @@ -102,17 +102,17 @@ public String showRollback( activeTimeline.getInstantDetails(new HoodieInstant(State.COMPLETED, ROLLBACK_ACTION, rollbackInstant)).get(), HoodieRollbackMetadata.class); metadata.getPartitionMetadata().forEach((key, value) -> Stream - .concat(value.getSuccessDeleteFiles().stream().map(f -> Pair.of(f, true)), - value.getFailedDeleteFiles().stream().map(f -> Pair.of(f, false))) - .forEach(fileWithDeleteStatus -> { - Comparable[] row = new Comparable[5]; - row[0] = metadata.getStartRollbackTime(); - row[1] = metadata.getCommitsRollback().toString(); - row[2] = key; - row[3] = fileWithDeleteStatus.getLeft(); - row[4] = fileWithDeleteStatus.getRight(); - rows.add(row); - })); + .concat(value.getSuccessDeleteFiles().stream().map(f -> Pair.of(f, true)), + value.getFailedDeleteFiles().stream().map(f -> Pair.of(f, false))) + .forEach(fileWithDeleteStatus -> { + Comparable[] row = new Comparable[5]; + row[0] = metadata.getStartRollbackTime(); + row[1] = metadata.getCommitsRollback().toString(); + row[2] = 
key; + row[3] = fileWithDeleteStatus.getLeft(); + row[4] = fileWithDeleteStatus.getRight(); + rows.add(row); + })); TableHeader header = new TableHeader().addTableHeaderField(HoodieTableHeaderFields.HEADER_INSTANT) .addTableHeaderField(HoodieTableHeaderFields.HEADER_ROLLBACK_INSTANT) diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SavepointsCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SavepointsCommand.java index e5b07fd99f403..5b775e5f3135b 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SavepointsCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SavepointsCommand.java @@ -28,6 +28,7 @@ import org.apache.hudi.common.table.timeline.HoodieInstant; import org.apache.hudi.common.table.timeline.HoodieTimeline; import org.apache.hudi.exception.HoodieException; + import org.apache.spark.launcher.SparkLauncher; import org.springframework.shell.core.CommandMarker; import org.springframework.shell.core.annotation.CliCommand; @@ -57,7 +58,8 @@ public String showSavepoints() { } @CliCommand(value = "savepoint create", help = "Savepoint a commit") - public String savepoint(@CliOption(key = {"commit"}, help = "Commit to savepoint") final String commitTime, + public String savepoint( + @CliOption(key = {"commit"}, help = "Commit to savepoint") final String commitTime, @CliOption(key = {"user"}, unspecifiedDefaultValue = "default", help = "User who is creating the savepoint") final String user, @CliOption(key = {"comments"}, unspecifiedDefaultValue = "default", @@ -125,7 +127,8 @@ public String rollbackToSavepoint( } @CliCommand(value = "savepoint delete", help = "Delete the savepoint") - public String deleteSavepoint(@CliOption(key = {"commit"}, help = "Delete a savepoint") final String instantTime, + public String deleteSavepoint( + @CliOption(key = {"commit"}, help = "Delete a savepoint") final String instantTime, @CliOption(key = {"sparkProperties"}, help = "Spark Properties File Path") final String sparkPropertiesPath, @CliOption(key = "sparkMaster", unspecifiedDefaultValue = "", help = "Spark Master") String master, @CliOption(key = "sparkMemory", unspecifiedDefaultValue = "4G", diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SparkEnvCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SparkEnvCommand.java index a3d78d1260967..aed404c300709 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SparkEnvCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SparkEnvCommand.java @@ -50,8 +50,8 @@ public void setEnv(@CliOption(key = {"conf"}, help = "Env config to be set") fin public String showAllEnv() { String[][] rows = new String[env.size()][2]; int i = 0; - for (Map.Entry entry: env.entrySet()) { - rows[i] = new String[]{entry.getKey(), entry.getValue()}; + for (Map.Entry entry : env.entrySet()) { + rows[i] = new String[] {entry.getKey(), entry.getValue()}; i++; } return HoodiePrintHelper.print(new String[] {"key", "value"}, rows); @@ -62,7 +62,7 @@ public String showEnvByKey(@CliOption(key = {"key"}, help = "Which env conf want if (key == null || key.isEmpty()) { return showAllEnv(); } else { - return HoodiePrintHelper.print(new String[] {"key", "value"}, new String[][]{new String[]{key, env.get(key)}}); + return HoodiePrintHelper.print(new String[] {"key", "value"}, new String[][] {new String[] {key, env.get(key)}}); } } } diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SparkMain.java 
b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SparkMain.java index f60f6f2d46c15..ef17abca57932 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SparkMain.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SparkMain.java @@ -174,7 +174,7 @@ public static void main(String[] args) throws Exception { case COMPACT_REPAIR: assert (args.length == 8); doCompactRepair(jsc, args[3], args[4], args[5], Integer.parseInt(args[6]), - Boolean.parseBoolean(args[7])); + Boolean.parseBoolean(args[7])); returnCode = 0; break; case COMPACT_UNSCHEDULE_FILE: @@ -263,7 +263,7 @@ public static void main(String[] args) throws Exception { configs.addAll(Arrays.asList(args).subList(18, args.length)); } returnCode = doBootstrap(jsc, args[3], args[4], args[5], args[6], args[7], args[8], args[9], args[10], - args[11], args[12], args[13], args[14], args[15], args[16], propsFilePath, configs); + args[11], args[12], args[13], args[14], args[15], args[16], propsFilePath, configs); break; case UPGRADE: case DOWNGRADE: @@ -283,7 +283,7 @@ public static void main(String[] args) throws Exception { } protected static void clean(JavaSparkContext jsc, String basePath, String propsFilePath, - List configs) { + List configs) { HoodieCleaner.Config cfg = new HoodieCleaner.Config(); cfg.basePath = basePath; cfg.propsFilePath = propsFilePath; @@ -307,8 +307,8 @@ protected static int deleteMarker(JavaSparkContext jsc, String instantTime, Stri } private static int dataLoad(JavaSparkContext jsc, String command, String srcPath, String targetPath, String tableName, - String tableType, String rowKey, String partitionKey, int parallelism, String schemaFile, - int retry, String propsFilePath, List configs) { + String tableType, String rowKey, String partitionKey, int parallelism, String schemaFile, + int retry, String propsFilePath, List configs) { Config cfg = new Config(); cfg.command = command; cfg.srcPath = srcPath; @@ -325,7 +325,7 @@ private static int dataLoad(JavaSparkContext jsc, String command, String srcPath } private static void doCompactValidate(JavaSparkContext jsc, String basePath, String compactionInstant, - String outputPath, int parallelism) throws Exception { + String outputPath, int parallelism) throws Exception { HoodieCompactionAdminTool.Config cfg = new HoodieCompactionAdminTool.Config(); cfg.basePath = basePath; cfg.operation = Operation.VALIDATE; @@ -336,7 +336,7 @@ private static void doCompactValidate(JavaSparkContext jsc, String basePath, Str } private static void doCompactRepair(JavaSparkContext jsc, String basePath, String compactionInstant, - String outputPath, int parallelism, boolean dryRun) throws Exception { + String outputPath, int parallelism, boolean dryRun) throws Exception { HoodieCompactionAdminTool.Config cfg = new HoodieCompactionAdminTool.Config(); cfg.basePath = basePath; cfg.operation = Operation.REPAIR; @@ -348,7 +348,7 @@ private static void doCompactRepair(JavaSparkContext jsc, String basePath, Strin } private static void doCompactUnschedule(JavaSparkContext jsc, String basePath, String compactionInstant, - String outputPath, int parallelism, boolean skipValidation, boolean dryRun) throws Exception { + String outputPath, int parallelism, boolean skipValidation, boolean dryRun) throws Exception { HoodieCompactionAdminTool.Config cfg = new HoodieCompactionAdminTool.Config(); cfg.basePath = basePath; cfg.operation = Operation.UNSCHEDULE_PLAN; @@ -361,7 +361,7 @@ private static void doCompactUnschedule(JavaSparkContext jsc, String basePath, S } private static 
void doCompactUnscheduleFile(JavaSparkContext jsc, String basePath, String fileId, String partitionPath, - String outputPath, int parallelism, boolean skipValidation, boolean dryRun) + String outputPath, int parallelism, boolean skipValidation, boolean dryRun) throws Exception { HoodieCompactionAdminTool.Config cfg = new HoodieCompactionAdminTool.Config(); cfg.basePath = basePath; @@ -376,8 +376,8 @@ private static void doCompactUnscheduleFile(JavaSparkContext jsc, String basePat } private static int compact(JavaSparkContext jsc, String basePath, String tableName, String compactionInstant, - int parallelism, String schemaFile, int retry, String mode, String propsFilePath, - List configs) { + int parallelism, String schemaFile, int retry, String mode, String propsFilePath, + List configs) { HoodieCompactor.Config cfg = new HoodieCompactor.Config(); cfg.basePath = basePath; cfg.tableName = tableName; @@ -393,7 +393,7 @@ private static int compact(JavaSparkContext jsc, String basePath, String tableNa } private static int cluster(JavaSparkContext jsc, String basePath, String tableName, String clusteringInstant, - int parallelism, String sparkMemory, int retry, String runningMode, String propsFilePath, List configs) { + int parallelism, String sparkMemory, int retry, String runningMode, String propsFilePath, List configs) { HoodieClusteringJob.Config cfg = new HoodieClusteringJob.Config(); cfg.basePath = basePath; cfg.tableName = tableName; @@ -407,7 +407,7 @@ private static int cluster(JavaSparkContext jsc, String basePath, String tableNa } private static int deduplicatePartitionPath(JavaSparkContext jsc, String duplicatedPartitionPath, - String repairedOutputPath, String basePath, boolean dryRun, String dedupeType) { + String repairedOutputPath, String basePath, boolean dryRun, String dedupeType) { DedupeSparkJob job = new DedupeSparkJob(basePath, duplicatedPartitionPath, repairedOutputPath, new SQLContext(jsc), FSUtils.getFs(basePath, jsc.hadoopConfiguration()), DeDupeType.withName(dedupeType)); job.fixDuplicates(dryRun); @@ -415,9 +415,9 @@ private static int deduplicatePartitionPath(JavaSparkContext jsc, String duplica } private static int doBootstrap(JavaSparkContext jsc, String tableName, String tableType, String basePath, - String sourcePath, String recordKeyCols, String partitionFields, String parallelism, String schemaProviderClass, - String bootstrapIndexClass, String selectorClass, String keyGenerator, String fullBootstrapInputProvider, - String payloadClassName, String enableHiveSync, String propsFilePath, List configs) throws IOException { + String sourcePath, String recordKeyCols, String partitionFields, String parallelism, String schemaProviderClass, + String bootstrapIndexClass, String selectorClass, String keyGenerator, String fullBootstrapInputProvider, + String payloadClassName, String enableHiveSync, String propsFilePath, List configs) throws IOException { TypedProperties properties = propsFilePath == null ? 
buildProperties(configs) : readConfig(jsc.hadoopConfiguration(), new Path(propsFilePath), configs).getProps(true); @@ -462,7 +462,7 @@ private static int rollback(JavaSparkContext jsc, String instantTime, String bas } private static int createSavepoint(JavaSparkContext jsc, String commitTime, String user, - String comments, String basePath) throws Exception { + String comments, String basePath) throws Exception { SparkRDDWriteClient client = createHoodieClient(jsc, basePath, false); try { client.savepoint(commitTime, user, comments); @@ -501,8 +501,8 @@ private static int deleteSavepoint(JavaSparkContext jsc, String savepointTime, S /** * Upgrade or downgrade table. * - * @param jsc instance of {@link JavaSparkContext} to use. - * @param basePath base path of the dataset. + * @param jsc instance of {@link JavaSparkContext} to use. + * @param basePath base path of the dataset. * @param toVersion version to which upgrade/downgrade to be done. * @return 0 if success, else -1. * @throws Exception diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/TableCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/TableCommand.java index 41cdfb88f8441..d9b1d16d65e88 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/TableCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/TableCommand.java @@ -94,8 +94,8 @@ public String connect( /** * Create a Hoodie Table if it does not exist. * - * @param path Base Path - * @param name Hoodie Table Name + * @param path Base Path + * @param name Hoodie Table Name * @param tableTypeStr Hoodie Table Type * @param payloadClass Payload Class */ @@ -127,12 +127,12 @@ public String createTable( } HoodieTableMetaClient.withPropertyBuilder() - .setTableType(tableTypeStr) - .setTableName(name) - .setArchiveLogFolder(archiveFolder) - .setPayloadClassName(payloadClass) - .setTimelineLayoutVersion(layoutVersion) - .initTable(HoodieCLI.conf, path); + .setTableType(tableTypeStr) + .setTableName(name) + .setArchiveLogFolder(archiveFolder) + .setPayloadClassName(payloadClass) + .setTimelineLayoutVersion(layoutVersion) + .initTable(HoodieCLI.conf, path); // Now connect to ensure loading works return connect(path, layoutVersion, false, 0, 0, 0); } @@ -193,8 +193,7 @@ public String recoverTableConfig() throws IOException { @CliCommand(value = "table update-configs", help = "Update the table configs with configs with provided file.") public String updateTableConfig( - @CliOption(key = {"props-file"}, mandatory = true, help = "Path to a properties file on local filesystem") - final String updatePropsFilePath) throws IOException { + @CliOption(key = {"props-file"}, mandatory = true, help = "Path to a properties file on local filesystem") final String updatePropsFilePath) throws IOException { HoodieTableMetaClient client = HoodieCLI.getTableMetaClient(); Map oldProps = client.getTableConfig().propsMap(); @@ -210,8 +209,7 @@ public String updateTableConfig( @CliCommand(value = "table delete-configs", help = "Delete the supplied table configs from the table.") public String deleteTableConfig( - @CliOption(key = {"comma-separated-configs"}, mandatory = true, help = "Comma separated list of configs to delete.") - final String csConfigs) { + @CliOption(key = {"comma-separated-configs"}, mandatory = true, help = "Comma separated list of configs to delete.") final String csConfigs) { HoodieTableMetaClient client = HoodieCLI.getTableMetaClient(); Map oldProps = client.getTableConfig().propsMap(); @@ -232,7 +230,7 @@ private static String 
renderOldNewProps(Map newProps, Map newProps, Map fileSlicesToCRList(Stream fileSlice, Strin return rows; } - /**( + /** + * ( * Test case for command 'show fsview latest'. */ @Test diff --git a/hudi-cli/src/test/java/org/apache/hudi/cli/commands/TestHoodieLogFileCommand.java b/hudi-cli/src/test/java/org/apache/hudi/cli/commands/TestHoodieLogFileCommand.java index ee7fbda11b783..f92d5fc57915b 100644 --- a/hudi-cli/src/test/java/org/apache/hudi/cli/commands/TestHoodieLogFileCommand.java +++ b/hudi-cli/src/test/java/org/apache/hudi/cli/commands/TestHoodieLogFileCommand.java @@ -137,7 +137,7 @@ public void testShowLogFileCommits() throws JsonProcessingException { ObjectMapper objectMapper = new ObjectMapper(); String headerStr = objectMapper.writeValueAsString(dataBlock.getLogBlockHeader()); String footerStr = objectMapper.writeValueAsString(dataBlock.getLogBlockFooter()); - Comparable[] output = new Comparable[]{INSTANT_TIME, 100, dataBlock.getBlockType(), headerStr, footerStr}; + Comparable[] output = new Comparable[] {INSTANT_TIME, 100, dataBlock.getBlockType(), headerStr, footerStr}; rows.add(output); String expected = HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows); @@ -156,7 +156,7 @@ public void testShowLogFileRecords() throws IOException, URISyntaxException { // construct expect result, get 10 records. List records = SchemaTestUtil.generateTestRecords(0, 10); - String[][] rows = records.stream().map(r -> new String[]{r.toString()}).toArray(String[][]::new); + String[][] rows = records.stream().map(r -> new String[] {r.toString()}).toArray(String[][]::new); String expected = HoodiePrintHelper.print(new String[] {HoodieTableHeaderFields.HEADER_RECORDS}, rows); expected = removeNonWordAndStripSpace(expected); String got = removeNonWordAndStripSpace(cr.getResult().toString()); @@ -232,7 +232,7 @@ public void testShowLogFileRecordsWithMerge() throws IOException, InterruptedExc indexRecords.add(hoodieRecord.get()); num++; } - String[][] rows = indexRecords.stream().map(r -> new String[]{r.toString()}).toArray(String[][]::new); + String[][] rows = indexRecords.stream().map(r -> new String[] {r.toString()}).toArray(String[][]::new); assertNotNull(rows); String expected = HoodiePrintHelper.print(new String[] {HoodieTableHeaderFields.HEADER_RECORDS}, rows); diff --git a/hudi-cli/src/test/java/org/apache/hudi/cli/commands/TestSavepointsCommand.java b/hudi-cli/src/test/java/org/apache/hudi/cli/commands/TestSavepointsCommand.java index 74ea42f0ffa40..436af1d976f5e 100644 --- a/hudi-cli/src/test/java/org/apache/hudi/cli/commands/TestSavepointsCommand.java +++ b/hudi-cli/src/test/java/org/apache/hudi/cli/commands/TestSavepointsCommand.java @@ -74,7 +74,7 @@ public void testShowSavepoints() throws IOException { // generate expect result String[][] rows = Stream.of("100", "101", "102", "103").sorted(Comparator.reverseOrder()) - .map(instant -> new String[]{instant}).toArray(String[][]::new); + .map(instant -> new String[] {instant}).toArray(String[][]::new); String expected = HoodiePrintHelper.print(new String[] {HoodieTableHeaderFields.HEADER_SAVEPOINT_TIME}, rows); expected = removeNonWordAndStripSpace(expected); String got = removeNonWordAndStripSpace(cr.getResult().toString());
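
Note: as an illustrative summary inferred from the hunks above (not an authoritative style guide), the patch normalizes three things: a space between the array type and the initializer brace, a consistent continuation indent for wrapped arguments and chained calls, and import grouping (org.apache.hudi imports first, then third-party, then java/scala). For example, the CleansCommand hunk changes

    // old style (no space before the initializer brace)
    rows.add(new Comparable[]{clean.getTimestamp(), cleanMetadata.getEarliestCommitToRetain(),
        cleanMetadata.getTotalFilesDeleted(), cleanMetadata.getTimeTakenInMillis()});

to

    // new style, as applied throughout this patch (example taken from the CleansCommand hunk)
    rows.add(new Comparable[] {clean.getTimestamp(), cleanMetadata.getEarliestCommitToRetain(),
        cleanMetadata.getTotalFilesDeleted(), cleanMetadata.getTimeTakenInMillis()});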