diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java
index b014e6693bbc..3e0d216ed0df 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreJob.java
@@ -19,6 +19,7 @@
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -34,10 +35,11 @@ public interface RestoreJob extends Configurable {
    * Run restore operation
    * @param dirPaths path array of WAL log directories
    * @param fromTables from tables
+   * @param restoreFileSystem output file system
   * @param toTables to tables
   * @param fullBackupRestore full backup restore
   * @throws IOException if running the job fails
   */
-  void run(Path[] dirPaths, TableName[] fromTables, TableName[] toTables, boolean fullBackupRestore)
-    throws IOException;
+  void run(Path[] dirPaths, TableName[] fromTables, FileSystem restoreFileSystem,
+    TableName[] toTables, boolean fullBackupRestore) throws IOException;
 }
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
index 4e097188fe7f..f7f1d848d958 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
@@ -37,8 +37,8 @@ public Builder withBackupRootDir(String backupRootDir) {
       return this;
     }
 
-    public Builder withTargetRootDir(String targetRootDir) {
-      request.setTargetRootDir(targetRootDir);
+    public Builder withRestoreRootDir(String restoreRootDir) {
+      request.setRestoreRootDir(restoreRootDir);
       return this;
     }
 
@@ -73,7 +73,7 @@ public RestoreRequest build() {
   }
 
   private String backupRootDir;
-  private String targetRootDir;
+  private String restoreRootDir;
   private String backupId;
   private boolean check = false;
   private TableName[] fromTables;
@@ -92,12 +92,12 @@ private RestoreRequest setBackupRootDir(String backupRootDir) {
     return this;
  }
 
-  public String getTargetRootDir() {
-    return targetRootDir;
+  public String getRestoreRootDir() {
+    return restoreRootDir;
  }
 
-  public RestoreRequest setTargetRootDir(String targetRootDir) {
-    this.targetRootDir = targetRootDir;
+  private RestoreRequest setRestoreRootDir(String restoreRootDir) {
+    this.restoreRootDir = restoreRootDir;
     return this;
   }
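For orientation, a caller-side sketch of the renamed builder API; the directory and table variables below are illustrative placeholders, not identifiers from this patch:

    // Hypothetical caller: restoreRootDir names the root whose file system
    // receives bulk-load staging output; backupRootDir holds the backup images.
    RestoreRequest request = new RestoreRequest.Builder()
      .withBackupRootDir(backupRootDir)
      .withRestoreRootDir(restoreRootDir)   // replaces withTargetRootDir(...)
      .withBackupId(backupId)
      .withCheck(false)
      .withFromTables(fromTables)
      .withToTables(toTables)
      .withOvewrite(false)                  // pre-existing spelling in this builder
      .build();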
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
index b8bf1e13cedc..76712afcb771 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
@@ -35,7 +35,6 @@
 import org.apache.hadoop.hbase.backup.HBackupFileSystem;
 import org.apache.hadoop.hbase.backup.RestoreRequest;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
-import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
 import org.apache.hadoop.hbase.backup.util.RestoreTool;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
@@ -57,9 +56,10 @@ public class RestoreTablesClient {
   private TableName[] sTableArray;
   private TableName[] tTableArray;
   private String backupRootDir;
+  private FileSystem restoreFileSystem;
   private boolean isOverwrite;
 
-  public RestoreTablesClient(Connection conn, RestoreRequest request) {
+  public RestoreTablesClient(Connection conn, RestoreRequest request) throws IOException {
     this.backupRootDir = request.getBackupRootDir();
     this.backupId = request.getBackupId();
     this.sTableArray = request.getFromTables();
@@ -70,8 +70,11 @@ public RestoreTablesClient(Connection conn, RestoreRequest request) {
     this.isOverwrite = request.isOverwrite();
     this.conn = conn;
     this.conf = conn.getConfiguration();
-    if (request.getTargetRootDir() != null) {
-      conf.set(MapReduceHFileSplitterJob.BULK_OUTPUT_ROOT_DIR, request.getTargetRootDir());
+    if (request.getRestoreRootDir() != null) {
+      Path restoreRootDir = new Path(request.getRestoreRootDir());
+      restoreFileSystem = restoreRootDir.getFileSystem(conf);
+    } else {
+      restoreFileSystem = FileSystem.get(conf);
     }
   }
 
@@ -135,7 +138,7 @@ private void restoreImages(BackupImage[] images, TableName sTable, TableName tTa
     String rootDir = image.getRootDir();
     String backupId = image.getBackupId();
     Path backupRoot = new Path(rootDir);
-    RestoreTool restoreTool = new RestoreTool(conf, backupRoot, backupId);
+    RestoreTool restoreTool = new RestoreTool(conf, backupRoot, restoreFileSystem, backupId);
     Path tableBackupPath = HBackupFileSystem.getTableBackupPath(sTable, backupRoot, backupId);
     String lastIncrBackupId = images.length == 1 ? null : images[images.length - 1].getBackupId();
     // We need hFS only for full restore (see the code)
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
index 3b4cf0246d73..56911621159f 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
@@ -120,7 +120,7 @@ public void run(String[] backupIds) throws IOException {
         Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds);
         String dirs = StringUtils.join(dirPaths, ",");
 
-        Path bulkOutputPath = BackupUtils.getBulkOutputDir(
+        Path bulkOutputPath = BackupUtils.getBulkOutputDir(fs,
           BackupUtils.getFileNameCompatibleString(tableNames[i]), getConf(), false);
         // Delete content if exists
         if (fs.exists(bulkOutputPath)) {
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
index 41511086d2be..766a99d778b8 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
@@ -56,7 +56,6 @@ public class MapReduceHFileSplitterJob extends Configured implements Tool {
   private static final Logger LOG = LoggerFactory.getLogger(MapReduceHFileSplitterJob.class);
   final static String NAME = "HFileSplitterJob";
   public final static String BULK_OUTPUT_CONF_KEY = "hfile.bulk.output";
-  public static final String BULK_OUTPUT_ROOT_DIR = "hfile.bulk.output.root.dir";
   public final static String TABLES_KEY = "hfile.input.tables";
   public final static String TABLE_MAP_KEY = "hfile.input.tablesmap";
   private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
index e6046bf5fb92..eee7b6f30c8c 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java
@@ -22,6 +22,7 @@
 import java.io.IOException;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
@@ -50,8 +51,8 @@ public MapReduceRestoreJob() {
   }
 
   @Override
-  public void run(Path[] dirPaths, TableName[] tableNames, TableName[] newTableNames,
-    boolean fullBackupRestore) throws IOException {
+  public void run(Path[] dirPaths, TableName[] tableNames, FileSystem restoreFileSystem,
+    TableName[] newTableNames, boolean fullBackupRestore) throws IOException {
     String bulkOutputConfKey;
 
     player = new MapReduceHFileSplitterJob();
@@ -71,8 +72,8 @@ public void run(Path[] dirPaths, TableName[] tableNames, TableName[] newTableNam
     for (int i = 0; i < tableNames.length; i++) {
       LOG.info("Restore " + tableNames[i] + " into " + newTableNames[i]);
 
-      Path bulkOutputPath = BackupUtils
-        .getBulkOutputDir(BackupUtils.getFileNameCompatibleString(newTableNames[i]), getConf());
+      Path bulkOutputPath = BackupUtils.getBulkOutputDir(restoreFileSystem,
+        BackupUtils.getFileNameCompatibleString(newTableNames[i]), getConf());
       Configuration conf = getConf();
       conf.set(bulkOutputConfKey, bulkOutputPath.toString());
       String[] playerArgs = { dirs,
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
index 4f18f1b70875..b71905bcee10 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
@@ -49,7 +49,6 @@
 import org.apache.hadoop.hbase.backup.RestoreRequest;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
-import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -686,16 +685,8 @@ public static boolean validate(HashMap<TableName, BackupManifest> backupManifest
     return isValid;
   }
 
-  public static Path getBulkOutputDir(String tableName, Configuration conf, boolean deleteOnExit)
-    throws IOException {
-    FileSystem fs;
-    String bulkOutputRootDir = conf.get(MapReduceHFileSplitterJob.BULK_OUTPUT_ROOT_DIR);
-    if (bulkOutputRootDir != null) {
-      Path rootDir = new Path(bulkOutputRootDir);
-      fs = FileSystem.get(rootDir.toUri(), conf);
-    } else {
-      fs = FileSystem.get(conf);
-    }
+  public static Path getBulkOutputDir(FileSystem fs, String tableName, Configuration conf,
+    boolean deleteOnExit) throws IOException {
     String tmp =
       conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, fs.getHomeDirectory() + "/hbase-staging");
     Path path = new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-"
@@ -706,8 +697,9 @@ public static Path getBulkOutputDir(String tableName, Configuration conf, boolea
     return path;
   }
 
-  public static Path getBulkOutputDir(String tableName, Configuration conf) throws IOException {
-    return getBulkOutputDir(tableName, conf, true);
+  public static Path getBulkOutputDir(FileSystem restoreFileSystem, String tableName,
+    Configuration conf) throws IOException {
+    return getBulkOutputDir(restoreFileSystem, tableName, conf, true);
   }
 
   public static String getFileNameCompatibleString(TableName table) {
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
index bf2aa14046db..c52cc37170cb 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
@@ -69,16 +69,18 @@ public class RestoreTool {
   protected Path backupRootPath;
   protected String backupId;
   protected FileSystem fs;
+  protected FileSystem restoreFs;
 
   // store table name and snapshot dir mapping
   private final HashMap<TableName, Path> snapshotMap = new HashMap<>();
 
-  public RestoreTool(Configuration conf, final Path backupRootPath, final String backupId)
-    throws IOException {
+  public RestoreTool(Configuration conf, final Path backupRootPath,
+    final FileSystem restoreFileSystem, final String backupId) throws IOException {
     this.conf = conf;
     this.backupRootPath = backupRootPath;
     this.backupId = backupId;
     this.fs = backupRootPath.getFileSystem(conf);
+    this.restoreFs = restoreFileSystem;
   }
 
   /**
@@ -200,7 +202,7 @@ public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[
       }
 
       RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
-      restoreService.run(logDirs, tableNames, newTableNames, false);
+      restoreService.run(logDirs, tableNames, restoreFs, newTableNames, false);
     }
   }
 
@@ -350,8 +352,8 @@ private void createAndRestoreTable(Connection conn, TableName tableName, TableNa
         RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
         Path[] paths = new Path[regionPathList.size()];
         regionPathList.toArray(paths);
-        restoreService.run(paths, new TableName[] { tableName }, new TableName[] { newTableName },
-          true);
+        restoreService.run(paths, new TableName[] { tableName }, restoreFs,
+          new TableName[] { newTableName }, true);
 
       } catch (Exception e) {
         LOG.error(e.toString(), e);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java
index 7bf7a55c599c..262bb2485c24 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupUtils.java
@@ -68,7 +68,7 @@ public Path run() {
       @Override
       public Path run() {
         try {
-          return BackupUtils.getBulkOutputDir("test", conf, false);
+          return BackupUtils.getBulkOutputDir(FileSystem.get(conf), "test", conf, false);
         } catch (IOException ioe) {
           LOG.error("Failed to get bulk output dir path", ioe);
         }
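A sketch of how a driver wires these pieces together, mirroring what RestoreTablesClient and RestoreTool do above; the variable names and the restore root value are illustrative placeholders:

    // Resolve the output file system from a restore root path, as
    // RestoreTablesClient does, then hand it to the restore job.
    FileSystem restoreFs = new Path(restoreRootDir).getFileSystem(conf);
    RestoreJob restoreService = BackupRestoreFactory.getRestoreJob(conf);
    // Bulk output is now staged on restoreFs rather than on a file system
    // derived from the removed BULK_OUTPUT_ROOT_DIR configuration key.
    restoreService.run(dirPaths, fromTables, restoreFs, toTables, true);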
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
index 1ece1770489b..9a2653ccb8dc 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
@@ -130,7 +130,7 @@ public void run(String[] backupIds) throws IOException {
         // Find input directories for table
         Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds);
         String dirs = StringUtils.join(dirPaths, ",");
-        Path bulkOutputPath = BackupUtils.getBulkOutputDir(
+        Path bulkOutputPath = BackupUtils.getBulkOutputDir(fs,
           BackupUtils.getFileNameCompatibleString(tableNames[i]), getConf(), false);
         // Delete content if exists
         if (fs.exists(bulkOutputPath)) {
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
index ce8c6497c9ef..3109456cab68 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
@@ -22,7 +22,6 @@
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.BeforeClass;
@@ -65,8 +64,9 @@ public void testFullRestoreRemote() throws Exception {
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = new TableName[] { table1_restore };
-    getBackupAdmin().restore(BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId,
-      false, tableset, tablemap, false));
+    getBackupAdmin().restore(new RestoreRequest.Builder().withBackupRootDir(BACKUP_REMOTE_ROOT_DIR)
+      .withRestoreRootDir(BACKUP_ROOT_DIR).withBackupId(backupId).withCheck(false)
+      .withFromTables(tableset).withToTables(tablemap).withOvewrite(false).build());
     Admin hba = TEST_UTIL.getAdmin();
     assertTrue(hba.tableExists(table1_restore));
     TEST_UTIL.deleteTable(table1_restore);
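Finally, a minimal sketch of the reworked staging helper, assuming a Configuration named conf and a placeholder TableName. Per the BackupUtils change above, the path resolves to bulk_output-<table>-<timestamp> under HConstants.TEMPORARY_FS_DIRECTORY_KEY (default: the home directory plus "/hbase-staging") on the supplied file system:

    FileSystem restoreFs = FileSystem.get(conf); // or restoreRootDir.getFileSystem(conf)
    Path staging = BackupUtils.getBulkOutputDir(restoreFs,
      BackupUtils.getFileNameCompatibleString(tableName), conf, false);
    // The three-argument overload defaults deleteOnExit to true, so the staging
    // directory is cleaned up when the file system is closed.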