Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

HADOOP-13294. Test hadoop fs shell against s3a #4006

Status: Open — this pull request wants to merge 12 commits into the base branch `trunk`.
Original file line number Diff line number Diff line change
Expand Up @@ -80,9 +80,20 @@ public void testRmEmptyRootDirRecursive() throws Throwable {
skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
Path root = new Path("/");
assertIsDirectory(root);
boolean deleted = getFileSystem().delete(root, true);
LOG.info("rm -r / of empty dir result is {}", deleted);
assertIsDirectory(root);
if (isSupported(ROOT_DELETE_INCOMPATIBLE_FS)) {
// Root delete incompatible file systems should throw an exception on calling
try {
boolean deleted = getFileSystem().delete(root, true);
fail("incompatible file system should have raised an exception," +
" but completed with exit code " + deleted);
} catch (IOException e) {
handleExpectedException(e);
}
} else {
boolean deleted = getFileSystem().delete(root, true);
LOG.info("rm -r / of empty dir result is {}", deleted);
assertIsDirectory(root);
}
}

@Test
Expand Down Expand Up @@ -119,9 +130,20 @@ public Void call() throws Exception {
},
new LambdaTestUtils.ProportionalRetryInterval(50, 1000));
// then try to delete the empty one
boolean deleted = fs.delete(root, false);
LOG.info("rm / of empty dir result is {}", deleted);
assertIsDirectory(root);
if (isSupported(ROOT_DELETE_INCOMPATIBLE_FS)) {
// Root delete incompatible file systems should throw an exception on calling
try {
boolean deleted = getFileSystem().delete(root, true);
fail("incompatible file system should have raised an exception," +
" but completed with exit code " + deleted);
} catch (IOException e) {
handleExpectedException(e);
}
} else {
boolean deleted = fs.delete(root, false);
LOG.info("rm / of empty dir result is {}", deleted);
assertIsDirectory(root);
}
}

@Test
Expand Down Expand Up @@ -157,13 +179,24 @@ public void testRmRootRecursive() throws Throwable {
Path file = new Path("/testRmRootRecursive");
try {
ContractTestUtils.touch(getFileSystem(), file);
boolean deleted = getFileSystem().delete(root, true);
assertIsDirectory(root);
LOG.info("rm -rf / result is {}", deleted);
if (deleted) {
assertPathDoesNotExist("expected file to be deleted", file);
if (isSupported(ROOT_DELETE_INCOMPATIBLE_FS)) {
// Root delete incompatible file systems should throw an exception on calling
try {
boolean deleted = getFileSystem().delete(root, true);
fail("incompatible file system should have raised an exception," +
" but completed with exit code " + deleted);
} catch (IOException e) {
handleExpectedException(e);
}
} else {
assertPathExists("expected file to be preserved", file);
boolean deleted = getFileSystem().delete(root, true);
assertIsDirectory(root);
LOG.info("rm -rf / result is {}", deleted);
if (deleted) {
assertPathDoesNotExist("expected file to be deleted", file);
} else {
assertPathExists("expected file to be preserved", file);
}
}
} finally{
getFileSystem().delete(file, false);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -234,6 +234,12 @@ public interface ContractOptions {
*/
String TEST_ROOT_TESTS_ENABLED = "test.root-tests-enabled";

/**
 * Flag to indicate that the filesystem is incompatible with root-directory
 * deletion: instead of returning a status code, {@code delete("/", ...)}
 * is expected to raise an exception. Contract tests consult this option
 * (via {@code isSupported}) to assert the exception path rather than the
 * boolean-result path.
 * {@value}
 */
String ROOT_DELETE_INCOMPATIBLE_FS = "root-delete-incompatible-fs";

/**
* Limit for #of random seeks to perform.
* Keep low for remote filesystems for faster tests
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.PathPermissionException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.s3a.Invoker;
import org.apache.hadoop.fs.s3a.Retries;
Expand Down Expand Up @@ -190,7 +191,7 @@ public Boolean execute() throws IOException {
LOG.error("S3A: Cannot delete the root directory."
+ " Path: {}. Recursive: {}",
status.getPath(), recursive);
return false;
throw new PathPermissionException(path.toString(), "S3A: Cannot delete the root directory");
}

if (!recursive && status.isEmptyDirectory() == Tristate.FALSE) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@

package org.apache.hadoop.fs.contract.s3a;

import org.junit.Ignore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

Expand All @@ -45,9 +44,4 @@ protected AbstractFSContract createContract(Configuration conf) {
/**
 * Get the test filesystem, narrowed to the S3A implementation type.
 * Covariant override of the base contract test's accessor so callers in
 * this class can use S3A-specific methods without casting.
 *
 * @return the filesystem under test as an {@code S3AFileSystem}
 */
public S3AFileSystem getFileSystem() {
return (S3AFileSystem) super.getFileSystem();
}

// Skips the inherited contract test for this store: per the @Ignore
// message, a non-recursive delete of the root directory on S3 always
// returns false, so the base test's expectations do not hold here.
@Override
@Ignore("S3 always return false when non-recursively remove root dir")
public void testRmNonEmptyRootDirNonRecursive() throws Throwable {
// intentionally empty: overriding with @Ignore disables the inherited body
}
}
Loading