Date: Thu, 10 Mar 2022 08:02:18 +0100
Subject: [PATCH 028/145] YARN-11052. Improve code quality in
TestRMWebServicesNodeLabels. Contributed by Szilard Nemeth
---
.../webapp/TestRMWebServicesNodeLabels.java | 938 ++++++++----------
1 file changed, 424 insertions(+), 514 deletions(-)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java
index f1ea0fba0339d..503b4a8b2bbdd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java
@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -26,9 +26,15 @@
import java.io.IOException;
import java.io.StringWriter;
import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriBuilder;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.util.Lists;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.http.JettyUtils;
@@ -55,7 +61,6 @@
import com.google.inject.Guice;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.api.json.JSONJAXBContext;
import com.sun.jersey.api.json.JSONMarshaller;
@@ -69,6 +74,28 @@ public class TestRMWebServicesNodeLabels extends JerseyTestBase {
private static final Logger LOG = LoggerFactory
.getLogger(TestRMWebServicesNodeLabels.class);
+ private static final String NODE_0 = "nid:0";
+ private static final String NODE_1 = "nid1:0";
+ private static final String NODE_2 = "nid2:0";
+ private static final String LABEL_A = "a";
+ private static final String LABEL_B = "b";
+ private static final String LABEL_X = "x";
+ private static final String LABEL_Y = "y";
+ private static final String LABEL_Z = "z";
+ public static final boolean DEFAULT_NL_EXCLUSIVITY = true;
+ private static final String PATH_WS = "ws";
+ private static final String PATH_V1 = "v1";
+ private static final String PATH_NODES = "nodes";
+ private static final String PATH_CLUSTER = "cluster";
+ private static final String PATH_REPLACE_NODE_TO_LABELS = "replace-node-to-labels";
+ private static final String PATH_LABEL_MAPPINGS = "label-mappings";
+ private static final String PATH_GET_LABELS = "get-labels";
+ private static final String PATH_REPLACE_LABELS = "replace-labels";
+ private static final String PATH_REMOVE_LABELS = "remove-node-labels";
+ private static final String PATH_GET_NODE_LABELS = "get-node-labels";
+ private static final String PATH_GET_NODE_TO_LABELS = "get-node-to-labels";
+ private static final String QUERY_USER_NAME = "user.name";
+ private static final String PATH_ADD_NODE_LABELS = "add-node-labels";
private static MockRM rm;
private static YarnConfiguration conf;
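For orientation, the path constants introduced above compose into the node-label REST endpoints this test exercises. A minimal sketch of the resulting request path, illustrative only, built the same way the helpers later in this patch do it with UriBuilder:

    // Illustrative sketch: how the constants combine into a request path such as
    // "ws/v1/cluster/nodes/nid:0/replace-labels" (user.name and labels travel as query params).
    String replaceLabelsPath = UriBuilder
        .fromPath(PATH_WS).path(PATH_V1).path(PATH_CLUSTER)
        .path(PATH_NODES).path(NODE_0).path(PATH_REPLACE_LABELS)
        .build().toString();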
@@ -76,6 +103,8 @@ public class TestRMWebServicesNodeLabels extends JerseyTestBase {
private static String userName;
private static String notUserName;
private static RMWebServices rmWebService;
+ private WebResource resource;
+
private static class WebServletModule extends ServletModule {
@@ -92,7 +121,7 @@ protected void configureServlets() {
conf = new YarnConfiguration();
conf.set(YarnConfiguration.YARN_ADMIN_ACL, userName);
rm = new MockRM(conf);
- rmWebService = new RMWebServices(rm,conf);
+ rmWebService = new RMWebServices(rm, conf);
bind(RMWebServices.class).toInstance(rmWebService);
bind(GenericExceptionHandler.class);
bind(ResourceManager.class).toInstance(rm);
@@ -100,7 +129,7 @@ protected void configureServlets() {
TestRMWebServicesAppsModification.TestRMCustomAuthFilter.class);
serve("/*").with(GuiceContainer.class);
}
- };
+ }
@Override
@Before
@@ -108,6 +137,7 @@ public void setUp() throws Exception {
super.setUp();
GuiceServletConfig.setInjector(
Guice.createInjector(new WebServletModule()));
+ resource = resource();
}
public TestRMWebServicesNodeLabels() {
@@ -118,528 +148,444 @@ public TestRMWebServicesNodeLabels() {
.contextPath("jersey-guice-filter").servletPath("/").build());
}
- @Test
- public void testNodeLabels() throws JSONException, Exception {
- WebResource r = resource();
+ private WebResource getClusterWebResource() {
+ return resource.path(PATH_WS).path(PATH_V1).path(PATH_CLUSTER);
+ }
+
+ private ClientResponse get(String path) {
+ return getClusterWebResource()
+ .path(path)
+ .queryParam(QUERY_USER_NAME, userName)
+ .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+ }
+
+ private ClientResponse get(String path, MultivaluedMapImpl queryParams) {
+ return getClusterWebResource()
+ .path(path)
+ .queryParam(QUERY_USER_NAME, userName)
+ .queryParams(queryParams)
+ .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+ }
+
+ private ClientResponse post(String path, String userName, Object payload,
+ Class<?> payloadClass) throws Exception {
+ return getClusterWebResource()
+ .path(path)
+ .queryParam(QUERY_USER_NAME, userName)
+ .accept(MediaType.APPLICATION_JSON)
+ .entity(toJson(payload, payloadClass),
+ MediaType.APPLICATION_JSON)
+ .post(ClientResponse.class);
+ }
+ private ClientResponse post(String path, String userName, Object payload,
+ Class<?> payloadClass, MultivaluedMapImpl queryParams) throws Exception {
+ WebResource.Builder builder = getClusterWebResource()
+ .path(path)
+ .queryParam(QUERY_USER_NAME, userName)
+ .queryParams(queryParams)
+ .accept(MediaType.APPLICATION_JSON);
+
+ if (payload != null && payloadClass != null) {
+ builder.entity(toJson(payload, payloadClass), MediaType.APPLICATION_JSON);
+ }
+ return builder.post(ClientResponse.class);
+ }
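Taken together, these helpers hide the Jersey boilerplate the old tests repeated inline. As a rough illustration only, relying on the fields and helpers defined above, a call like get(PATH_GET_NODE_LABELS) stands in for the raw client chain:

    // Sketch: what the get() helper expands to for the get-node-labels endpoint.
    ClientResponse response = getClusterWebResource()
        .path(PATH_GET_NODE_LABELS)
        .queryParam(QUERY_USER_NAME, userName)
        .accept(MediaType.APPLICATION_JSON)
        .get(ClientResponse.class);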
+
+ @Test
+ public void testNodeLabels() throws Exception {
ClientResponse response;
// Add a label
- NodeLabelsInfo nlsifo = new NodeLabelsInfo();
- nlsifo.getNodeLabelsInfo().add(new NodeLabelInfo("a"));
- response =
- r.path("ws").path("v1").path("cluster")
- .path("add-node-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON)
- .entity(toJson(nlsifo, NodeLabelsInfo.class),
- MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
+ response = addNodeLabels(Lists.newArrayList(Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY)));
+ assertHttp200(response);
// Verify
- response =
- r.path("ws").path("v1").path("cluster")
- .path("get-node-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- nlsifo = response.getEntity(NodeLabelsInfo.class);
- assertEquals(1, nlsifo.getNodeLabels().size());
- for (NodeLabelInfo nl : nlsifo.getNodeLabelsInfo()) {
- assertEquals("a", nl.getName());
- assertTrue(nl.getExclusivity());
- }
-
+ response = getNodeLabels();
+ assertApplicationJsonUtf8Response(response);
+ assertNodeLabelsInfo(response.getEntity(NodeLabelsInfo.class), Lists.newArrayList(
+ Pair.of(LABEL_A, true)));
+
// Add another
- nlsifo = new NodeLabelsInfo();
- nlsifo.getNodeLabelsInfo().add(new NodeLabelInfo("b", false));
- response =
- r.path("ws").path("v1").path("cluster")
- .path("add-node-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON)
- .entity(toJson(nlsifo, NodeLabelsInfo.class),
- MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
+ response = addNodeLabels(Lists.newArrayList(Pair.of(LABEL_B, false)));
+ assertHttp200(response);
// Verify
- response =
- r.path("ws").path("v1").path("cluster")
- .path("get-node-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- nlsifo = response.getEntity(NodeLabelsInfo.class);
- assertEquals(2, nlsifo.getNodeLabels().size());
- // Verify exclusivity for 'y' as false
- for (NodeLabelInfo nl : nlsifo.getNodeLabelsInfo()) {
- if (nl.getName().equals("b")) {
- assertFalse(nl.getExclusivity());
- }
- }
-
+ response = getNodeLabels();
+ assertApplicationJsonUtf8Response(response);
+ // Verify exclusivity for 'b' as false
+ assertNodeLabelsInfo(response.getEntity(NodeLabelsInfo.class),
+ Lists.newArrayList(
+ Pair.of(LABEL_A, true),
+ Pair.of(LABEL_B, false)));
+
// Add labels to a node
- MultivaluedMapImpl params = new MultivaluedMapImpl();
- params.add("labels", "a");
- response =
- r.path("ws").path("v1").path("cluster")
- .path("nodes").path("nid:0")
- .path("replace-labels")
- .queryParam("user.name", userName)
- .queryParams(params)
- .accept(MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
- LOG.info("posted node nodelabel");
+ response = replaceLabelsOnNode(NODE_0, LABEL_A);
+ assertHttp200(response);
// Add labels to another node
- params = new MultivaluedMapImpl();
- params.add("labels", "b");
- response =
- r.path("ws").path("v1").path("cluster")
- .path("nodes").path("nid1:0")
- .path("replace-labels")
- .queryParam("user.name", userName)
- .queryParams(params)
- .accept(MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
- LOG.info("posted node nodelabel");
+ response = replaceLabelsOnNode(NODE_1, LABEL_B);
+ assertHttp200(response);
// Add labels to another node
- params = new MultivaluedMapImpl();
- params.add("labels", "b");
- response =
- r.path("ws").path("v1").path("cluster")
- .path("nodes").path("nid2:0")
- .path("replace-labels")
- .queryParam("user.name", userName)
- .queryParams(params)
- .accept(MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
- LOG.info("posted node nodelabel");
-
- // Verify, using get-labels-to-Nodes
- response =
- r.path("ws").path("v1").path("cluster")
- .path("label-mappings").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- LabelsToNodesInfo ltni = response.getEntity(LabelsToNodesInfo.class);
- assertEquals(2, ltni.getLabelsToNodes().size());
- NodeIDsInfo nodes = ltni.getLabelsToNodes().get(
- new NodeLabelInfo("b", false));
- assertTrue(nodes.getNodeIDs().contains("nid2:0"));
- assertTrue(nodes.getNodeIDs().contains("nid1:0"));
- nodes = ltni.getLabelsToNodes().get(new NodeLabelInfo("a"));
- assertTrue(nodes.getNodeIDs().contains("nid:0"));
+ response = replaceLabelsOnNode(NODE_2, LABEL_B);
+ assertHttp200(response);
+
+ // Verify all, using get-labels-to-Nodes
+ response = getNodeLabelMappings();
+ assertApplicationJsonUtf8Response(response);
+ LabelsToNodesInfo labelsToNodesInfo = response.getEntity(LabelsToNodesInfo.class);
+ assertLabelsToNodesInfo(labelsToNodesInfo, 2, Lists.newArrayList(
+ Pair.of(Pair.of(LABEL_B, false), Lists.newArrayList(NODE_1, NODE_2)),
+ Pair.of(Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY), Lists.newArrayList(NODE_0))
+ ));
// Verify, using get-labels-to-Nodes for specified set of labels
- params = new MultivaluedMapImpl();
- params.add("labels", "a");
- response =
- r.path("ws").path("v1").path("cluster")
- .path("label-mappings").queryParam("user.name", userName)
- .queryParams(params)
- .accept(MediaType.APPLICATION_JSON)
- .get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- ltni = response.getEntity(LabelsToNodesInfo.class);
- assertEquals(1, ltni.getLabelsToNodes().size());
- nodes = ltni.getLabelsToNodes().get(new NodeLabelInfo("a"));
- assertTrue(nodes.getNodeIDs().contains("nid:0"));
+ response = getNodeLabelMappingsByLabels(LABEL_A);
+ assertApplicationJsonUtf8Response(response);
+ labelsToNodesInfo = response.getEntity(LabelsToNodesInfo.class);
+ assertLabelsToNodesInfo(labelsToNodesInfo, 1, Lists.newArrayList(
+ Pair.of(Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY), Lists.newArrayList(NODE_0))
+ ));
// Verify
- response =
- r.path("ws").path("v1").path("cluster")
- .path("nodes").path("nid:0")
- .path("get-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- nlsifo = response.getEntity(NodeLabelsInfo.class);
- assertTrue(nlsifo.getNodeLabelsInfo().contains(new NodeLabelInfo("a")));
+ response = getLabelsOfNode(NODE_0);
+ assertApplicationJsonUtf8Response(response);
+ assertNodeLabelsInfoContains(response.getEntity(NodeLabelsInfo.class),
+ Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY));
-
// Replace
- params = new MultivaluedMapImpl();
- params.add("labels", "b");
- response =
- r.path("ws").path("v1").path("cluster")
- .path("nodes").path("nid:0")
- .path("replace-labels")
- .queryParam("user.name", userName)
- .queryParams(params)
- .accept(MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
- LOG.info("posted node nodelabel");
+ response = replaceLabelsOnNode(NODE_0, LABEL_B);
+ assertHttp200(response);
// Verify
- response =
- r.path("ws").path("v1").path("cluster")
- .path("nodes").path("nid:0")
- .path("get-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- nlsifo = response.getEntity(NodeLabelsInfo.class);
- assertTrue(nlsifo.getNodeLabelsInfo().contains(
- new NodeLabelInfo("b", false)));
-
+ response = getLabelsOfNode(NODE_0);
+ assertApplicationJsonUtf8Response(response);
+ assertNodeLabelsInfoContains(response.getEntity(NodeLabelsInfo.class), Pair.of(LABEL_B, false));
+
// Replace labels using node-to-labels
- NodeToLabelsEntryList ntli = new NodeToLabelsEntryList();
- ArrayList<String> labels = new ArrayList<String>();
- labels.add("a");
- NodeToLabelsEntry nli = new NodeToLabelsEntry("nid:0", labels);
- ntli.getNodeToLabels().add(nli);
- response =
- r.path("ws").path("v1").path("cluster")
- .path("replace-node-to-labels")
- .queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON)
- .entity(toJson(ntli, NodeToLabelsEntryList.class),
- MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
-
+ response = replaceNodeToLabels(Lists.newArrayList(Pair.of(NODE_0,
+ Lists.newArrayList(LABEL_A))));
+ assertHttp200(response);
+
// Verify, using node-to-labels
- response =
- r.path("ws").path("v1").path("cluster")
- .path("get-node-to-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- NodeToLabelsInfo ntlinfo = response.getEntity(NodeToLabelsInfo.class);
- NodeLabelsInfo nlinfo = ntlinfo.getNodeToLabels().get("nid:0");
- assertEquals(1, nlinfo.getNodeLabels().size());
- assertTrue(nlinfo.getNodeLabelsInfo().contains(new NodeLabelInfo("a")));
-
+ response = getNodeToLabels();
+ assertApplicationJsonUtf8Response(response);
+ NodeToLabelsInfo nodeToLabelsInfo = response.getEntity(NodeToLabelsInfo.class);
+ NodeLabelsInfo nodeLabelsInfo = nodeToLabelsInfo.getNodeToLabels().get(NODE_0);
+ assertNodeLabelsSize(nodeLabelsInfo, 1);
+ assertNodeLabelsInfoContains(nodeLabelsInfo, Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY));
+
// Remove all
- params = new MultivaluedMapImpl();
- params.add("labels", "");
- response =
- r.path("ws").path("v1").path("cluster")
- .path("nodes").path("nid:0")
- .path("replace-labels")
- .queryParam("user.name", userName)
- .queryParams(params)
- .accept(MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
- LOG.info("posted node nodelabel");
+ response = replaceLabelsOnNode(NODE_0, "");
+ assertHttp200(response);
// Verify
- response =
- r.path("ws").path("v1").path("cluster")
- .path("nodes").path("nid:0")
- .path("get-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- nlsifo = response.getEntity(NodeLabelsInfo.class);
- assertTrue(nlsifo.getNodeLabelsInfo().isEmpty());
-
+ response = getLabelsOfNode(NODE_0);
+ assertApplicationJsonUtf8Response(response);
+ assertNodeLabelsSize(response.getEntity(NodeLabelsInfo.class), 0);
+
// Add a label back for auth tests
- params = new MultivaluedMapImpl();
- params.add("labels", "a");
- response =
- r.path("ws").path("v1").path("cluster")
- .path("nodes").path("nid:0")
- .path("replace-labels")
- .queryParam("user.name", userName)
- .queryParams(params)
- .accept(MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
- LOG.info("posted node nodelabel");
+ response = replaceLabelsOnNode(NODE_0, LABEL_A);
+ assertHttp200(response);
// Verify
- response =
- r.path("ws").path("v1").path("cluster")
- .path("nodes").path("nid:0")
- .path("get-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- nlsifo = response.getEntity(NodeLabelsInfo.class);
- assertTrue(nlsifo.getNodeLabelsInfo().contains(new NodeLabelInfo("a")));
-
+ response = getLabelsOfNode(NODE_0);
+ assertApplicationJsonUtf8Response(response);
+ assertNodeLabelsInfoContains(response.getEntity(NodeLabelsInfo.class),
+ Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY));
+
// Auth fail replace labels on node
- params = new MultivaluedMapImpl();
- params.add("labels", "b");
- response =
- r.path("ws").path("v1").path("cluster")
- .path("nodes").path("nid:0")
- .path("replace-labels")
- .queryParam("user.name", notUserName)
- .queryParams(params)
- .accept(MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
+ response = replaceLabelsOnNodeWithUserName(NODE_0, notUserName, LABEL_B);
+ assertHttp401(response);
// Verify
- response =
- r.path("ws").path("v1").path("cluster")
- .path("nodes").path("nid:0")
- .path("get-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- nlsifo = response.getEntity(NodeLabelsInfo.class);
- assertTrue(nlsifo.getNodeLabelsInfo().contains(new NodeLabelInfo("a")));
-
- // Fail to add a label with post
- response =
- r.path("ws").path("v1").path("cluster")
- .path("add-node-labels").queryParam("user.name", notUserName)
- .accept(MediaType.APPLICATION_JSON)
- .entity("{\"nodeLabels\":\"c\"}", MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
+ response = getLabelsOfNode(NODE_0);
+ assertApplicationJsonUtf8Response(response);
+ assertNodeLabelsInfoContains(response.getEntity(NodeLabelsInfo.class),
+ Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY));
+
+ // Fail to add a label with wrong user
+ response = addNodeLabelsWithUser(Lists.newArrayList(Pair.of("c", DEFAULT_NL_EXCLUSIVITY)),
+ notUserName);
+ assertHttp401(response);
// Verify
- response =
- r.path("ws").path("v1").path("cluster")
- .path("get-node-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- nlsifo = response.getEntity(NodeLabelsInfo.class);
- assertEquals(2, nlsifo.getNodeLabels().size());
-
+ response = getNodeLabels();
+ assertApplicationJsonUtf8Response(response);
+ assertNodeLabelsSize(response.getEntity(NodeLabelsInfo.class), 2);
+
// Remove cluster label (succeed, we no longer need it)
- params = new MultivaluedMapImpl();
- params.add("labels", "b");
- response =
- r.path("ws").path("v1").path("cluster")
- .path("remove-node-labels")
- .queryParam("user.name", userName)
- .queryParams(params)
- .accept(MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
+ response = removeNodeLabel(LABEL_B);
+ assertHttp200(response);
// Verify
- response =
- r.path("ws").path("v1").path("cluster")
- .path("get-node-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- nlsifo = response.getEntity(NodeLabelsInfo.class);
- assertEquals(1, nlsifo.getNodeLabels().size());
- for (NodeLabelInfo nl : nlsifo.getNodeLabelsInfo()) {
- assertEquals("a", nl.getName());
- assertTrue(nl.getExclusivity());
- }
-
+ response = getNodeLabels();
+ assertApplicationJsonUtf8Response(response);
+ assertNodeLabelsInfo(response.getEntity(NodeLabelsInfo.class),
+ Lists.newArrayList(Pair.of(LABEL_A, true)));
+
// Remove cluster label with post
- params = new MultivaluedMapImpl();
- params.add("labels", "a");
- response =
- r.path("ws").path("v1").path("cluster")
- .path("remove-node-labels")
- .queryParam("user.name", userName)
- .queryParams(params)
- .accept(MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
+ response = removeNodeLabel(LABEL_A);
+ assertHttp200(response);
// Verify
- response =
- r.path("ws").path("v1").path("cluster")
- .path("get-node-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- nlsifo = response.getEntity(NodeLabelsInfo.class);
- assertEquals(0, nlsifo.getNodeLabels().size());
+ response = getNodeLabels();
+ assertApplicationJsonUtf8Response(response);
+ nodeLabelsInfo = response.getEntity(NodeLabelsInfo.class);
+ assertEquals(0, nodeLabelsInfo.getNodeLabels().size());
// Following test cases are to test replace when distributed node label
// configuration is on
// Reset for testing : add cluster labels
- nlsifo = new NodeLabelsInfo();
- nlsifo.getNodeLabelsInfo().add(new NodeLabelInfo("x", false));
- nlsifo.getNodeLabelsInfo().add(new NodeLabelInfo("y", false));
- response =
- r.path("ws")
- .path("v1")
- .path("cluster")
- .path("add-node-labels")
- .queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON)
- .entity(toJson(nlsifo, NodeLabelsInfo.class),
- MediaType.APPLICATION_JSON).post(ClientResponse.class);
+ response = addNodeLabels(Lists.newArrayList(
+ Pair.of(LABEL_X, false), Pair.of(LABEL_Y, false)));
+ assertHttp200(response);
// Reset for testing : Add labels to a node
- params = new MultivaluedMapImpl();
- params.add("labels", "y");
- response =
- r.path("ws").path("v1").path("cluster").path("nodes").path("nid:0")
- .path("replace-labels").queryParam("user.name", userName)
- .queryParams(params)
- .accept(MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
- LOG.info("posted node nodelabel");
-
- //setting rmWebService for non Centralized NodeLabel Configuration
+ response = replaceLabelsOnNode(NODE_0, LABEL_Y);
+ assertHttp200(response);
+
+ //setting rmWebService for non-centralized NodeLabel Configuration
rmWebService.isCentralizedNodeLabelConfiguration = false;
// Case1 : Replace labels using node-to-labels
- ntli = new NodeToLabelsEntryList();
- labels = new ArrayList<String>();
- labels.add("x");
- nli = new NodeToLabelsEntry("nid:0", labels);
- ntli.getNodeToLabels().add(nli);
- response =
- r.path("ws")
- .path("v1")
- .path("cluster")
- .path("replace-node-to-labels")
- .queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON)
- .entity(toJson(ntli, NodeToLabelsEntryList.class),
- MediaType.APPLICATION_JSON).post(ClientResponse.class);
+ response = replaceNodeToLabels(Lists.newArrayList(Pair.of(NODE_0,
+ Lists.newArrayList(LABEL_X))));
+ assertHttp404(response);
// Verify, using node-to-labels that previous operation has failed
- response =
- r.path("ws").path("v1").path("cluster").path("get-node-to-labels")
- .queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- ntlinfo = response.getEntity(NodeToLabelsInfo.class);
- nlinfo = ntlinfo.getNodeToLabels().get("nid:0");
- assertEquals(1, nlinfo.getNodeLabels().size());
- assertFalse(nlinfo.getNodeLabelsInfo().contains(
- new NodeLabelInfo("x", false)));
+ response = getNodeToLabels();
+ assertApplicationJsonUtf8Response(response);
+ nodeToLabelsInfo = response.getEntity(NodeToLabelsInfo.class);
+ nodeLabelsInfo = nodeToLabelsInfo.getNodeToLabels().get(NODE_0);
+ assertNodeLabelsSize(nodeLabelsInfo, 1);
+ assertNodeLabelsInfoDoesNotContain(nodeLabelsInfo, Pair.of(LABEL_X, false));
// Case2 : failure to Replace labels using replace-labels
- response =
- r.path("ws").path("v1").path("cluster").path("nodes").path("nid:0")
- .path("replace-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON)
- .entity("{\"nodeLabelName\": [\"x\"]}", MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
- LOG.info("posted node nodelabel");
+ response = replaceLabelsOnNode(NODE_0, LABEL_X);
+ assertHttp404(response);
// Verify, using node-to-labels that previous operation has failed
- response =
- r.path("ws").path("v1").path("cluster").path("get-node-to-labels")
- .queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- ntlinfo = response.getEntity(NodeToLabelsInfo.class);
- nlinfo = ntlinfo.getNodeToLabels().get("nid:0");
- assertEquals(1, nlinfo.getNodeLabels().size());
- assertFalse(nlinfo.getNodeLabelsInfo().contains(
- new NodeLabelInfo("x", false)));
+ response = getNodeToLabels();
+ assertApplicationJsonUtf8Response(response);
+ nodeToLabelsInfo = response.getEntity(NodeToLabelsInfo.class);
+ nodeLabelsInfo = nodeToLabelsInfo.getNodeToLabels().get(NODE_0);
+ assertNodeLabelsSize(nodeLabelsInfo, 1);
+ assertNodeLabelsInfoDoesNotContain(nodeLabelsInfo, Pair.of(LABEL_X, false));
// Case3 : Remove cluster label should be successful
- params = new MultivaluedMapImpl();
- params.add("labels", "x");
- response =
- r.path("ws").path("v1").path("cluster")
- .path("remove-node-labels")
- .queryParam("user.name", userName)
- .queryParams(params)
- .accept(MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
+ response = removeNodeLabel(LABEL_X);
+ assertHttp200(response);
// Verify
- response =
- r.path("ws").path("v1").path("cluster")
- .path("get-node-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- nlsifo = response.getEntity(NodeLabelsInfo.class);
- assertEquals(new NodeLabelInfo("y", false),
- nlsifo.getNodeLabelsInfo().get(0));
- assertEquals("y", nlsifo.getNodeLabelsInfo().get(0).getName());
- assertFalse(nlsifo.getNodeLabelsInfo().get(0).getExclusivity());
+ response = getNodeLabels();
+ assertApplicationJsonUtf8Response(response);
+ assertNodeLabelsInfoAtPosition(response.getEntity(NodeLabelsInfo.class), Pair.of(LABEL_Y,
+ false), 0);
// Remove y
- params = new MultivaluedMapImpl();
- params.add("labels", "y");
- response =
- r.path("ws").path("v1").path("cluster")
- .path("remove-node-labels")
- .queryParam("user.name", userName)
- .queryParams(params)
- .accept(MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
+ response = removeNodeLabel(LABEL_Y);
+ assertHttp200(response);
// Verify
- response =
- r.path("ws").path("v1").path("cluster")
- .path("get-node-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- nlsifo = response.getEntity(NodeLabelsInfo.class);
- assertTrue(nlsifo.getNodeLabelsInfo().isEmpty());
-
- // add a new nodelabel with exclusity
- nlsifo = new NodeLabelsInfo();
- nlsifo.getNodeLabelsInfo().add(new NodeLabelInfo("z", false));
- response =
- r.path("ws")
- .path("v1")
- .path("cluster")
- .path("add-node-labels")
- .queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON)
- .entity(toJson(nlsifo, NodeLabelsInfo.class),
- MediaType.APPLICATION_JSON).post(ClientResponse.class);
+ response = getNodeLabels();
+ assertApplicationJsonUtf8Response(response);
+ assertNodeLabelsSize(response.getEntity(NodeLabelsInfo.class), 0);
+ // add a new nodelabel with exclusivity=false
+ response = addNodeLabels(Lists.newArrayList(Pair.of(LABEL_Z, false)));
+ assertHttp200(response);
// Verify
- response =
- r.path("ws").path("v1").path("cluster")
- .path("get-node-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+ response = getNodeLabels();
+ assertApplicationJsonUtf8Response(response);
+ assertNodeLabelsInfoAtPosition(response.getEntity(NodeLabelsInfo.class),
+ Pair.of(LABEL_Z, false), 0);
+ assertNodeLabelsSize(nodeLabelsInfo, 1);
+ }
+
+ private void assertLabelsToNodesInfo(LabelsToNodesInfo labelsToNodesInfo, int size,
+ List<Pair<Pair<String, Boolean>, List<String>>> nodeLabelsToNodesList) {
+ Map<NodeLabelInfo, NodeIDsInfo> labelsToNodes = labelsToNodesInfo.getLabelsToNodes();
+ assertNotNull("Labels to nodes mapping should not be null.", labelsToNodes);
+ assertEquals("Size of label to nodes mapping is not the expected.", size, labelsToNodes.size());
+
+ for (Pair<Pair<String, Boolean>, List<String>> nodeLabelToNodes : nodeLabelsToNodesList) {
+ Pair<String, Boolean> expectedNLData = nodeLabelToNodes.getLeft();
+ List<String> expectedNodes = nodeLabelToNodes.getRight();
+ NodeLabelInfo expectedNLInfo = new NodeLabelInfo(expectedNLData.getLeft(),
+ expectedNLData.getRight());
+ NodeIDsInfo actualNodes = labelsToNodes.get(expectedNLInfo);
+ assertNotNull(String.format("Node info not found. Expected NodeLabel data: %s",
+ expectedNLData), actualNodes);
+ for (String expectedNode : expectedNodes) {
+ assertTrue(String.format("Can't find node ID in actual Node IDs list: %s",
+ actualNodes.getNodeIDs()), actualNodes.getNodeIDs().contains(expectedNode));
+ }
+ }
+ }
+
+ private void assertNodeLabelsInfo(NodeLabelsInfo nodeLabelsInfo,
+ List<Pair<String, Boolean>> nlInfos) {
+ assertEquals(nlInfos.size(), nodeLabelsInfo.getNodeLabels().size());
+
+ for (int i = 0; i < nodeLabelsInfo.getNodeLabelsInfo().size(); i++) {
+ Pair<String, Boolean> expected = nlInfos.get(i);
+ NodeLabelInfo actual = nodeLabelsInfo.getNodeLabelsInfo().get(i);
+ LOG.debug("Checking NodeLabelInfo: {}", actual);
+ assertEquals(expected.getLeft(), actual.getName());
+ assertEquals(expected.getRight(), actual.getExclusivity());
+ }
+ }
+
+ private void assertNodeLabelsInfoAtPosition(NodeLabelsInfo nodeLabelsInfo, Pair<String, Boolean> nlInfo, int pos) {
+ NodeLabelInfo actual = nodeLabelsInfo.getNodeLabelsInfo().get(pos);
+ LOG.debug("Checking NodeLabelInfo: {}", actual);
+ assertEquals(nlInfo.getLeft(), actual.getName());
+ assertEquals(nlInfo.getRight(), actual.getExclusivity());
+ }
+
+ private void assertNodeLabelsInfoContains(NodeLabelsInfo nodeLabelsInfo,
+ Pair<String, Boolean> nlInfo) {
+ NodeLabelInfo nodeLabelInfo = new NodeLabelInfo(nlInfo.getLeft(), nlInfo.getRight());
+ assertTrue(String.format("Cannot find nodeLabelInfo '%s' among items of node label info list:" +
+ " %s", nodeLabelInfo, nodeLabelsInfo.getNodeLabelsInfo()),
+ nodeLabelsInfo.getNodeLabelsInfo().contains(nodeLabelInfo));
+ }
+
+ private void assertNodeLabelsInfoDoesNotContain(NodeLabelsInfo nodeLabelsInfo, Pair<String, Boolean> nlInfo) {
+ NodeLabelInfo nodeLabelInfo = new NodeLabelInfo(nlInfo.getLeft(), nlInfo.getRight());
+ assertFalse(String.format("Should have not found nodeLabelInfo '%s' among " +
+ "items of node label info list: %s", nodeLabelInfo, nodeLabelsInfo.getNodeLabelsInfo()),
+ nodeLabelsInfo.getNodeLabelsInfo().contains(nodeLabelInfo));
+ }
+
+ private void assertNodeLabelsSize(NodeLabelsInfo nodeLabelsInfo, int expectedSize) {
+ assertEquals(expectedSize, nodeLabelsInfo.getNodeLabelsInfo().size());
+ }
+
+ private ClientResponse replaceNodeToLabels(List<Pair<String, List<String>>> nodeToLabelInfos) throws Exception {
+ NodeToLabelsEntryList nodeToLabelsEntries = new NodeToLabelsEntryList();
+
+ for (Pair<String, List<String>> nodeToLabelInfo : nodeToLabelInfos) {
+ ArrayList<String> labelList = new ArrayList<>(nodeToLabelInfo.getRight());
+ String nodeId = nodeToLabelInfo.getLeft();
+ NodeToLabelsEntry nli = new NodeToLabelsEntry(nodeId, labelList);
+ nodeToLabelsEntries.getNodeToLabels().add(nli);
+ }
+ return post(PATH_REPLACE_NODE_TO_LABELS, userName, nodeToLabelsEntries, NodeToLabelsEntryList.class);
+ }
+
+ private ClientResponse getNodeLabelMappings() {
+ return get(PATH_LABEL_MAPPINGS);
+ }
+
+ private ClientResponse getNodeLabelMappingsByLabels(String... labelNames) {
+ MultivaluedMapImpl params = createMultiValuedMap(labelNames);
+ return get(PATH_LABEL_MAPPINGS, params);
+ }
+
+ private ClientResponse replaceLabelsOnNode(String node, String... labelNames) throws Exception {
+ return replaceLabelsOnNodeWithUserName(node, userName, labelNames);
+ }
+
+ private ClientResponse replaceLabelsOnNodeWithUserName(String node,
+ String userName, String... labelNames) throws Exception {
+ LOG.info("Replacing labels on node '{}', label(s): {}", node, labelNames);
+ MultivaluedMapImpl params = createMultiValuedMap(labelNames);
+ String path = UriBuilder.fromPath(PATH_NODES).path(node)
+ .path(PATH_REPLACE_LABELS).build().toString();
+ return post(path, userName, null, null, params);
+ }
+
+ private static MultivaluedMapImpl createMultiValuedMap(String[] labelNames) {
+ MultivaluedMapImpl params = new MultivaluedMapImpl();
+ for (String labelName : labelNames) {
+ params.add("labels", labelName);
+ }
+ return params;
+ }
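Since MultivaluedMapImpl keeps one entry per added value, passing several label names yields a repeated "labels" query parameter on the request. A small sketch, assuming the test class context above:

    // Sketch: two labels become two "labels" query parameters,
    // roughly "...?user.name=<admin>&labels=x&labels=y".
    MultivaluedMapImpl params = createMultiValuedMap(new String[] {LABEL_X, LABEL_Y});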
+
+ private ClientResponse removeNodeLabel(String... labelNames) throws Exception {
+ MultivaluedMapImpl params = createMultiValuedMap(labelNames);
+ return post(PATH_REMOVE_LABELS, userName, null, null, params);
+ }
+
+ private ClientResponse getLabelsOfNode(String node) {
+ String path = UriBuilder.fromPath(PATH_NODES).path(node)
+ .path(PATH_GET_LABELS).build().toString();
+ return get(path);
+ }
+
+ private ClientResponse getNodeLabels() {
+ return get(PATH_GET_NODE_LABELS);
+ }
+
+ private ClientResponse getNodeToLabels() {
+ return get(PATH_GET_NODE_TO_LABELS);
+ }
+
+ private ClientResponse addNodeLabels(List<Pair<String, Boolean>> nlInfos) throws Exception {
+ return addNodeLabelsInternal(nlInfos, userName);
+ }
+
+ private ClientResponse addNodeLabelsWithUser(List<Pair<String, Boolean>> nlInfos,
+ String userName) throws Exception {
+ return addNodeLabelsInternal(nlInfos, userName);
+ }
+
+ private ClientResponse addNodeLabelsInternal(List<Pair<String, Boolean>> nlInfos,
+ String userName) throws Exception {
+ NodeLabelsInfo nodeLabelsInfo = new NodeLabelsInfo();
+ for (Pair<String, Boolean> nlInfo : nlInfos) {
+ NodeLabelInfo nodeLabelInfo = new NodeLabelInfo(nlInfo.getLeft(), nlInfo.getRight());
+ nodeLabelsInfo.getNodeLabelsInfo().add(nodeLabelInfo);
+ }
+ return post(PATH_ADD_NODE_LABELS, userName, nodeLabelsInfo, NodeLabelsInfo.class);
+ }
+
+ private void assertApplicationJsonUtf8Response(ClientResponse response) {
assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
response.getType().toString());
- nlsifo = response.getEntity(NodeLabelsInfo.class);
- assertEquals("z", nlsifo.getNodeLabelsInfo().get(0).getName());
- assertFalse(nlsifo.getNodeLabelsInfo().get(0).getExclusivity());
- assertEquals(1, nlsifo.getNodeLabels().size());
+ }
+
+ private void assertHttp200(ClientResponse response) {
+ assertEquals(Response.Status.OK.getStatusCode(), response.getStatus());
+ }
+
+ private void assertHttp401(ClientResponse response) {
+ assertEquals(Response.Status.UNAUTHORIZED.getStatusCode(), response.getStatus());
+ }
+
+ private void assertHttp404(ClientResponse response) {
+ assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus());
}
@Test
public void testLabelInvalidAddition()
- throws UniformInterfaceException, Exception {
- WebResource r = resource();
- ClientResponse response;
+ throws Exception {
// Add a invalid label
- NodeLabelsInfo nlsifo = new NodeLabelsInfo();
- nlsifo.getNodeLabelsInfo().add(new NodeLabelInfo("a&"));
- response = r.path("ws").path("v1").path("cluster").path("add-node-labels")
- .queryParam("user.name", userName).accept(MediaType.APPLICATION_JSON)
- .entity(toJson(nlsifo, NodeLabelsInfo.class),
- MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
- String expectedmessage =
+ ClientResponse response = addNodeLabels(Lists.newArrayList(Pair.of("a&",
+ DEFAULT_NL_EXCLUSIVITY)));
+ String expectedMessage =
"java.io.IOException: label name should only contains"
+ " {0-9, a-z, A-Z, -, _} and should not started with"
+ " {-,_}, now it is= a&";
- validateJsonExceptionContent(response, expectedmessage);
+ validateJsonExceptionContent(response, expectedMessage);
}
@Test
public void testLabelChangeExclusivity()
- throws Exception, JSONException {
- WebResource r = resource();
+ throws Exception {
ClientResponse response;
- NodeLabelsInfo nlsifo = new NodeLabelsInfo();
- nlsifo.getNodeLabelsInfo().add(new NodeLabelInfo("newlabel", true));
- response = r.path("ws").path("v1").path("cluster").path("add-node-labels")
- .queryParam("user.name", userName).accept(MediaType.APPLICATION_JSON)
- .entity(toJson(nlsifo, NodeLabelsInfo.class),
- MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
+ response = addNodeLabels(Lists.newArrayList(Pair.of("newLabel", DEFAULT_NL_EXCLUSIVITY)));
+ assertHttp200(response);
// new info and change exclusivity
- NodeLabelsInfo nlsinfo2 = new NodeLabelsInfo();
- nlsinfo2.getNodeLabelsInfo().add(new NodeLabelInfo("newlabel", false));
- response = r.path("ws").path("v1").path("cluster").path("add-node-labels")
- .queryParam("user.name", userName).accept(MediaType.APPLICATION_JSON)
- .entity(toJson(nlsinfo2, NodeLabelsInfo.class),
- MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
- String expectedmessage =
+ response = addNodeLabels(Lists.newArrayList(Pair.of("newLabel", false)));
+ String expectedMessage =
"java.io.IOException: Exclusivity cannot be modified for an existing"
- + " label with : ";
- validateJsonExceptionContent(response, expectedmessage);
+ + " label with : ";
+ validateJsonExceptionContent(response, expectedMessage);
}
private void validateJsonExceptionContent(ClientResponse response,
- String expectedmessage)
+ String expectedMessage)
throws JSONException {
Assert.assertEquals(BAD_REQUEST_CODE, response.getStatus());
JSONObject msg = response.getEntity(JSONObject.class);
@@ -653,112 +599,76 @@ private void validateJsonExceptionContent(ClientResponse response,
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
WebServicesTestUtils.checkStringContains("exception message",
- expectedmessage, message);
+ expectedMessage, message);
}
@Test
public void testLabelInvalidReplace()
- throws UniformInterfaceException, Exception {
- WebResource r = resource();
+ throws Exception {
ClientResponse response;
- // replace label which doesnt exist
- MultivaluedMapImpl params = new MultivaluedMapImpl();
- params.add("labels", "idontexist");
- response = r.path("ws").path("v1").path("cluster").path("nodes")
- .path("nid:0").path("replace-labels").queryParam("user.name", userName)
- .queryParams(params).accept(MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
+ // replace label which doesn't exist
+ response = replaceLabelsOnNode(NODE_0, "idontexist");
- String expectedmessage =
+ String expectedMessage =
"Not all labels being replaced contained by known label"
+ " collections, please check, new labels=[idontexist]";
- validateJsonExceptionContent(response, expectedmessage);
+ validateJsonExceptionContent(response, expectedMessage);
}
@Test
public void testLabelInvalidRemove()
- throws UniformInterfaceException, Exception {
- WebResource r = resource();
+ throws Exception {
ClientResponse response;
- MultivaluedMapImpl params = new MultivaluedMapImpl();
- params.add("labels", "irealldontexist");
- response =
- r.path("ws").path("v1").path("cluster").path("remove-node-labels")
- .queryParam("user.name", userName).queryParams(params)
- .accept(MediaType.APPLICATION_JSON).post(ClientResponse.class);
- String expectedmessage =
- "java.io.IOException: Node label=irealldontexist to be"
+ response = removeNodeLabel("ireallydontexist");
+ String expectedMessage =
+ "java.io.IOException: Node label=ireallydontexist to be"
+ " removed doesn't existed in cluster node labels"
+ " collection.";
- validateJsonExceptionContent(response, expectedmessage);
+ validateJsonExceptionContent(response, expectedMessage);
}
@Test
public void testNodeLabelPartitionInfo() throws Exception {
- WebResource r = resource();
-
ClientResponse response;
// Add a node label
- NodeLabelsInfo nlsifo = new NodeLabelsInfo();
- nlsifo.getNodeLabelsInfo().add(new NodeLabelInfo("a"));
- response =
- r.path("ws").path("v1").path("cluster")
- .path("add-node-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON)
- .entity(toJson(nlsifo, NodeLabelsInfo.class), MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
+ response = addNodeLabels(Lists.newArrayList(Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY)));
+ assertHttp200(response);
// Verify partition info in get-node-labels
- response =
- r.path("ws").path("v1").path("cluster")
- .path("get-node-labels").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- nlsifo = response.getEntity(NodeLabelsInfo.class);
- assertEquals(1, nlsifo.getNodeLabels().size());
- for (NodeLabelInfo nl : nlsifo.getNodeLabelsInfo()) {
- assertEquals("a", nl.getName());
+ response = getNodeLabels();
+ assertApplicationJsonUtf8Response(response);
+ NodeLabelsInfo nodeLabelsInfo = response.getEntity(NodeLabelsInfo.class);
+ assertNodeLabelsSize(nodeLabelsInfo, 1);
+ for (NodeLabelInfo nl : nodeLabelsInfo.getNodeLabelsInfo()) {
+ assertEquals(LABEL_A, nl.getName());
assertTrue(nl.getExclusivity());
assertNotNull(nl.getPartitionInfo());
assertNotNull(nl.getPartitionInfo().getResourceAvailable());
}
// Add node label to a node
- MultivaluedMapImpl params = new MultivaluedMapImpl();
- params.add("labels", "a");
- response =
- r.path("ws").path("v1").path("cluster")
- .path("nodes").path("nodeId:0")
- .path("replace-labels")
- .queryParam("user.name", userName)
- .queryParams(params)
- .accept(MediaType.APPLICATION_JSON)
- .post(ClientResponse.class);
+ response = replaceLabelsOnNode("nodeId:0", LABEL_A);
+ assertHttp200(response);
// Verify partition info in label-mappings
- response =
- r.path("ws").path("v1").path("cluster")
- .path("label-mappings").queryParam("user.name", userName)
- .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
- assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
- response.getType().toString());
- LabelsToNodesInfo ltni = response.getEntity(LabelsToNodesInfo.class);
- assertEquals(1, ltni.getLabelsToNodes().size());
- NodeIDsInfo nodes = ltni.getLabelsToNodes().get(
- new NodeLabelInfo("a"));
- assertTrue(nodes.getNodeIDs().contains("nodeId:0"));
+ response = getNodeLabelMappings();
+ assertApplicationJsonUtf8Response(response);
+ LabelsToNodesInfo labelsToNodesInfo = response.getEntity(LabelsToNodesInfo.class);
+ assertLabelsToNodesInfo(labelsToNodesInfo, 1, Lists.newArrayList(
+ Pair.of(Pair.of(LABEL_A, DEFAULT_NL_EXCLUSIVITY), Lists.newArrayList("nodeId:0"))
+ ));
+ NodeIDsInfo nodes = labelsToNodesInfo.getLabelsToNodes().get(new NodeLabelInfo(LABEL_A));
assertNotNull(nodes.getPartitionInfo());
assertNotNull(nodes.getPartitionInfo().getResourceAvailable());
}
@SuppressWarnings("rawtypes")
- private String toJson(Object nsli, Class klass) throws Exception {
+ private String toJson(Object obj, Class klass) throws Exception {
StringWriter sw = new StringWriter();
JSONJAXBContext ctx = new JSONJAXBContext(klass);
JSONMarshaller jm = ctx.createJSONMarshaller();
- jm.marshallToJSON(nsli, sw);
+ jm.marshallToJSON(obj, sw);
return sw.toString();
}
}
From 383b73417df80028011d229dce9daf8e4ecbdb49 Mon Sep 17 00:00:00 2001
From: 9uapaw
Date: Thu, 10 Mar 2022 13:11:19 +0100
Subject: [PATCH 029/145] YARN-11036. Do not inherit from
TestRMWebServicesCapacitySched. Contributed by Tamas Domok
---
.../TestRMWebServicesCapacitySched.java | 47 +++++++++-----
...WebServicesCapacitySchedDynamicConfig.java | 65 +++++--------------
.../TestRMWebServicesSchedulerActivities.java | 31 +++++++--
3 files changed, 73 insertions(+), 70 deletions(-)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
index 2be9b54c86549..b9ce10aaeddcb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
@@ -77,42 +77,37 @@
public class TestRMWebServicesCapacitySched extends JerseyTestBase {
- protected static MockRM rm;
+ private MockRM rm;
+
+ public static class WebServletModule extends ServletModule {
+ private final MockRM rm;
+
+ WebServletModule(MockRM rm) {
+ this.rm = rm;
+ }
- private static class WebServletModule extends ServletModule {
@Override
protected void configureServlets() {
bind(JAXBContextResolver.class);
bind(RMWebServices.class);
bind(GenericExceptionHandler.class);
- CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(
- new Configuration(false));
- setupQueueConfiguration(csConf);
- YarnConfiguration conf = new YarnConfiguration(csConf);
- conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
- ResourceScheduler.class);
- conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
- YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
- rm = new MockRM(conf);
bind(ResourceManager.class).toInstance(rm);
serve("/*").with(GuiceContainer.class);
}
}
public TestRMWebServicesCapacitySched() {
- super(new WebAppDescriptor.Builder(
- "org.apache.hadoop.yarn.server.resourcemanager.webapp")
- .contextListenerClass(GuiceServletConfig.class)
- .filterClass(com.google.inject.servlet.GuiceFilter.class)
- .contextPath("jersey-guice-filter").servletPath("/").build());
+ super(createWebAppDescriptor());
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
+ rm = createMockRM(new CapacitySchedulerConfiguration(
+ new Configuration(false)));
GuiceServletConfig.setInjector(
- Guice.createInjector(new WebServletModule()));
+ Guice.createInjector(new WebServletModule(rm)));
}
public static void setupQueueConfiguration(
@@ -389,4 +384,22 @@ public static void updateTestDataAutomatically(String configFilename, String act
Assert.fail("overwrite should not fail " + e.getMessage());
}
}
+
+ public static WebAppDescriptor createWebAppDescriptor() {
+ return new WebAppDescriptor.Builder(
+ TestRMWebServicesCapacitySched.class.getPackage().getName())
+ .contextListenerClass(GuiceServletConfig.class)
+ .filterClass(com.google.inject.servlet.GuiceFilter.class)
+ .contextPath("jersey-guice-filter").servletPath("/").build();
+ }
+
+ public static MockRM createMockRM(CapacitySchedulerConfiguration csConf) {
+ setupQueueConfiguration(csConf);
+ YarnConfiguration conf = new YarnConfiguration(csConf);
+ conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+ ResourceScheduler.class);
+ conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
+ YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
+ return new MockRM(conf);
+ }
}
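With the servlet module and the two static factories made public, sibling test classes can compose this setup instead of inheriting from TestRMWebServicesCapacitySched. A minimal sketch of the intended wiring (the class name is hypothetical; the later patches in this series, e.g. TestRMWebServicesSchedulerActivities, follow the same shape):

    // Sketch of a JerseyTestBase subclass composing the shared setup.
    public ExampleCapacitySchedWebServicesTest() {
      super(TestRMWebServicesCapacitySched.createWebAppDescriptor());
    }

    @Before
    @Override
    public void setUp() throws Exception {
      super.setUp();
      rm = TestRMWebServicesCapacitySched.createMockRM(
          new CapacitySchedulerConfiguration(new Configuration(false)));
      GuiceServletConfig.setInjector(Guice.createInjector(
          new TestRMWebServicesCapacitySched.WebServletModule(rm)));
    }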
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySchedDynamicConfig.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySchedDynamicConfig.java
index 0e2ecd0bfad38..4cc50c4d57c1b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySchedDynamicConfig.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySchedDynamicConfig.java
@@ -19,10 +19,7 @@
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.WebAppDescriptor;
import java.io.IOException;
import java.util.HashMap;
@@ -34,67 +31,28 @@
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AutoCreatedQueueTemplate;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerQueueManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.QueuePath;
-import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
import org.apache.hadoop.yarn.webapp.JerseyTestBase;
import org.junit.Test;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerTestUtilities.GB;
import static org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesCapacitySched.assertJsonResponse;
+import static org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesCapacitySched.createMockRM;
+import static org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesCapacitySched.createWebAppDescriptor;
public class TestRMWebServicesCapacitySchedDynamicConfig extends
JerseyTestBase {
- private static final int GB = 1024;
- private static MockRM rm;
+ private MockRM rm;
private CapacitySchedulerQueueManager autoQueueHandler;
- private static class WebServletModule extends ServletModule {
- private final Configuration conf;
-
- WebServletModule(Configuration conf) {
- this.conf = conf;
- }
-
- @Override
- protected void configureServlets() {
- bind(JAXBContextResolver.class);
- bind(RMWebServices.class);
- bind(GenericExceptionHandler.class);
- conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
- ResourceScheduler.class);
- conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
- YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
- rm = new MockRM(conf);
- bind(ResourceManager.class).toInstance(rm);
- serve("/*").with(GuiceContainer.class);
- }
- }
-
- private void initResourceManager(Configuration conf) throws IOException {
- GuiceServletConfig.setInjector(
- Guice.createInjector(new WebServletModule(conf)));
- rm.start();
- //Need to call reinitialize as
- //MutableCSConfigurationProvider with InMemoryConfigurationStore
- //somehow does not load the queues properly and falls back to default config.
- //Therefore CS will think there's only the default queue there.
- ((CapacityScheduler) rm.getResourceScheduler()).reinitialize(conf,
- rm.getRMContext(), true);
- }
-
public TestRMWebServicesCapacitySchedDynamicConfig() {
- super(new WebAppDescriptor.Builder(
- "org.apache.hadoop.yarn.server.resourcemanager.webapp")
- .contextListenerClass(GuiceServletConfig.class)
- .filterClass(com.google.inject.servlet.GuiceFilter.class)
- .contextPath("jersey-guice-filter").servletPath("/").build());
+ super(createWebAppDescriptor());
}
@Test
@@ -327,4 +285,17 @@ public static Configuration createConfiguration(
return config;
}
}
+
+ private void initResourceManager(Configuration conf) throws IOException {
+ rm = createMockRM(new CapacitySchedulerConfiguration(conf));
+ GuiceServletConfig.setInjector(
+ Guice.createInjector(new TestRMWebServicesCapacitySched.WebServletModule(rm)));
+ rm.start();
+ //Need to call reinitialize as
+ //MutableCSConfigurationProvider with InMemoryConfigurationStore
+ //somehow does not load the queues properly and falls back to default config.
+ //Therefore CS will think there's only the default queue there.
+ ((CapacityScheduler) rm.getResourceScheduler()).reinitialize(conf,
+ rm.getRMContext(), true);
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
index 3fc3c26c00af2..ee472dafcb940 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
@@ -18,15 +18,20 @@
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+import com.google.inject.Guice;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.core.util.MultivaluedMapImpl;
+
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityDiagnosticConstant;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityState;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
+import org.junit.Before;
import org.apache.hadoop.http.JettyUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
@@ -83,6 +88,8 @@
import static org.apache.hadoop.yarn.server.resourcemanager.webapp.ActivitiesTestUtils.verifyNumberOfNodes;
import static org.apache.hadoop.yarn.server.resourcemanager.webapp.ActivitiesTestUtils.verifyQueueOrder;
import static org.apache.hadoop.yarn.server.resourcemanager.webapp.ActivitiesTestUtils.verifyStateOfAllocations;
+import static org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesCapacitySched.createMockRM;
+import static org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesCapacitySched.createWebAppDescriptor;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
@@ -91,11 +98,23 @@
/**
* Tests for scheduler/app activities.
*/
-public class TestRMWebServicesSchedulerActivities
- extends TestRMWebServicesCapacitySched {
+public class TestRMWebServicesSchedulerActivities extends JerseyTestBase {
+
+ private MockRM rm;
- private static final Logger LOG = LoggerFactory.getLogger(
- TestRMWebServicesSchedulerActivities.class);
+ public TestRMWebServicesSchedulerActivities() {
+ super(createWebAppDescriptor());
+ }
+
+ @Before
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ rm = createMockRM(new CapacitySchedulerConfiguration(
+ new Configuration(false)));
+ GuiceServletConfig.setInjector(
+ Guice.createInjector(new TestRMWebServicesCapacitySched.WebServletModule(rm)));
+ }
@Test
public void testAssignMultipleContainersPerNodeHeartbeat()
From d0fa9b5775185bd83e4a767a7dfc13ef89c5154a Mon Sep 17 00:00:00 2001
From: Gautham B A
Date: Thu, 10 Mar 2022 22:02:38 +0530
Subject: [PATCH 030/145] HADOOP-18155. Refactor tests in TestFileUtil (#4053)
---
.../java/org/apache/hadoop/fs/FileUtil.java | 36 +-
.../org/apache/hadoop/fs/TestFileUtil.java | 402 +++++++++++-------
2 files changed, 275 insertions(+), 163 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 96d8a40512f0a..b788c7ec6b664 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -39,6 +39,7 @@
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.LinkOption;
+import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
@@ -992,6 +993,14 @@ private static void unpackEntries(TarArchiveInputStream tis,
+ " would create entry outside of " + outputDir);
}
+ if (entry.isSymbolicLink() || entry.isLink()) {
+ String canonicalTargetPath = getCanonicalPath(entry.getLinkName(), outputDir);
+ if (!canonicalTargetPath.startsWith(targetDirPath)) {
+ throw new IOException(
+ "expanding " + entry.getName() + " would create entry outside of " + outputDir);
+ }
+ }
+
if (entry.isDirectory()) {
File subDir = new File(outputDir, entry.getName());
if (!subDir.mkdirs() && !subDir.isDirectory()) {
@@ -1007,10 +1016,12 @@ private static void unpackEntries(TarArchiveInputStream tis,
}
if (entry.isSymbolicLink()) {
- // Create symbolic link relative to tar parent dir
- Files.createSymbolicLink(FileSystems.getDefault()
- .getPath(outputDir.getPath(), entry.getName()),
- FileSystems.getDefault().getPath(entry.getLinkName()));
+ // Create symlink with canonical target path to ensure that we don't extract
+ // outside targetDirPath
+ String canonicalTargetPath = getCanonicalPath(entry.getLinkName(), outputDir);
+ Files.createSymbolicLink(
+ FileSystems.getDefault().getPath(outputDir.getPath(), entry.getName()),
+ FileSystems.getDefault().getPath(canonicalTargetPath));
return;
}
@@ -1022,7 +1033,8 @@ private static void unpackEntries(TarArchiveInputStream tis,
}
if (entry.isLink()) {
- File src = new File(outputDir, entry.getLinkName());
+ String canonicalTargetPath = getCanonicalPath(entry.getLinkName(), outputDir);
+ File src = new File(canonicalTargetPath);
HardLink.createHardLink(src, outputFile);
return;
}
@@ -1030,6 +1042,20 @@ private static void unpackEntries(TarArchiveInputStream tis,
org.apache.commons.io.FileUtils.copyToFile(tis, outputFile);
}
+ /**
+ * Gets the canonical path for the given path.
+ *
+ * @param path The path for which the canonical path needs to be computed.
+ * @param parentDir The parent directory to use if the path is a relative path.
+   * @return The canonical path of the given path.
+   * @throws IOException if an I/O error occurs while resolving the canonical path.
+   */
+ private static String getCanonicalPath(String path, File parentDir) throws IOException {
+ java.nio.file.Path targetPath = Paths.get(path);
+ return (targetPath.isAbsolute() ?
+ new File(path) :
+ new File(parentDir, path)).getCanonicalPath();
+ }
+
/**
* Class for creating hardlinks.
* Supports Unix, WindXP.
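
For illustration, here is a minimal standalone sketch of the containment check the hunks above add to unpackEntries: a tar entry's link target is resolved to a canonical path (relative targets are resolved against the extraction directory) and rejected if it escapes that directory. The class and method names below are illustrative only, not part of FileUtil, and like the patch it relies on a simple canonical-path prefix check.

import java.io.File;
import java.io.IOException;

public class TarSlipCheckSketch {

  // Resolve a link target the way the patch does: absolute targets stand alone,
  // relative targets are resolved against the extraction directory.
  static String canonicalTarget(String linkName, File outputDir) throws IOException {
    File target = new File(linkName).isAbsolute()
        ? new File(linkName)
        : new File(outputDir, linkName);
    return target.getCanonicalPath();
  }

  // Reject the entry if its resolved target would land outside the extraction root.
  static void requireInside(String entryName, String linkName, File outputDir)
      throws IOException {
    String root = outputDir.getCanonicalPath();
    String target = canonicalTarget(linkName, outputDir);
    if (!target.startsWith(root)) {
      throw new IOException("expanding " + entryName
          + " would create entry outside of " + outputDir);
    }
  }

  public static void main(String[] args) throws IOException {
    File out = new File("extract-root");
    requireInside("bar/ok-link", "sibling-file", out);          // accepted
    try {
      requireInside("bar/evil-link", "../../etc/passwd", out);  // rejected
    } catch (IOException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}
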
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
index e19900dfeacdb..29eafb9e4dac0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
@@ -42,13 +42,14 @@
import java.net.URL;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
-import java.nio.file.FileSystems;
import java.nio.file.Files;
+import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+import java.util.Objects;
import java.util.jar.Attributes;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
@@ -60,9 +61,12 @@
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.tools.tar.TarEntry;
import org.apache.tools.tar.TarOutputStream;
+
+import org.assertj.core.api.Assertions;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -158,13 +162,12 @@ public void setup() throws IOException {
FileUtils.forceMkdir(dir1);
FileUtils.forceMkdir(dir2);
- new File(del, FILE).createNewFile();
- File tmpFile = new File(tmp, FILE);
- tmpFile.createNewFile();
+ Verify.createNewFile(new File(del, FILE));
+ File tmpFile = Verify.createNewFile(new File(tmp, FILE));
// create files
- new File(dir1, FILE).createNewFile();
- new File(dir2, FILE).createNewFile();
+ Verify.createNewFile(new File(dir1, FILE));
+ Verify.createNewFile(new File(dir2, FILE));
// create a symlink to file
File link = new File(del, LINK);
@@ -173,7 +176,7 @@ public void setup() throws IOException {
// create a symlink to dir
File linkDir = new File(del, "tmpDir");
FileUtil.symLink(tmp.toString(), linkDir.toString());
- Assert.assertEquals(5, del.listFiles().length);
+ Assert.assertEquals(5, Objects.requireNonNull(del.listFiles()).length);
// create files in partitioned directories
createFile(partitioned, "part-r-00000", "foo");
@@ -200,13 +203,9 @@ public void tearDown() throws IOException {
private File createFile(File directory, String name, String contents)
throws IOException {
File newFile = new File(directory, name);
- PrintWriter pw = new PrintWriter(newFile);
- try {
+ try (PrintWriter pw = new PrintWriter(newFile)) {
pw.println(contents);
}
- finally {
- pw.close();
- }
return newFile;
}
@@ -218,11 +217,11 @@ public void testListFiles() throws IOException {
//Test existing directory with no files case
File newDir = new File(tmp.getPath(),"test");
- newDir.mkdir();
+ Verify.mkdir(newDir);
Assert.assertTrue("Failed to create test dir", newDir.exists());
files = FileUtil.listFiles(newDir);
Assert.assertEquals(0, files.length);
- newDir.delete();
+ assertTrue(newDir.delete());
Assert.assertFalse("Failed to delete test dir", newDir.exists());
//Test non-existing directory case, this throws
@@ -244,11 +243,11 @@ public void testListAPI() throws IOException {
//Test existing directory with no files case
File newDir = new File(tmp.getPath(),"test");
- newDir.mkdir();
+ Verify.mkdir(newDir);
Assert.assertTrue("Failed to create test dir", newDir.exists());
files = FileUtil.list(newDir);
Assert.assertEquals("New directory unexpectedly contains files", 0, files.length);
- newDir.delete();
+ assertTrue(newDir.delete());
Assert.assertFalse("Failed to delete test dir", newDir.exists());
//Test non-existing directory case, this throws
@@ -266,7 +265,7 @@ public void testListAPI() throws IOException {
public void testFullyDelete() throws IOException {
boolean ret = FileUtil.fullyDelete(del);
Assert.assertTrue(ret);
- Assert.assertFalse(del.exists());
+ Verify.notExists(del);
validateTmpDir();
}
@@ -279,13 +278,13 @@ public void testFullyDelete() throws IOException {
@Test (timeout = 30000)
public void testFullyDeleteSymlinks() throws IOException {
File link = new File(del, LINK);
- Assert.assertEquals(5, del.list().length);
+ assertDelListLength(5);
// Since tmpDir is symlink to tmp, fullyDelete(tmpDir) should not
// delete contents of tmp. See setupDirs for details.
boolean ret = FileUtil.fullyDelete(link);
Assert.assertTrue(ret);
- Assert.assertFalse(link.exists());
- Assert.assertEquals(4, del.list().length);
+ Verify.notExists(link);
+ assertDelListLength(4);
validateTmpDir();
File linkDir = new File(del, "tmpDir");
@@ -293,8 +292,8 @@ public void testFullyDeleteSymlinks() throws IOException {
// delete contents of tmp. See setupDirs for details.
ret = FileUtil.fullyDelete(linkDir);
Assert.assertTrue(ret);
- Assert.assertFalse(linkDir.exists());
- Assert.assertEquals(3, del.list().length);
+ Verify.notExists(linkDir);
+ assertDelListLength(3);
validateTmpDir();
}
@@ -310,16 +309,16 @@ public void testFullyDeleteDanglingSymlinks() throws IOException {
// to make y as a dangling link to file tmp/x
boolean ret = FileUtil.fullyDelete(tmp);
Assert.assertTrue(ret);
- Assert.assertFalse(tmp.exists());
+ Verify.notExists(tmp);
// dangling symlink to file
File link = new File(del, LINK);
- Assert.assertEquals(5, del.list().length);
+ assertDelListLength(5);
// Even though 'y' is dangling symlink to file tmp/x, fullyDelete(y)
// should delete 'y' properly.
ret = FileUtil.fullyDelete(link);
Assert.assertTrue(ret);
- Assert.assertEquals(4, del.list().length);
+ assertDelListLength(4);
// dangling symlink to directory
File linkDir = new File(del, "tmpDir");
@@ -327,22 +326,22 @@ public void testFullyDeleteDanglingSymlinks() throws IOException {
// delete tmpDir properly.
ret = FileUtil.fullyDelete(linkDir);
Assert.assertTrue(ret);
- Assert.assertEquals(3, del.list().length);
+ assertDelListLength(3);
}
@Test (timeout = 30000)
public void testFullyDeleteContents() throws IOException {
boolean ret = FileUtil.fullyDeleteContents(del);
Assert.assertTrue(ret);
- Assert.assertTrue(del.exists());
- Assert.assertEquals(0, del.listFiles().length);
+ Verify.exists(del);
+ Assert.assertEquals(0, Objects.requireNonNull(del.listFiles()).length);
validateTmpDir();
}
private void validateTmpDir() {
- Assert.assertTrue(tmp.exists());
- Assert.assertEquals(1, tmp.listFiles().length);
- Assert.assertTrue(new File(tmp, FILE).exists());
+ Verify.exists(tmp);
+ Assert.assertEquals(1, Objects.requireNonNull(tmp.listFiles()).length);
+ Verify.exists(new File(tmp, FILE));
}
/**
@@ -366,15 +365,15 @@ private void validateTmpDir() {
* @throws IOException
*/
private void setupDirsAndNonWritablePermissions() throws IOException {
- new MyFile(del, FILE_1_NAME).createNewFile();
+ Verify.createNewFile(new MyFile(del, FILE_1_NAME));
// "file1" is non-deletable by default, see MyFile.delete().
- xSubDir.mkdirs();
- file2.createNewFile();
+ Verify.mkdirs(xSubDir);
+ Verify.createNewFile(file2);
- xSubSubDir.mkdirs();
- file22.createNewFile();
+ Verify.mkdirs(xSubSubDir);
+ Verify.createNewFile(file22);
revokePermissions(file22);
revokePermissions(xSubSubDir);
@@ -382,8 +381,8 @@ private void setupDirsAndNonWritablePermissions() throws IOException {
revokePermissions(file2);
revokePermissions(xSubDir);
- ySubDir.mkdirs();
- file3.createNewFile();
+ Verify.mkdirs(ySubDir);
+ Verify.createNewFile(file3);
File tmpFile = new File(tmp, FILE);
tmpFile.createNewFile();
@@ -462,8 +461,8 @@ public void testFailFullyDeleteDirSymlinks() throws IOException {
boolean ret = FileUtil.fullyDelete(linkDir);
// fail symlink deletion
Assert.assertFalse(ret);
- Assert.assertTrue(linkDir.exists());
- Assert.assertEquals(5, del.list().length);
+ Verify.exists(linkDir);
+ assertDelListLength(5);
// tmp dir should exist
validateTmpDir();
// simulate disk recovers and turns good
@@ -471,12 +470,94 @@ public void testFailFullyDeleteDirSymlinks() throws IOException {
ret = FileUtil.fullyDelete(linkDir);
// success symlink deletion
Assert.assertTrue(ret);
- Assert.assertFalse(linkDir.exists());
- Assert.assertEquals(4, del.list().length);
+ Verify.notExists(linkDir);
+ assertDelListLength(4);
// tmp dir should exist
validateTmpDir();
}
+ /**
+   * Asserts that the file listing of {@link TestFileUtil#del} has the given expected length.
+   *
+   * @param expectedLength The expected number of entries in {@link TestFileUtil#del}.
+ */
+ private void assertDelListLength(int expectedLength) {
+ Assertions.assertThat(del.list()).describedAs("del list").isNotNull().hasSize(expectedLength);
+ }
+
+ /**
+   * Helper class to perform {@link File} operations and verify them.
+ */
+ public static class Verify {
+ /**
+ * Invokes {@link File#createNewFile()} on the given {@link File} instance.
+ *
+ * @param file The file to call {@link File#createNewFile()} on.
+ * @return The result of {@link File#createNewFile()}.
+ * @throws IOException As per {@link File#createNewFile()}.
+ */
+ public static File createNewFile(File file) throws IOException {
+ assertTrue("Unable to create new file " + file, file.createNewFile());
+ return file;
+ }
+
+ /**
+ * Invokes {@link File#mkdir()} on the given {@link File} instance.
+ *
+ * @param file The file to call {@link File#mkdir()} on.
+ * @return The result of {@link File#mkdir()}.
+ */
+ public static File mkdir(File file) {
+ assertTrue("Unable to mkdir for " + file, file.mkdir());
+ return file;
+ }
+
+ /**
+ * Invokes {@link File#mkdirs()} on the given {@link File} instance.
+ *
+ * @param file The file to call {@link File#mkdirs()} on.
+ * @return The result of {@link File#mkdirs()}.
+ */
+ public static File mkdirs(File file) {
+ assertTrue("Unable to mkdirs for " + file, file.mkdirs());
+ return file;
+ }
+
+ /**
+ * Invokes {@link File#delete()} on the given {@link File} instance.
+ *
+ * @param file The file to call {@link File#delete()} on.
+ * @return The result of {@link File#delete()}.
+ */
+ public static File delete(File file) {
+ assertTrue("Unable to delete " + file, file.delete());
+ return file;
+ }
+
+ /**
+ * Invokes {@link File#exists()} on the given {@link File} instance.
+ *
+ * @param file The file to call {@link File#exists()} on.
+ * @return The result of {@link File#exists()}.
+ */
+ public static File exists(File file) {
+ assertTrue("Expected file " + file + " doesn't exist", file.exists());
+ return file;
+ }
+
+ /**
+ * Invokes {@link File#exists()} on the given {@link File} instance to check if the
+   * {@link File} does not exist.
+ *
+ * @param file The file to call {@link File#exists()} on.
+ * @return The negation of the result of {@link File#exists()}.
+ */
+ public static File notExists(File file) {
+ assertFalse("Expected file " + file + " must not exist", file.exists());
+ return file;
+ }
+ }
+
/**
* Extend {@link File}. Same as {@link File} except for two things: (1) This
* treats file1Name as a very special file which is not delete-able
@@ -609,14 +690,13 @@ public void testGetDU() throws Exception {
FileUtil.chmod(partitioned.getAbsolutePath(), "0777", true/*recursive*/);
}
}
-
+
@Test (timeout = 30000)
- public void testUnTar() throws IOException {
+ public void testUnTar() throws Exception {
// make a simple tar:
final File simpleTar = new File(del, FILE);
- OutputStream os = new FileOutputStream(simpleTar);
- TarOutputStream tos = new TarOutputStream(os);
- try {
+ OutputStream os = new FileOutputStream(simpleTar);
+ try (TarOutputStream tos = new TarOutputStream(os)) {
TarEntry te = new TarEntry("/bar/foo");
byte[] data = "some-content".getBytes("UTF-8");
te.setSize(data.length);
@@ -625,55 +705,42 @@ public void testUnTar() throws IOException {
tos.closeEntry();
tos.flush();
tos.finish();
- } finally {
- tos.close();
}
// successfully untar it into an existing dir:
FileUtil.unTar(simpleTar, tmp);
// check result:
- assertTrue(new File(tmp, "/bar/foo").exists());
+ Verify.exists(new File(tmp, "/bar/foo"));
assertEquals(12, new File(tmp, "/bar/foo").length());
-
- final File regularFile = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
- regularFile.createNewFile();
- assertTrue(regularFile.exists());
- try {
- FileUtil.unTar(simpleTar, regularFile);
- assertTrue("An IOException expected.", false);
- } catch (IOException ioe) {
- // okay
- }
+
+ final File regularFile =
+ Verify.createNewFile(new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog"));
+ LambdaTestUtils.intercept(IOException.class, () -> FileUtil.unTar(simpleTar, regularFile));
}
@Test (timeout = 30000)
public void testReplaceFile() throws IOException {
- final File srcFile = new File(tmp, "src");
-
// src exists, and target does not exist:
- srcFile.createNewFile();
- assertTrue(srcFile.exists());
+ final File srcFile = Verify.createNewFile(new File(tmp, "src"));
final File targetFile = new File(tmp, "target");
- assertTrue(!targetFile.exists());
+ Verify.notExists(targetFile);
FileUtil.replaceFile(srcFile, targetFile);
- assertTrue(!srcFile.exists());
- assertTrue(targetFile.exists());
+ Verify.notExists(srcFile);
+ Verify.exists(targetFile);
// src exists and target is a regular file:
- srcFile.createNewFile();
- assertTrue(srcFile.exists());
+ Verify.createNewFile(srcFile);
+ Verify.exists(srcFile);
FileUtil.replaceFile(srcFile, targetFile);
- assertTrue(!srcFile.exists());
- assertTrue(targetFile.exists());
+ Verify.notExists(srcFile);
+ Verify.exists(targetFile);
// src exists, and target is a non-empty directory:
- srcFile.createNewFile();
- assertTrue(srcFile.exists());
- targetFile.delete();
- targetFile.mkdirs();
- File obstacle = new File(targetFile, "obstacle");
- obstacle.createNewFile();
- assertTrue(obstacle.exists());
+ Verify.createNewFile(srcFile);
+ Verify.exists(srcFile);
+ Verify.delete(targetFile);
+ Verify.mkdirs(targetFile);
+ File obstacle = Verify.createNewFile(new File(targetFile, "obstacle"));
assertTrue(targetFile.exists() && targetFile.isDirectory());
try {
FileUtil.replaceFile(srcFile, targetFile);
@@ -682,9 +749,9 @@ public void testReplaceFile() throws IOException {
// okay
}
// check up the post-condition: nothing is deleted:
- assertTrue(srcFile.exists());
+ Verify.exists(srcFile);
assertTrue(targetFile.exists() && targetFile.isDirectory());
- assertTrue(obstacle.exists());
+ Verify.exists(obstacle);
}
@Test (timeout = 30000)
@@ -697,13 +764,13 @@ public void testCreateLocalTempFile() throws IOException {
assertTrue(tmp1.exists() && tmp2.exists());
assertTrue(tmp1.canWrite() && tmp2.canWrite());
assertTrue(tmp1.canRead() && tmp2.canRead());
- tmp1.delete();
- tmp2.delete();
+ Verify.delete(tmp1);
+ Verify.delete(tmp2);
assertTrue(!tmp1.exists() && !tmp2.exists());
}
@Test (timeout = 30000)
- public void testUnZip() throws IOException {
+ public void testUnZip() throws Exception {
// make a simple zip
final File simpleZip = new File(del, FILE);
OutputStream os = new FileOutputStream(simpleZip);
@@ -724,18 +791,12 @@ public void testUnZip() throws IOException {
// successfully unzip it into an existing dir:
FileUtil.unZip(simpleZip, tmp);
// check result:
- assertTrue(new File(tmp, "foo").exists());
+ Verify.exists(new File(tmp, "foo"));
assertEquals(12, new File(tmp, "foo").length());
-
- final File regularFile = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
- regularFile.createNewFile();
- assertTrue(regularFile.exists());
- try {
- FileUtil.unZip(simpleZip, regularFile);
- assertTrue("An IOException expected.", false);
- } catch (IOException ioe) {
- // okay
- }
+
+ final File regularFile =
+ Verify.createNewFile(new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog"));
+ LambdaTestUtils.intercept(IOException.class, () -> FileUtil.unZip(simpleZip, regularFile));
}
@Test (timeout = 30000)
@@ -781,24 +842,24 @@ public void testCopy5() throws IOException {
final File dest = new File(del, "dest");
boolean result = FileUtil.copy(fs, srcPath, dest, false, conf);
assertTrue(result);
- assertTrue(dest.exists());
+ Verify.exists(dest);
assertEquals(content.getBytes().length
+ System.getProperty("line.separator").getBytes().length, dest.length());
- assertTrue(srcFile.exists()); // should not be deleted
+ Verify.exists(srcFile); // should not be deleted
// copy regular file, delete src:
- dest.delete();
- assertTrue(!dest.exists());
+ Verify.delete(dest);
+ Verify.notExists(dest);
result = FileUtil.copy(fs, srcPath, dest, true, conf);
assertTrue(result);
- assertTrue(dest.exists());
+ Verify.exists(dest);
assertEquals(content.getBytes().length
+ System.getProperty("line.separator").getBytes().length, dest.length());
- assertTrue(!srcFile.exists()); // should be deleted
+ Verify.notExists(srcFile); // should be deleted
// copy a dir:
- dest.delete();
- assertTrue(!dest.exists());
+ Verify.delete(dest);
+ Verify.notExists(dest);
srcPath = new Path(partitioned.toURI());
result = FileUtil.copy(fs, srcPath, dest, true, conf);
assertTrue(result);
@@ -810,7 +871,7 @@ public void testCopy5() throws IOException {
assertEquals(3
+ System.getProperty("line.separator").getBytes().length, f.length());
}
- assertTrue(!partitioned.exists()); // should be deleted
+ Verify.notExists(partitioned); // should be deleted
}
@Test (timeout = 30000)
@@ -898,8 +959,8 @@ public void testSymlinkRenameTo() throws Exception {
// create the symlink
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
- Assert.assertTrue(file.exists());
- Assert.assertTrue(link.exists());
+ Verify.exists(file);
+ Verify.exists(link);
File link2 = new File(del, "_link2");
@@ -909,10 +970,10 @@ public void testSymlinkRenameTo() throws Exception {
// Make sure the file still exists
// (NOTE: this would fail on Java6 on Windows if we didn't
// copy the file in FileUtil#symlink)
- Assert.assertTrue(file.exists());
+ Verify.exists(file);
- Assert.assertTrue(link2.exists());
- Assert.assertFalse(link.exists());
+ Verify.exists(link2);
+ Verify.notExists(link);
}
/**
@@ -927,13 +988,13 @@ public void testSymlinkDelete() throws Exception {
// create the symlink
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
- Assert.assertTrue(file.exists());
- Assert.assertTrue(link.exists());
+ Verify.exists(file);
+ Verify.exists(link);
// make sure that deleting a symlink works properly
- Assert.assertTrue(link.delete());
- Assert.assertFalse(link.exists());
- Assert.assertTrue(file.exists());
+ Verify.delete(link);
+ Verify.notExists(link);
+ Verify.exists(file);
}
/**
@@ -960,13 +1021,13 @@ public void testSymlinkLength() throws Exception {
Assert.assertEquals(data.length, file.length());
Assert.assertEquals(data.length, link.length());
- file.delete();
- Assert.assertFalse(file.exists());
+ Verify.delete(file);
+ Verify.notExists(file);
Assert.assertEquals(0, link.length());
- link.delete();
- Assert.assertFalse(link.exists());
+ Verify.delete(link);
+ Verify.notExists(link);
}
/**
@@ -1032,7 +1093,7 @@ public void testSymlinkFileAlreadyExists() throws IOException {
public void testSymlinkSameFile() throws IOException {
File file = new File(del, FILE);
- file.delete();
+ Verify.delete(file);
// Create a symbolic link
// The operation should succeed
@@ -1105,21 +1166,21 @@ private void doUntarAndVerify(File tarFile, File untarDir)
String parentDir = untarDir.getCanonicalPath() + Path.SEPARATOR + "name";
File testFile = new File(parentDir + Path.SEPARATOR + "version");
- Assert.assertTrue(testFile.exists());
+ Verify.exists(testFile);
Assert.assertTrue(testFile.length() == 0);
String imageDir = parentDir + Path.SEPARATOR + "image";
testFile = new File(imageDir + Path.SEPARATOR + "fsimage");
- Assert.assertTrue(testFile.exists());
+ Verify.exists(testFile);
Assert.assertTrue(testFile.length() == 157);
String currentDir = parentDir + Path.SEPARATOR + "current";
testFile = new File(currentDir + Path.SEPARATOR + "fsimage");
- Assert.assertTrue(testFile.exists());
+ Verify.exists(testFile);
Assert.assertTrue(testFile.length() == 4331);
testFile = new File(currentDir + Path.SEPARATOR + "edits");
- Assert.assertTrue(testFile.exists());
+ Verify.exists(testFile);
Assert.assertTrue(testFile.length() == 1033);
testFile = new File(currentDir + Path.SEPARATOR + "fstime");
- Assert.assertTrue(testFile.exists());
+ Verify.exists(testFile);
Assert.assertTrue(testFile.length() == 8);
}
@@ -1180,9 +1241,9 @@ public void testCreateJarWithClassPath() throws Exception {
}
// create non-jar files, which we expect to not be included in the classpath
- Assert.assertTrue(new File(tmp, "text.txt").createNewFile());
- Assert.assertTrue(new File(tmp, "executable.exe").createNewFile());
- Assert.assertTrue(new File(tmp, "README").createNewFile());
+ Verify.createNewFile(new File(tmp, "text.txt"));
+ Verify.createNewFile(new File(tmp, "executable.exe"));
+ Verify.createNewFile(new File(tmp, "README"));
// create classpath jar
String wildcardPath = tmp.getCanonicalPath() + File.separator + "*";
@@ -1268,9 +1329,9 @@ public void testGetJarsInDirectory() throws Exception {
}
// create non-jar files, which we expect to not be included in the result
- assertTrue(new File(tmp, "text.txt").createNewFile());
- assertTrue(new File(tmp, "executable.exe").createNewFile());
- assertTrue(new File(tmp, "README").createNewFile());
+ Verify.createNewFile(new File(tmp, "text.txt"));
+ Verify.createNewFile(new File(tmp, "executable.exe"));
+ Verify.createNewFile(new File(tmp, "README"));
// pass in the directory
String directory = tmp.getCanonicalPath();
@@ -1304,7 +1365,7 @@ public void setupCompareFs() {
uri4 = new URI(uris4);
uri5 = new URI(uris5);
uri6 = new URI(uris6);
- } catch (URISyntaxException use) {
+ } catch (URISyntaxException ignored) {
}
// Set up InetAddress
inet1 = mock(InetAddress.class);
@@ -1327,7 +1388,7 @@ public void setupCompareFs() {
when(InetAddress.getByName(uris3)).thenReturn(inet3);
when(InetAddress.getByName(uris4)).thenReturn(inet4);
when(InetAddress.getByName(uris5)).thenReturn(inet5);
- } catch (UnknownHostException ue) {
+ } catch (UnknownHostException ignored) {
}
fs1 = mock(FileSystem.class);
@@ -1347,62 +1408,87 @@ public void setupCompareFs() {
@Test
public void testCompareFsNull() throws Exception {
setupCompareFs();
- assertEquals(FileUtil.compareFs(null,fs1),false);
- assertEquals(FileUtil.compareFs(fs1,null),false);
+ assertFalse(FileUtil.compareFs(null, fs1));
+ assertFalse(FileUtil.compareFs(fs1, null));
}
@Test
public void testCompareFsDirectories() throws Exception {
setupCompareFs();
- assertEquals(FileUtil.compareFs(fs1,fs1),true);
- assertEquals(FileUtil.compareFs(fs1,fs2),false);
- assertEquals(FileUtil.compareFs(fs1,fs5),false);
- assertEquals(FileUtil.compareFs(fs3,fs4),true);
- assertEquals(FileUtil.compareFs(fs1,fs6),false);
+ assertTrue(FileUtil.compareFs(fs1, fs1));
+ assertFalse(FileUtil.compareFs(fs1, fs2));
+ assertFalse(FileUtil.compareFs(fs1, fs5));
+ assertTrue(FileUtil.compareFs(fs3, fs4));
+ assertFalse(FileUtil.compareFs(fs1, fs6));
}
@Test(timeout = 8000)
public void testCreateSymbolicLinkUsingJava() throws IOException {
final File simpleTar = new File(del, FILE);
OutputStream os = new FileOutputStream(simpleTar);
- TarArchiveOutputStream tos = new TarArchiveOutputStream(os);
- File untarFile = null;
- try {
+ try (TarArchiveOutputStream tos = new TarArchiveOutputStream(os)) {
// Files to tar
final String tmpDir = "tmp/test";
File tmpDir1 = new File(tmpDir, "dir1/");
File tmpDir2 = new File(tmpDir, "dir2/");
- // Delete the directories if they already exist
- tmpDir1.mkdirs();
- tmpDir2.mkdirs();
+ Verify.mkdirs(tmpDir1);
+ Verify.mkdirs(tmpDir2);
- java.nio.file.Path symLink = FileSystems
- .getDefault().getPath(tmpDir1.getPath() + "/sl");
+ java.nio.file.Path symLink = Paths.get(tmpDir1.getPath(), "sl");
// Create Symbolic Link
- Files.createSymbolicLink(symLink,
- FileSystems.getDefault().getPath(tmpDir2.getPath())).toString();
+ Files.createSymbolicLink(symLink, Paths.get(tmpDir2.getPath()));
assertTrue(Files.isSymbolicLink(symLink.toAbsolutePath()));
- // put entries in tar file
+ // Put entries in tar file
putEntriesInTar(tos, tmpDir1.getParentFile());
tos.close();
- untarFile = new File(tmpDir, "2");
- // Untar using java
+ File untarFile = new File(tmpDir, "2");
+ // Untar using Java
FileUtil.unTarUsingJava(simpleTar, untarFile, false);
// Check symbolic link and other directories are there in untar file
assertTrue(Files.exists(untarFile.toPath()));
- assertTrue(Files.exists(FileSystems.getDefault().getPath(untarFile
- .getPath(), tmpDir)));
- assertTrue(Files.isSymbolicLink(FileSystems.getDefault().getPath(untarFile
- .getPath().toString(), symLink.toString())));
-
+ assertTrue(Files.exists(Paths.get(untarFile.getPath(), tmpDir)));
+ assertTrue(Files.isSymbolicLink(Paths.get(untarFile.getPath(), symLink.toString())));
} finally {
FileUtils.deleteDirectory(new File("tmp"));
- tos.close();
}
+ }
+
+ @Test(expected = IOException.class)
+ public void testCreateArbitrarySymlinkUsingJava() throws IOException {
+ final File simpleTar = new File(del, FILE);
+ OutputStream os = new FileOutputStream(simpleTar);
+ File rootDir = new File("tmp");
+ try (TarArchiveOutputStream tos = new TarArchiveOutputStream(os)) {
+ tos.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU);
+
+ // Create arbitrary dir
+ File arbitraryDir = new File(rootDir, "arbitrary-dir/");
+ Verify.mkdirs(arbitraryDir);
+
+ // We will tar from the tar-root lineage
+ File tarRoot = new File(rootDir, "tar-root/");
+ File symlinkRoot = new File(tarRoot, "dir1/");
+ Verify.mkdirs(symlinkRoot);
+
+ // Create Symbolic Link to an arbitrary dir
+ java.nio.file.Path symLink = Paths.get(symlinkRoot.getPath(), "sl");
+ Files.createSymbolicLink(symLink, arbitraryDir.toPath().toAbsolutePath());
+
+ // Put entries in tar file
+ putEntriesInTar(tos, tarRoot);
+ putEntriesInTar(tos, new File(symLink.toFile(), "dir-outside-tar-root/"));
+ tos.close();
+
+ // Untar using Java
+ File untarFile = new File(rootDir, "extracted");
+ FileUtil.unTarUsingJava(simpleTar, untarFile, false);
+ } finally {
+ FileUtils.deleteDirectory(rootDir);
+ }
}
private void putEntriesInTar(TarArchiveOutputStream tos, File f)
@@ -1496,7 +1582,7 @@ public void testReadSymlinkWithAFileAsInput() throws IOException {
String result = FileUtil.readLink(file);
Assert.assertEquals("", result);
- file.delete();
+ Verify.delete(file);
}
/**
From 481da19494fe13ca42651305b519e0670cafabf0 Mon Sep 17 00:00:00 2001
From: Szilard Nemeth
Date: Thu, 10 Mar 2022 22:15:35 +0100
Subject: [PATCH 031/145] YARN-10049. FIFOOrderingPolicy Improvements.
Contributed by Benjamin Teke
---
.../scheduler/policy/FifoComparator.java | 5 +
.../policy/TestFifoOrderingPolicy.java | 91 ++++++++++++++-----
2 files changed, 71 insertions(+), 25 deletions(-)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoComparator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoComparator.java
index 112c50ff1cc52..c62b738df5679 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoComparator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FifoComparator.java
@@ -29,6 +29,11 @@ public class FifoComparator
@Override
public int compare(SchedulableEntity r1, SchedulableEntity r2) {
int res = r1.compareInputOrderTo(r2);
+
+ if (res == 0) {
+ res = (int) Math.signum(r1.getStartTime() - r2.getStartTime());
+ }
+
return res;
}
}
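
For illustration, a small self-contained sketch of the ordering this change introduces: entities are compared by their submission serial first, and only on a tie does the start time decide, matching the signum fallback above. The Entity class and FIFO comparator below are illustrative stand-ins, not the Hadoop SchedulableEntity or FifoComparator types.

import java.util.Comparator;

class FifoTieBreakSketch {

  // Illustrative stand-in with just the two fields the comparison needs.
  static final class Entity {
    final long serial;
    final long startTime;
    Entity(long serial, long startTime) {
      this.serial = serial;
      this.startTime = startTime;
    }
  }

  // Compare by submission serial first; fall back to start time only on a tie.
  static final Comparator<Entity> FIFO = (a, b) -> {
    int res = Long.compare(a.serial, b.serial);
    if (res == 0) {
      res = (int) Math.signum(a.startTime - b.startTime);
    }
    return res;
  };

  public static void main(String[] args) {
    Entity first = new Entity(1, 5);
    Entity second = new Entity(1, 10);
    // Same serial, so the earlier start time wins: prints a negative value.
    System.out.println(FIFO.compare(first, second));
  }
}
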
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicy.java
index 7ec2c01ec2512..62bc7124c4b9d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/TestFifoOrderingPolicy.java
@@ -18,16 +18,19 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy;
-import java.util.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
-import org.junit.Assert;
-import org.junit.Test;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
import org.apache.hadoop.yarn.api.records.Priority;
+import org.junit.Assert;
+import org.junit.Test;
-import static org.assertj.core.api.Assertions.assertThat;
-
-public class TestFifoOrderingPolicy {
+public class TestFifoOrderingPolicy {
@Test
public void testFifoOrderingPolicy() {
@@ -36,13 +39,17 @@ public void testFifoOrderingPolicy() {
MockSchedulableEntity r1 = new MockSchedulableEntity();
MockSchedulableEntity r2 = new MockSchedulableEntity();
- assertThat(policy.getComparator().compare(r1, r2)).isEqualTo(0);
+ assertEquals("The comparator should return 0 because the entities are created with " +
+ "the same values.", 0,
+ policy.getComparator().compare(r1, r2));
r1.setSerial(1);
- assertThat(policy.getComparator().compare(r1, r2)).isEqualTo(1);
+ assertEquals("The lhs entity has a larger serial, the comparator return " +
+ "value should be 1.", 1, policy.getComparator().compare(r1, r2));
r2.setSerial(2);
- assertThat(policy.getComparator().compare(r1, r2)).isEqualTo(-1);
+ Assert.assertEquals("The rhs entity has a larger serial, the comparator return " +
+ "value should be -1.", -1, policy.getComparator().compare(r1, r2));
}
@Test
@@ -63,46 +70,80 @@ public void testIterators() {
schedOrder.addSchedulableEntity(msp3);
//Assignment, oldest to youngest
- checkSerials(schedOrder.getAssignmentIterator(IteratorSelector.EMPTY_ITERATOR_SELECTOR), new long[]{1, 2, 3});
+ checkSerials(Arrays.asList(1L, 2L, 3L), schedOrder.getAssignmentIterator(
+ IteratorSelector.EMPTY_ITERATOR_SELECTOR));
//Preemption, youngest to oldest
- checkSerials(schedOrder.getPreemptionIterator(), new long[]{3, 2, 1});
+ checkSerials(Arrays.asList(3L, 2L, 1L), schedOrder.getPreemptionIterator());
}
- public void checkSerials(Iterator si,
- long[] serials) {
- for (int i = 0;i < serials.length;i++) {
- Assert.assertEquals(si.next().getSerial(),
- serials[i]);
+  public void checkSerials(List<Long> expectedSerials,
+      Iterator<MockSchedulableEntity> actualSerialIterator) {
+ for (long expectedSerial : expectedSerials) {
+ assertEquals(expectedSerial, actualSerialIterator.next().getSerial());
}
}
@Test
- public void testFifoOrderingPolicyAlongWithPriorty() {
+ public void testFifoOrderingPolicyAlongWithPriority() {
FifoOrderingPolicy<MockSchedulableEntity> policy =
new FifoOrderingPolicy<MockSchedulableEntity>();
MockSchedulableEntity r1 = new MockSchedulableEntity();
MockSchedulableEntity r2 = new MockSchedulableEntity();
- Priority p1 = Priority.newInstance(1);
- Priority p2 = Priority.newInstance(0);
+ assertEquals("Both r1 and r2 priority is null, the comparator should return 0.", 0,
+ policy.getComparator().compare(r1, r2));
- // Both r1 and r1 priority is null
- Assert.assertEquals(0, policy.getComparator().compare(r1, r2));
+ Priority p2 = Priority.newInstance(0);
// r1 is null and r2 is not null
r2.setApplicationPriority(p2);
- Assert.assertEquals(-1, policy.getComparator().compare(r1, r2));
+ Assert.assertTrue("The priority of r1 is null, the priority of r2 is not null, " +
+ "the comparator should return a negative value.",
+ policy.getComparator().compare(r1, r2) < 0);
+
+ Priority p1 = Priority.newInstance(1);
// r1 is not null and r2 is null
- r2.setApplicationPriority(null);
r1.setApplicationPriority(p1);
- Assert.assertEquals(1, policy.getComparator().compare(r1, r2));
+ r2.setApplicationPriority(null);
+ assertTrue("The priority of r1 is not null, the priority of r2 is null," +
+ "the comparator should return a positive value.",
+ policy.getComparator().compare(r1, r2) > 0);
// r1 is not null and r2 is not null
r1.setApplicationPriority(p1);
r2.setApplicationPriority(p2);
- Assert.assertEquals(-1, policy.getComparator().compare(r1, r2));
+ Assert.assertTrue("Both priorities are not null, the r1 has higher priority, " +
+ "the result should be a negative value.",
+ policy.getComparator().compare(r1, r2) < 0);
}
+ @Test
+ public void testOrderingUsingAppSubmitTime() {
+    FifoOrderingPolicy<MockSchedulableEntity> policy =
+        new FifoOrderingPolicy<MockSchedulableEntity>();
+ MockSchedulableEntity r1 = new MockSchedulableEntity();
+ MockSchedulableEntity r2 = new MockSchedulableEntity();
+
+    // R1 and R2 have been started at the same time
+ assertEquals(r1.getStartTime(), r2.getStartTime());
+
+ // No changes, equal
+ assertEquals("The submit times are the same, the comparator should return 0.", 0,
+ policy.getComparator().compare(r1, r2));
+
+ // R2 has been started after R1
+ r1.setStartTime(5);
+ r2.setStartTime(10);
+ Assert.assertTrue("r2 was started after r1, " +
+ "the comparator should return a negative value.",
+ policy.getComparator().compare(r1, r2) < 0);
+
+ // R1 has been started after R2
+ r1.setStartTime(10);
+ r2.setStartTime(5);
+ Assert.assertTrue("r2 was started before r1, the comparator should return a positive value.",
+ policy.getComparator().compare(r1, r2) > 0);
+ }
}
From ed65aa23240b3dd6b56e86e5f0e9d38069fb3b01 Mon Sep 17 00:00:00 2001
From: Szilard Nemeth
Date: Thu, 10 Mar 2022 22:22:58 +0100
Subject: [PATCH 032/145] YARN-11067. Resource overcommitment due to incorrect
resource normalisation logical order. Contributed by Andras Gyori
---
.../scheduler/capacity/ParentQueue.java | 37 +++++++--------
.../TestAbsoluteResourceConfiguration.java | 47 +++++++++++++++++++
2 files changed, 64 insertions(+), 20 deletions(-)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index c624aab71cc90..87ebc0b40f3b1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -1294,17 +1294,24 @@ public boolean hasChildQueues() {
private void calculateEffectiveResourcesAndCapacity(String label,
Resource clusterResource) {
+    // Update effective resources for this queue
+ if (rootQueue) {
+ Resource resourceByLabel = labelManager.getResourceByLabel(label, clusterResource);
+ usageTracker.getQueueResourceQuotas().setEffectiveMinResource(label, resourceByLabel);
+ usageTracker.getQueueResourceQuotas().setEffectiveMaxResource(label, resourceByLabel);
+ } else {
+ super.updateEffectiveResources(clusterResource);
+ }
+
+ recalculateEffectiveMinRatio(label, clusterResource);
+ }
+
+ private void recalculateEffectiveMinRatio(String label, Resource clusterResource) {
// For root queue, ensure that max/min resource is updated to latest
// cluster resource.
- Resource resourceByLabel = labelManager.getResourceByLabel(label,
- clusterResource);
-
- /*
- * == Below logic are added to calculate effectiveMinRatioPerResource ==
- */
+ Resource resourceByLabel = labelManager.getResourceByLabel(label, clusterResource);
- // Total configured min resources of direct children of this given parent
- // queue
+ // Total configured min resources of direct children of this given parent queue
Resource configuredMinResources = Resource.newInstance(0L, 0);
for (CSQueue childQueue : getChildQueues()) {
Resources.addTo(configuredMinResources,
@@ -1312,8 +1319,7 @@ private void calculateEffectiveResourcesAndCapacity(String label,
}
// Factor to scale down effective resource: When cluster has sufficient
- // resources, effective_min_resources will be same as configured
- // min_resources.
+ // resources, effective_min_resources will be same as configured min_resources.
Resource numeratorForMinRatio = null;
if (getQueuePath().equals("root")) {
if (!resourceByLabel.equals(Resources.none()) && Resources.lessThan(resourceCalculator,
@@ -1324,21 +1330,12 @@ private void calculateEffectiveResourcesAndCapacity(String label,
if (Resources.lessThan(resourceCalculator, clusterResource,
usageTracker.getQueueResourceQuotas().getEffectiveMinResource(label),
configuredMinResources)) {
- numeratorForMinRatio = usageTracker.getQueueResourceQuotas()
- .getEffectiveMinResource(label);
+ numeratorForMinRatio = usageTracker.getQueueResourceQuotas().getEffectiveMinResource(label);
}
}
effectiveMinResourceRatio.put(label, getEffectiveMinRatio(
configuredMinResources, numeratorForMinRatio));
-
- // Update effective resources for my self;
- if (rootQueue) {
- usageTracker.getQueueResourceQuotas().setEffectiveMinResource(label, resourceByLabel);
- usageTracker.getQueueResourceQuotas().setEffectiveMaxResource(label, resourceByLabel);
- } else{
- super.updateEffectiveResources(clusterResource);
- }
}
private Map<String, Float> getEffectiveMinRatio(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAbsoluteResourceConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAbsoluteResourceConfiguration.java
index d7c80b5dda117..8d68cbf7932eb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAbsoluteResourceConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAbsoluteResourceConfiguration.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.Assert;
import org.junit.Test;
@@ -100,6 +101,21 @@ public class TestAbsoluteResourceConfiguration {
private static Set<String> resourceTypes = new HashSet<>(
Arrays.asList("memory", "vcores"));
+ private CapacitySchedulerConfiguration setupNormalizationConfiguration() {
+ CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
+ csConf.setQueues(CapacitySchedulerConfiguration.ROOT,
+ new String[]{QUEUEA, QUEUEB});
+ csConf.setQueues(QUEUEA_FULL.getFullPath(), new String[]{QUEUEA1, QUEUEA2});
+
+    // Children's configured min resources add up to 60 GB and 28 vcores
+ csConf.setMinimumResourceRequirement("", QUEUEA_FULL, Resource.newInstance(50 * GB, 20));
+ csConf.setMinimumResourceRequirement("", QUEUEA1_FULL, Resource.newInstance(30 * GB, 15));
+ csConf.setMinimumResourceRequirement("", QUEUEA2_FULL, Resource.newInstance(20 * GB, 5));
+ csConf.setMinimumResourceRequirement("", QUEUEB_FULL, Resource.newInstance(10 * GB, 8));
+
+ return csConf;
+ }
+
private CapacitySchedulerConfiguration setupSimpleQueueConfiguration(
boolean isCapacityNeeded) {
CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
@@ -292,6 +308,37 @@ public void testSimpleMinMaxResourceConfigurartionPerQueue()
rm.close();
}
+ @Test
+ public void testNormalizationAfterNodeRemoval() throws Exception {
+ CapacitySchedulerConfiguration csConf = setupNormalizationConfiguration();
+ csConf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+ ResourceScheduler.class);
+
+ MockRM rm = new MockRM(csConf);
+
+ rm.start();
+ rm.registerNode("h1:1234", 8 * GB, 4);
+ rm.registerNode("h2:1234", 8 * GB, 4);
+ rm.registerNode("h3:1234", 8 * GB, 4);
+ MockNM nm = rm.registerNode("h4:1234", 8 * GB, 4);
+ rm.registerNode("h5:1234", 28 * GB, 12);
+
+ // Send a removal event to CS. MockRM#unregisterNode does not reflect the real world scenario,
+ // therefore we manually need to invoke this removal event.
+ CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+ cs.handle(new NodeRemovedSchedulerEvent(rm.getRMContext().getRMNodes().get(nm.getNodeId())));
+
+ Resource res = Resources.add(
+ cs.getQueue(QUEUEA1_FULL.getFullPath()).getEffectiveCapacity(""),
+ cs.getQueue(QUEUEA2_FULL.getFullPath()).getEffectiveCapacity(""));
+ Resource resParent = cs.getQueue(QUEUEA_FULL.getFullPath()).getEffectiveCapacity("");
+
+ // Check if there is no overcommitment on behalf of the child queues
+ Assert.assertTrue(String.format("Summarized resource %s of all children is greater than " +
+ "their parent's %s", res, resParent),
+ Resources.lessThan(cs.getResourceCalculator(), cs.getClusterResource(), res, resParent));
+ }
+
@Test
public void testEffectiveMinMaxResourceConfigurartionPerQueue()
throws Exception {
From 672e380c4f6ffcb0a6fee6d8263166e16b4323c2 Mon Sep 17 00:00:00 2001
From: Mukund Thakur
Date: Fri, 11 Mar 2022 13:05:45 +0530
Subject: [PATCH 033/145] HADOOP-18112: Implement paging during multi object
delete. (#4045)
Multi-object delete requests with more than 1000 keys are not supported by S3
and fail with a MalformedXML error, so requests are now paged to limit the
number of keys in a single request. The page size can be configured with
"fs.s3a.bulk.delete.page.size".
Contributed By: Mukund Thakur
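
For illustration, a minimal sketch of the paging idea described above: the key list is split into consecutive pages of at most the configured page size and one bulk-delete call is issued per page, so no single request exceeds the S3 limit of 1000 keys. The class, the method names and the Consumer-based bulkDelete hook below are illustrative, not the actual S3AFileSystem API.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

class PagedBulkDeleteSketch {

  // Split 'keys' into consecutive sub-lists of at most 'pageSize' entries.
  static <T> List<List<T>> partition(List<T> keys, int pageSize) {
    List<List<T>> pages = new ArrayList<>();
    for (int i = 0; i < keys.size(); i += pageSize) {
      pages.add(keys.subList(i, Math.min(i + pageSize, keys.size())));
    }
    return pages;
  }

  // Issue one bulk-delete call per page via the supplied (hypothetical) hook.
  static void deletePaged(List<String> keys, int pageSize, Consumer<List<String>> bulkDelete) {
    for (List<String> page : partition(keys, pageSize)) {
      bulkDelete.accept(page);
    }
  }

  public static void main(String[] args) {
    List<String> keys = new ArrayList<>();
    for (int i = 0; i < 2500; i++) {
      keys.add("dir/key-" + i);
    }
    // With a page size of 1000 this issues three requests: 1000, 1000 and 500 keys.
    deletePaged(keys, 1000, page -> System.out.println("deleting " + page.size() + " keys"));
  }
}
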
---
.../java/org/apache/hadoop/util/Lists.java | 24 ++++
.../org/apache/hadoop/util/TestLists.java | 44 +++++++
.../apache/hadoop/fs/s3a/S3AFileSystem.java | 78 +++++--------
.../hadoop/fs/s3a/api/RequestFactory.java | 5 +-
.../hadoop/fs/s3a/impl/DeleteOperation.java | 45 ++------
.../fs/s3a/impl/OperationCallbacks.java | 12 +-
.../hadoop/fs/s3a/impl/RenameOperation.java | 8 +-
.../fs/s3a/impl/RequestFactoryImpl.java | 5 +-
.../hadoop/fs/s3a/tools/MarkerTool.java | 2 +-
.../fs/s3a/tools/MarkerToolOperations.java | 9 +-
.../s3a/tools/MarkerToolOperationsImpl.java | 10 +-
.../fs/s3a/ITestS3AFailureHandling.java | 38 ++++++-
.../apache/hadoop/fs/s3a/S3ATestUtils.java | 107 +++++++++++++++++-
.../s3a/impl/ITestPartialRenamesDeletes.java | 105 -----------------
.../fs/s3a/impl/TestRequestFactory.java | 2 +-
.../fs/s3a/scale/ITestS3ADeleteManyFiles.java | 2 +-
.../s3a/test/MinimalOperationCallbacks.java | 9 +-
17 files changed, 273 insertions(+), 232 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Lists.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Lists.java
index b6d74ee679281..5d9cc0502afaa 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Lists.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Lists.java
@@ -232,4 +232,28 @@ private static boolean addAll(Collection addTo,
return addAll(addTo, elementsToAdd.iterator());
}
+ /**
+ * Returns consecutive sub-lists of a list, each of the same size
+ * (the final list may be smaller).
+ * @param originalList original big list.
+   * @param pageSize desired size of each sub-list (the last one
+   *                 may be smaller)
+   * @return a list of sub-lists.
+ */
+  public static <T> List<List<T>> partition(List<T> originalList, int pageSize) {
+
+ Preconditions.checkArgument(originalList != null && originalList.size() > 0,
+ "Invalid original list");
+ Preconditions.checkArgument(pageSize > 0, "Page size should " +
+ "be greater than 0 for performing partition");
+
+    List<List<T>> result = new ArrayList<>();
+    int i = 0;
+ while (i < originalList.size()) {
+ result.add(originalList.subList(i,
+ Math.min(i + pageSize, originalList.size())));
+ i = i + pageSize;
+ }
+ return result;
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLists.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLists.java
index 537e3781edc0e..53241da695c63 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLists.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLists.java
@@ -18,9 +18,11 @@
package org.apache.hadoop.util;
+import org.assertj.core.api.Assertions;
import org.junit.Assert;
import org.junit.Test;
+import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@@ -79,6 +81,48 @@ public void testItrLinkedLists() {
Assert.assertEquals(4, list.size());
}
+ @Test
+ public void testListsPartition() {
+    List<String> list = new ArrayList<>();
+ list.add("a");
+ list.add("b");
+ list.add("c");
+ list.add("d");
+ list.add("e");
+    List<List<String>> res = Lists.
+ partition(list, 2);
+ Assertions.assertThat(res)
+ .describedAs("Number of partitions post partition")
+ .hasSize(3);
+ Assertions.assertThat(res.get(0))
+ .describedAs("Number of elements in first partition")
+ .hasSize(2);
+ Assertions.assertThat(res.get(2))
+ .describedAs("Number of elements in last partition")
+ .hasSize(1);
+
+    List<List<String>> res2 = Lists.
+ partition(list, 1);
+ Assertions.assertThat(res2)
+ .describedAs("Number of partitions post partition")
+ .hasSize(5);
+ Assertions.assertThat(res2.get(0))
+ .describedAs("Number of elements in first partition")
+ .hasSize(1);
+ Assertions.assertThat(res2.get(4))
+ .describedAs("Number of elements in last partition")
+ .hasSize(1);
+
+    List<List<String>> res3 = Lists.
+ partition(list, 6);
+ Assertions.assertThat(res3)
+ .describedAs("Number of partitions post partition")
+ .hasSize(1);
+ Assertions.assertThat(res3.get(0))
+ .describedAs("Number of elements in first partition")
+ .hasSize(5);
+ }
+
@Test
public void testArrayListWithSize() {
List<String> list = Lists.newArrayListWithCapacity(3);
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index c8a73d956d844..86da70ecdd316 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -135,6 +135,7 @@
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.DurationInfo;
import org.apache.hadoop.util.LambdaUtils;
+import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
@@ -225,6 +226,7 @@
import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfOperation;
import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.trackDurationOfSupplier;
import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;
+import static org.apache.hadoop.util.Preconditions.checkArgument;
import static org.apache.hadoop.util.functional.RemoteIterators.typeCastingRemoteIterator;
/**
@@ -550,6 +552,8 @@ public void initialize(URI name, Configuration originalConf)
pageSize = intOption(getConf(), BULK_DELETE_PAGE_SIZE,
BULK_DELETE_PAGE_SIZE_DEFAULT, 0);
+ checkArgument(pageSize <= InternalConstants.MAX_ENTRIES_TO_DELETE,
+ "page size out of range: %s", pageSize);
listing = new Listing(listingOperationCallbacks, createStoreContext());
} catch (AmazonClientException e) {
// amazon client exception: stop all services then throw the translation
@@ -2026,14 +2030,12 @@ public CopyResult copyFile(final String srcKey,
}
@Override
- public DeleteObjectsResult removeKeys(
- final List keysToDelete,
- final boolean deleteFakeDir,
- final boolean quiet)
+ public void removeKeys(
+    final List<DeleteObjectsRequest.KeyVersion> keysToDelete,
+ final boolean deleteFakeDir)
throws MultiObjectDeleteException, AmazonClientException, IOException {
auditSpan.activate();
- return S3AFileSystem.this.removeKeys(keysToDelete, deleteFakeDir,
- quiet);
+ S3AFileSystem.this.removeKeys(keysToDelete, deleteFakeDir);
}
@Override
@@ -2818,10 +2820,6 @@ public void incrementPutProgressStatistics(String key, long bytes) {
* @param keysToDelete collection of keys to delete on the s3-backend.
* if empty, no request is made of the object store.
* @param deleteFakeDir indicates whether this is for deleting fake dirs
- * @param quiet should a bulk query be quiet, or should its result list
- * all deleted keys?
- * @return the deletion result if a multi object delete was invoked
- * and it returned without a failure.
* @throws InvalidRequestException if the request was rejected due to
* a mistaken attempt to delete the root directory.
* @throws MultiObjectDeleteException one or more of the keys could not
@@ -2831,10 +2829,9 @@ public void incrementPutProgressStatistics(String key, long bytes) {
* @throws AmazonClientException other amazon-layer failure.
*/
@Retries.RetryRaw
- private DeleteObjectsResult removeKeysS3(
- List keysToDelete,
- boolean deleteFakeDir,
- boolean quiet)
+ private void removeKeysS3(
+      List<DeleteObjectsRequest.KeyVersion> keysToDelete,
+ boolean deleteFakeDir)
throws MultiObjectDeleteException, AmazonClientException,
IOException {
if (LOG.isDebugEnabled()) {
@@ -2847,16 +2844,28 @@ private DeleteObjectsResult removeKeysS3(
}
if (keysToDelete.isEmpty()) {
// exit fast if there are no keys to delete
- return null;
+ return;
}
for (DeleteObjectsRequest.KeyVersion keyVersion : keysToDelete) {
blockRootDelete(keyVersion.getKey());
}
- DeleteObjectsResult result = null;
try {
if (enableMultiObjectsDelete) {
- result = deleteObjects(
- getRequestFactory().newBulkDeleteRequest(keysToDelete, quiet));
+ if (keysToDelete.size() <= pageSize) {
+ deleteObjects(getRequestFactory()
+ .newBulkDeleteRequest(keysToDelete));
+ } else {
+          // Multi-object deletion of more than 1000 keys is not supported
+          // by S3, so the keys are deleted in pages of at most pageSize entries.
+          LOG.debug("Partitioning the keys to delete as the number of keys " +
+              "exceeds the page size. Number of keys: {}, Page size: {}",
+              keysToDelete.size(), pageSize);
+          for (List<DeleteObjectsRequest.KeyVersion> batchOfKeysToDelete :
+ Lists.partition(keysToDelete, pageSize)) {
+ deleteObjects(getRequestFactory()
+ .newBulkDeleteRequest(batchOfKeysToDelete));
+ }
+ }
} else {
for (DeleteObjectsRequest.KeyVersion keyVersion : keysToDelete) {
deleteObject(keyVersion.getKey());
@@ -2872,7 +2881,6 @@ private DeleteObjectsResult removeKeysS3(
throw ex;
}
noteDeleted(keysToDelete.size(), deleteFakeDir);
- return result;
}
/**
@@ -2889,7 +2897,7 @@ private void noteDeleted(final int count, final boolean deleteFakeDir) {
}
/**
- * Invoke {@link #removeKeysS3(List, boolean, boolean)}.
+ * Invoke {@link #removeKeysS3(List, boolean)}.
* If a {@code MultiObjectDeleteException} is raised, the
* relevant statistics are updated.
*
@@ -2910,35 +2918,9 @@ public void removeKeys(
final boolean deleteFakeDir)
throws MultiObjectDeleteException, AmazonClientException,
IOException {
- removeKeys(keysToDelete, deleteFakeDir,
- true);
- }
-
- /**
- * Invoke {@link #removeKeysS3(List, boolean, boolean)}.
- * @param keysToDelete collection of keys to delete on the s3-backend.
- * if empty, no request is made of the object store.
- * @param deleteFakeDir indicates whether this is for deleting fake dirs.
- * @param quiet should a bulk query be quiet, or should its result list
- * all deleted keys
- * @return the deletion result if a multi object delete was invoked
- * and it returned without a failure, else null.
- * @throws InvalidRequestException if the request was rejected due to
- * a mistaken attempt to delete the root directory.
- * @throws MultiObjectDeleteException one or more of the keys could not
- * be deleted in a multiple object delete operation.
- * @throws AmazonClientException amazon-layer failure.
- * @throws IOException other IO Exception.
- */
- @Retries.RetryRaw
- private DeleteObjectsResult removeKeys(
- final List keysToDelete,
- final boolean deleteFakeDir,
- final boolean quiet)
- throws MultiObjectDeleteException, AmazonClientException, IOException {
try (DurationInfo ignored = new DurationInfo(LOG, false,
- "Deleting %d keys", keysToDelete.size())) {
- return removeKeysS3(keysToDelete, deleteFakeDir, quiet);
+ "Deleting %d keys", keysToDelete.size())) {
+ removeKeysS3(keysToDelete, deleteFakeDir);
}
}
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/RequestFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/RequestFactory.java
index ee5728688b74e..97a15d95132f4 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/RequestFactory.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/RequestFactory.java
@@ -291,12 +291,9 @@ ListObjectsV2Request newListObjectsV2Request(String key,
/**
* Bulk delete request.
* @param keysToDelete list of keys to delete.
- * @param quiet should a bulk query be quiet, or should its result list
- * all deleted keys?
* @return the request
*/
DeleteObjectsRequest newBulkDeleteRequest(
- List<DeleteObjectsRequest.KeyVersion> keysToDelete,
- boolean quiet);
+ List<DeleteObjectsRequest.KeyVersion> keysToDelete);
}
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java
index 3d2ab22b44dc4..a45bfe46f169f 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DeleteOperation.java
@@ -25,7 +25,6 @@
import java.util.stream.Collectors;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
-import com.amazonaws.services.s3.model.DeleteObjectsResult;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors;
import org.slf4j.Logger;
@@ -365,8 +364,7 @@ private CompletableFuture submitDelete(
callableWithinAuditSpan(
getAuditSpan(), () -> {
asyncDeleteAction(
- keyList,
- LOG.isDebugEnabled());
+ keyList);
return null;
}));
}
@@ -376,20 +374,16 @@ private CompletableFuture submitDelete(
* the keys from S3 and paths from S3Guard.
*
* @param keyList keys to delete.
- * @param auditDeletedKeys should the results be audited and undeleted
* entries logged?
* @throws IOException failure
*/
@Retries.RetryTranslated
private void asyncDeleteAction(
- final List<DeleteEntry> keyList,
- final boolean auditDeletedKeys)
+ final List<DeleteEntry> keyList)
throws IOException {
- List<DeleteObjectsResult.DeletedObject> deletedObjects = new ArrayList<>();
try (DurationInfo ignored =
new DurationInfo(LOG, false,
"Delete page of %d keys", keyList.size())) {
- DeleteObjectsResult result;
if (!keyList.isEmpty()) {
// first delete the files.
List<DeleteObjectsRequest.KeyVersion> files = keyList.stream()
@@ -397,15 +391,12 @@ private void asyncDeleteAction(
.map(e -> e.keyVersion)
.collect(Collectors.toList());
LOG.debug("Deleting of {} file objects", files.size());
- result = Invoker.once("Remove S3 Files",
+ Invoker.once("Remove S3 Files",
status.getPath().toString(),
() -> callbacks.removeKeys(
files,
- false,
- !auditDeletedKeys));
- if (result != null) {
- deletedObjects.addAll(result.getDeletedObjects());
- }
+ false
+ ));
// now the dirs
List<DeleteObjectsRequest.KeyVersion> dirs = keyList.stream()
.filter(e -> e.isDirMarker)
@@ -413,32 +404,12 @@ private void asyncDeleteAction(
.collect(Collectors.toList());
LOG.debug("Deleting of {} directory markers", dirs.size());
// This is invoked with deleteFakeDir.
- result = Invoker.once("Remove S3 Dir Markers",
+ Invoker.once("Remove S3 Dir Markers",
status.getPath().toString(),
() -> callbacks.removeKeys(
dirs,
- true,
- !auditDeletedKeys));
- if (result != null) {
- deletedObjects.addAll(result.getDeletedObjects());
- }
- }
- if (auditDeletedKeys) {
- // audit the deleted keys
- if (deletedObjects.size() != keyList.size()) {
- // size mismatch
- LOG.warn("Size mismatch in deletion operation. "
- + "Expected count of deleted files: {}; "
- + "actual: {}",
- keyList.size(), deletedObjects.size());
- // strip out the deleted keys
- for (DeleteObjectsResult.DeletedObject del : deletedObjects) {
- keyList.removeIf(kv -> kv.getKey().equals(del.getKey()));
- }
- for (DeleteEntry kv : keyList) {
- LOG.debug("{}", kv.getKey());
- }
- }
+ true
+ ));
}
}
}
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/OperationCallbacks.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/OperationCallbacks.java
index a72dc7e10b33e..ecfe2c0ba0a24 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/OperationCallbacks.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/OperationCallbacks.java
@@ -24,7 +24,6 @@
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
-import com.amazonaws.services.s3.model.DeleteObjectsResult;
import com.amazonaws.services.s3.model.MultiObjectDeleteException;
import com.amazonaws.services.s3.transfer.model.CopyResult;
@@ -138,10 +137,6 @@ CopyResult copyFile(String srcKey,
* @param keysToDelete collection of keys to delete on the s3-backend.
* if empty, no request is made of the object store.
* @param deleteFakeDir indicates whether this is for deleting fake dirs.
- * @param quiet should a bulk query be quiet, or should its result list
- * all deleted keys
- * @return the deletion result if a multi object delete was invoked
- * and it returned without a failure, else null.
* @throws InvalidRequestException if the request was rejected due to
* a mistaken attempt to delete the root directory.
* @throws MultiObjectDeleteException one or more of the keys could not
@@ -150,10 +145,9 @@ CopyResult copyFile(String srcKey,
* @throws IOException other IO Exception.
*/
@Retries.RetryRaw
- DeleteObjectsResult removeKeys(
- List<DeleteObjectsRequest.KeyVersion> keysToDelete,
- boolean deleteFakeDir,
- boolean quiet)
+ void removeKeys(
+ List<DeleteObjectsRequest.KeyVersion> keysToDelete,
+ boolean deleteFakeDir)
throws MultiObjectDeleteException, AmazonClientException,
IOException;
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java
index c1700ef389cc3..bc9ad669b56f5 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RenameOperation.java
@@ -49,6 +49,7 @@
import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.submit;
import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletion;
import static org.apache.hadoop.fs.s3a.impl.InternalConstants.RENAME_PARALLEL_LIMIT;
+import static org.apache.hadoop.util.Preconditions.checkArgument;
/**
* A parallelized rename operation.
@@ -155,6 +156,9 @@ public RenameOperation(
this.destKey = destKey;
this.destStatus = destStatus;
this.callbacks = callbacks;
+ checkArgument(pageSize > 0
+ && pageSize <= InternalConstants.MAX_ENTRIES_TO_DELETE,
+ "page size out of range: %s", pageSize);
this.pageSize = pageSize;
}
@@ -586,8 +590,8 @@ private void removeSourceObjects(
sourcePath.toString(), () ->
callbacks.removeKeys(
keys,
- false,
- true));
+ false
+ ));
}
/**
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RequestFactoryImpl.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RequestFactoryImpl.java
index 04cff49be3d80..fa58323decd03 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RequestFactoryImpl.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RequestFactoryImpl.java
@@ -575,12 +575,11 @@ public DeleteObjectRequest newDeleteObjectRequest(String key) {
@Override
public DeleteObjectsRequest newBulkDeleteRequest(
- List<DeleteObjectsRequest.KeyVersion> keysToDelete,
- boolean quiet) {
+ List<DeleteObjectsRequest.KeyVersion> keysToDelete) {
return prepareRequest(
new DeleteObjectsRequest(bucket)
.withKeys(keysToDelete)
- .withQuiet(quiet));
+ .withQuiet(true));
}
@Override
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerTool.java
index bd09ca652a69d..230f07793d9ea 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerTool.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerTool.java
@@ -817,7 +817,7 @@ pages, suffix(pages),
end);
once("Remove S3 Keys",
tracker.getBasePath().toString(), () ->
- operations.removeKeys(page, true, false));
+ operations.removeKeys(page, true));
summary.deleteRequests++;
// and move to the start of the next page
start = end;
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerToolOperations.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerToolOperations.java
index 7d7627dfc03b4..a701f86f7b0c3 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerToolOperations.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerToolOperations.java
@@ -23,7 +23,6 @@
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
-import com.amazonaws.services.s3.model.DeleteObjectsResult;
import com.amazonaws.services.s3.model.MultiObjectDeleteException;
import org.apache.hadoop.fs.InvalidRequestException;
@@ -58,10 +57,7 @@ RemoteIterator listObjects(
* @param keysToDelete collection of keys to delete on the s3-backend.
* if empty, no request is made of the object store.
* @param deleteFakeDir indicates whether this is for deleting fake dirs.
- * @param quiet should a bulk query be quiet, or should its result list
* all deleted keys
- * @return the deletion result if a multi object delete was invoked
- * and it returned without a failure, else null.
* @throws InvalidRequestException if the request was rejected due to
* a mistaken attempt to delete the root directory.
* @throws MultiObjectDeleteException one or more of the keys could not
@@ -70,10 +66,9 @@ RemoteIterator listObjects(
* @throws IOException other IO Exception.
*/
@Retries.RetryMixed
- DeleteObjectsResult removeKeys(
+ void removeKeys(
List<DeleteObjectsRequest.KeyVersion> keysToDelete,
- boolean deleteFakeDir,
- boolean quiet)
+ boolean deleteFakeDir)
throws MultiObjectDeleteException, AmazonClientException,
IOException;
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerToolOperationsImpl.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerToolOperationsImpl.java
index 7ccbc41bbea45..ccf80e1dde00e 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerToolOperationsImpl.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerToolOperationsImpl.java
@@ -23,7 +23,6 @@
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
-import com.amazonaws.services.s3.model.DeleteObjectsResult;
import com.amazonaws.services.s3.model.MultiObjectDeleteException;
import org.apache.hadoop.fs.Path;
@@ -55,13 +54,12 @@ public RemoteIterator listObjects(final Path path,
}
@Override
- public DeleteObjectsResult removeKeys(
+ public void removeKeys(
final List<DeleteObjectsRequest.KeyVersion> keysToDelete,
- final boolean deleteFakeDir,
- final boolean quiet)
+ final boolean deleteFakeDir)
throws MultiObjectDeleteException, AmazonClientException, IOException {
- return operationCallbacks.removeKeys(keysToDelete, deleteFakeDir,
- quiet);
+ operationCallbacks.removeKeys(keysToDelete, deleteFakeDir
+ );
}
}
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java
index c34ba22b10513..c0f6a4b23226b 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java
@@ -20,10 +20,13 @@
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
+import org.assertj.core.api.Assertions;
import org.junit.Assume;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.statistics.StoreStatisticNames;
import org.apache.hadoop.fs.store.audit.AuditSpan;
@@ -37,9 +40,11 @@
import java.nio.file.AccessDeniedException;
import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.createFiles;
import static org.apache.hadoop.fs.s3a.test.ExtraAssertions.failIf;
-import static org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteSupport.*;
import static org.apache.hadoop.test.LambdaTestUtils.*;
+import static org.apache.hadoop.util.functional.RemoteIterators.mappingRemoteIterator;
+import static org.apache.hadoop.util.functional.RemoteIterators.toList;
/**
* ITest for failure handling, primarily multipart deletion.
@@ -72,6 +77,37 @@ public void testMultiObjectDeleteNoFile() throws Throwable {
removeKeys(getFileSystem(), "ITestS3AFailureHandling/missingFile");
}
+ /**
+ * See HADOOP-18112.
+ */
+ @Test
+ public void testMultiObjectDeleteLargeNumKeys() throws Exception {
+ S3AFileSystem fs = getFileSystem();
+ Path path = path("largeDir");
+ mkdirs(path);
+ createFiles(fs, path, 1, 1005, 0);
+ RemoteIterator<LocatedFileStatus> locatedFileStatusRemoteIterator =
+ fs.listFiles(path, false);
+ List<String> keys = toList(mappingRemoteIterator(locatedFileStatusRemoteIterator,
+ locatedFileStatus -> fs.pathToKey(locatedFileStatus.getPath())));
+ // With paging implemented for multi object deletion,
+ // no exception is expected here.
+ Long bulkDeleteReqBefore = getNumberOfBulkDeleteRequestsMadeTillNow(fs);
+ try (AuditSpan span = span()) {
+ fs.removeKeys(buildDeleteRequest(keys.toArray(new String[0])), false);
+ }
+ Long bulkDeleteReqAfter = getNumberOfBulkDeleteRequestsMadeTillNow(fs);
+ // The number of delete requests is 5, as the default page size is 250 (ceil(1005 / 250) = 5).
+ Assertions.assertThat(bulkDeleteReqAfter - bulkDeleteReqBefore)
+ .describedAs("Number of batched bulk delete requests")
+ .isEqualTo(5);
+ }
+
+ private Long getNumberOfBulkDeleteRequestsMadeTillNow(S3AFileSystem fs) {
+ return fs.getIOStatistics().counters()
+ .get(StoreStatisticNames.OBJECT_BULK_DELETE_REQUEST);
+ }
+
private void removeKeys(S3AFileSystem fileSystem, String... keys)
throws IOException {
try (AuditSpan span = span()) {
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
index 55ddba9bbd1e0..d965e6e57a28f 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
@@ -52,7 +52,11 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceOperations;
+import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
+import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService;
+import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors;
import org.apache.hadoop.util.BlockingThreadPoolExecutorService;
+import org.apache.hadoop.util.DurationInfo;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.functional.CallableRaisingIOE;
@@ -70,6 +74,7 @@
import java.net.URISyntaxException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
+import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
@@ -78,6 +83,10 @@
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
+import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.submit;
+import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletion;
+import static org.apache.hadoop.test.GenericTestUtils.buildPaths;
import static org.apache.hadoop.util.Preconditions.checkNotNull;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CREDENTIAL_PROVIDER_PATH;
import static org.apache.commons.lang3.StringUtils.isNotEmpty;
@@ -95,8 +104,23 @@
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class S3ATestUtils {
+
private static final Logger LOG = LoggerFactory.getLogger(
- S3ATestUtils.class);
+ S3ATestUtils.class);
+
+ /** Many threads for scale performance: {@value}. */
+ public static final int EXECUTOR_THREAD_COUNT = 64;
+ /**
+ * For submitting work.
+ */
+ private static final ListeningExecutorService EXECUTOR =
+ MoreExecutors.listeningDecorator(
+ BlockingThreadPoolExecutorService.newInstance(
+ EXECUTOR_THREAD_COUNT,
+ EXECUTOR_THREAD_COUNT * 2,
+ 30, TimeUnit.SECONDS,
+ "test-operations"));
+
/**
* Value to set a system property to (in maven) to declare that
@@ -821,6 +845,87 @@ public static StoreContext createMockStoreContext(
.build();
}
+ /**
+ * Write the text to a file asynchronously. Logs the operation duration.
+ * @param fs filesystem
+ * @param path path
+ * @return future to the path created.
+ */
+ private static CompletableFuture<Path> put(FileSystem fs,
+ Path path, String text) {
+ return submit(EXECUTOR, () -> {
+ try (DurationInfo ignore =
+ new DurationInfo(LOG, false, "Creating %s", path)) {
+ createFile(fs, path, true, text.getBytes(Charsets.UTF_8));
+ return path;
+ }
+ });
+ }
+
+ /**
+ * Build a set of files in a directory tree.
+ * @param fs filesystem
+ * @param destDir destination
+ * @param depth file depth
+ * @param fileCount number of files to create.
+ * @param dirCount number of dirs to create at each level
+ * @return the list of files created.
+ */
+ public static List<Path> createFiles(final FileSystem fs,
+ final Path destDir,
+ final int depth,
+ final int fileCount,
+ final int dirCount) throws IOException {
+ return createDirsAndFiles(fs, destDir, depth, fileCount, dirCount,
+ new ArrayList<>(fileCount),
+ new ArrayList<>(dirCount));
+ }
+
+ /**
+ * Build a set of files in a directory tree.
+ * @param fs filesystem
+ * @param destDir destination
+ * @param depth file depth
+ * @param fileCount number of files to create.
+ * @param dirCount number of dirs to create at each level
+ * @param paths [out] list of file paths created
+ * @param dirs [out] list of directory paths created.
+ * @return the list of files created.
+ */
+ public static List<Path> createDirsAndFiles(final FileSystem fs,
+ final Path destDir,
+ final int depth,
+ final int fileCount,
+ final int dirCount,
+ final List<Path> paths,
+ final List<Path> dirs) throws IOException {
+ buildPaths(paths, dirs, destDir, depth, fileCount, dirCount);
+ List<CompletableFuture<Path>> futures = new ArrayList<>(paths.size()
+ + dirs.size());
+
+ // create directories. With dir marker retention, that adds more entries
+ // to cause deletion issues
+ try (DurationInfo ignore =
+ new DurationInfo(LOG, "Creating %d directories", dirs.size())) {
+ for (Path path : dirs) {
+ futures.add(submit(EXECUTOR, () ->{
+ fs.mkdirs(path);
+ return path;
+ }));
+ }
+ waitForCompletion(futures);
+ }
+
+ try (DurationInfo ignore =
+ new DurationInfo(LOG, "Creating %d files", paths.size())) {
+ for (Path path : paths) {
+ futures.add(put(fs, path, path.getName()));
+ }
+ waitForCompletion(futures);
+ return paths;
+ }
+ }
+
/**
* Helper class to do diffs of metrics.
*/
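
A short usage sketch for the createFiles helper that now lives in S3ATestUtils (it is removed from ITestPartialRenamesDeletes below). The destination directory and the counts are illustrative assumptions that mirror the new ITestS3AFailureHandling test.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import static org.apache.hadoop.fs.s3a.S3ATestUtils.createFiles;

    class CreateFilesUsageSketch {
      // Creates a flat tree of 1005 files under destDir and returns their paths.
      static List<Path> populate(FileSystem fs, Path destDir) throws IOException {
        // depth 1, 1005 files, 0 subdirectories per level
        return createFiles(fs, destDir, 1, 1005, 0);
      }
    }
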
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java
index 068b7b2dda5b7..378f4a70433d7 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java
@@ -26,14 +26,9 @@
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import com.amazonaws.services.s3.model.MultiObjectDeleteException;
-import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
-import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService;
-import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors;
import org.assertj.core.api.Assertions;
import org.junit.Test;
import org.junit.runner.RunWith;
@@ -42,13 +37,11 @@
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.util.BlockingThreadPoolExecutorService;
import org.apache.hadoop.util.DurationInfo;
import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
@@ -69,13 +62,10 @@
import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.forbidden;
import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.newAssumedRoleConfig;
import static org.apache.hadoop.fs.s3a.auth.delegation.DelegationConstants.DELEGATION_TOKEN_BINDING;
-import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.submit;
-import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletion;
import static org.apache.hadoop.fs.s3a.test.ExtraAssertions.assertFileCount;
import static org.apache.hadoop.fs.s3a.test.ExtraAssertions.extractCause;
import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsSourceToString;
import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;
-import static org.apache.hadoop.test.GenericTestUtils.buildPaths;
import static org.apache.hadoop.test.LambdaTestUtils.eval;
/**
@@ -112,20 +102,6 @@ public class ITestPartialRenamesDeletes extends AbstractS3ATestBase {
private static final Statement STATEMENT_ALL_BUCKET_READ_ACCESS
= statement(true, S3_ALL_BUCKETS, S3_BUCKET_READ_OPERATIONS);
- /** Many threads for scale performance: {@value}. */
- public static final int EXECUTOR_THREAD_COUNT = 64;
-
- /**
- * For submitting work.
- */
- private static final ListeningExecutorService EXECUTOR =
- MoreExecutors.listeningDecorator(
- BlockingThreadPoolExecutorService.newInstance(
- EXECUTOR_THREAD_COUNT,
- EXECUTOR_THREAD_COUNT * 2,
- 30, TimeUnit.SECONDS,
- "test-operations"));
-
/**
* The number of files in a non-scaled test.
@@ -742,87 +718,6 @@ private Set listFilesUnderPath(Path path, boolean recursive)
return files;
}
- /**
- * Write the text to a file asynchronously. Logs the operation duration.
- * @param fs filesystem
- * @param path path
- * @return future to the patch created.
- */
- private static CompletableFuture<Path> put(FileSystem fs,
- Path path, String text) {
- return submit(EXECUTOR, () -> {
- try (DurationInfo ignore =
- new DurationInfo(LOG, false, "Creating %s", path)) {
- createFile(fs, path, true, text.getBytes(Charsets.UTF_8));
- return path;
- }
- });
- }
-
- /**
- * Build a set of files in a directory tree.
- * @param fs filesystem
- * @param destDir destination
- * @param depth file depth
- * @param fileCount number of files to create.
- * @param dirCount number of dirs to create at each level
- * @return the list of files created.
- */
- public static List<Path> createFiles(final FileSystem fs,
- final Path destDir,
- final int depth,
- final int fileCount,
- final int dirCount) throws IOException {
- return createDirsAndFiles(fs, destDir, depth, fileCount, dirCount,
- new ArrayList<Path>(fileCount),
- new ArrayList<Path>(dirCount));
- }
-
- /**
- * Build a set of files in a directory tree.
- * @param fs filesystem
- * @param destDir destination
- * @param depth file depth
- * @param fileCount number of files to create.
- * @param dirCount number of dirs to create at each level
- * @param paths [out] list of file paths created
- * @param dirs [out] list of directory paths created.
- * @return the list of files created.
- */
- public static List<Path> createDirsAndFiles(final FileSystem fs,
- final Path destDir,
- final int depth,
- final int fileCount,
- final int dirCount,
- final List<Path> paths,
- final List<Path> dirs) throws IOException {
- buildPaths(paths, dirs, destDir, depth, fileCount, dirCount);
- List<CompletableFuture<Path>> futures = new ArrayList<>(paths.size()
- + dirs.size());
-
- // create directories. With dir marker retention, that adds more entries
- // to cause deletion issues
- try (DurationInfo ignore =
- new DurationInfo(LOG, "Creating %d directories", dirs.size())) {
- for (Path path : dirs) {
- futures.add(submit(EXECUTOR, () ->{
- fs.mkdirs(path);
- return path;
- }));
- }
- waitForCompletion(futures);
- }
-
- try (DurationInfo ignore =
- new DurationInfo(LOG, "Creating %d files", paths.size())) {
- for (Path path : paths) {
- futures.add(put(fs, path, path.getName()));
- }
- waitForCompletion(futures);
- return paths;
- }
- }
-
/**
* Verifies that s3:DeleteObjectVersion is not required for rename.
*
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestRequestFactory.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestRequestFactory.java
index dbd89b960f96d..9bc3aef83aacb 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestRequestFactory.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestRequestFactory.java
@@ -164,7 +164,7 @@ private void createFactoryObjects(RequestFactory factory) {
new ArrayList<>()));
a(factory.newCopyObjectRequest(path, path2, md));
a(factory.newDeleteObjectRequest(path));
- a(factory.newBulkDeleteRequest(new ArrayList<>(), true));
+ a(factory.newBulkDeleteRequest(new ArrayList<>()));
a(factory.newDirectoryMarkerRequest(path));
a(factory.newGetObjectRequest(path));
a(factory.newGetObjectMetadataRequest(path));
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java
index d5862bcb33516..dbdd8b5da6a3c 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java
@@ -38,7 +38,7 @@
import static org.apache.hadoop.fs.s3a.Constants.EXPERIMENTAL_AWS_INTERNAL_THROTTLING;
import static org.apache.hadoop.fs.s3a.Constants.USER_AGENT_PREFIX;
import static org.apache.hadoop.fs.s3a.S3ATestUtils.lsR;
-import static org.apache.hadoop.fs.s3a.impl.ITestPartialRenamesDeletes.createFiles;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.createFiles;
import static org.apache.hadoop.test.GenericTestUtils.filenameOfIndex;
/**
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalOperationCallbacks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalOperationCallbacks.java
index a2aebc82720d1..fa1ad2db62af7 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalOperationCallbacks.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalOperationCallbacks.java
@@ -23,7 +23,6 @@
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
-import com.amazonaws.services.s3.model.DeleteObjectsResult;
import com.amazonaws.services.s3.model.MultiObjectDeleteException;
import com.amazonaws.services.s3.transfer.model.CopyResult;
@@ -99,13 +98,11 @@ public CopyResult copyFile(
}
@Override
- public DeleteObjectsResult removeKeys(
- List<DeleteObjectsRequest.KeyVersion> keysToDelete,
- boolean deleteFakeDir,
- boolean quiet)
+ public void removeKeys(
+ List<DeleteObjectsRequest.KeyVersion> keysToDelete,
+ boolean deleteFakeDir)
throws MultiObjectDeleteException, AmazonClientException,
IOException {
- return null;
}
@Override
From a32cfc21693244d70ebf07ff3c81c5d6b4ab04a5 Mon Sep 17 00:00:00 2001
From: He Xiaoqiao
Date: Sat, 12 Mar 2022 18:40:09 +0800
Subject: [PATCH 034/145] HDFS-15382. Split one FsDatasetImpl lock to block
pool grain locks. (#3941). Contributed by limingxiang.
Signed-off-by: He Xiaoqiao
Signed-off-by: litao
---
.../hdfs/server/datanode/BPServiceActor.java | 5 +
.../hdfs/server/datanode/BlockSender.java | 4 +-
.../hadoop/hdfs/server/datanode/DataNode.java | 12 +-
.../hdfs/server/datanode/DiskBalancer.java | 18 +-
.../datanode/fsdataset/FsDatasetSpi.java | 20 +-
.../fsdataset/impl/BlockPoolSlice.java | 3 +-
.../fsdataset/impl/FsDatasetImpl.java | 402 +++++++++---------
.../datanode/fsdataset/impl/FsVolumeList.java | 22 +
.../fsdataset/impl/ProvidedVolumeImpl.java | 5 +-
.../datanode/fsdataset/impl/ReplicaMap.java | 124 +++---
.../server/datanode/SimulatedFSDataset.java | 20 +-
.../server/datanode/TestBPOfferService.java | 3 +
.../server/datanode/TestBlockRecovery2.java | 7 +-
.../server/datanode/TestDirectoryScanner.java | 13 +-
.../extdataset/ExternalDatasetImpl.java | 10 +-
.../impl/FsDatasetImplTestUtils.java | 2 +-
.../fsdataset/impl/TestFsDatasetImpl.java | 121 +-----
.../fsdataset/impl/TestFsVolumeList.java | 3 +-
.../impl/TestInterDatanodeProtocol.java | 17 +-
.../fsdataset/impl/TestProvidedImpl.java | 10 +-
.../fsdataset/impl/TestReplicaMap.java | 7 +-
.../fsdataset/impl/TestWriteToReplica.java | 3 +-
22 files changed, 384 insertions(+), 447 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 6199626fd2ef6..838259d7f6a0b 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -55,6 +55,7 @@
import org.apache.hadoop.hdfs.protocolPB.DatanodeLifelineProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
+import org.apache.hadoop.hdfs.server.common.DataNodeLockManager.LockLevel;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
@@ -309,6 +310,10 @@ private void connectToNNAndHandshake() throws IOException {
// info.
NamespaceInfo nsInfo = retrieveNamespaceInfo();
+ // init block pool lock when init.
+ dn.getDataSetLockManager().addLock(LockLevel.BLOCK_POOl,
+ nsInfo.getBlockPoolID());
+
// Verify that this matches the other NN in this HA pair.
// This also initializes our block pool in the DN if we are
// the first NN connection for this BP.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 172a245b3525a..5c4212fea537f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -41,6 +41,7 @@
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.DataNodeLockManager.LockLevel;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
@@ -256,7 +257,8 @@ class BlockSender implements java.io.Closeable {
// the append write.
ChunkChecksum chunkChecksum = null;
final long replicaVisibleLength;
- try(AutoCloseableLock lock = datanode.data.acquireDatasetReadLock()) {
+ try (AutoCloseableLock lock = datanode.getDataSetLockManager().readLock(
+ LockLevel.BLOCK_POOl, block.getBlockPoolId())) {
replica = getReplica(block, datanode);
replicaVisibleLength = replica.getVisibleLength();
}
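
A minimal sketch of the per-block-pool locking pattern the HDFS-15382 hunks in this patch switch to. The lock manager, the BLOCK_POOl level and the AutoCloseableLock type are taken from the diff; the helper method itself is only an illustration, not code from the patch.

    import org.apache.hadoop.hdfs.server.common.DataNodeLockManager.LockLevel;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.util.AutoCloseableLock;

    final class BlockPoolLockSketch {
      private BlockPoolLockSketch() {
      }

      // Runs an action while holding the read lock of a single block pool,
      // instead of the old dataset-wide read lock.
      static void withBlockPoolReadLock(DataNode datanode, String bpid, Runnable action) {
        try (AutoCloseableLock lock = datanode.getDataSetLockManager()
            .readLock(LockLevel.BLOCK_POOl, bpid)) {
          action.run();
        }
      }
    }
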
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 84382275bd965..15e8a9e359799 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -146,6 +146,7 @@
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.common.DataNodeLockManager.LockLevel;
import org.apache.hadoop.hdfs.server.datanode.checker.DatasetVolumeChecker;
import org.apache.hadoop.hdfs.server.datanode.checker.StorageLocationChecker;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
@@ -433,6 +434,7 @@ public static InetSocketAddress createSocketAddr(String target) {
.availableProcessors();
private static final double CONGESTION_RATIO = 1.5;
private DiskBalancer diskBalancer;
+ private DataSetLockManager dataSetLockManager;
private final ExecutorService xferService;
@@ -474,6 +476,7 @@ private static Tracer createTracer(Configuration conf) {
this.pipelineSupportSlownode = false;
this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
this.dnConf = new DNConf(this);
+ this.dataSetLockManager = new DataSetLockManager(conf);
initOOBTimeout();
storageLocationChecker = null;
volumeChecker = new DatasetVolumeChecker(conf, new Timer());
@@ -492,6 +495,7 @@ private static Tracer createTracer(Configuration conf) {
super(conf);
this.tracer = createTracer(conf);
this.fileIoProvider = new FileIoProvider(conf, this);
+ this.dataSetLockManager = new DataSetLockManager(conf);
this.blockScanner = new BlockScanner(this);
this.lastDiskErrorCheck = 0;
this.maxNumberOfBlocksToLog = conf.getLong(DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
@@ -2461,6 +2465,7 @@ public void shutdown() {
notifyAll();
}
tracer.close();
+ dataSetLockManager.lockLeakCheck();
}
/**
@@ -3367,7 +3372,8 @@ void transferReplicaForPipelineRecovery(final ExtendedBlock b,
final BlockConstructionStage stage;
//get replica information
- try(AutoCloseableLock lock = data.acquireDatasetReadLock()) {
+ try (AutoCloseableLock lock = dataSetLockManager.readLock(
+ LockLevel.BLOCK_POOl, b.getBlockPoolId())) {
Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
b.getBlockId());
if (null == storedBlock) {
@@ -4084,6 +4090,10 @@ private static boolean isWrite(BlockConstructionStage stage) {
|| stage == PIPELINE_SETUP_APPEND_RECOVERY);
}
+ public DataSetLockManager getDataSetLockManager() {
+ return dataSetLockManager;
+ }
+
boolean isSlownodeByNameserviceId(String nsId) {
return blockPoolManager.isSlownodeByNameserviceId(nsId);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index 0f710a143ad87..4126140678759 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -25,7 +25,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi
.FsVolumeReferences;
-import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus
@@ -502,16 +501,13 @@ private void createWorkPlan(NodePlan plan) throws DiskBalancerException {
private Map<String, String> getStorageIDToVolumeBasePathMap()
throws DiskBalancerException {
Map<String, String> storageIDToVolBasePathMap = new HashMap<>();
- FsDatasetSpi.FsVolumeReferences references;
- try {
- try(AutoCloseableLock lock = this.dataset.acquireDatasetReadLock()) {
- references = this.dataset.getFsVolumeReferences();
- for (int ndx = 0; ndx < references.size(); ndx++) {
- FsVolumeSpi vol = references.get(ndx);
- storageIDToVolBasePathMap.put(vol.getStorageID(),
- vol.getBaseURI().getPath());
- }
- references.close();
+ // Get volumes snapshot so no need to acquire dataset lock.
+ try (FsDatasetSpi.FsVolumeReferences references = dataset.
+ getFsVolumeReferences()) {
+ for (int ndx = 0; ndx < references.size(); ndx++) {
+ FsVolumeSpi vol = references.get(ndx);
+ storageIDToVolBasePathMap.put(vol.getStorageID(),
+ vol.getBaseURI().getPath());
}
} catch (IOException ex) {
LOG.error("Disk Balancer - Internal Error.", ex);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index f162ea1b3ae15..e39ef817b6f29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -35,8 +35,9 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock;
+import org.apache.hadoop.hdfs.server.common.DataNodeLockManager;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MountVolumeMap;
-import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -241,7 +242,7 @@ StorageReport[] getStorageReports(String bpid)
* Gets a list of references to the finalized blocks for the given block pool.
*
* Callers of this function should call
- * {@link FsDatasetSpi#acquireDatasetLock} to avoid blocks' status being
+ * {@link FsDatasetSpi#acquireDatasetLockManager} to avoid blocks' status being
* changed during list iteration.
*
* @return a list of references to the finalized blocks for the given block
@@ -657,21 +658,12 @@ ReplicaInfo moveBlockAcrossStorage(final ExtendedBlock block,
ReplicaInfo moveBlockAcrossVolumes(final ExtendedBlock block,
FsVolumeSpi destination) throws IOException;
- /**
- * Acquire the lock of the data set. This prevents other threads from
- * modifying the volume map structure inside the datanode, but other changes
- * are still possible. For example modifying the genStamp of a block instance.
- */
- AutoCloseableLock acquireDatasetLock();
-
/***
- * Acquire the read lock of the data set. This prevents other threads from
- * modifying the volume map structure inside the datanode, but other changes
- * are still possible. For example modifying the genStamp of a block instance.
+ * Acquire the lock manager for the data set. This prevents other threads from
+ * modifying the volume map structure inside the datanode.
* @return The AutoClosable read lock instance.
*/
- AutoCloseableLock acquireDatasetReadLock();
-
+ DataNodeLockManager<? extends AutoCloseDataSetLock> acquireDatasetLockManager();
/**
* Deep copy the replica info belonging to given block pool.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 0ef1d56de34d6..eff079a353da6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -43,7 +43,6 @@
import java.util.concurrent.ForkJoinTask;
import java.util.concurrent.RecursiveAction;
import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdfs.server.datanode.FSCachingGetSpaceUsed;
@@ -914,7 +913,7 @@ void shutdown(BlockListAsLongs blocksListToPersist) {
private boolean readReplicasFromCache(ReplicaMap volumeMap,
final RamDiskReplicaTracker lazyWriteReplicaMap) {
- ReplicaMap tmpReplicaMap = new ReplicaMap(new ReentrantReadWriteLock());
+ ReplicaMap tmpReplicaMap = new ReplicaMap();
File replicaFile = new File(replicaCacheDir, REPLICA_CACHE_FILE);
// Check whether the file exists or not.
if (!replicaFile.exists()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 441a6f96e4c23..002d99abc5ba7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -32,7 +32,6 @@
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -41,9 +40,6 @@
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName;
@@ -64,6 +60,10 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.ExtendedBlockId;
+import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock;
+import org.apache.hadoop.hdfs.server.common.DataNodeLockManager;
+import org.apache.hadoop.hdfs.server.common.DataNodeLockManager.LockLevel;
+import org.apache.hadoop.hdfs.server.datanode.DataSetLockManager;
import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
@@ -119,7 +119,6 @@
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.util.InstrumentedReadWriteLock;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Sets;
@@ -188,7 +187,8 @@ public StorageReport[] getStorageReports(String bpid)
@Override
public FsVolumeImpl getVolume(final ExtendedBlock b) {
- try (AutoCloseableLock lock = datasetReadLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+ b.getBlockPoolId())) {
final ReplicaInfo r =
volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
return r != null ? (FsVolumeImpl) r.getVolume() : null;
@@ -198,7 +198,8 @@ public FsVolumeImpl getVolume(final ExtendedBlock b) {
@Override // FsDatasetSpi
public Block getStoredBlock(String bpid, long blkid)
throws IOException {
- try (AutoCloseableLock lock = datasetReadLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+ bpid)) {
ReplicaInfo r = volumeMap.get(bpid, blkid);
if (r == null) {
return null;
@@ -210,12 +211,16 @@ public Block getStoredBlock(String bpid, long blkid)
@Override
public Set<? extends Replica> deepCopyReplica(String bpid)
throws IOException {
- Set<? extends Replica> replicas = null;
- try (AutoCloseableLock lock = datasetReadLock.acquire()) {
- replicas = new HashSet<>(volumeMap.replicas(bpid) == null ? Collections.
- EMPTY_SET : volumeMap.replicas(bpid));
+ try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) {
+ Set<ReplicaInfo> replicas = new HashSet<>();
+ volumeMap.replicas(bpid, (iterator) -> {
+ while (iterator.hasNext()) {
+ ReplicaInfo b = iterator.next();
+ replicas.add(b);
+ }
+ });
+ return replicas;
}
- return Collections.unmodifiableSet(replicas);
}
/**
@@ -275,13 +280,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
private boolean blockPinningEnabled;
private final int maxDataLength;
- @VisibleForTesting
- final AutoCloseableLock datasetWriteLock;
- @VisibleForTesting
- final AutoCloseableLock datasetReadLock;
- @VisibleForTesting
- final InstrumentedReadWriteLock datasetRWLock;
- private final Condition datasetWriteLockCondition;
+ private final DataSetLockManager lockManager;
private static String blockPoolId = "";
// Make limited notify times from DirectoryScanner to NameNode.
@@ -300,33 +299,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
this.dataStorage = storage;
this.conf = conf;
this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
- this.datasetRWLock = new InstrumentedReadWriteLock(
- conf.getBoolean(DFSConfigKeys.DFS_DATANODE_LOCK_FAIR_KEY,
- DFSConfigKeys.DFS_DATANODE_LOCK_FAIR_DEFAULT),
- "FsDatasetRWLock", LOG, conf.getTimeDuration(
- DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
- DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT,
- TimeUnit.MILLISECONDS),
- conf.getTimeDuration(
- DFSConfigKeys.DFS_DATANODE_LOCK_REPORTING_THRESHOLD_MS_KEY,
- DFSConfigKeys.DFS_DATANODE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT,
- TimeUnit.MILLISECONDS));
- this.datasetWriteLock = new AutoCloseableLock(datasetRWLock.writeLock());
- boolean enableRL = conf.getBoolean(
- DFSConfigKeys.DFS_DATANODE_LOCK_READ_WRITE_ENABLED_KEY,
- DFSConfigKeys.DFS_DATANODE_LOCK_READ_WRITE_ENABLED_DEFAULT);
- // The read lock can be disabled by the above config key. If it is disabled
- // then we simply make the both the read and write lock variables hold
- // the write lock. All accesses to the lock are via these variables, so that
- // effectively disables the read lock.
- if (enableRL) {
- LOG.info("The datanode lock is a read write lock");
- this.datasetReadLock = new AutoCloseableLock(datasetRWLock.readLock());
- } else {
- LOG.info("The datanode lock is an exclusive write lock");
- this.datasetReadLock = this.datasetWriteLock;
- }
- this.datasetWriteLockCondition = datasetWriteLock.newCondition();
+ this.lockManager = datanode.getDataSetLockManager();
// The number of volumes required for operation is the total number
// of volumes minus the number of failed volumes we can tolerate.
@@ -365,7 +338,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
}
storageMap = new ConcurrentHashMap<String, DatanodeStorage>();
- volumeMap = new ReplicaMap(datasetReadLock, datasetWriteLock);
+ volumeMap = new ReplicaMap(lockManager);
ramDiskReplicaTracker = RamDiskReplicaTracker.getInstance(conf, this);
@SuppressWarnings("unchecked")
@@ -421,16 +394,6 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
lastDirScannerNotifyTime = System.currentTimeMillis();
}
- @Override
- public AutoCloseableLock acquireDatasetLock() {
- return datasetWriteLock.acquire();
- }
-
- @Override
- public AutoCloseableLock acquireDatasetReadLock() {
- return datasetReadLock.acquire();
- }
-
/**
* Gets initial volume failure information for all volumes that failed
* immediately at startup. The method works by determining the set difference
@@ -465,42 +428,40 @@ private static List getInitialVolumeFailureInfos(
* Activate a volume to serve requests.
* @throws IOException if the storage UUID already exists.
*/
- private void activateVolume(
+ private synchronized void activateVolume(
ReplicaMap replicaMap,
Storage.StorageDirectory sd, StorageType storageType,
FsVolumeReference ref) throws IOException {
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
- DatanodeStorage dnStorage = storageMap.get(sd.getStorageUuid());
- if (dnStorage != null) {
- final String errorMsg = String.format(
- "Found duplicated storage UUID: %s in %s.",
- sd.getStorageUuid(), sd.getVersionFile());
- LOG.error(errorMsg);
- throw new IOException(errorMsg);
- }
- // Check if there is same storage type on the mount.
- // Only useful when same disk tiering is turned on.
- FsVolumeImpl volumeImpl = (FsVolumeImpl) ref.getVolume();
- FsVolumeReference checkRef = volumes
- .getMountVolumeMap()
- .getVolumeRefByMountAndStorageType(
- volumeImpl.getMount(), volumeImpl.getStorageType());
- if (checkRef != null) {
- final String errorMsg = String.format(
- "Storage type %s already exists on same mount: %s.",
- volumeImpl.getStorageType(), volumeImpl.getMount());
- checkRef.close();
- LOG.error(errorMsg);
- throw new IOException(errorMsg);
- }
- volumeMap.mergeAll(replicaMap);
- storageMap.put(sd.getStorageUuid(),
- new DatanodeStorage(sd.getStorageUuid(),
- DatanodeStorage.State.NORMAL,
- storageType));
- asyncDiskService.addVolume(volumeImpl);
- volumes.addVolume(ref);
- }
+ DatanodeStorage dnStorage = storageMap.get(sd.getStorageUuid());
+ if (dnStorage != null) {
+ final String errorMsg = String.format(
+ "Found duplicated storage UUID: %s in %s.",
+ sd.getStorageUuid(), sd.getVersionFile());
+ LOG.error(errorMsg);
+ throw new IOException(errorMsg);
+ }
+ // Check if there is same storage type on the mount.
+ // Only useful when same disk tiering is turned on.
+ FsVolumeImpl volumeImpl = (FsVolumeImpl) ref.getVolume();
+ FsVolumeReference checkRef = volumes
+ .getMountVolumeMap()
+ .getVolumeRefByMountAndStorageType(
+ volumeImpl.getMount(), volumeImpl.getStorageType());
+ if (checkRef != null) {
+ final String errorMsg = String.format(
+ "Storage type %s already exists on same mount: %s.",
+ volumeImpl.getStorageType(), volumeImpl.getMount());
+ checkRef.close();
+ LOG.error(errorMsg);
+ throw new IOException(errorMsg);
+ }
+ volumeMap.mergeAll(replicaMap);
+ storageMap.put(sd.getStorageUuid(),
+ new DatanodeStorage(sd.getStorageUuid(),
+ DatanodeStorage.State.NORMAL,
+ storageType));
+ asyncDiskService.addVolume(volumeImpl);
+ volumes.addVolume(ref);
}
private void addVolume(Storage.StorageDirectory sd) throws IOException {
@@ -517,8 +478,8 @@ private void addVolume(Storage.StorageDirectory sd) throws IOException {
.setConf(this.conf)
.build();
FsVolumeReference ref = fsVolume.obtainReference();
- ReplicaMap tempVolumeMap =
- new ReplicaMap(datasetReadLock, datasetWriteLock);
+ // no need to acquire lock.
+ ReplicaMap tempVolumeMap = new ReplicaMap();
fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker);
activateVolume(tempVolumeMap, sd, storageLocation.getStorageType(), ref);
@@ -557,13 +518,13 @@ public void addVolume(final StorageLocation location,
StorageType storageType = location.getStorageType();
final FsVolumeImpl fsVolume =
createFsVolume(sd.getStorageUuid(), sd, location);
- final ReplicaMap tempVolumeMap =
- new ReplicaMap(new ReentrantReadWriteLock());
+ // no need to add lock
+ final ReplicaMap tempVolumeMap = new ReplicaMap();
ArrayList<IOException> exceptions = Lists.newArrayList();
for (final NamespaceInfo nsInfo : nsInfos) {
String bpid = nsInfo.getBlockPoolID();
- try {
+ try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
fsVolume.addBlockPool(bpid, this.conf, this.timer);
fsVolume.getVolumeMap(bpid, tempVolumeMap, ramDiskReplicaTracker);
} catch (IOException e) {
@@ -603,7 +564,9 @@ public void removeVolumes(
new ArrayList<>(storageLocsToRemove);
Map<String, List<ReplicaInfo>> blkToInvalidate = new HashMap<>();
List<String> storageToRemove = new ArrayList<>();
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ // This object lock protects the volume-related data structures, such as adding
+ // and removing volumes. The volumeMap lock is obtained again if replicaInfo is accessed.
+ synchronized (this) {
for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
final StorageLocation sdLocation = sd.getStorageLocation();
@@ -615,7 +578,7 @@ public void removeVolumes(
// Disable the volume from the service.
asyncDiskService.removeVolume(sd.getStorageUuid());
volumes.removeVolume(sdLocation, clearFailure);
- volumes.waitVolumeRemoved(5000, datasetWriteLockCondition);
+ volumes.waitVolumeRemoved(5000, this);
// Removed all replica information for the blocks on the volume.
// Unlike updating the volumeMap in addVolume(), this operation does
@@ -623,18 +586,19 @@ public void removeVolumes(
for (String bpid : volumeMap.getBlockPoolList()) {
List blocks = blkToInvalidate
.computeIfAbsent(bpid, (k) -> new ArrayList<>());
- for (Iterator<ReplicaInfo> it =
- volumeMap.replicas(bpid).iterator(); it.hasNext();) {
- ReplicaInfo block = it.next();
- final StorageLocation blockStorageLocation =
- block.getVolume().getStorageLocation();
- LOG.trace("checking for block " + block.getBlockId() +
- " with storageLocation " + blockStorageLocation);
- if (blockStorageLocation.equals(sdLocation)) {
- blocks.add(block);
- it.remove();
+ volumeMap.replicas(bpid, (iterator) -> {
+ while (iterator.hasNext()) {
+ ReplicaInfo block = iterator.next();
+ final StorageLocation blockStorageLocation =
+ block.getVolume().getStorageLocation();
+ LOG.trace("checking for block " + block.getBlockId() +
+ " with storageLocation " + blockStorageLocation);
+ if (blockStorageLocation.equals(sdLocation)) {
+ blocks.add(block);
+ iterator.remove();
+ }
}
- }
+ });
}
storageToRemove.add(sd.getStorageUuid());
storageLocationsToRemove.remove(sdLocation);
@@ -662,8 +626,8 @@ public void removeVolumes(
}
}
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
- for(String storageUuid : storageToRemove) {
+ synchronized (this) {
+ for (String storageUuid : storageToRemove) {
storageMap.remove(storageUuid);
}
}
@@ -853,7 +817,8 @@ public InputStream getBlockInputStream(ExtendedBlock b,
long seekOffset) throws IOException {
ReplicaInfo info;
- try (AutoCloseableLock lock = datasetReadLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+ b.getBlockPoolId())) {
info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
}
@@ -941,7 +906,8 @@ ReplicaInfo getReplicaInfo(String bpid, long blkid)
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
long blkOffset, long metaOffset) throws IOException {
- try (AutoCloseableLock lock = datasetReadLock.acquire()) {
+ try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl,
+ b.getBlockPoolId())) {
ReplicaInfo info = getReplicaInfo(b);
FsVolumeReference ref = info.getVolume().obtainReference();
try {
@@ -1117,7 +1083,8 @@ public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
targetStorageType, targetStorageId);
boolean useVolumeOnSameMount = false;
- try (AutoCloseableLock lock = datasetReadLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+ block.getBlockPoolId())) {
if (shouldConsiderSameMountVolume) {
volumeRef = volumes.getVolumeByMount(targetStorageType,
((FsVolumeImpl) replicaInfo.getVolume()).getMount(),
@@ -1311,7 +1278,8 @@ public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block, FsVolumeSpi
FsVolumeReference volumeRef = null;
- try (AutoCloseableLock lock = datasetReadLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+ block.getBlockPoolId())) {
volumeRef = destination.obtainReference();
}
@@ -1325,6 +1293,11 @@ public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block, FsVolumeSpi
return replicaInfo;
}
+ @Override
+ public DataNodeLockManager acquireDatasetLockManager() {
+ return lockManager;
+ }
+
/**
* Compute and store the checksum for a block file that does not already have
* its checksum computed.
@@ -1399,7 +1372,8 @@ static void computeChecksum(ReplicaInfo srcReplica, File dstMeta,
@Override // FsDatasetSpi
public ReplicaHandler append(ExtendedBlock b,
long newGS, long expectedBlockLen) throws IOException {
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl,
+ b.getBlockPoolId())) {
// If the block was successfully finalized because all packets
// were successfully processed at the Datanode but the ack for
// some of the packets were not received by the client. The client
@@ -1451,7 +1425,7 @@ public ReplicaHandler append(ExtendedBlock b,
private ReplicaInPipeline append(String bpid,
ReplicaInfo replicaInfo, long newGS, long estimateBlockLen)
throws IOException {
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
// If the block is cached, start uncaching it.
if (replicaInfo.getState() != ReplicaState.FINALIZED) {
throw new IOException("Only a Finalized replica can be appended to; "
@@ -1547,7 +1521,8 @@ public ReplicaHandler recoverAppend(
while (true) {
try {
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl,
+ b.getBlockPoolId())) {
ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
ReplicaInPipeline replica;
@@ -1579,7 +1554,8 @@ public Replica recoverClose(ExtendedBlock b, long newGS,
LOG.info("Recover failed close " + b);
while (true) {
try {
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl,
+ b.getBlockPoolId())) {
// check replica's state
ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
// bump the replica's GS
@@ -1602,7 +1578,8 @@ public ReplicaHandler createRbw(
StorageType storageType, String storageId, ExtendedBlock b,
boolean allowLazyPersist) throws IOException {
long startTimeMs = Time.monotonicNow();
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl,
+ b.getBlockPoolId())) {
ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
b.getBlockId());
if (replicaInfo != null) {
@@ -1680,7 +1657,8 @@ public ReplicaHandler recoverRbw(
try {
while (true) {
try {
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl,
+ b.getBlockPoolId())) {
ReplicaInfo replicaInfo =
getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
// check the replica's state
@@ -1711,7 +1689,8 @@ public ReplicaHandler recoverRbw(
private ReplicaHandler recoverRbwImpl(ReplicaInPipeline rbw,
ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd)
throws IOException {
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl,
+ b.getBlockPoolId())) {
// check generation stamp
long replicaGenerationStamp = rbw.getGenerationStamp();
if (replicaGenerationStamp < b.getGenerationStamp() ||
@@ -1772,7 +1751,8 @@ private ReplicaHandler recoverRbwImpl(ReplicaInPipeline rbw,
public ReplicaInPipeline convertTemporaryToRbw(
final ExtendedBlock b) throws IOException {
long startTimeMs = Time.monotonicNow();
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl,
+ b.getBlockPoolId())) {
final long blockId = b.getBlockId();
final long expectedGs = b.getGenerationStamp();
final long visible = b.getNumBytes();
@@ -1851,7 +1831,8 @@ public ReplicaHandler createTemporary(StorageType storageType,
ReplicaInfo lastFoundReplicaInfo = null;
boolean isInPipeline = false;
do {
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl,
+ b.getBlockPoolId())) {
ReplicaInfo currentReplicaInfo =
volumeMap.get(b.getBlockPoolId(), b.getBlockId());
if (currentReplicaInfo == lastFoundReplicaInfo) {
@@ -1906,7 +1887,8 @@ public ReplicaHandler createTemporary(StorageType storageType,
false);
}
long startHoldLockTimeMs = Time.monotonicNow();
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl,
+ b.getBlockPoolId())) {
FsVolumeReference ref = volumes.getNextVolume(storageType, storageId, b
.getNumBytes());
FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
@@ -1967,7 +1949,8 @@ public void finalizeBlock(ExtendedBlock b, boolean fsyncDir)
ReplicaInfo replicaInfo = null;
ReplicaInfo finalizedReplicaInfo = null;
long startTimeMs = Time.monotonicNow();
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl,
+ b.getBlockPoolId())) {
if (Thread.interrupted()) {
// Don't allow data modifications from interrupted threads
throw new IOException("Cannot finalize block from Interrupted Thread");
@@ -2003,7 +1986,7 @@ public void finalizeBlock(ExtendedBlock b, boolean fsyncDir)
private ReplicaInfo finalizeReplica(String bpid, ReplicaInfo replicaInfo)
throws IOException {
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
// Compare generation stamp of old and new replica before finalizing
if (volumeMap.get(bpid, replicaInfo.getBlockId()).getGenerationStamp()
> replicaInfo.getGenerationStamp()) {
@@ -2049,7 +2032,8 @@ private ReplicaInfo finalizeReplica(String bpid, ReplicaInfo replicaInfo)
@Override // FsDatasetSpi
public void unfinalizeBlock(ExtendedBlock b) throws IOException {
long startTimeMs = Time.monotonicNow();
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl,
+ b.getBlockPoolId())) {
ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
b.getLocalBlock());
if (replicaInfo != null &&
@@ -2107,47 +2091,50 @@ public Map getBlockReports(String bpid) {
new HashMap();
List curVolumes = null;
- try (AutoCloseableLock lock = datasetReadLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) {
curVolumes = volumes.getVolumes();
for (FsVolumeSpi v : curVolumes) {
builders.put(v.getStorageID(), BlockListAsLongs.builder(maxDataLength));
}
Set<String> missingVolumesReported = new HashSet<>();
- for (ReplicaInfo b : volumeMap.replicas(bpid)) {
- // skip PROVIDED replicas.
- if (b.getVolume().getStorageType() == StorageType.PROVIDED) {
- continue;
- }
- String volStorageID = b.getVolume().getStorageID();
- switch(b.getState()) {
- case FINALIZED:
- case RBW:
- case RWR:
- break;
- case RUR:
- // use the original replica.
- b = b.getOriginalReplica();
- break;
- case TEMPORARY:
- continue;
- default:
- assert false : "Illegal ReplicaInfo state.";
- continue;
- }
- BlockListAsLongs.Builder storageBuilder = builders.get(volStorageID);
- // a storage in the process of failing will not be in the volumes list
- // but will be in the replica map.
- if (storageBuilder != null) {
- storageBuilder.add(b);
- } else {
- if (!missingVolumesReported.contains(volStorageID)) {
- LOG.warn("Storage volume: " + volStorageID + " missing for the"
- + " replica block: " + b + ". Probably being removed!");
- missingVolumesReported.add(volStorageID);
+ volumeMap.replicas(bpid, (iterator) -> {
+ while (iterator.hasNext()) {
+ ReplicaInfo b = iterator.next();
+ // skip PROVIDED replicas.
+ if (b.getVolume().getStorageType() == StorageType.PROVIDED) {
+ continue;
+ }
+ String volStorageID = b.getVolume().getStorageID();
+ switch(b.getState()) {
+ case FINALIZED:
+ case RBW:
+ case RWR:
+ break;
+ case RUR:
+ // use the original replica.
+ b = b.getOriginalReplica();
+ break;
+ case TEMPORARY:
+ continue;
+ default:
+ assert false : "Illegal ReplicaInfo state.";
+ continue;
+ }
+ BlockListAsLongs.Builder storageBuilder = builders.get(volStorageID);
+ // a storage in the process of failing will not be in the volumes list
+ // but will be in the replica map.
+ if (storageBuilder != null) {
+ storageBuilder.add(b);
+ } else {
+ if (!missingVolumesReported.contains(volStorageID)) {
+ LOG.warn("Storage volume: " + volStorageID + " missing for the"
+ + " replica block: " + b + ". Probably being removed!");
+ missingVolumesReported.add(volStorageID);
+ }
}
}
- }
+ });
}
for (FsVolumeImpl v : curVolumes) {
@@ -2162,7 +2149,7 @@ public Map getBlockReports(String bpid) {
* Gets a list of references to the finalized blocks for the given block pool.
*
* Callers of this function should call
- * {@link FsDatasetSpi#acquireDatasetLock()} to avoid blocks' status being
+ * {@link FsDatasetSpi#acquireDatasetLockManager()} to avoid blocks' status being
* changed during list iteration.
*
* @return a list of references to the finalized blocks for the given block
@@ -2170,14 +2157,17 @@ public Map getBlockReports(String bpid) {
*/
@Override
public List getFinalizedBlocks(String bpid) {
- try (AutoCloseableLock lock = datasetReadLock.acquire()) {
- final List finalized = new ArrayList(
- volumeMap.size(bpid));
- for (ReplicaInfo b : volumeMap.replicas(bpid)) {
- if (b.getState() == ReplicaState.FINALIZED) {
- finalized.add(b);
+ try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) {
+ ArrayList<ReplicaInfo> finalized =
+ new ArrayList<>(volumeMap.size(bpid));
+ volumeMap.replicas(bpid, (iterator) -> {
+ while (iterator.hasNext()) {
+ ReplicaInfo b = iterator.next();
+ if (b.getState() == ReplicaState.FINALIZED) {
+ finalized.add(new FinalizedReplica((FinalizedReplica)b));
+ }
}
- }
+ });
return finalized;
}
}
@@ -2310,7 +2300,7 @@ private void invalidate(String bpid, Block[] invalidBlks, boolean async)
for (int i = 0; i < invalidBlks.length; i++) {
final ReplicaInfo removing;
final FsVolumeImpl v;
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
final ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
if (info == null) {
ReplicaInfo infoByBlockId =
@@ -2433,7 +2423,7 @@ private void cacheBlock(String bpid, long blockId) {
long length, genstamp;
Executor volumeExecutor;
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
ReplicaInfo info = volumeMap.get(bpid, blockId);
boolean success = false;
try {
@@ -2501,7 +2491,8 @@ public boolean isCached(String bpid, long blockId) {
@Override // FsDatasetSpi
public boolean contains(final ExtendedBlock block) {
- try (AutoCloseableLock lock = datasetReadLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+ block.getBlockPoolId())) {
final long blockId = block.getLocalBlock().getBlockId();
final String bpid = block.getBlockPoolId();
final ReplicaInfo r = volumeMap.get(bpid, blockId);
@@ -2628,7 +2619,7 @@ public void checkAndUpdate(String bpid, ScanInfo scanInfo)
curDirScannerNotifyCount = 0;
lastDirScannerNotifyTime = startTimeMs;
}
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
memBlockInfo = volumeMap.get(bpid, blockId);
if (memBlockInfo != null &&
memBlockInfo.getState() != ReplicaState.FINALIZED) {
@@ -2851,7 +2842,7 @@ public ReplicaInfo getReplica(String bpid, long blockId) {
@Override
public String getReplicaString(String bpid, long blockId) {
- try (AutoCloseableLock lock = datasetReadLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) {
final Replica r = volumeMap.get(bpid, blockId);
return r == null ? "null" : r.toString();
}
@@ -2865,12 +2856,26 @@ public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
datanode.getDnConf().getXceiverStopTimeout());
}
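+ /**
+ * Instance variant of the static initReplicaRecovery() helper below: it takes the
+ * block pool write lock from this dataset's lockManager around the recovery check
+ * and retries if an existing writer has to be stopped first.
+ */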
+ ReplicaRecoveryInfo initReplicaRecovery(String bpid, ReplicaMap map,
+ Block block, long recoveryId, long xceiverStopTimeout) throws IOException {
+ while (true) {
+ try {
+ try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
+ return initReplicaRecoveryImpl(bpid, map, block, recoveryId);
+ }
+ } catch (MustStopExistingWriter e) {
+ e.getReplicaInPipeline().stopWriter(xceiverStopTimeout);
+ }
+ }
+ }
+
/** static version of {@link #initReplicaRecovery(RecoveringBlock)}. */
static ReplicaRecoveryInfo initReplicaRecovery(String bpid, ReplicaMap map,
- Block block, long recoveryId, long xceiverStopTimeout) throws IOException {
+ Block block, long recoveryId, long xceiverStopTimeout, DataSetLockManager
+ lockManager) throws IOException {
while (true) {
try {
- try (AutoCloseableLock lock = map.getLock().acquire()) {
+ try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
return initReplicaRecoveryImpl(bpid, map, block, recoveryId);
}
} catch (MustStopExistingWriter e) {
@@ -2959,7 +2964,8 @@ public Replica updateReplicaUnderRecovery(
final long newBlockId,
final long newlength) throws IOException {
long startTimeMs = Time.monotonicNow();
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl,
+ oldBlock.getBlockPoolId())) {
//get replica
final String bpid = oldBlock.getBlockPoolId();
final ReplicaInfo replica = volumeMap.get(bpid, oldBlock.getBlockId());
@@ -3078,7 +3084,8 @@ private ReplicaInfo updateReplicaUnderRecovery(
@Override // FsDatasetSpi
public long getReplicaVisibleLength(final ExtendedBlock block)
throws IOException {
- try (AutoCloseableLock lock = datasetReadLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+ block.getBlockPoolId())) {
final Replica replica = getReplicaInfo(block.getBlockPoolId(),
block.getBlockId());
if (replica.getGenerationStamp() < block.getGenerationStamp()) {
@@ -3095,7 +3102,7 @@ public void addBlockPool(String bpid, Configuration conf)
throws IOException {
LOG.info("Adding block pool " + bpid);
AddBlockPoolException volumeExceptions = new AddBlockPoolException();
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
try {
volumes.addBlockPool(bpid, conf);
} catch (AddBlockPoolException e) {
@@ -3125,7 +3132,7 @@ public static void setBlockPoolId(String bpid) {
@Override
public void shutdownBlockPool(String bpid) {
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
LOG.info("Removing block pool " + bpid);
Map blocksPerVolume
= getBlockReports(bpid);
@@ -3199,7 +3206,7 @@ public Map getVolumeInfoMap() {
@Override //FsDatasetSpi
public void deleteBlockPool(String bpid, boolean force)
throws IOException {
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
List curVolumes = volumes.getVolumes();
if (!force) {
for (FsVolumeImpl volume : curVolumes) {
@@ -3228,7 +3235,8 @@ public void deleteBlockPool(String bpid, boolean force)
@Override // FsDatasetSpi
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
throws IOException {
- try (AutoCloseableLock lock = datasetReadLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+ block.getBlockPoolId())) {
final Replica replica = volumeMap.get(block.getBlockPoolId(),
block.getBlockId());
if (replica == null) {
@@ -3282,7 +3290,7 @@ public void clearRollingUpgradeMarker(String bpid) throws IOException {
@Override
public void onCompleteLazyPersist(String bpId, long blockId,
long creationTime, File[] savedFiles, FsVolumeImpl targetVolume) {
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpId)) {
ramDiskReplicaTracker.recordEndLazyPersist(bpId, blockId, savedFiles);
targetVolume.incDfsUsedAndNumBlocks(bpId, savedFiles[0].length()
@@ -3416,7 +3424,8 @@ private boolean saveNextReplica() {
try {
block = ramDiskReplicaTracker.dequeueNextReplicaToPersist();
if (block != null) {
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl,
+ block.getBlockPoolId())) {
replicaInfo = volumeMap.get(block.getBlockPoolId(), block.getBlockId());
// If replicaInfo is null, the block was either deleted before
@@ -3483,7 +3492,7 @@ public void evictBlocks(long bytesNeeded) throws IOException {
ReplicaInfo replicaInfo, newReplicaInfo;
final String bpid = replicaState.getBlockPoolId();
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
+ try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
replicaInfo = getReplicaInfo(replicaState.getBlockPoolId(),
replicaState.getBlockId());
Preconditions.checkState(replicaInfo.getVolume().isTransientStorage());
@@ -3661,18 +3670,21 @@ public int getVolumeCount() {
}
void stopAllDataxceiverThreads(FsVolumeImpl volume) {
- try (AutoCloseableLock lock = datasetWriteLock.acquire()) {
- for (String bpid : volumeMap.getBlockPoolList()) {
- Collection replicas = volumeMap.replicas(bpid);
- for (ReplicaInfo replicaInfo : replicas) {
- if ((replicaInfo.getState() == ReplicaState.TEMPORARY
- || replicaInfo.getState() == ReplicaState.RBW)
- && replicaInfo.getVolume().equals(volume)) {
- ReplicaInPipeline replicaInPipeline =
- (ReplicaInPipeline) replicaInfo;
- replicaInPipeline.interruptThread();
+ for (String bpid : volumeMap.getBlockPoolList()) {
+ try (AutoCloseDataSetLock lock = lockManager
+ .writeLock(LockLevel.BLOCK_POOl, bpid)) {
+ volumeMap.replicas(bpid, (iterator) -> {
+ while (iterator.hasNext()) {
+ ReplicaInfo replicaInfo = iterator.next();
+ if ((replicaInfo.getState() == ReplicaState.TEMPORARY
+ || replicaInfo.getState() == ReplicaState.RBW)
+ && replicaInfo.getVolume().equals(volume)) {
+ ReplicaInPipeline replicaInPipeline =
+ (ReplicaInPipeline) replicaInfo;
+ replicaInPipeline.interruptThread();
+ }
}
- }
+ });
}
}
}
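The hunks above replace the dataset-wide read/write lock with per-block-pool locks obtained
from the DataNode's lock manager. As a rough usage sketch only (not part of the patch), a
read-side caller is expected to scope its access like this, reusing the lockManager, volumeMap
and LockLevel.BLOCK_POOl names introduced above ("block" stands for any ExtendedBlock):

    try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl,
        block.getBlockPoolId())) {
      // look up the replica while the block pool read lock is held
      ReplicaInfo info = volumeMap.get(block.getBlockPoolId(), block.getLocalBlock());
      // ... read-only work on the replica ...
    }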
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 95470bb8ffe26..262a24bd3aa45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -345,6 +345,28 @@ void waitVolumeRemoved(int sleepMillis, Condition condition) {
FsDatasetImpl.LOG.info("Volume reference is released.");
}
+ /**
+ * Wait for the reference of the volume removed from a previous
+ * {@link #removeVolume(FsVolumeImpl)} call to be released.
+ *
+ * @param sleepMillis interval to recheck.
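+ * @param condition object whose monitor is used for the wait.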
+ */
+ void waitVolumeRemoved(int sleepMillis, Object condition) {
+ while (!checkVolumesRemoved()) {
+ if (FsDatasetImpl.LOG.isDebugEnabled()) {
+ FsDatasetImpl.LOG.debug("Waiting for volume reference to be released.");
+ }
+ try {
+ condition.wait(sleepMillis);
+ } catch (InterruptedException e) {
+ FsDatasetImpl.LOG.info("Thread interrupted when waiting for "
+ + "volume reference to be released.");
+ Thread.currentThread().interrupt();
+ }
+ }
+ FsDatasetImpl.LOG.info("Volume reference is released.");
+ }
+
@Override
public String toString() {
return volumes.toString();
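The new waitVolumeRemoved(int, Object) overload waits on a plain monitor object rather than a
Condition; because it calls condition.wait() internally, the caller must already hold that
object's monitor. A minimal sketch of the expected call pattern (illustrative only, with a
hypothetical timeout value):

    synchronized (this) {
      // "this" is whatever object the caller synchronizes on, e.g. FsDatasetImpl in removeVolumes()
      volumes.waitVolumeRemoved(5000, this);
    }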
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
index eae119712f7c4..69a46257317bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
@@ -30,7 +30,6 @@
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
@@ -134,7 +133,7 @@ static class ProvidedBlockPoolSlice {
ProvidedBlockPoolSlice(String bpid, ProvidedVolumeImpl volume,
Configuration conf) {
this.providedVolume = volume;
- bpVolumeMap = new ReplicaMap(new ReentrantReadWriteLock());
+ bpVolumeMap = new ReplicaMap();
Class<? extends BlockAliasMap> fmt =
conf.getClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS,
TextFileRegionAliasMap.class, BlockAliasMap.class);
@@ -219,7 +218,7 @@ private void incrNumBlocks() {
}
public boolean isEmpty() {
- return bpVolumeMap.replicas(bpid).size() == 0;
+ return bpVolumeMap.size(bpid) == 0;
}
public void shutdown(BlockListAsLongs blocksListsAsLongs) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
index c1d103ed50dba..25d302b86c20a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
@@ -18,46 +18,49 @@
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import java.util.Collection;
-import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
import java.util.Map;
-import java.util.concurrent.locks.ReadWriteLock;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Consumer;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock;
+import org.apache.hadoop.hdfs.server.common.DataNodeLockManager;
+import org.apache.hadoop.hdfs.server.common.DataNodeLockManager.LockLevel;
+import org.apache.hadoop.hdfs.server.common.NoLockManager;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.util.LightWeightResizableGSet;
-import org.apache.hadoop.util.AutoCloseableLock;
/**
* Maintains the replica map.
*/
class ReplicaMap {
// Lock object to synchronize this instance.
- private final AutoCloseableLock readLock;
- private final AutoCloseableLock writeLock;
-
+ private DataNodeLockManager lockManager;
+
// Map of block pool Id to another map of block Id to ReplicaInfo.
private final Map<String, LightWeightResizableGSet<Block, ReplicaInfo>> map =
- new HashMap<>();
+ new ConcurrentHashMap<>();
- ReplicaMap(AutoCloseableLock readLock, AutoCloseableLock writeLock) {
- if (readLock == null || writeLock == null) {
+ ReplicaMap(DataNodeLockManager manager) {
+ if (manager == null) {
throw new HadoopIllegalArgumentException(
- "Lock to synchronize on cannot be null");
+ "Object to synchronize on cannot be null");
}
- this.readLock = readLock;
- this.writeLock = writeLock;
+ this.lockManager = manager;
}
- ReplicaMap(ReadWriteLock lock) {
- this(new AutoCloseableLock(lock.readLock()),
- new AutoCloseableLock(lock.writeLock()));
+ // Used for unit tests or for temporary replica maps that do not need to be protected by a lock.
+ ReplicaMap() {
+ this.lockManager = new NoLockManager();
}
String[] getBlockPoolList() {
- try (AutoCloseableLock l = readLock.acquire()) {
- return map.keySet().toArray(new String[map.keySet().size()]);
- }
+ Set<String> bpset = map.keySet();
+ return bpset.toArray(new String[bpset.size()]);
}
private void checkBlockPool(String bpid) {
@@ -100,7 +103,7 @@ ReplicaInfo get(String bpid, Block block) {
*/
ReplicaInfo get(String bpid, long blockId) {
checkBlockPool(bpid);
- try (AutoCloseableLock l = readLock.acquire()) {
+ try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) {
LightWeightResizableGSet m = map.get(bpid);
return m != null ? m.get(new Block(blockId)) : null;
}
@@ -117,7 +120,7 @@ ReplicaInfo get(String bpid, long blockId) {
ReplicaInfo add(String bpid, ReplicaInfo replicaInfo) {
checkBlockPool(bpid);
checkBlock(replicaInfo);
- try (AutoCloseableLock l = writeLock.acquire()) {
+ try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
LightWeightResizableGSet m = map.get(bpid);
if (m == null) {
// Add an entry for block pool if it does not exist already
@@ -135,7 +138,7 @@ ReplicaInfo add(String bpid, ReplicaInfo replicaInfo) {
ReplicaInfo addAndGet(String bpid, ReplicaInfo replicaInfo) {
checkBlockPool(bpid);
checkBlock(replicaInfo);
- try (AutoCloseableLock l = writeLock.acquire()) {
+ try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
LightWeightResizableGSet m = map.get(bpid);
if (m == null) {
// Add an entry for block pool if it does not exist already
@@ -164,13 +167,28 @@ void addAll(ReplicaMap other) {
* Merge all entries from the given replica map into the local replica map.
*/
void mergeAll(ReplicaMap other) {
- other.map.forEach(
- (bp, replicaInfos) -> {
- replicaInfos.forEach(
- replicaInfo -> add(bp, replicaInfo)
- );
+ Set<String> bplist = other.map.keySet();
+ for (String bp : bplist) {
+ checkBlockPool(bp);
+ try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, bp)) {
+ LightWeightResizableGSet<Block, ReplicaInfo> replicaInfos = other.map.get(bp);
+ LightWeightResizableGSet<Block, ReplicaInfo> curSet = map.get(bp);
+ HashSet<ReplicaInfo> replicaSet = new HashSet<>();
+ // Copy into a temporary set first: adding to a GSet while iterating another GSet may cause an endless loop.
+ for (ReplicaInfo replicaInfo : replicaInfos) {
+ replicaSet.add(replicaInfo);
}
- );
+ for (ReplicaInfo replicaInfo : replicaSet) {
+ checkBlock(replicaInfo);
+ if (curSet == null) {
+ // Add an entry for block pool if it does not exist already
+ curSet = new LightWeightResizableGSet<>();
+ map.put(bp, curSet);
+ }
+ curSet.put(replicaInfo);
+ }
+ }
+ }
}
/**
@@ -184,7 +202,7 @@ void mergeAll(ReplicaMap other) {
ReplicaInfo remove(String bpid, Block block) {
checkBlockPool(bpid);
checkBlock(block);
- try (AutoCloseableLock l = writeLock.acquire()) {
+ try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
LightWeightResizableGSet m = map.get(bpid);
if (m != null) {
ReplicaInfo replicaInfo = m.get(block);
@@ -206,7 +224,7 @@ ReplicaInfo remove(String bpid, Block block) {
*/
ReplicaInfo remove(String bpid, long blockId) {
checkBlockPool(bpid);
- try (AutoCloseableLock l = writeLock.acquire()) {
+ try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
LightWeightResizableGSet m = map.get(bpid);
if (m != null) {
return m.remove(new Block(blockId));
@@ -221,7 +239,7 @@ ReplicaInfo remove(String bpid, long blockId) {
* @return the number of replicas in the map
*/
int size(String bpid) {
- try (AutoCloseableLock l = readLock.acquire()) {
+ try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) {
LightWeightResizableGSet m = map.get(bpid);
return m != null ? m.size() : 0;
}
@@ -229,11 +247,9 @@ int size(String bpid) {
/**
* Get a collection of the replicas for given block pool
- * This method is not synchronized. It needs to be synchronized
- * externally using the lock, both for getting the replicas
- * values from the map and iterating over it. Mutex can be accessed using
- * {@link #getLock()} method.
- *
+ * This method is not synchronized. For thread-safe iteration,
+ * use {@link #replicas(String, Consumer)} instead.
+ *
* @param bpid block pool id
* @return a collection of the replicas belonging to the block pool
*/
@@ -243,9 +259,25 @@ Collection replicas(String bpid) {
return m != null ? m.values() : null;
}
+ /**
+ * Execute the given consumer over the replicas of one block pool, protected by the LockManager.
+ * This method is synchronized.
+ *
+ * @param bpid block pool id
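+ * @param consumer consumer that is handed an iterator over the block pool's replicas.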
+ */
+ void replicas(String bpid, Consumer<Iterator<ReplicaInfo>> consumer) {
+ LightWeightResizableGSet<Block, ReplicaInfo> m = null;
+ try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) {
+ m = map.get(bpid);
+ if (m != null) {
+ m.getIterator(consumer);
+ }
+ }
+ }
+
void initBlockPool(String bpid) {
checkBlockPool(bpid);
- try (AutoCloseableLock l = writeLock.acquire()) {
+ try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
LightWeightResizableGSet m = map.get(bpid);
if (m == null) {
// Add an entry for block pool if it does not exist already
@@ -257,26 +289,8 @@ void initBlockPool(String bpid) {
void cleanUpBlockPool(String bpid) {
checkBlockPool(bpid);
- try (AutoCloseableLock l = writeLock.acquire()) {
+ try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
map.remove(bpid);
}
}
-
- /**
- * Get the lock object used for synchronizing ReplicasMap
- * @return lock object
- */
- AutoCloseableLock getLock() {
- return writeLock;
- }
-
- /**
- * Get the lock object used for synchronizing the ReplicasMap for read only
- * operations.
- * @return The read lock object
- */
- AutoCloseableLock getReadLock() {
- return readLock;
- }
-
}
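With getLock()/getReadLock() gone, callers no longer lock the ReplicaMap externally; they pass
a consumer that runs while the block-pool read lock is held. A minimal usage sketch
(illustrative only, mirroring the FsDatasetImpl hunks earlier in this patch):

    volumeMap.replicas(bpid, iterator -> {
      while (iterator.hasNext()) {
        ReplicaInfo replica = iterator.next();
        if (replica.getState() == ReplicaState.FINALIZED) {
          // process finalized replicas while the lock is held
        }
      }
    });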
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 73e8bf7cb618e..3313c7c7a0360 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -40,6 +40,8 @@
import javax.management.ObjectName;
import javax.management.StandardMBean;
+import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock;
+import org.apache.hadoop.hdfs.server.common.DataNodeLockManager;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MountVolumeMap;
import org.apache.hadoop.thirdparty.com.google.common.math.LongMath;
import org.apache.commons.lang3.ArrayUtils;
@@ -48,7 +50,6 @@
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImplTestUtils;
-import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -162,7 +163,7 @@ public static byte simulatedByte(Block b, long offsetInBlk) {
static final byte[] nullCrcFileData;
- private final AutoCloseableLock datasetLock;
+ private final DataNodeLockManager datasetLockManager;
private final FileIoProvider fileIoProvider;
static {
@@ -707,6 +708,8 @@ public SimulatedFSDataset(DataStorage storage, Configuration conf) {
public SimulatedFSDataset(DataNode datanode, DataStorage storage, Configuration conf) {
this.datanode = datanode;
+ this.datasetLockManager = datanode == null ? new DataSetLockManager() :
+ datanode.getDataSetLockManager();
int storageCount;
if (storage != null && storage.getNumStorageDirs() > 0) {
storageCount = storage.getNumStorageDirs();
@@ -721,9 +724,6 @@ public SimulatedFSDataset(DataNode datanode, DataStorage storage, Configuration
registerMBean(datanodeUuid);
this.fileIoProvider = new FileIoProvider(conf, datanode);
-
- this.datasetLock = new AutoCloseableLock();
-
this.storages = new ArrayList<>();
for (int i = 0; i < storageCount; i++) {
this.storages.add(new SimulatedStorage(
@@ -1587,14 +1587,8 @@ public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block,
}
@Override
- public AutoCloseableLock acquireDatasetLock() {
- return datasetLock.acquire();
- }
-
- @Override
- public AutoCloseableLock acquireDatasetReadLock() {
- // No RW lock implementation in simulated dataset currently.
- return datasetLock.acquire();
+ public DataNodeLockManager acquireDatasetLockManager() {
+ return null;
}
@Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index 057dd6459fdc2..0c0fe618174c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -128,6 +128,7 @@ public class TestBPOfferService {
private final int[] heartbeatCounts = new int[3];
private DataNode mockDn;
private FsDatasetSpi<?> mockFSDataset;
+ private DataSetLockManager dataSetLockManager = new DataSetLockManager();
private boolean isSlownode;
@Before
@@ -153,6 +154,7 @@ public void setupMocks() throws Exception {
// Wire the dataset to the DN.
Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
+ Mockito.doReturn(dataSetLockManager).when(mockDn).getDataSetLockManager();
}
/**
@@ -508,6 +510,7 @@ public void testPickActiveNameNode() throws Exception {
public void testBPInitErrorHandling() throws Exception {
final DataNode mockDn = Mockito.mock(DataNode.class);
Mockito.doReturn(true).when(mockDn).shouldRun();
+ Mockito.doReturn(dataSetLockManager).when(mockDn).getDataSetLockManager();
Configuration conf = new Configuration();
File dnDataDir = new File(
new File(TEST_BUILD_DATA, "testBPInitErrorHandling"), "data");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java
index 8d2df18711256..9d7c820249c65 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java
@@ -46,7 +46,6 @@
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.AutoCloseableLock;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -252,10 +251,8 @@ public void testRaceBetweenReplicaRecoveryAndFinalizeBlock()
final BlockRecoveryCommand.RecoveringBlock recoveringBlock =
new BlockRecoveryCommand.RecoveringBlock(block.getBlock(),
locations, block.getBlock().getGenerationStamp() + 1);
- try(AutoCloseableLock lock = dataNode.data.acquireDatasetLock()) {
- Thread.sleep(2000);
- dataNode.initReplicaRecovery(recoveringBlock);
- }
+ Thread.sleep(2000);
+ dataNode.initReplicaRecovery(recoveringBlock);
} catch (Exception e) {
LOG.error("Something went wrong.", e);
recoveryInitResult.set(false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index b6beb9a4878e3..74c70cec76967 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -63,6 +63,7 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.DataNodeLockManager.LockLevel;
import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
@@ -133,7 +134,8 @@ private List createFile(String fileNamePrefix, long fileLen,
/** Truncate a block file. */
private long truncateBlockFile() throws IOException {
- try (AutoCloseableLock lock = fds.acquireDatasetLock()) {
+ try (AutoCloseableLock lock = fds.acquireDatasetLockManager().writeLock(
+ LockLevel.BLOCK_POOl, bpid)) {
for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
File f = new File(b.getBlockURI());
File mf = new File(b.getMetadataURI());
@@ -158,7 +160,8 @@ private long truncateBlockFile() throws IOException {
/** Delete a block file */
private long deleteBlockFile() {
- try (AutoCloseableLock lock = fds.acquireDatasetLock()) {
+ try (AutoCloseableLock lock = fds.acquireDatasetLockManager().
+ writeLock(LockLevel.BLOCK_POOl, bpid)) {
for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
File f = new File(b.getBlockURI());
File mf = new File(b.getMetadataURI());
@@ -174,7 +177,8 @@ private long deleteBlockFile() {
/** Delete block meta file */
private long deleteMetaFile() {
- try (AutoCloseableLock lock = fds.acquireDatasetLock()) {
+ try (AutoCloseableLock lock = fds.acquireDatasetLockManager().
+ writeLock(LockLevel.BLOCK_POOl, bpid)) {
for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
// Delete a metadata file
if (b.metadataExists() && b.deleteMetadata()) {
@@ -193,7 +197,8 @@ private long deleteMetaFile() {
* @throws IOException
*/
private void duplicateBlock(long blockId) throws IOException {
- try (AutoCloseableLock lock = fds.acquireDatasetLock()) {
+ try (AutoCloseableLock lock = fds.acquireDatasetLockManager().
+ writeLock(LockLevel.BLOCK_POOl, bpid)) {
ReplicaInfo b = FsDatasetTestUtil.fetchReplicaInfo(fds, bpid, blockId);
try (FsDatasetSpi.FsVolumeReferences volumes =
fds.getFsVolumeReferences()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
index 3fbd4de721260..1c6597eb4541a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
@@ -23,8 +23,9 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock;
+import org.apache.hadoop.hdfs.server.common.DataNodeLockManager;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MountVolumeMap;
-import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
@@ -452,12 +453,7 @@ public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block,
}
@Override
- public AutoCloseableLock acquireDatasetLock() {
- return null;
- }
-
- @Override
- public AutoCloseableLock acquireDatasetReadLock() {
+ public DataNodeLockManager acquireDatasetLockManager() {
return null;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index 67176d8a1a3e6..7e8497e1ebdbc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -434,7 +434,7 @@ public void changeStoredGenerationStamp(
@Override
public Iterator getStoredReplicas(String bpid) throws IOException {
// Reload replicas from the disk.
- ReplicaMap replicaMap = new ReplicaMap(dataset.datasetRWLock);
+ ReplicaMap replicaMap = new ReplicaMap(dataset.acquireDatasetLockManager());
try (FsVolumeReferences refs = dataset.getFsVolumeReferences()) {
for (FsVolumeSpi vol : refs) {
FsVolumeImpl volume = (FsVolumeImpl) vol;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index f11f4b9d1cdb1..23a72f9acfa4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -21,11 +21,11 @@
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
-import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.datanode.DataSetLockManager;
import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner;
import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
@@ -70,7 +70,6 @@
import org.apache.hadoop.io.MultipleIOException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.FakeTimer;
import org.apache.hadoop.util.Lists;
@@ -94,7 +93,6 @@
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DN_CACHED_DFSUSED_CHECK_INTERVAL_MS;
@@ -140,6 +138,7 @@ public class TestFsDatasetImpl {
private DataNode datanode;
private DataStorage storage;
private FsDatasetImpl dataset;
+ private DataSetLockManager manager = new DataSetLockManager();
private final static String BLOCKPOOL = "BP-TEST";
@@ -213,6 +212,7 @@ public void setUp() throws IOException {
this.conf.set(DFSConfigKeys.DFS_DATANODE_REPLICA_CACHE_ROOT_DIR_KEY,
replicaCacheRootDir);
+ when(datanode.getDataSetLockManager()).thenReturn(manager);
when(datanode.getConf()).thenReturn(conf);
final DNConf dnConf = new DNConf(datanode);
when(datanode.getDnConf()).thenReturn(dnConf);
@@ -232,118 +232,6 @@ public void setUp() throws IOException {
assertEquals(0, dataset.getNumFailedVolumes());
}
- @Test(timeout=10000)
- public void testReadLockEnabledByDefault()
- throws Exception {
- final FsDatasetSpi ds = dataset;
- AtomicBoolean accessed = new AtomicBoolean(false);
- CountDownLatch latch = new CountDownLatch(1);
- CountDownLatch waiterLatch = new CountDownLatch(1);
-
- Thread holder = new Thread() {
- public void run() {
- try (AutoCloseableLock l = ds.acquireDatasetReadLock()) {
- latch.countDown();
- // wait for the waiter thread to access the lock.
- waiterLatch.await();
- } catch (Exception e) {
- }
- }
- };
-
- Thread waiter = new Thread() {
- public void run() {
- try {
- latch.await();
- } catch (InterruptedException e) {
- waiterLatch.countDown();
- return;
- }
- try (AutoCloseableLock l = ds.acquireDatasetReadLock()) {
- accessed.getAndSet(true);
- // signal the holder thread.
- waiterLatch.countDown();
- } catch (Exception e) {
- }
- }
- };
- waiter.start();
- holder.start();
- holder.join();
- waiter.join();
- // The holder thread is still holding the lock, but the waiter can still
- // run as the lock is a shared read lock.
- // Otherwise test will timeout with deadlock.
- assertEquals(true, accessed.get());
- holder.interrupt();
- }
-
- @Test(timeout=20000)
- public void testReadLockCanBeDisabledByConfig()
- throws Exception {
- HdfsConfiguration conf = new HdfsConfiguration();
- conf.setBoolean(
- DFSConfigKeys.DFS_DATANODE_LOCK_READ_WRITE_ENABLED_KEY, false);
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(1).build();
- try {
- AtomicBoolean accessed = new AtomicBoolean(false);
- cluster.waitActive();
- DataNode dn = cluster.getDataNodes().get(0);
- final FsDatasetSpi> ds = DataNodeTestUtils.getFSDataset(dn);
-
- CountDownLatch latch = new CountDownLatch(1);
- CountDownLatch waiterLatch = new CountDownLatch(1);
- Thread holder = new Thread() {
- public void run() {
- try (AutoCloseableLock l = ds.acquireDatasetReadLock()) {
- latch.countDown();
- // wait for the waiter thread to access the lock.
- waiterLatch.await();
- } catch (Exception e) {
- }
- }
- };
-
- Thread waiter = new Thread() {
- public void run() {
- try {
- // Wait for holder to get ds read lock.
- latch.await();
- } catch (InterruptedException e) {
- waiterLatch.countDown();
- return;
- }
- try (AutoCloseableLock l = ds.acquireDatasetReadLock()) {
- accessed.getAndSet(true);
- // signal the holder thread.
- waiterLatch.countDown();
- } catch (Exception e) {
- }
- }
- };
- waiter.start();
- holder.start();
- // Wait for sometime to make sure we are in deadlock,
- try {
- GenericTestUtils.waitFor(() ->
- accessed.get(),
- 100, 10000);
- fail("Waiter thread should not execute.");
- } catch (TimeoutException e) {
- }
- // Release waiterLatch to exit deadlock.
- waiterLatch.countDown();
- holder.join();
- waiter.join();
- // After releasing waiterLatch water
- // thread will be able to execute.
- assertTrue(accessed.get());
- } finally {
- cluster.shutdown();
- }
- }
-
@Test
public void testAddVolumes() throws IOException {
final int numNewVolumes = 3;
@@ -413,6 +301,7 @@ public void testAddVolumeWithSameDiskTiering() throws IOException {
final ShortCircuitRegistry shortCircuitRegistry =
new ShortCircuitRegistry(conf);
when(datanode.getShortCircuitRegistry()).thenReturn(shortCircuitRegistry);
+ when(datanode.getDataSetLockManager()).thenReturn(manager);
createStorageDirs(storage, conf, 1);
dataset = new FsDatasetImpl(datanode, storage, conf);
@@ -480,6 +369,8 @@ public void testAddVolumeWithCustomizedCapacityRatio()
final ShortCircuitRegistry shortCircuitRegistry =
new ShortCircuitRegistry(conf);
when(datanode.getShortCircuitRegistry()).thenReturn(shortCircuitRegistry);
+ when(datanode.getDataSetLockManager()).thenReturn(manager);
+
createStorageDirs(storage, conf, 0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
index 4d8e0c99980d2..1b638e28fa7de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
@@ -57,7 +57,6 @@
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY;
@@ -407,7 +406,7 @@ public void run() {
fs.close();
FsDatasetImpl fsDataset = (FsDatasetImpl) cluster.getDataNodes().get(0)
.getFSDataset();
- ReplicaMap volumeMap = new ReplicaMap(new ReentrantReadWriteLock());
+ ReplicaMap volumeMap = new ReplicaMap(fsDataset.acquireDatasetLockManager());
RamDiskReplicaTracker ramDiskReplicaMap = RamDiskReplicaTracker
.getInstance(conf, fsDataset);
FsVolumeImpl vol = (FsVolumeImpl) fsDataset.getFsVolumeReferences().get(0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
index b72b1cd1bb8bd..5290337e2f270 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
@@ -25,7 +25,6 @@
import java.net.InetSocketAddress;
import java.net.SocketTimeoutException;
import java.util.List;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
@@ -47,6 +46,7 @@
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.DataSetLockManager;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
@@ -236,7 +236,8 @@ public void testInitReplicaRecovery() throws IOException {
final long firstblockid = 10000L;
final long gs = 7777L;
final long length = 22L;
- final ReplicaMap map = new ReplicaMap(new ReentrantReadWriteLock());
+ DataSetLockManager manager = new DataSetLockManager();
+ final ReplicaMap map = new ReplicaMap(manager);
String bpid = "BP-TEST";
final Block[] blocks = new Block[5];
for(int i = 0; i < blocks.length; i++) {
@@ -252,7 +253,7 @@ public void testInitReplicaRecovery() throws IOException {
final long recoveryid = gs + 1;
final ReplicaRecoveryInfo recoveryInfo = FsDatasetImpl
.initReplicaRecovery(bpid, map, blocks[0], recoveryid,
- DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
+ DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT, manager);
assertEquals(originalInfo, recoveryInfo);
final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery)map.get(bpid, b);
@@ -263,7 +264,7 @@ public void testInitReplicaRecovery() throws IOException {
final long recoveryid2 = gs + 2;
final ReplicaRecoveryInfo recoveryInfo2 = FsDatasetImpl
.initReplicaRecovery(bpid, map, blocks[0], recoveryid2,
- DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
+ DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT, manager);
assertEquals(originalInfo, recoveryInfo2);
final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery)map.get(bpid, b);
@@ -273,7 +274,7 @@ public void testInitReplicaRecovery() throws IOException {
//case RecoveryInProgressException
try {
FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
- DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
+ DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT, manager);
Assert.fail();
}
catch(RecoveryInProgressException ripe) {
@@ -286,7 +287,7 @@ public void testInitReplicaRecovery() throws IOException {
final Block b = new Block(firstblockid - 1, length, gs);
ReplicaRecoveryInfo r = FsDatasetImpl.initReplicaRecovery(bpid, map, b,
recoveryid,
- DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
+ DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT, manager);
Assert.assertNull("Data-node should not have this replica.", r);
}
@@ -295,7 +296,7 @@ public void testInitReplicaRecovery() throws IOException {
final Block b = new Block(firstblockid + 1, length, gs);
try {
FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
- DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
+ DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT, manager);
Assert.fail();
}
catch(IOException ioe) {
@@ -309,7 +310,7 @@ public void testInitReplicaRecovery() throws IOException {
final Block b = new Block(firstblockid, length, gs+1);
try {
FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
- DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
+ DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT, manager);
fail("InitReplicaRecovery should fail because replica's " +
"gs is less than the block's gs");
} catch (IOException e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
index afd816864a1d7..f8d66c2f2cccf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
@@ -44,7 +44,6 @@
import java.util.Optional;
import java.util.Random;
import java.util.Set;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
@@ -66,6 +65,7 @@
import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
import org.apache.hadoop.hdfs.server.datanode.DNConf;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataSetLockManager;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner;
import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ScanInfoVolumeReport;
@@ -109,6 +109,7 @@ public class TestProvidedImpl {
private DataNode datanode;
private DataStorage storage;
private FsDatasetImpl dataset;
+ private DataSetLockManager manager = new DataSetLockManager();
private static Map blkToPathMap;
private static List providedVolumes;
private static long spaceUsed = 0;
@@ -319,6 +320,7 @@ public void setUp() throws IOException {
conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
when(datanode.getConf()).thenReturn(conf);
+ when(datanode.getDataSetLockManager()).thenReturn(manager);
final DNConf dnConf = new DNConf(datanode);
when(datanode.getDnConf()).thenReturn(dnConf);
// reset the space used
@@ -400,7 +402,7 @@ public void testProvidedVolumeImpl() throws IOException {
public void testBlockLoad() throws IOException {
for (int i = 0; i < providedVolumes.size(); i++) {
FsVolumeImpl vol = providedVolumes.get(i);
- ReplicaMap volumeMap = new ReplicaMap(new ReentrantReadWriteLock());
+ ReplicaMap volumeMap = new ReplicaMap();
vol.getVolumeMap(volumeMap, null);
assertEquals(vol.getBlockPoolList().length, BLOCK_POOL_IDS.length);
@@ -476,7 +478,7 @@ private int getBlocksInProvidedVolumes(String basePath, int numBlocks,
vol.setFileRegionProvider(BLOCK_POOL_IDS[CHOSEN_BP_ID],
new TestFileRegionBlockAliasMap(fileRegionIterator, minBlockId,
numBlocks));
- ReplicaMap volumeMap = new ReplicaMap(new ReentrantReadWriteLock());
+ ReplicaMap volumeMap = new ReplicaMap();
vol.getVolumeMap(BLOCK_POOL_IDS[CHOSEN_BP_ID], volumeMap, null);
totalBlocks += volumeMap.size(BLOCK_POOL_IDS[CHOSEN_BP_ID]);
}
@@ -586,7 +588,7 @@ public void testProvidedReplicaSuffixExtraction() {
public void testProvidedReplicaPrefix() throws Exception {
for (int i = 0; i < providedVolumes.size(); i++) {
FsVolumeImpl vol = providedVolumes.get(i);
- ReplicaMap volumeMap = new ReplicaMap(new ReentrantReadWriteLock());
+ ReplicaMap volumeMap = new ReplicaMap();
vol.getVolumeMap(volumeMap, null);
Path expectedPrefix = new Path(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
index 59203bb7d3468..3058598bb6c6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
@@ -26,13 +26,12 @@
import org.junit.Before;
import org.junit.Test;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* Unit test for ReplicasMap class
*/
public class TestReplicaMap {
- private final ReplicaMap map = new ReplicaMap(new ReentrantReadWriteLock());
+ private final ReplicaMap map = new ReplicaMap();
private final String bpid = "BP-TEST";
private final Block block = new Block(1234, 1234, 1234);
@@ -112,7 +111,7 @@ public void testRemove() {
@Test
public void testMergeAll() {
- ReplicaMap temReplicaMap = new ReplicaMap(new ReentrantReadWriteLock());
+ ReplicaMap temReplicaMap = new ReplicaMap();
Block tmpBlock = new Block(5678, 5678, 5678);
temReplicaMap.add(bpid, new FinalizedReplica(tmpBlock, null, null));
@@ -123,7 +122,7 @@ public void testMergeAll() {
@Test
public void testAddAll() {
- ReplicaMap temReplicaMap = new ReplicaMap(new ReentrantReadWriteLock());
+ ReplicaMap temReplicaMap = new ReplicaMap();
Block tmpBlock = new Block(5678, 5678, 5678);
temReplicaMap.add(bpid, new FinalizedReplica(tmpBlock, null, null));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
index e939389624130..fe78a0f2a41a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
@@ -27,7 +27,6 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
@@ -550,7 +549,7 @@ public void testReplicaMapAfterDatanodeRestart() throws Exception {
bpList.size() == 2);
createReplicas(bpList, volumes, cluster.getFsDatasetTestUtils(dn));
- ReplicaMap oldReplicaMap = new ReplicaMap(new ReentrantReadWriteLock());
+ ReplicaMap oldReplicaMap = new ReplicaMap();
oldReplicaMap.addAll(dataSet.volumeMap);
cluster.restartDataNode(0);
From 7b5eac27ff380a31428a71f5fd162a9cdee05dd8 Mon Sep 17 00:00:00 2001
From: Owen O'Malley
Date: Fri, 4 Mar 2022 16:17:46 -0800
Subject: [PATCH 035/145] HDFS-16495: RBF should prepend the client ip rather
than append it.
Fixes #4054
Signed-off-by: Owen O'Malley
---
.../org/apache/hadoop/ipc/CallerContext.java | 2 +-
.../federation/router/RouterRpcClient.java | 31 +++++++++++++------
.../federation/router/TestRouterRpc.java | 13 ++++----
.../router/TestRouterRpcMultiDestination.java | 5 ++-
4 files changed, 32 insertions(+), 19 deletions(-)
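For illustration only (not part of this patch): a minimal standalone sketch, in plain Java with no Hadoop dependency, of the ordering this change enforces. The router's own clientIp/clientPort fields come first, and fields from the original client context are re-appended only when their keys are absent. The separator characters and addresses below are assumptions; the authoritative logic is RouterRpcClient#addClientIpToCallerContext in the diff that follows.

import java.util.LinkedHashMap;
import java.util.Map;

public class CallerContextOrderSketch {
  public static void main(String[] args) {
    // Assumed separators: "," between fields, ":" between key and value.
    String fieldSeparator = ",";
    String origContext = "clientContext,clientIp:1.1.1.1,clientPort:1234";

    // Prepend the address actually seen by the router (placeholder values).
    Map<String, String> fields = new LinkedHashMap<>();
    fields.put("clientIp", "192.168.1.10");
    fields.put("clientPort", "54321");

    // Re-append original fields only when the key is not already present,
    // a simplified mirror of CallerContext.Builder#appendIfAbsent.
    for (String part : origContext.split(fieldSeparator)) {
      String[] kv = part.split(":", 2);
      fields.putIfAbsent(kv[0], kv.length == 2 ? kv[1] : null);
    }

    StringBuilder ctx = new StringBuilder();
    for (Map.Entry<String, String> e : fields.entrySet()) {
      if (ctx.length() > 0) {
        ctx.append(fieldSeparator);
      }
      ctx.append(e.getKey());
      if (e.getValue() != null) {
        ctx.append(":").append(e.getValue());
      }
    }
    // Prints: clientIp:192.168.1.10,clientPort:54321,clientContext
    System.out.println(ctx);
  }
}

The new assertions in TestRouterRpc check exactly this shape: the audit log's caller context now starts with clientIp, and the client-supplied clientIp:1.1.1.1 / clientPort:1234 values are no longer echoed.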
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
index ed8dcf2242f2c..c8b4135d088ed 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
@@ -116,7 +116,7 @@ public String toString() {
/** The caller context builder. */
public static final class Builder {
- private static final String KEY_VALUE_SEPARATOR = ":";
+ public static final String KEY_VALUE_SEPARATOR = ":";
/**
* The illegal separators include '\t', '\n', '='.
* User should not set illegal separator.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index 5683f416b3809..436418da00348 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -466,7 +466,7 @@ private Object invokeMethod(
+ router.getRouterId());
}
- appendClientIpPortToCallerContextIfAbsent();
+ addClientIpToCallerContext();
Object ret = null;
if (rpcMonitor != null) {
@@ -588,19 +588,32 @@ private Object invokeMethod(
/**
* For tracking which is the actual client address.
* It adds trace info "clientIp:ip" and "clientPort:port"
- * to caller context if they are absent.
+ * in the caller context, removing the old values if they were
+ * already present.
*/
- private void appendClientIpPortToCallerContextIfAbsent() {
+ private void addClientIpToCallerContext() {
CallerContext ctx = CallerContext.getCurrent();
String origContext = ctx == null ? null : ctx.getContext();
byte[] origSignature = ctx == null ? null : ctx.getSignature();
- CallerContext.setCurrent(
- new CallerContext.Builder(origContext, contextFieldSeparator)
- .appendIfAbsent(CLIENT_IP_STR, Server.getRemoteAddress())
- .appendIfAbsent(CLIENT_PORT_STR,
+ CallerContext.Builder builder =
+ new CallerContext.Builder("", contextFieldSeparator)
+ .append(CLIENT_IP_STR, Server.getRemoteAddress())
+ .append(CLIENT_PORT_STR,
Integer.toString(Server.getRemotePort()))
- .setSignature(origSignature)
- .build());
+ .setSignature(origSignature);
+ // Append the original caller context
+ if (origContext != null) {
+ for (String part : origContext.split(contextFieldSeparator)) {
+ String[] keyValue =
+ part.split(CallerContext.Builder.KEY_VALUE_SEPARATOR, 2);
+ if (keyValue.length == 2) {
+ builder.appendIfAbsent(keyValue[0], keyValue[1]);
+ } else if (keyValue.length == 1) {
+ builder.append(keyValue[0]);
+ }
+ }
+ }
+ CallerContext.setCurrent(builder.build());
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
index 2ab40b031495b..ae58bf8b6126f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
@@ -1950,9 +1950,10 @@ public void testMkdirsWithCallerContext() throws IOException {
FsPermission permission = new FsPermission("755");
routerProtocol.mkdirs(dirPath, permission, false);
- // The audit log should contains "callerContext=clientContext,clientIp:"
- assertTrue(auditlog.getOutput()
- .contains("callerContext=clientContext,clientIp:"));
+ // The audit log should contain "callerContext=clientIp:...,clientContext"
+ final String logOutput = auditlog.getOutput();
+ assertTrue(logOutput.contains("callerContext=clientIp:"));
+ assertTrue(logOutput.contains(",clientContext"));
assertTrue(verifyFileExists(routerFS, dirPath));
}
@@ -1997,9 +1998,9 @@ public void testAddClientIpPortToCallerContext() throws IOException {
// Create a directory via the router.
routerProtocol.getFileInfo(dirPath);
- // The audit log should contains the original clientIp and clientPort
+ // The audit log should not contain the original clientIp and clientPort
// set by client.
- assertTrue(auditLog.getOutput().contains("clientIp:1.1.1.1"));
- assertTrue(auditLog.getOutput().contains("clientPort:1234"));
+ assertFalse(auditLog.getOutput().contains("clientIp:1.1.1.1"));
+ assertFalse(auditLog.getOutput().contains("clientPort:1234"));
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
index e50464c0be7b5..370a1250a7c11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
@@ -464,9 +464,8 @@ public void testCallerContextWithMultiDestinations() throws IOException {
for (String line : auditLog.getOutput().split("\n")) {
if (line.contains(auditFlag)) {
// assert origin caller context exist in audit log
- assertTrue(line.contains("callerContext=clientContext"));
- String callerContext = line.substring(
- line.indexOf("callerContext=clientContext"));
+ String callerContext = line.substring(line.indexOf("callerContext="));
+ assertTrue(callerContext.contains("clientContext"));
// assert client ip info exist in caller context
assertTrue(callerContext.contains(clientIpInfo));
// assert client ip info appears only once in caller context
From 8b8158f02df18424b2406fd66b34c1bdf3d7ab55 Mon Sep 17 00:00:00 2001
From: Xing Lin
Date: Tue, 22 Feb 2022 15:52:03 -0800
Subject: [PATCH 036/145] HADOOP-18144: getTrashRoot in ViewFileSystem should
return a path in ViewFS.
To get the new behavior, define fs.viewfs.trash.force-inside-mount-point to be true.
If the trash root for path p is in the same mount point as path p,
and one of:
* The mount point isn't at the top of the target fs.
* The resolved path of p is the root (e.g. it is the fallback FS).
* The trash root isn't in the user's home directory on the target fs.
then get the corresponding viewFS path for the trash root and return it.
Otherwise, use /{mntpoint}/.Trash/{user}.
Signed-off-by: Owen O'Malley
---
.../apache/hadoop/fs/TrashPolicyDefault.java | 4 +-
.../apache/hadoop/fs/viewfs/Constants.java | 7 +-
.../hadoop/fs/viewfs/ViewFileSystem.java | 192 +++++++++++-------
.../TestViewFileSystemLocalFileSystem.java | 9 +
...ileSystemWithAuthorityLocalFileSystem.java | 10 +
.../fs/viewfs/ViewFileSystemBaseTest.java | 102 ++++++----
.../fs/viewfs/TestViewFileSystemHdfs.java | 8 +
7 files changed, 206 insertions(+), 126 deletions(-)
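A hedged usage sketch (not part of the patch) of the new flag; the mount-point name and target URI are made up, and a usable target filesystem is assumed. It mirrors the assertions added to ViewFileSystemBaseTest below: with the flag on, the trash root for a path under the /data mount point stays inside that mount point, expressed as a ViewFS path.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
import org.apache.hadoop.security.UserGroupInformation;

public class ViewFsTrashRootSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical mount table: /data in ViewFS backed by a local directory.
    ConfigUtil.addLink(conf, "/data", URI.create("file:///tmp/viewfs-data"));
    // Opt in to the behavior introduced by this patch.
    conf.setBoolean("fs.viewfs.trash.force-inside-mount-point", true);

    FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
    String user = UserGroupInformation.getCurrentUser().getShortUserName();

    // Expected to end with /data/.Trash/<user>, i.e. a ViewFS path inside
    // the mount point, rather than a trash root in the target fs home dir.
    Path trashRoot = viewFs.getTrashRoot(new Path("/data/dir/file"));
    System.out.println(trashRoot + " (expected suffix: /data/.Trash/" + user + ")");
  }
}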
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 99467f5633625..f4228dea69f49 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -191,8 +191,8 @@ public boolean moveToTrash(Path path) throws IOException {
cause = e;
}
}
- throw (IOException)
- new IOException("Failed to move to trash: " + path).initCause(cause);
+ throw new IOException("Failed to move " + path + " to trash " + trashPath,
+ cause);
}
@SuppressWarnings("deprecation")
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
index 94b07973ac824..21f4d99f891c2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
@@ -134,8 +134,9 @@ public interface Constants {
HCFSMountTableConfigLoader.class;
/**
- * Enable ViewFileSystem to return a trashRoot which is local to mount point.
+ * Force ViewFileSystem to return a trashRoot that is inside a mount point.
*/
- String CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH = "fs.viewfs.mount.point.local.trash";
- boolean CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH_DEFAULT = false;
+ String CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT =
+ "fs.viewfs.trash.force-inside-mount-point";
+ boolean CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT_DEFAULT = false;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 8c3cdb82d7447..fde0faa59b45e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -25,8 +25,8 @@
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT;
import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
-import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH;
-import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH_DEFAULT;
+import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT;
+import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT_DEFAULT;
import java.util.function.Function;
import java.io.FileNotFoundException;
@@ -1132,47 +1132,77 @@ public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
* Get the trash root directory for current user when the path
* specified is deleted.
*
- * If CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH is not set, return
- * the default trash root from targetFS.
+ * If FORCE_INSIDE_MOUNT_POINT flag is not set, return the default trash root
+ * from targetFS.
*
- * When CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH is set to true,
- * 1) If path p is in fallback FS or from the same mount point as the default
- * trash root for targetFS, return the default trash root for targetFS.
- * 2) else, return a trash root in the mounted targetFS
- * (/mntpoint/.Trash/{user})
+ * When FORCE_INSIDE_MOUNT_POINT is set to true,
+ * <ul>
+ * <li>
+ * If the trash root for path p is in the same mount point as path p,
+ * and one of:
+ * <ul>
+ * <li>The mount point isn't at the top of the target fs.</li>
+ * <li>The resolved path of path is root (in fallback FS).</li>
+ * <li>The trash isn't in user's target fs home directory;
+ * get the corresponding viewFS path for the trash root and return
+ * it.</li>
+ * </ul>
+ * </li>
+ * <li>
+ * else, return the trash root under the root of the mount point
+ * (/{mntpoint}/.Trash/{user}).
+ * </li>
+ * </ul>
+ *
+ * These conditions handle several different important cases:
+ * <ul>
+ * <li>File systems may need to have more local trash roots, such as
+ * encryption zones or snapshot roots.</li>
+ * <li>The fallback mount should use the user's home directory.</li>
+ * <li>Cloud storage systems should not use trash in an implicitly defined
+ * home directory, per a container, unless it is the fallback fs.</li>
+ * </ul>
+ *
* @param path the trash root of the path to be determined.
* @return the trash root path.
*/
@Override
public Path getTrashRoot(Path path) {
- boolean useMountPointLocalTrash =
- config.getBoolean(CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH,
- CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH_DEFAULT);
try {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(path), true);
+ Path targetFSTrashRoot =
+ res.targetFileSystem.getTrashRoot(res.remainingPath);
- Path trashRoot = res.targetFileSystem.getTrashRoot(res.remainingPath);
- if (!useMountPointLocalTrash) {
- return trashRoot;
- } else {
- // Path p is either in a mount point or in the fallback FS
+ // Allow clients to use old behavior of delegating to target fs.
+ if (!config.getBoolean(CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT,
+ CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT_DEFAULT)) {
+ return targetFSTrashRoot;
+ }
- if (ROOT_PATH.equals(new Path(res.resolvedPath))
- || trashRoot.toUri().getPath().startsWith(res.resolvedPath)) {
- // Path p is in the fallback FS or targetFileSystem.trashRoot is in
- // the same mount point as Path p
- return trashRoot;
- } else {
- // targetFileSystem.trashRoot is in a different mount point from
- // Path p. Return the trash root for the mount point.
- Path mountPointRoot =
- res.targetFileSystem.getFileStatus(new Path("/")).getPath();
- return new Path(mountPointRoot,
- TRASH_PREFIX + "/" + ugi.getShortUserName());
- }
+ // The trash root path from the target fs
+ String targetFSTrashRootPath = targetFSTrashRoot.toUri().getPath();
+ // The mount point path in the target fs
+ String mountTargetPath = res.targetFileSystem.getUri().getPath();
+ if (!mountTargetPath.endsWith("/")) {
+ mountTargetPath = mountTargetPath + "/";
+ }
+
+ Path targetFsUserHome = res.targetFileSystem.getHomeDirectory();
+ if (targetFSTrashRootPath.startsWith(mountTargetPath) &&
+ !(mountTargetPath.equals(ROOT_PATH.toString()) &&
+ !res.resolvedPath.equals(ROOT_PATH.toString()) &&
+ (targetFsUserHome != null && targetFSTrashRootPath.startsWith(
+ targetFsUserHome.toUri().getPath())))) {
+ String relativeTrashRoot =
+ targetFSTrashRootPath.substring(mountTargetPath.length());
+ return makeQualified(new Path(res.resolvedPath, relativeTrashRoot));
+ } else {
+ // Return the trash root for the mount point.
+ return makeQualified(new Path(res.resolvedPath,
+ TRASH_PREFIX + "/" + ugi.getShortUserName()));
}
} catch (IOException | IllegalArgumentException e) {
throw new NotInMountpointException(path, "getTrashRoot");
@@ -1182,72 +1212,78 @@ public Path getTrashRoot(Path path) {
/**
* Get all the trash roots for current user or all users.
*
+ * When FORCE_INSIDE_MOUNT_POINT is set to true, we also return trash roots
+ * under the root of each mount point, with their viewFS paths.
+ *
* @param allUsers return trash roots for all users if true.
* @return all Trash root directories.
*/
@Override
public Collection<FileStatus> getTrashRoots(boolean allUsers) {
- List<FileStatus> trashRoots = new ArrayList<>();
+ // A map from targetFSPath -> FileStatus.
+ // FileStatus can be from targetFS or viewFS.
+ HashMap<Path, FileStatus> trashRoots = new HashMap<>();
for (FileSystem fs : getChildFileSystems()) {
- trashRoots.addAll(fs.getTrashRoots(allUsers));
+ for (FileStatus trash : fs.getTrashRoots(allUsers)) {
+ trashRoots.put(trash.getPath(), trash);
+ }
}
- // Add trash dirs for each mount point
- boolean useMountPointLocalTrash =
- config.getBoolean(CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH,
- CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH_DEFAULT);
- if (useMountPointLocalTrash) {
+ // Return trashRoots if FORCE_INSIDE_MOUNT_POINT is disabled.
+ if (!config.getBoolean(CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT,
+ CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT_DEFAULT)) {
+ return trashRoots.values();
+ }
- Set<Path> currentTrashPaths = new HashSet<>();
- for (FileStatus file : trashRoots) {
- currentTrashPaths.add(file.getPath());
- }
+ // Get trash roots in TRASH_PREFIX dir inside mount points and fallback FS.
+ List<InodeTree.MountPoint<FileSystem>> mountPoints =
+ fsState.getMountPoints();
+ // If we have a fallback FS, add a mount point for it as <"", fallback FS>.
+ // The source path of a mount point shall not end with '/', thus for
+ // fallback fs, we set its mount point src as "".
+ if (fsState.getRootFallbackLink() != null) {
+ mountPoints.add(new InodeTree.MountPoint<>("",
+ fsState.getRootFallbackLink()));
+ }
- MountPoint[] mountPoints = getMountPoints();
- try {
- for (int i = 0; i < mountPoints.length; i++) {
- Path trashRoot = makeQualified(
- new Path(mountPoints[i].mountedOnPath + "/" + TRASH_PREFIX));
+ try {
+ for (InodeTree.MountPoint<FileSystem> mountPoint : mountPoints) {
- // Continue if trashRoot does not exist for this filesystem
- if (!exists(trashRoot)) {
- continue;
- }
+ Path trashRoot =
+ makeQualified(new Path(mountPoint.src + "/" + TRASH_PREFIX));
- InodeTree.ResolveResult<FileSystem> res =
- fsState.resolve(getUriPath(trashRoot), true);
-
- if (!allUsers) {
- Path userTrash =
- new Path("/" + TRASH_PREFIX + "/" + ugi.getShortUserName());
- try {
- FileStatus file = res.targetFileSystem.getFileStatus(userTrash);
- if (!currentTrashPaths.contains(file.getPath())) {
- trashRoots.add(file);
- currentTrashPaths.add(file.getPath());
- }
- } catch (FileNotFoundException ignored) {
- }
- } else {
- FileStatus[] targetFsTrashRoots =
- res.targetFileSystem.listStatus(new Path("/" + TRASH_PREFIX));
- for (FileStatus file : targetFsTrashRoots) {
- // skip if we already include it in currentTrashPaths
- if (currentTrashPaths.contains(file.getPath())) {
- continue;
- }
+ // Continue if trashRoot does not exist for this mount point
+ if (!exists(trashRoot)) {
+ continue;
+ }
- trashRoots.add(file);
- currentTrashPaths.add(file.getPath());
- }
+ FileSystem targetFS = mountPoint.target.getTargetFileSystem();
+ if (!allUsers) {
+ Path userTrashRoot = new Path(trashRoot, ugi.getShortUserName());
+ if (exists(userTrashRoot)) {
+ Path targetFSUserTrashRoot = targetFS.makeQualified(
+ new Path(targetFS.getUri().getPath(),
+ TRASH_PREFIX + "/" + ugi.getShortUserName()));
+ trashRoots.put(targetFSUserTrashRoot, getFileStatus(userTrashRoot));
+ }
+ } else {
+ FileStatus[] mountPointTrashRoots = listStatus(trashRoot);
+ for (FileStatus trash : mountPointTrashRoots) {
+ // Remove the mountPoint and the leading '/' to get the
+ // relative targetFsTrash path
+ String targetFsTrash = trash.getPath().toUri().getPath()
+ .substring(mountPoint.src.length() + 1);
+ Path targetFsTrashPath = targetFS.makeQualified(
+ new Path(targetFS.getUri().getPath(), targetFsTrash));
+ trashRoots.put(targetFsTrashPath, trash);
}
}
- } catch (IOException e) {
- LOG.warn("Exception in get all trash roots", e);
}
+ } catch (IOException e) {
+ LOG.warn("Exception in get all trash roots for mount points", e);
}
- return trashRoots;
+ return trashRoots.values();
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java
index 808d8b06c35ba..adc5db87e7725 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java
@@ -33,6 +33,8 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX;
+import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.Before;
@@ -61,6 +63,13 @@ public void setUp() throws Exception {
}
+ @Override
+ Path getTrashRootInFallBackFS() throws IOException {
+ return new Path(
+ "/" + TRASH_PREFIX + "/" + UserGroupInformation.getCurrentUser()
+ .getShortUserName());
+ }
+
@Test
public void testNflyWriteSimple() throws IOException {
LOG.info("Starting testNflyWriteSimple");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java
index 877c2228c1eea..9223338f34bf5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAuthorityLocalFileSystem.java
@@ -25,6 +25,9 @@
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
+import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX;
+import org.apache.hadoop.security.UserGroupInformation;
+import java.io.IOException;
import org.junit.After;
import org.junit.Assert;
@@ -63,6 +66,13 @@ public void tearDown() throws Exception {
super.tearDown();
}
+ @Override
+ Path getTrashRootInFallBackFS() throws IOException {
+ return new Path(
+ "/" + TRASH_PREFIX + "/" + UserGroupInformation.getCurrentUser()
+ .getShortUserName());
+ }
+
@Override
@Test
public void testBasicPaths() {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index 91a90751e873e..90ffa06bfce30 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -69,7 +69,7 @@
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE;
import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
-import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH;
+import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT;
import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX;
import org.junit.After;
@@ -1104,35 +1104,46 @@ public void testTrashRoot() throws IOException {
Assert.assertTrue("", fsView.getTrashRoots(true).size() > 0);
}
+ // Default implementation of getTrashRoot for a fallback FS mounted at root:
+ // e.g., fallbackFS.uri.getPath = '/'
+ Path getTrashRootInFallBackFS() throws IOException {
+ return new Path(fsTarget.getHomeDirectory().toUri().getPath(),
+ TRASH_PREFIX);
+ }
+
/**
- * Test the localized trash root for getTrashRoot.
+ * Test TRASH_FORCE_INSIDE_MOUNT_POINT feature for getTrashRoot.
*/
@Test
- public void testTrashRootLocalizedTrash() throws IOException {
+ public void testTrashRootForceInsideMountPoint() throws IOException {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
Configuration conf2 = new Configuration(conf);
- conf2.setBoolean(CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH, true);
+ conf2.setBoolean(CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT, true);
ConfigUtil.addLinkFallback(conf2, targetTestRoot.toUri());
FileSystem fsView2 = FileSystem.get(FsConstants.VIEWFS_URI, conf2);
- // Case 1: path p not in the default FS.
- // Return a trash root within the mount point.
+ // Case 1: path p in the /data mount point.
+ // Return a trash root within the /data mount point.
Path dataTestPath = new Path("/data/dir/file");
- Path dataTrashRoot = new Path(targetTestRoot,
- "data/" + TRASH_PREFIX + "/" + ugi.getShortUserName());
+ Path dataTrashRoot = fsView2.makeQualified(
+ new Path("/data/" + TRASH_PREFIX + "/" + ugi.getShortUserName()));
Assert.assertEquals(dataTrashRoot, fsView2.getTrashRoot(dataTestPath));
- // Case 2: path p not found in mount table, fall back to the default FS
- // Return a trash root in user home dir
+ // Case 2: path p not found in mount table.
+ // Return a trash root in fallback FS.
Path nonExistentPath = new Path("/nonExistentDir/nonExistentFile");
- Path userTrashRoot = new Path(fsTarget.getHomeDirectory(), TRASH_PREFIX);
- Assert.assertEquals(userTrashRoot, fsView2.getTrashRoot(nonExistentPath));
+ Path expectedTrash =
+ fsView2.makeQualified(getTrashRootInFallBackFS());
+ Assert.assertEquals(expectedTrash, fsView2.getTrashRoot(nonExistentPath));
- // Case 3: turn off the CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH flag.
+ // Case 3: turn off the CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT flag.
// Return a trash root in user home dir.
- conf2.setBoolean(CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH, false);
+ conf2.setBoolean(CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT, false);
fsView2 = FileSystem.get(FsConstants.VIEWFS_URI, conf2);
- Assert.assertEquals(userTrashRoot, fsView2.getTrashRoot(dataTestPath));
+ Path targetFSUserHomeTrashRoot = fsTarget.makeQualified(
+ new Path(fsTarget.getHomeDirectory(), TRASH_PREFIX));
+ Assert.assertEquals(targetFSUserHomeTrashRoot,
+ fsView2.getTrashRoot(dataTestPath));
// Case 4: viewFS without fallback. Expect exception for a nonExistent path
conf2 = new Configuration(conf);
@@ -1141,30 +1152,14 @@ public void testTrashRootLocalizedTrash() throws IOException {
fsView2.getTrashRoot(nonExistentPath);
} catch (NotInMountpointException ignored) {
}
-
- // Case 5: path p is in the same mount point as targetFS.getTrashRoot().
- // Return targetFS.getTrashRoot()
- // Use a new Configuration object, so that we can start with an empty
- // mount table. This would avoid a conflict between the /user link in
- // setupMountPoints() and homeDir we will need to setup for this test.
- // default homeDir for hdfs is /user/.
- Configuration conf3 = ViewFileSystemTestSetup.createConfig();
- Path homeDir = fsTarget.getHomeDirectory();
- String homeParentDir = homeDir.getParent().toUri().getPath();
- conf3.setBoolean(CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH, true);
- ConfigUtil.addLink(conf3, homeParentDir,
- new Path(targetTestRoot, homeParentDir).toUri());
- Path homeTestPath = new Path(homeDir.toUri().getPath(), "testuser/file");
- FileSystem fsView3 = FileSystem.get(FsConstants.VIEWFS_URI, conf3);
- Assert.assertEquals(userTrashRoot, fsView3.getTrashRoot(homeTestPath));
}
/**
* A mocked FileSystem which returns a deep trash dir.
*/
- static class MockTrashRootFS extends MockFileSystem {
+ static class DeepTrashRootMockFS extends MockFileSystem {
public static final Path TRASH =
- new Path("/mnt/very/deep/deep/trash/dir/.Trash");
+ new Path("/vol/very/deep/deep/trash/dir/.Trash");
@Override
public Path getTrashRoot(Path path) {
@@ -1173,28 +1168,31 @@ public Path getTrashRoot(Path path) {
}
/**
- * Test a trash root that is inside a mount point for getTrashRoot
+ * Test getTrashRoot that is very deep inside a mount point.
*/
@Test
public void testTrashRootDeepTrashDir() throws IOException {
Configuration conf2 = ViewFileSystemTestSetup.createConfig();
- conf2.setBoolean(CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH, true);
- conf2.setClass("fs.mocktrashfs.impl", MockTrashRootFS.class,
+ conf2.setBoolean(CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT, true);
+ conf2.setClass("fs.mocktrashfs.impl", DeepTrashRootMockFS.class,
FileSystem.class);
- ConfigUtil.addLink(conf2, "/mnt", URI.create("mocktrashfs://mnt/path"));
- Path testPath = new Path(MockTrashRootFS.TRASH, "projs/proj");
+ ConfigUtil.addLink(conf2, "/mnt/datavol1",
+ URI.create("mocktrashfs://localhost/vol"));
+ Path testPath = new Path("/mnt/datavol1/projs/proj");
FileSystem fsView2 = FileSystem.get(FsConstants.VIEWFS_URI, conf2);
- Assert.assertEquals(MockTrashRootFS.TRASH, fsView2.getTrashRoot(testPath));
+ Path expectedTrash = fsView2.makeQualified(
+ new Path("/mnt/datavol1/very/deep/deep/trash/dir/.Trash"));
+ Assert.assertEquals(expectedTrash, fsView2.getTrashRoot(testPath));
}
/**
- * Test localized trash roots in getTrashRoots() for all users.
+ * Test getTrashRoots() for all users.
*/
@Test
public void testTrashRootsAllUsers() throws IOException {
Configuration conf2 = new Configuration(conf);
- conf2.setBoolean(CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH, true);
+ conf2.setBoolean(CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT, true);
FileSystem fsView2 = FileSystem.get(FsConstants.VIEWFS_URI, conf2);
// Case 1: verify correct trash roots from fsView and fsView2
@@ -1230,17 +1228,27 @@ public void testTrashRootsAllUsers() throws IOException {
FileSystem fsView4 = FileSystem.get(FsConstants.VIEWFS_URI, conf4);
int trashRootsNum4 = fsView4.getTrashRoots(true).size();
Assert.assertEquals(afterTrashRootsNum2 + 2, trashRootsNum4);
+
+ // Case 4: test trash roots in fallback FS
+ fsTarget.mkdirs(new Path(targetTestRoot, ".Trash/user10"));
+ fsTarget.mkdirs(new Path(targetTestRoot, ".Trash/user11"));
+ fsTarget.mkdirs(new Path(targetTestRoot, ".Trash/user12"));
+ Configuration conf5 = new Configuration(conf2);
+ ConfigUtil.addLinkFallback(conf5, targetTestRoot.toUri());
+ FileSystem fsView5 = FileSystem.get(FsConstants.VIEWFS_URI, conf5);
+ int trashRootsNum5 = fsView5.getTrashRoots(true).size();
+ Assert.assertEquals(afterTrashRootsNum2 + 3, trashRootsNum5);
}
/**
- * Test localized trash roots in getTrashRoots() for current user.
+ * Test getTrashRoots() for current user.
*/
@Test
public void testTrashRootsCurrentUser() throws IOException {
String currentUser =
UserGroupInformation.getCurrentUser().getShortUserName();
Configuration conf2 = new Configuration(conf);
- conf2.setBoolean(CONFIG_VIEWFS_MOUNT_POINT_LOCAL_TRASH, true);
+ conf2.setBoolean(CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT, true);
FileSystem fsView2 = FileSystem.get(FsConstants.VIEWFS_URI, conf2);
int beforeTrashRootNum = fsView.getTrashRoots(false).size();
@@ -1256,6 +1264,14 @@ public void testTrashRootsCurrentUser() throws IOException {
int afterTrashRootsNum2 = fsView2.getTrashRoots(false).size();
Assert.assertEquals(beforeTrashRootNum, afterTrashRootsNum);
Assert.assertEquals(beforeTrashRootNum2 + 2, afterTrashRootsNum2);
+
+ // Test trash roots in fallback FS
+ Configuration conf3 = new Configuration(conf2);
+ fsTarget.mkdirs(new Path(targetTestRoot, TRASH_PREFIX + "/" + currentUser));
+ ConfigUtil.addLinkFallback(conf3, targetTestRoot.toUri());
+ FileSystem fsView3 = FileSystem.get(FsConstants.VIEWFS_URI, conf3);
+ int trashRootsNum3 = fsView3.getTrashRoots(false).size();
+ Assert.assertEquals(afterTrashRootsNum2 + 1, trashRootsNum3);
}
@Test(expected = NotInMountpointException.class)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index fdc746464f4e5..62dc3076d00a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -55,6 +55,7 @@
import org.apache.hadoop.test.GenericTestUtils;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
+import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX;
import org.junit.After;
import org.junit.AfterClass;
@@ -181,6 +182,13 @@ int getExpectedDelegationTokenCountWithCredentials() {
return 2;
}
+ @Override
+ Path getTrashRootInFallBackFS() throws IOException {
+ return new Path(
+ "/" + TRASH_PREFIX + "/" + UserGroupInformation.getCurrentUser()
+ .getShortUserName());
+ }
+
@Test
public void testTrashRootsAfterEncryptionZoneDeletion() throws Exception {
final Path zone = new Path("/EZ");
From 1c0bc35305aea6ab8037241fab10862615f3e296 Mon Sep 17 00:00:00 2001
From: Viraj Jasani
Date: Wed, 16 Mar 2022 07:02:29 +0530
Subject: [PATCH 037/145] HDFS-16502. Reconfigure Block Invalidate limit
(#4064)
Signed-off-by: Wei-Chiu Chuang
---
.../blockmanagement/DatanodeManager.java | 33 ++++++++++++-------
.../hadoop/hdfs/server/namenode/NameNode.java | 27 ++++++++++++++-
.../namenode/TestNameNodeReconfigure.java | 31 +++++++++++++++++
.../hadoop/hdfs/tools/TestDFSAdmin.java | 21 +++++++-----
4 files changed, 91 insertions(+), 21 deletions(-)
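A small sketch, not part of the patch, of the invariant behind the new reconfiguration path: the effective limit is the larger of the configured dfs.block.invalidate.limit and 20 times the heartbeat interval, which is why the test added below expects 10000 once the heartbeat interval is raised to 500 seconds. The class and method names here are hypothetical.

public class BlockInvalidateLimitSketch {
  // Mirrors the max(configured, 20 * heartbeat interval) rule that
  // DatanodeManager#setBlockInvalidateLimit applies in this patch.
  static int effectiveLimit(int configuredLimit, long heartbeatIntervalSeconds) {
    return Math.max(20 * (int) heartbeatIntervalSeconds, configuredLimit);
  }

  public static void main(String[] args) {
    System.out.println(effectiveLimit(2500, 3));    // 2500: configured value wins
    System.out.println(effectiveLimit(2500, 500));  // 10000: 20 * 500 wins
  }
}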
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index cfb1d83ec5bc6..cb601e94f822c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -314,18 +314,12 @@ public class DatanodeManager {
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
+ 10 * 1000 * heartbeatIntervalSeconds;
- // Effected block invalidate limit is the bigger value between
- // value configured in hdfs-site.xml, and 20 * HB interval.
final int configuredBlockInvalidateLimit = conf.getInt(
DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY,
DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
- final int countedBlockInvalidateLimit = 20*(int)(heartbeatIntervalSeconds);
- this.blockInvalidateLimit = Math.max(countedBlockInvalidateLimit,
- configuredBlockInvalidateLimit);
- LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
- + ": configured=" + configuredBlockInvalidateLimit
- + ", counted=" + countedBlockInvalidateLimit
- + ", effected=" + blockInvalidateLimit);
+ // Block invalidate limit also has some dependency on heartbeat interval.
+ // Check setBlockInvalidateLimit().
+ setBlockInvalidateLimit(configuredBlockInvalidateLimit);
this.checkIpHostnameInRegistration = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY,
DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT);
@@ -2088,8 +2082,25 @@ private void setHeartbeatInterval(long intervalSeconds,
this.heartbeatRecheckInterval = recheckInterval;
this.heartbeatExpireInterval = 2L * recheckInterval + 10 * 1000
* intervalSeconds;
- this.blockInvalidateLimit = Math.max(20 * (int) (intervalSeconds),
- blockInvalidateLimit);
+ this.blockInvalidateLimit = getBlockInvalidateLimit(blockInvalidateLimit);
+ }
+
+ private int getBlockInvalidateLimitFromHBInterval() {
+ return 20 * (int) heartbeatIntervalSeconds;
+ }
+
+ private int getBlockInvalidateLimit(int configuredBlockInvalidateLimit) {
+ return Math.max(getBlockInvalidateLimitFromHBInterval(), configuredBlockInvalidateLimit);
+ }
+
+ public void setBlockInvalidateLimit(int configuredBlockInvalidateLimit) {
+ final int countedBlockInvalidateLimit = getBlockInvalidateLimitFromHBInterval();
+ // Effective block invalidate limit is the bigger value between
+ // value configured in hdfs-site.xml, and 20 * HB interval.
+ this.blockInvalidateLimit = getBlockInvalidateLimit(configuredBlockInvalidateLimit);
+ LOG.info("{} : configured={}, counted={}, effected={}",
+ DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, configuredBlockInvalidateLimit,
+ countedBlockInvalidateLimit, this.blockInvalidateLimit);
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 8cd5d25247343..ef0eef8510cd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -121,6 +121,7 @@
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_DEFAULT;
@@ -337,7 +338,8 @@ public enum OperationCategory {
DFS_IMAGE_PARALLEL_LOAD_KEY,
DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY,
DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY,
- DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY));
+ DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY,
+ DFS_BLOCK_INVALIDATE_LIMIT_KEY));
private static final String USAGE = "Usage: hdfs namenode ["
+ StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2210,6 +2212,8 @@ protected String reconfigurePropertyImpl(String property, String newVal)
|| (property.equals(DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY))
|| (property.equals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY))) {
return reconfigureSlowNodesParameters(datanodeManager, property, newVal);
+ } else if (property.equals(DFS_BLOCK_INVALIDATE_LIMIT_KEY)) {
+ return reconfigureBlockInvalidateLimit(datanodeManager, property, newVal);
} else {
throw new ReconfigurationException(property, newVal, getConf().get(
property));
@@ -2434,6 +2438,27 @@ String reconfigureSlowNodesParameters(final DatanodeManager datanodeManager,
}
}
+ private String reconfigureBlockInvalidateLimit(final DatanodeManager datanodeManager,
+ final String property, final String newVal) throws ReconfigurationException {
+ namesystem.writeLock();
+ try {
+ if (newVal == null) {
+ datanodeManager.setBlockInvalidateLimit(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
+ } else {
+ datanodeManager.setBlockInvalidateLimit(Integer.parseInt(newVal));
+ }
+ final String updatedBlockInvalidateLimit =
+ String.valueOf(datanodeManager.getBlockInvalidateLimit());
+ LOG.info("RECONFIGURE* changed blockInvalidateLimit to {}", updatedBlockInvalidateLimit);
+ return updatedBlockInvalidateLimit;
+ } catch (NumberFormatException e) {
+ throw new ReconfigurationException(property, newVal, getConf().get(property), e);
+ } finally {
+ namesystem.writeUnlock();
+ }
+ }
+
+
@Override // ReconfigurableBase
protected Configuration getNewConf() {
return new HdfsConfiguration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index 33debdb14923f..2f06918c3e167 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -453,6 +453,37 @@ public void testReconfigureMaxSlowpeerCollectNodes()
assertEquals(10, datanodeManager.getMaxSlowpeerCollectNodes());
}
+ @Test
+ public void testBlockInvalidateLimit() throws ReconfigurationException {
+ final NameNode nameNode = cluster.getNameNode();
+ final DatanodeManager datanodeManager = nameNode.namesystem
+ .getBlockManager().getDatanodeManager();
+
+ assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not correctly set",
+ customizedBlockInvalidateLimit, datanodeManager.getBlockInvalidateLimit());
+
+ try {
+ nameNode.reconfigureProperty(DFS_BLOCK_INVALIDATE_LIMIT_KEY, "non-numeric");
+ fail("Should not reach here");
+ } catch (ReconfigurationException e) {
+ assertEquals(
+ "Could not change property dfs.block.invalidate.limit from '500' to 'non-numeric'",
+ e.getMessage());
+ }
+
+ nameNode.reconfigureProperty(DFS_BLOCK_INVALIDATE_LIMIT_KEY, "2500");
+
+ assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not honored after reconfiguration", 2500,
+ datanodeManager.getBlockInvalidateLimit());
+
+ nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY, "500");
+
+ // 20 * 500 (10000) > 2500
+ // Hence, the block invalidate limit should be reset to 10000
+ assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not reconfigured correctly", 10000,
+ datanodeManager.getBlockInvalidateLimit());
+ }
+
@After
public void shutDown() throws IOException {
if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 78598760df42c..0f8c4cdc8a4ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -31,6 +31,7 @@
import java.util.function.Supplier;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
@@ -433,15 +434,17 @@ public void testNameNodeGetReconfigurableProperties() throws IOException {
final List outs = Lists.newArrayList();
final List errs = Lists.newArrayList();
getReconfigurableProperties("namenode", address, outs, errs);
- assertEquals(16, outs.size());
- assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(1));
- assertEquals(DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, outs.get(2));
- assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(3));
- assertEquals(DFS_IMAGE_PARALLEL_LOAD_KEY, outs.get(4));
- assertEquals(DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY, outs.get(5));
- assertEquals(DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY, outs.get(6));
- assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(7));
- assertEquals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, outs.get(8));
+ assertEquals(17, outs.size());
+ assertTrue(outs.get(0).contains("Reconfigurable properties:"));
+ assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY, outs.get(1));
+ assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(2));
+ assertEquals(DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, outs.get(3));
+ assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(4));
+ assertEquals(DFS_IMAGE_PARALLEL_LOAD_KEY, outs.get(5));
+ assertEquals(DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY, outs.get(6));
+ assertEquals(DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY, outs.get(7));
+ assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(8));
+ assertEquals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, outs.get(9));
assertEquals(errs.size(), 0);
}
From a237526988b8d69beccd837a3e9c238cfeb6f5db Mon Sep 17 00:00:00 2001
From: He Xiaoqiao
Date: Wed, 16 Mar 2022 20:43:38 +0800
Subject: [PATCH 038/145] HDFS-16494. Removed reuse of
AvailableSpaceVolumeChoosingPolicy#initLocks(). (#4048). Contributed by
JiangHua Zhu.
---
.../datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java | 1 -
1 file changed, 1 deletion(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java
index 72ed47c67fa93..5d12fa72bb165 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java
@@ -63,7 +63,6 @@ public class AvailableSpaceVolumeChoosingPolicy
public AvailableSpaceVolumeChoosingPolicy() {
this(new Random());
- initLocks();
}
private void initLocks() {
From 7f6a891f0313528b566de3c97b95e1bf2e4e4421 Mon Sep 17 00:00:00 2001
From: litao
Date: Thu, 17 Mar 2022 12:04:48 +0800
Subject: [PATCH 039/145] HDFS-16499. [SPS]: Should not start indefinitely
while another SPS process is running (#4058)
---
.../sps/ExternalStoragePolicySatisfier.java | 12 ++++--
.../TestExternalStoragePolicySatisfier.java | 43 ++++++++++++++++++-
2 files changed, 51 insertions(+), 4 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java
index 15cdc6eb47bcf..efbee7c030f98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -96,20 +97,25 @@ private static void secureLogin(Configuration conf)
socAddr.getHostName());
}
- private static NameNodeConnector getNameNodeConnector(Configuration conf)
- throws IOException, InterruptedException {
+ public static NameNodeConnector getNameNodeConnector(Configuration conf)
+ throws InterruptedException {
final Collection