diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 67cd81ee91a96..397d81f92f60b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -214,6 +214,15 @@ public class CommonConfigurationKeysPublic {
public static final String FS_TRASH_INTERVAL_KEY = "fs.trash.interval";
/** Default value for FS_TRASH_INTERVAL_KEY */
public static final long FS_TRASH_INTERVAL_DEFAULT = 0;
+ /**
+ * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+ */
+ public static final String FS_TRASH_CLEAN_TRASHROOT_ENABLE_KEY =
+ "fs.trash.clean.trashroot.enable";
+ /** Default value for FS_TRASH_CLEAN_TRASHROOT_ENABLE_KEY. */
+ public static final boolean FS_TRASH_CLEAN_TRASHROOT_ENABLE_DEFAULT = false;
/**
* @see
*
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index f4228dea69f49..2fb4bff09f9fa 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -19,6 +19,8 @@
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CLEAN_TRASHROOT_ENABLE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CLEAN_TRASHROOT_ENABLE_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
@@ -70,6 +72,8 @@ public class TrashPolicyDefault extends TrashPolicy {
private long emptierInterval;
+ private boolean cleanNonCheckpointUnderTrashRoot;
+
public TrashPolicyDefault() { }
private TrashPolicyDefault(FileSystem fs, Configuration conf)
@@ -90,6 +94,8 @@ public void initialize(Configuration conf, FileSystem fs, Path home) {
this.emptierInterval = (long)(conf.getFloat(
FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
* MSECS_PER_MINUTE);
+ this.cleanNonCheckpointUnderTrashRoot = conf.getBoolean(
+ FS_TRASH_CLEAN_TRASHROOT_ENABLE_KEY, FS_TRASH_CLEAN_TRASHROOT_ENABLE_DEFAULT);
}
@Override
@@ -101,6 +107,8 @@ public void initialize(Configuration conf, FileSystem fs) {
this.emptierInterval = (long)(conf.getFloat(
FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
* MSECS_PER_MINUTE);
+ this.cleanNonCheckpointUnderTrashRoot = conf.getBoolean(
+ FS_TRASH_CLEAN_TRASHROOT_ENABLE_KEY, FS_TRASH_CLEAN_TRASHROOT_ENABLE_DEFAULT);
if (deletionInterval < 0) {
LOG.warn("Invalid value {} for deletion interval,"
+ " deletion interval cannot be negative."
@@ -374,8 +382,14 @@ private void deleteCheckpoint(Path trashRoot, boolean deleteImmediately)
try {
time = getTimeFromCheckpoint(name);
} catch (ParseException e) {
- LOG.warn("Unexpected item in trash: "+dir+". Ignoring.");
- continue;
+ if (cleanNonCheckpointUnderTrashRoot) {
+ fs.delete(path, true);
+ LOG.warn("Unexpected item in trash: " + dir + ". Deleting.");
+ continue;
+ } else {
+ LOG.warn("Unexpected item in trash: " + dir + ". Ignoring.");
+ continue;
+ }
}
if (((now - deletionInterval) > time) || deleteImmediately) {
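Illustrative only, not part of the patch: a minimal sketch of how a client could opt into the new cleanup behaviour, assuming the default local FileSystem. In a real cluster the NameNode runs the emptier, so the manual thread below is purely for demonstration, and the class name is made up.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Trash;

    public class TrashRootCleanupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Opt in: the emptier deletes unexpected (non-checkpoint) entries found
        // directly under a trash root instead of merely logging and ignoring them.
        conf.setBoolean("fs.trash.clean.trashroot.enable", true);
        // Keep deleted files for 60 minutes, checkpoint every 30 minutes.
        conf.setFloat("fs.trash.interval", 60);
        conf.setFloat("fs.trash.checkpoint.interval", 30);

        FileSystem fs = FileSystem.get(conf);
        Trash trash = new Trash(fs, conf);
        new Thread(trash.getEmptier(), "Trash Emptier").start();
      }
    }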
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/CachingBlockManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/CachingBlockManager.java
index e43b176d0bfe9..4461c118625a1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/CachingBlockManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/CachingBlockManager.java
@@ -110,6 +110,7 @@ public abstract class CachingBlockManager extends BlockManager {
* @param prefetchingStatistics statistics for this stream.
* @param conf the configuration.
* @param localDirAllocator the local dir allocator instance.
+ * @param maxBlocksCount max blocks count to be kept in cache at any time.
* @throws IllegalArgumentException if bufferPoolSize is zero or negative.
*/
public CachingBlockManager(
@@ -118,7 +119,8 @@ public CachingBlockManager(
int bufferPoolSize,
PrefetchingStatistics prefetchingStatistics,
Configuration conf,
- LocalDirAllocator localDirAllocator) {
+ LocalDirAllocator localDirAllocator,
+ int maxBlocksCount) {
super(blockData);
Validate.checkPositiveInteger(bufferPoolSize, "bufferPoolSize");
@@ -129,16 +131,16 @@ public CachingBlockManager(
this.numReadErrors = new AtomicInteger();
this.cachingDisabled = new AtomicBoolean();
this.prefetchingStatistics = requireNonNull(prefetchingStatistics);
+ this.conf = requireNonNull(conf);
if (this.getBlockData().getFileSize() > 0) {
this.bufferPool = new BufferPool(bufferPoolSize, this.getBlockData().getBlockSize(),
this.prefetchingStatistics);
- this.cache = this.createCache();
+ this.cache = this.createCache(maxBlocksCount);
}
this.ops = new BlockOperations();
this.ops.setDebug(false);
- this.conf = requireNonNull(conf);
this.localDirAllocator = localDirAllocator;
}
@@ -557,8 +559,8 @@ private void addToCacheAndRelease(BufferData data, Future blockFuture,
}
}
- protected BlockCache createCache() {
- return new SingleFilePerBlockCache(prefetchingStatistics);
+ protected BlockCache createCache(int maxBlocksCount) {
+ return new SingleFilePerBlockCache(prefetchingStatistics, maxBlocksCount);
}
protected void cachePut(int blockNumber, ByteBuffer buffer) throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/PrefetchConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/PrefetchConstants.java
new file mode 100644
index 0000000000000..785023f523cd5
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/PrefetchConstants.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.fs.impl.prefetch;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Constants used by prefetch implementations.
+ */
+public final class PrefetchConstants {
+
+ private PrefetchConstants() {
+ }
+
+ /**
+ * Timeout to be used by close, while acquiring prefetch block write lock.
+ * Value = {@value PREFETCH_WRITE_LOCK_TIMEOUT}
+ */
+ static final int PREFETCH_WRITE_LOCK_TIMEOUT = 5;
+
+ /**
+ * Lock timeout unit to be used by the thread while acquiring prefetch block write lock.
+ * Value = {@value PREFETCH_WRITE_LOCK_TIMEOUT_UNIT}
+ */
+ static final TimeUnit PREFETCH_WRITE_LOCK_TIMEOUT_UNIT = TimeUnit.SECONDS;
+
+}
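These constants bound how long a thread waits for a per-entry write lock before giving up on deleting a cache file. A self-contained sketch of the same timed-acquisition pattern on a plain ReentrantReadWriteLock (no Hadoop types involved; the class name is illustrative):

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class TimedWriteLockSketch {
      public static void main(String[] args) throws InterruptedException {
        ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
        // Bound the wait, as Entry.takeLock(WRITE, timeout, unit) does,
        // instead of blocking indefinitely behind an active reader.
        boolean acquired = lock.writeLock().tryLock(5, TimeUnit.SECONDS);
        if (acquired) {
          try {
            // delete the cache file, update counters, ...
          } finally {
            lock.writeLock().unlock();
          }
        } else {
          System.out.println("could not acquire write lock within 5 seconds");
        }
      }
    }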
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/SingleFilePerBlockCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/SingleFilePerBlockCache.java
index e043fbd904be8..a84a79eb77851 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/SingleFilePerBlockCache.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/prefetch/SingleFilePerBlockCache.java
@@ -47,6 +47,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.util.Preconditions;
import static java.util.Objects.requireNonNull;
import static org.apache.hadoop.fs.impl.prefetch.Validate.checkNotNull;
@@ -61,27 +62,42 @@ public class SingleFilePerBlockCache implements BlockCache {
/**
* Blocks stored in this cache.
*/
-  private final Map<Integer, Entry> blocks = new ConcurrentHashMap<>();
+  private final Map<Integer, Entry> blocks;
/**
- * Number of times a block was read from this cache.
- * Used for determining cache utilization factor.
+ * Total max blocks count, to be considered as baseline for LRU cache eviction.
*/
- private int numGets = 0;
+ private final int maxBlocksCount;
- private final AtomicBoolean closed;
+ /**
+ * The lock to be shared by LRU based linked list updates.
+ */
+ private final ReentrantReadWriteLock blocksLock;
- private final PrefetchingStatistics prefetchingStatistics;
+ /**
+ * Head of the linked list.
+ */
+ private Entry head;
+
+ /**
+ * Tail of the linked list.
+ */
+ private Entry tail;
/**
- * Timeout to be used by close, while acquiring prefetch block write lock.
+ * Total size of the linked list.
*/
- private static final int PREFETCH_WRITE_LOCK_TIMEOUT = 5;
+ private int entryListSize;
/**
- * Lock timeout unit to be used by the thread while acquiring prefetch block write lock.
+ * Number of times a block was read from this cache.
+ * Used for determining cache utilization factor.
*/
- private static final TimeUnit PREFETCH_WRITE_LOCK_TIMEOUT_UNIT = TimeUnit.SECONDS;
+ private int numGets = 0;
+
+ private final AtomicBoolean closed;
+
+ private final PrefetchingStatistics prefetchingStatistics;
/**
* File attributes attached to any intermediate temporary file created during index creation.
@@ -103,6 +119,8 @@ private enum LockType {
READ,
WRITE
}
+ private Entry previous;
+ private Entry next;
Entry(int blockNumber, Path path, int size, long checksum) {
this.blockNumber = blockNumber;
@@ -110,6 +128,8 @@ private enum LockType {
this.size = size;
this.checksum = checksum;
this.lock = new ReentrantReadWriteLock();
+ this.previous = null;
+ this.next = null;
}
@Override
@@ -166,16 +186,37 @@ private boolean takeLock(LockType lockType, long timeout, TimeUnit unit) {
}
return false;
}
+
+ private Entry getPrevious() {
+ return previous;
+ }
+
+ private void setPrevious(Entry previous) {
+ this.previous = previous;
+ }
+
+ private Entry getNext() {
+ return next;
+ }
+
+ private void setNext(Entry next) {
+ this.next = next;
+ }
}
/**
* Constructs an instance of a {@code SingleFilePerBlockCache}.
*
* @param prefetchingStatistics statistics for this stream.
+ * @param maxBlocksCount max blocks count to be kept in cache at any time.
*/
- public SingleFilePerBlockCache(PrefetchingStatistics prefetchingStatistics) {
+ public SingleFilePerBlockCache(PrefetchingStatistics prefetchingStatistics, int maxBlocksCount) {
this.prefetchingStatistics = requireNonNull(prefetchingStatistics);
this.closed = new AtomicBoolean(false);
+ this.maxBlocksCount = maxBlocksCount;
+ Preconditions.checkArgument(maxBlocksCount > 0, "maxBlocksCount should be more than 0");
+ blocks = new ConcurrentHashMap<>();
+ blocksLock = new ReentrantReadWriteLock();
}
/**
@@ -247,9 +288,60 @@ private Entry getEntry(int blockNumber) {
throw new IllegalStateException(String.format("block %d not found in cache", blockNumber));
}
numGets++;
+ addToLinkedListHead(entry);
return entry;
}
+ /**
+ * Helper method to add the given entry to the head of the linked list.
+ *
+ * @param entry Block entry to add.
+ */
+ private void addToLinkedListHead(Entry entry) {
+ blocksLock.writeLock().lock();
+ try {
+ addToHeadOfLinkedList(entry);
+ } finally {
+ blocksLock.writeLock().unlock();
+ }
+ }
+
+ /**
+ * Add the given entry to the head of the linked list.
+ *
+ * @param entry Block entry to add.
+ */
+ private void addToHeadOfLinkedList(Entry entry) {
+ if (head == null) {
+ head = entry;
+ tail = entry;
+ }
+ LOG.debug(
+ "Block num {} to be added to the head. Current head block num: {} and tail block num: {}",
+ entry.blockNumber, head.blockNumber, tail.blockNumber);
+ if (entry != head) {
+ Entry prev = entry.getPrevious();
+ Entry nxt = entry.getNext();
+ // no-op if the block is already evicted
+ if (!blocks.containsKey(entry.blockNumber)) {
+ return;
+ }
+ if (prev != null) {
+ prev.setNext(nxt);
+ }
+ if (nxt != null) {
+ nxt.setPrevious(prev);
+ }
+ entry.setPrevious(null);
+ entry.setNext(head);
+ head.setPrevious(entry);
+ head = entry;
+ if (prev != null && prev.getNext() == null) {
+ tail = prev;
+ }
+ }
+ }
+
/**
* Puts the given block in this cache.
*
@@ -278,6 +370,7 @@ public void put(int blockNumber, ByteBuffer buffer, Configuration conf,
} finally {
entry.releaseLock(Entry.LockType.READ);
}
+ addToLinkedListHead(entry);
return;
}
@@ -299,9 +392,65 @@ public void put(int blockNumber, ByteBuffer buffer, Configuration conf,
// Update stream_read_blocks_in_cache stats only after blocks map is updated with new file
// entry to avoid any discrepancy related to the value of stream_read_blocks_in_cache.
// If stream_read_blocks_in_cache is updated before updating the blocks map here, closing of
- // the input stream can lead to the removal of the cache file even before blocks is added with
- // the new cache file, leading to incorrect value of stream_read_blocks_in_cache.
+ // the input stream can lead to the removal of the cache file even before blocks is added
+ // with the new cache file, leading to incorrect value of stream_read_blocks_in_cache.
prefetchingStatistics.blockAddedToFileCache();
+ addToLinkedListAndEvictIfRequired(entry);
+ }
+
+ /**
+ * Add the given entry to the head of the linked list and if the LRU cache size
+ * exceeds the max limit, evict tail of the LRU linked list.
+ *
+ * @param entry Block entry to add.
+ */
+ private void addToLinkedListAndEvictIfRequired(Entry entry) {
+ blocksLock.writeLock().lock();
+ try {
+ addToHeadOfLinkedList(entry);
+ entryListSize++;
+ if (entryListSize > maxBlocksCount && !closed.get()) {
+ Entry elementToPurge = tail;
+ tail = tail.getPrevious();
+ if (tail == null) {
+ tail = head;
+ }
+ tail.setNext(null);
+ elementToPurge.setPrevious(null);
+ deleteBlockFileAndEvictCache(elementToPurge);
+ }
+ } finally {
+ blocksLock.writeLock().unlock();
+ }
+ }
+
+ /**
+ * Delete cache file as part of the block cache LRU eviction.
+ *
+ * @param elementToPurge Block entry to evict.
+ */
+ private void deleteBlockFileAndEvictCache(Entry elementToPurge) {
+ boolean lockAcquired = elementToPurge.takeLock(Entry.LockType.WRITE,
+ PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT,
+ PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
+ if (!lockAcquired) {
+ LOG.error("Cache file {} deletion would not be attempted as write lock could not"
+ + " be acquired within {} {}", elementToPurge.path,
+ PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT,
+ PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
+ } else {
+ try {
+ if (Files.deleteIfExists(elementToPurge.path)) {
+ entryListSize--;
+ prefetchingStatistics.blockRemovedFromFileCache();
+ blocks.remove(elementToPurge.blockNumber);
+ }
+ } catch (IOException e) {
+ LOG.warn("Failed to delete cache file {}", elementToPurge.path, e);
+ } finally {
+ elementToPurge.releaseLock(Entry.LockType.WRITE);
+ }
+ }
}
private static final Set<? extends OpenOption> CREATE_OPTIONS =
@@ -337,30 +486,38 @@ protected Path getCacheFilePath(final Configuration conf,
public void close() throws IOException {
if (closed.compareAndSet(false, true)) {
LOG.debug(getStats());
- int numFilesDeleted = 0;
-
- for (Entry entry : blocks.values()) {
- boolean lockAcquired = entry.takeLock(Entry.LockType.WRITE, PREFETCH_WRITE_LOCK_TIMEOUT,
- PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
- if (!lockAcquired) {
- LOG.error("Cache file {} deletion would not be attempted as write lock could not"
- + " be acquired within {} {}", entry.path, PREFETCH_WRITE_LOCK_TIMEOUT,
- PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
- continue;
- }
- try {
- Files.deleteIfExists(entry.path);
+ deleteCacheFiles();
+ }
+ }
+
+ /**
+ * Delete cache files as part of the close call.
+ */
+ private void deleteCacheFiles() {
+ int numFilesDeleted = 0;
+ for (Entry entry : blocks.values()) {
+ boolean lockAcquired =
+ entry.takeLock(Entry.LockType.WRITE, PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT,
+ PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
+ if (!lockAcquired) {
+ LOG.error("Cache file {} deletion would not be attempted as write lock could not"
+ + " be acquired within {} {}", entry.path,
+ PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT,
+ PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
+ continue;
+ }
+ try {
+ if (Files.deleteIfExists(entry.path)) {
prefetchingStatistics.blockRemovedFromFileCache();
numFilesDeleted++;
- } catch (IOException e) {
- LOG.warn("Failed to delete cache file {}", entry.path, e);
- } finally {
- entry.releaseLock(Entry.LockType.WRITE);
}
+ } catch (IOException e) {
+ LOG.warn("Failed to delete cache file {}", entry.path, e);
+ } finally {
+ entry.releaseLock(Entry.LockType.WRITE);
}
-
- LOG.debug("Prefetch cache close: Deleted {} cache files", numFilesDeleted);
}
+ LOG.debug("Prefetch cache close: Deleted {} cache files", numFilesDeleted);
}
@Override
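The linked-list changes above implement a bounded LRU over the block entries: every get/put moves the entry to the head, and once the count exceeds maxBlocksCount the tail entry is evicted. A standalone sketch of that bookkeeping, simplified (no per-entry locks, no cache files; all names are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    public class LruSketch {
      private static final class Node {
        final int key;
        Node prev, next;
        Node(int key) { this.key = key; }
      }

      private final Map<Integer, Node> nodes = new HashMap<>();
      private final int maxCount;
      private Node head, tail;

      LruSketch(int maxCount) { this.maxCount = maxCount; }

      /** Record an access: move the node to the head of the list. */
      void touch(Node n) {
        if (n == head) { return; }
        if (n.prev != null) { n.prev.next = n.next; }
        if (n.next != null) { n.next.prev = n.prev; }
        if (n == tail) { tail = n.prev; }
        n.prev = null;
        n.next = head;
        if (head != null) { head.prev = n; }
        head = n;
        if (tail == null) { tail = n; }
      }

      /** Add a new entry, evicting the least recently used one when full. */
      void add(int key) {
        Node n = new Node(key);
        nodes.put(key, n);
        touch(n);
        if (nodes.size() > maxCount) {
          Node evicted = tail;
          tail = evicted.prev;
          if (tail != null) { tail.next = null; }
          nodes.remove(evicted.key);
          System.out.println("evicted block " + evicted.key);
        }
      }

      public static void main(String[] args) {
        LruSketch cache = new LruSketch(2);
        cache.add(1);
        cache.add(2);
        cache.touch(cache.nodes.get(1)); // block 1 becomes most recently used
        cache.add(3);                    // evicts block 2
      }
    }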
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index dd543deb8a5a5..5f841bd233d34 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -974,6 +974,14 @@
+<property>
+  <name>fs.trash.clean.trashroot.enable</name>
+  <value>false</value>
+  <description>Whether to clean up directories and files in the trash home
+    which are not under a checkpoint directory.
+  </description>
+</property>
+
<property>
  <name>fs.protected.directories</name>
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 5b8c10b3fa6f9..30c9a31fda4ea 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -32,6 +32,7 @@
import java.util.Random;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
import org.junit.After;
import org.junit.Before;
@@ -786,6 +787,55 @@ public void testTrashEmptier() throws Exception {
emptierThread.join();
}
+ /**
+   * Test that the trash emptier deletes directories under the trash root
+   * that are not checkpoint directories.
+ * @throws Exception
+ */
+ @Test()
+ public void testTrashEmptierCleanDirNotInCheckpointDir() throws Exception {
+ Configuration conf = new Configuration();
+    // Trash with 12-second deletes and 6-second checkpoints.
+ conf.set(FS_TRASH_INTERVAL_KEY, "0.2"); // 12 seconds
+ conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
+ conf.set(FS_TRASH_CHECKPOINT_INTERVAL_KEY, "0.1"); // 6 seconds
+ conf.setBoolean(FS_TRASH_CLEAN_TRASHROOT_ENABLE_KEY, true);
+ FileSystem fs = FileSystem.getLocal(conf);
+ conf.set("fs.default.name", fs.getUri().toString());
+
+ Trash trash = new Trash(conf);
+
+ // Start Emptier in background.
+ Runnable emptier = trash.getEmptier();
+ Thread emptierThread = new Thread(emptier);
+ emptierThread.start();
+
+ FsShell shell = new FsShell();
+ shell.setConf(conf);
+ shell.init();
+
+    // Make sure the .Trash dir exists.
+ mkdir(fs, shell.getCurrentTrashDir());
+ assertTrue(fs.exists(shell.getCurrentTrashDir()));
+ // Create a directory under .Trash directly.
+ Path myPath = new Path(shell.getCurrentTrashDir().getParent(), "test_dirs");
+ mkdir(fs, myPath);
+ assertTrue(fs.exists(myPath));
+
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+ @Override
+ public Boolean get() {
+ try {
+ return !fs.exists(myPath);
+ } catch (IOException e) {
+ // Do nothing.
+ }
+ return false;
+ }
+ }, 6000, 60000);
+ emptierThread.interrupt();
+ emptierThread.join();
+ }
+
@After
public void tearDown() throws IOException {
File trashDir = new File(TEST_DIR.toUri().getPath());
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/impl/prefetch/TestBlockCache.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/impl/prefetch/TestBlockCache.java
index 3b60c1c795336..b32ce20a37354 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/impl/prefetch/TestBlockCache.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/impl/prefetch/TestBlockCache.java
@@ -45,7 +45,7 @@ public class TestBlockCache extends AbstractHadoopTestBase {
public void testArgChecks() throws Exception {
// Should not throw.
BlockCache cache =
- new SingleFilePerBlockCache(EmptyPrefetchingStatistics.getInstance());
+ new SingleFilePerBlockCache(EmptyPrefetchingStatistics.getInstance(), 2);
ByteBuffer buffer = ByteBuffer.allocate(16);
@@ -55,7 +55,7 @@ public void testArgChecks() throws Exception {
intercept(NullPointerException.class, null,
- () -> new SingleFilePerBlockCache(null));
+ () -> new SingleFilePerBlockCache(null, 2));
}
@@ -63,7 +63,7 @@ public void testArgChecks() throws Exception {
@Test
public void testPutAndGet() throws Exception {
BlockCache cache =
- new SingleFilePerBlockCache(EmptyPrefetchingStatistics.getInstance());
+ new SingleFilePerBlockCache(EmptyPrefetchingStatistics.getInstance(), 2);
ByteBuffer buffer1 = ByteBuffer.allocate(BUFFER_SIZE);
for (byte i = 0; i < BUFFER_SIZE; i++) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 81c40fe6346cc..90dcd83ddba1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -75,6 +75,7 @@
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -867,6 +868,15 @@ public static ErasureCodingPolicyInfo toECPolicyInfo(Map<?, ?> m) {
return new ErasureCodingPolicyInfo(ecPolicy, ecPolicyState);
}
+  public static Map<String, String> getErasureCodeCodecs(Map<?, ?> json) {
+    Map<String, String> map = new HashMap<>();
+    Map<?, ?> m = (Map<?, ?>) json.get("ErasureCodingCodecs");
+ m.forEach((key, value) -> {
+ map.put((String) key, (String) value);
+ });
+ return map;
+ }
+
private static List toDiffList(
List<?> objs) {
if (objs == null) {
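For reference, the new getErasureCodeCodecs() simply copies the map found under the top-level "ErasureCodingCodecs" key of the decoded WebHDFS response into a String-to-String map. A small usage sketch; the codec/coder values below are made up for illustration:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hdfs.web.JsonUtilClient;

    public class ErasureCodeCodecsParseSketch {
      public static void main(String[] args) {
        // Shape of the (already JSON-decoded) response body; values are illustrative.
        Map<String, Object> json = new HashMap<>();
        json.put("ErasureCodingCodecs", Collections.singletonMap("rs", "rs_native"));

        Map<String, String> codecs = JsonUtilClient.getErasureCodeCodecs(json);
        System.out.println(codecs); // {rs=rs_native}
      }
    }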
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index f65ec98a9d782..5210692ab324e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -2206,6 +2206,19 @@ Collection decodeResponse(Map<?, ?> json) {
}.run();
}
+  public Map<String, String> getAllErasureCodingCodecs()
+ throws IOException {
+ statistics.incrementReadOps(1);
+ storageStatistics.incrementOpCounter(OpType.GET_EC_CODECS);
+ final HttpOpParam.Op op = GetOpParam.Op.GETECCODECS;
+ return new FsPathResponseRunner