rpcLogFile)
throws AbruptExitException {
if (actionContextProvider != null) {
actionContextProvider.afterCommand();
diff --git a/src/main/java/com/google/devtools/build/lib/remote/disk/BUILD b/src/main/java/com/google/devtools/build/lib/remote/disk/BUILD
index 57271f3b0a45e2..000f08716d4f79 100644
--- a/src/main/java/com/google/devtools/build/lib/remote/disk/BUILD
+++ b/src/main/java/com/google/devtools/build/lib/remote/disk/BUILD
@@ -15,12 +15,17 @@ java_library(
name = "disk",
srcs = glob(["*.java"]),
deps = [
+ "//src/main/java/com/google/devtools/build/lib/concurrent",
"//src/main/java/com/google/devtools/build/lib/exec:spawn_runner",
"//src/main/java/com/google/devtools/build/lib/remote:store",
"//src/main/java/com/google/devtools/build/lib/remote/common",
"//src/main/java/com/google/devtools/build/lib/remote/common:cache_not_found_exception",
+ "//src/main/java/com/google/devtools/build/lib/remote/options",
"//src/main/java/com/google/devtools/build/lib/remote/util",
+ "//src/main/java/com/google/devtools/build/lib/server:idle_task",
+ "//src/main/java/com/google/devtools/build/lib/util:string",
"//src/main/java/com/google/devtools/build/lib/vfs",
+ "//third_party:flogger",
"//third_party:guava",
"//third_party:jsr305",
"//third_party/protobuf:protobuf_java",
diff --git a/src/main/java/com/google/devtools/build/lib/remote/disk/DiskCacheClient.java b/src/main/java/com/google/devtools/build/lib/remote/disk/DiskCacheClient.java
index 4af5d9fc85fb36..742cfde10e38aa 100644
--- a/src/main/java/com/google/devtools/build/lib/remote/disk/DiskCacheClient.java
+++ b/src/main/java/com/google/devtools/build/lib/remote/disk/DiskCacheClient.java
@@ -60,8 +60,13 @@
* when they collide.
*
* The mtime of an entry reflects the most recent time the entry was stored *or* retrieved. This
- * property may be used to trim the disk cache to the most recently used entries. However, it's not
- * safe to trim the cache at the same time a Bazel process is accessing it.
+ * property may be used to garbage collect the disk cache by deleting the least recently accessed
+ * entries. This may be done by Bazel itself (see {@link DiskCacheGarbageCollectorIdleTask}), by
+ * another Bazel process sharing the disk cache, or by an external process. Although we could have
+ * arranged for an ongoing garbage collection to block a concurrent build, we judge it to not be
+ * worth the extra complexity; assuming that the collection policy is not overly aggressive, the
+ * likelihood of a race condition is fairly small, and an affected build is able to automatically
+ * recover by retrying.
*/
public class DiskCacheClient implements RemoteCacheClient {
diff --git a/src/main/java/com/google/devtools/build/lib/remote/disk/DiskCacheGarbageCollector.java b/src/main/java/com/google/devtools/build/lib/remote/disk/DiskCacheGarbageCollector.java
new file mode 100644
index 00000000000000..a57c2bf5fff105
--- /dev/null
+++ b/src/main/java/com/google/devtools/build/lib/remote/disk/DiskCacheGarbageCollector.java
@@ -0,0 +1,309 @@
+// Copyright 2024 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.google.devtools.build.lib.remote.disk;
+
+import static com.google.common.collect.ImmutableSet.toImmutableSet;
+import static com.google.devtools.build.lib.remote.util.Utils.bytesCountToDisplayString;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ComparisonChain;
+import com.google.common.collect.ImmutableSet;
+import com.google.devtools.build.lib.concurrent.AbstractQueueVisitor;
+import com.google.devtools.build.lib.concurrent.ErrorClassifier;
+import com.google.devtools.build.lib.vfs.Dirent;
+import com.google.devtools.build.lib.vfs.FileStatus;
+import com.google.devtools.build.lib.vfs.IORuntimeException;
+import com.google.devtools.build.lib.vfs.Path;
+import com.google.devtools.build.lib.vfs.Symlinks;
+import java.io.IOException;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.LongAdder;
+
+/**
+ * A garbage collector for the disk cache.
+ *
+ * <p>Garbage collection works by enumerating the entire contents of the disk cache, identifying
+ * candidates for deletion according to a {@link CollectionPolicy}, and deleting them. This process
+ * may take a significant amount of time on large disk caches and slow filesystems, and may be
+ * interrupted at any time.
+ */
+public final class DiskCacheGarbageCollector {
+  private static final ImmutableSet<String> EXCLUDED_DIRS = ImmutableSet.of("tmp", "gc");
+
+ /**
+ * Describes a disk cache entry.
+ *
+ * @param path path relative to the root directory of the disk cache
+ * @param size file size in bytes
+ * @param mtime file modification time
+ */
+ private record Entry(String path, long size, long mtime) {}
+
+ /**
+ * Determines which entries should be collected.
+ *
+ * @param maxSizeBytes the maximum total size in bytes, or empty for no size limit
+ * @param maxAge the maximum age of cache entries, or empty for no age limit
+ */
+  public record CollectionPolicy(Optional<Long> maxSizeBytes, Optional<Duration> maxAge) {
+
+ // Sort older entries before newer ones, tie breaking by path. This causes AC entries to be
+ // sorted before CAS entries with the same age, making it less likely for garbage collection
+ // to break referential integrity in the event that mtime resolution is insufficient.
+    private static final Comparator<Entry> COMPARATOR =
+ (x, y) ->
+ ComparisonChain.start()
+ .compare(x.mtime(), y.mtime())
+ .compare(x.path(), y.path())
+ .result();
+
+ /**
+ * Returns the entries to be deleted.
+ *
+ * @param entries the full list of entries
+ */
+    List<Entry> getEntriesToDelete(List<Entry> entries) {
+ entries.sort(COMPARATOR);
+
+ long excessSizeBytes = getExcessSizeBytes(entries);
+ long timeCutoff = getTimeCutoff();
+
+ int i = 0;
+ for (; i < entries.size(); i++) {
+ if (excessSizeBytes <= 0 && entries.get(i).mtime() >= timeCutoff) {
+ break;
+ }
+ excessSizeBytes -= entries.get(i).size();
+ }
+
+ return entries.subList(0, i);
+ }
+
+    private long getExcessSizeBytes(List<Entry> entries) {
+ if (maxSizeBytes.isEmpty()) {
+ return 0;
+ }
+ long currentSizeBytes = entries.stream().mapToLong(Entry::size).sum();
+ return currentSizeBytes - maxSizeBytes.get();
+ }
+
+ private long getTimeCutoff() {
+ if (maxAge.isEmpty()) {
+ return 0;
+ }
+ return Instant.now().minus(maxAge.get()).toEpochMilli();
+ }
+ }
+
+ private record DeletionStats(long deletedEntries, long deletedBytes, boolean concurrentUpdate) {}
+
+ /** Stats for a garbage collection run. */
+ public record CollectionStats(
+ long totalEntries,
+ long totalBytes,
+ long deletedEntries,
+ long deletedBytes,
+ boolean concurrentUpdate) {
+
+ /** Returns a human-readable summary. */
+ public String displayString() {
+ return "Deleted %d of %d files, reclaimed %s of %s%s"
+ .formatted(
+ deletedEntries(),
+ totalEntries(),
+ bytesCountToDisplayString(deletedBytes()),
+ bytesCountToDisplayString(totalBytes()),
+ concurrentUpdate() ? " (concurrent update detected)" : "");
+ }
+ }
+
+ private final Path root;
+ private final CollectionPolicy policy;
+ private final ExecutorService executorService;
+  private final ImmutableSet<Path> excludedDirs;
+
+ /**
+ * Creates a new garbage collector.
+ *
+ * @param root the root directory of the disk cache
+ * @param executorService the executor service to schedule I/O operations onto
+ * @param policy the garbage collection policy to use
+ */
+ public DiskCacheGarbageCollector(
+ Path root, ExecutorService executorService, CollectionPolicy policy) {
+ this.root = root;
+ this.policy = policy;
+ this.executorService = executorService;
+ this.excludedDirs = EXCLUDED_DIRS.stream().map(root::getChild).collect(toImmutableSet());
+ }
+
+ @VisibleForTesting
+ public Path getRoot() {
+ return root;
+ }
+
+ @VisibleForTesting
+ public CollectionPolicy getPolicy() {
+ return policy;
+ }
+
+ /**
+ * Runs garbage collection.
+ *
+ * @throws IOException if an I/O error occurred
+ * @throws InterruptedException if the thread was interrupted
+ */
+ public CollectionStats run() throws IOException, InterruptedException {
+ // Acquire an exclusive lock to prevent two Bazel processes from simultaneously running
+ // garbage collection, which can waste resources and lead to incorrect results.
+ try (var lock = DiskCacheLock.getExclusive(root.getRelative("gc/lock"))) {
+ return runUnderLock();
+ }
+ }
+
+ private CollectionStats runUnderLock() throws IOException, InterruptedException {
+ EntryScanner scanner = new EntryScanner();
+ EntryDeleter deleter = new EntryDeleter();
+
+    List<Entry> allEntries = scanner.scan();
+    List<Entry> entriesToDelete = policy.getEntriesToDelete(allEntries);
+
+ for (Entry entry : entriesToDelete) {
+ deleter.delete(entry);
+ }
+
+ DeletionStats deletionStats = deleter.await();
+
+ return new CollectionStats(
+ allEntries.size(),
+ allEntries.stream().mapToLong(Entry::size).sum(),
+ deletionStats.deletedEntries(),
+ deletionStats.deletedBytes(),
+ deletionStats.concurrentUpdate());
+ }
+
+ /** Lists all disk cache entries, performing I/O in parallel. */
+ private final class EntryScanner extends AbstractQueueVisitor {
+    private final ArrayList<Entry> entries = new ArrayList<>();
+
+ EntryScanner() {
+ super(
+ executorService,
+ ExecutorOwnership.SHARED,
+ ExceptionHandlingMode.FAIL_FAST,
+ ErrorClassifier.DEFAULT);
+ }
+
+ /** Lists all disk cache entries. */
+    List<Entry> scan() throws IOException, InterruptedException {
+ execute(() -> visitDirectory(root));
+ try {
+ awaitQuiescence(true);
+ } catch (IORuntimeException e) {
+ throw e.getCauseIOException();
+ }
+ return entries;
+ }
+
+ private void visitDirectory(Path path) {
+ try {
+ for (Dirent dirent : path.readdir(Symlinks.NOFOLLOW)) {
+ Path childPath = path.getChild(dirent.getName());
+ if (dirent.getType().equals(Dirent.Type.FILE)) {
+ // The file may be gone by the time we stat it.
+ FileStatus status = childPath.statIfFound();
+ if (status != null) {
+ Entry entry =
+ new Entry(
+ childPath.relativeTo(root).getPathString(),
+ status.getSize(),
+ status.getLastModifiedTime());
+ synchronized (entries) {
+ entries.add(entry);
+ }
+ }
+ } else if (dirent.getType().equals(Dirent.Type.DIRECTORY)
+ && !excludedDirs.contains(childPath)) {
+ execute(() -> visitDirectory(childPath));
+ }
+ // Deliberately ignore other file types, which should never occur in a well-formed cache.
+ }
+ } catch (IOException e) {
+ throw new IORuntimeException(e);
+ }
+ }
+ }
+
+ /** Deletes disk cache entries, performing I/O in parallel. */
+ private final class EntryDeleter extends AbstractQueueVisitor {
+ private final LongAdder deletedEntries = new LongAdder();
+ private final LongAdder deletedBytes = new LongAdder();
+ private final AtomicBoolean concurrentUpdate = new AtomicBoolean(false);
+
+ EntryDeleter() {
+ super(
+ executorService,
+ ExecutorOwnership.SHARED,
+ ExceptionHandlingMode.FAIL_FAST,
+ ErrorClassifier.DEFAULT);
+ }
+
+ /** Enqueues an entry to be deleted. */
+ void delete(Entry entry) {
+ execute(
+ () -> {
+ Path path = root.getRelative(entry.path());
+ try {
+ FileStatus status = path.statIfFound();
+ if (status == null) {
+ // The entry is already gone.
+ concurrentUpdate.set(true);
+ return;
+ }
+ if (status.getLastModifiedTime() != entry.mtime()) {
+ // The entry was likely accessed by a build since we statted it.
+ concurrentUpdate.set(true);
+ return;
+ }
+ if (path.delete()) {
+ deletedEntries.increment();
+ deletedBytes.add(entry.size());
+ } else {
+ // The entry is already gone.
+ concurrentUpdate.set(true);
+ }
+ } catch (IOException e) {
+ throw new IORuntimeException(e);
+ }
+ });
+ }
+
+ /** Waits for all enqueued deletions to complete. */
+ DeletionStats await() throws IOException, InterruptedException {
+ try {
+ awaitQuiescence(true);
+ } catch (IORuntimeException e) {
+ throw e.getCauseIOException();
+ }
+ return new DeletionStats(deletedEntries.sum(), deletedBytes.sum(), concurrentUpdate.get());
+ }
+ }
+}
diff --git a/src/main/java/com/google/devtools/build/lib/remote/disk/DiskCacheGarbageCollectorIdleTask.java b/src/main/java/com/google/devtools/build/lib/remote/disk/DiskCacheGarbageCollectorIdleTask.java
new file mode 100644
index 00000000000000..7c03e16fbb6b3c
--- /dev/null
+++ b/src/main/java/com/google/devtools/build/lib/remote/disk/DiskCacheGarbageCollectorIdleTask.java
@@ -0,0 +1,102 @@
+// Copyright 2024 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.google.devtools.build.lib.remote.disk;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.flogger.GoogleLogger;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.devtools.build.lib.remote.disk.DiskCacheGarbageCollector.CollectionPolicy;
+import com.google.devtools.build.lib.remote.disk.DiskCacheGarbageCollector.CollectionStats;
+import com.google.devtools.build.lib.remote.options.RemoteOptions;
+import com.google.devtools.build.lib.server.IdleTask;
+import com.google.devtools.build.lib.vfs.Path;
+import java.io.IOException;
+import java.time.Duration;
+import java.util.Optional;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import javax.annotation.Nullable;
+
+/** An {@link IdleTask} to run a {@link DiskCacheGarbageCollector}. */
+public final class DiskCacheGarbageCollectorIdleTask implements IdleTask {
+ private static final GoogleLogger logger = GoogleLogger.forEnclosingClass();
+
+ private final Duration delay;
+ private final DiskCacheGarbageCollector gc;
+
+ private static final ExecutorService executorService =
+ Executors.newCachedThreadPool(
+ new ThreadFactoryBuilder().setNameFormat("disk-cache-gc-%d").build());
+
+ private DiskCacheGarbageCollectorIdleTask(Duration delay, DiskCacheGarbageCollector gc) {
+ this.delay = delay;
+ this.gc = gc;
+ }
+
+ /**
+ * Creates a new {@link DiskCacheGarbageCollectorIdleTask} according to the options.
+ *
+ * @param remoteOptions the remote options
+ * @param workingDirectory the working directory
+ * @param executorService the executor service to schedule I/O operations onto
+ * @return the idle task, or null if garbage collection is disabled
+ */
+ @Nullable
+ public static DiskCacheGarbageCollectorIdleTask create(
+ RemoteOptions remoteOptions, Path workingDirectory) {
+ if (remoteOptions.diskCache == null || remoteOptions.diskCache.isEmpty()) {
+ return null;
+ }
+    Optional<Long> maxSizeBytes = Optional.empty();
+ if (remoteOptions.diskCacheGcMaxSize > 0) {
+ maxSizeBytes = Optional.of(remoteOptions.diskCacheGcMaxSize);
+ }
+    Optional<Duration> maxAge = Optional.empty();
+ if (!remoteOptions.diskCacheGcMaxAge.isZero()) {
+ maxAge = Optional.of(remoteOptions.diskCacheGcMaxAge);
+ }
+ Duration delay = remoteOptions.diskCacheGcIdleDelay;
+ if (maxSizeBytes.isEmpty() && maxAge.isEmpty()) {
+ return null;
+ }
+ var policy = new CollectionPolicy(maxSizeBytes, maxAge);
+ var gc =
+ new DiskCacheGarbageCollector(
+ workingDirectory.getRelative(remoteOptions.diskCache), executorService, policy);
+ return new DiskCacheGarbageCollectorIdleTask(delay, gc);
+ }
+
+ @VisibleForTesting
+ public DiskCacheGarbageCollector getGarbageCollector() {
+ return gc;
+ }
+
+ @Override
+ public Duration delay() {
+ return delay;
+ }
+
+ @Override
+ public void run() {
+ try {
+ logger.atInfo().log("Disk cache garbage collection started");
+ CollectionStats stats = gc.run();
+ logger.atInfo().log("%s", stats.displayString());
+ } catch (IOException e) {
+ logger.atInfo().withCause(e).log("Disk cache garbage collection failed");
+ } catch (InterruptedException e) {
+ logger.atInfo().withCause(e).log("Disk cache garbage collection interrupted");
+ }
+ }
+}
diff --git a/src/main/java/com/google/devtools/build/lib/remote/disk/DiskCacheLock.java b/src/main/java/com/google/devtools/build/lib/remote/disk/DiskCacheLock.java
new file mode 100644
index 00000000000000..5fea966ff44d2c
--- /dev/null
+++ b/src/main/java/com/google/devtools/build/lib/remote/disk/DiskCacheLock.java
@@ -0,0 +1,100 @@
+// Copyright 2024 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.google.devtools.build.lib.remote.disk;
+
+import static java.nio.charset.StandardCharsets.ISO_8859_1;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.devtools.build.lib.vfs.Path;
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+import java.nio.channels.FileLock;
+import java.nio.charset.Charset;
+import java.nio.file.StandardOpenOption;
+
+/** Manages shared or exclusive access to the disk cache by concurrent processes. */
+public final class DiskCacheLock implements AutoCloseable {
+ private final FileChannel channel;
+ private final FileLock lock;
+
+ private DiskCacheLock(FileChannel channel, FileLock lock) {
+ this.channel = channel;
+ this.lock = lock;
+ }
+
+ /**
+ * Acquires shared access to the disk cache.
+ *
+ * @param path the path to the lock file
+ * @throws IOException if an error occurred, including the lock currently being exclusively held
+ * by another process
+ */
+ public static DiskCacheLock getShared(Path path) throws IOException {
+ return get(path, true);
+ }
+
+ /**
+ * Acquires exclusive access to the disk cache.
+ *
+ * @param path the path to the lock file
+ * @throws IOException if an error occurred, including the lock currently being exclusively held
+ * by another process
+ */
+ public static DiskCacheLock getExclusive(Path path) throws IOException {
+ return get(path, false);
+ }
+
+ private static DiskCacheLock get(Path path, boolean shared) throws IOException {
+ path.getParentDirectory().createDirectoryAndParents();
+ FileChannel channel =
+ FileChannel.open(
+ // Correctly handle non-ASCII paths by converting from the internal string encoding.
+ java.nio.file.Path.of(getPathStringForJavaIo(path)),
+ StandardOpenOption.READ,
+ StandardOpenOption.WRITE,
+ StandardOpenOption.CREATE);
+ FileLock lock = channel.tryLock(0, Long.MAX_VALUE, shared);
+ if (lock == null) {
+ throw new IOException(
+ "failed to acquire %s disk cache lock".formatted(shared ? "shared" : "exclusive"));
+ }
+ return new DiskCacheLock(channel, lock);
+ }
+
+ private static String getPathStringForJavaIo(Path path) {
+ return new String(
+ path.getPathString().getBytes(ISO_8859_1),
+ Charset.forName(System.getProperty("sun.jnu.encoding"), ISO_8859_1));
+ }
+
+ @VisibleForTesting
+ boolean isShared() {
+ return lock.isShared();
+ }
+
+ @VisibleForTesting
+ boolean isExclusive() {
+ return !isShared();
+ }
+
+ /** Releases access to the disk cache. */
+ @Override
+ public void close() throws IOException {
+ try {
+ lock.release();
+ } finally {
+ channel.close();
+ }
+ }
+}
diff --git a/src/main/java/com/google/devtools/build/lib/remote/options/RemoteOptions.java b/src/main/java/com/google/devtools/build/lib/remote/options/RemoteOptions.java
index 4161d60a610a8f..24fccce927e698 100644
--- a/src/main/java/com/google/devtools/build/lib/remote/options/RemoteOptions.java
+++ b/src/main/java/com/google/devtools/build/lib/remote/options/RemoteOptions.java
@@ -29,6 +29,8 @@
import com.google.devtools.common.options.Converters;
import com.google.devtools.common.options.Converters.AssignmentConverter;
import com.google.devtools.common.options.Converters.BooleanConverter;
+import com.google.devtools.common.options.Converters.ByteSizeConverter;
+import com.google.devtools.common.options.Converters.DurationConverter;
import com.google.devtools.common.options.EnumConverter;
import com.google.devtools.common.options.Option;
import com.google.devtools.common.options.OptionDocumentationCategory;
@@ -371,6 +373,47 @@ public RemoteBuildEventUploadModeConverter() {
+ "If the directory does not exist, it will be created.")
public PathFragment diskCache;
+ @Option(
+ name = "experimental_disk_cache_gc_idle_delay",
+ defaultValue = "5m",
+ documentationCategory = OptionDocumentationCategory.UNCATEGORIZED,
+ effectTags = {OptionEffectTag.UNKNOWN},
+ converter = DurationConverter.class,
+ help =
+ "How long the server must remain idle before a garbage collection of the disk cache"
+ + " occurs. To specify the garbage collection policy, set"
+ + " --experimental_disk_cache_gc_max_size and/or"
+ + " --experimental_disk_cache_gc_max_age.")
+ public Duration diskCacheGcIdleDelay;
+
+ @Option(
+ name = "experimental_disk_cache_gc_max_size",
+ defaultValue = "0",
+ documentationCategory = OptionDocumentationCategory.UNCATEGORIZED,
+ effectTags = {OptionEffectTag.UNKNOWN},
+ converter = ByteSizeConverter.class,
+ help =
+ "If set to a positive value, the disk cache will be periodically garbage collected to"
+ + " stay under this size. If set in conjunction with"
+ + " --experimental_disk_cache_gc_max_age, both criteria are applied. Garbage"
+              + " collection occurs in the background once the server has become idle, as"
+ + " determined by the --experimental_disk_cache_gc_idle_delay flag.")
+ public long diskCacheGcMaxSize;
+
+ @Option(
+ name = "experimental_disk_cache_gc_max_age",
+ defaultValue = "0",
+ documentationCategory = OptionDocumentationCategory.UNCATEGORIZED,
+ effectTags = {OptionEffectTag.UNKNOWN},
+ converter = DurationConverter.class,
+ help =
+ "If set to a positive value, the disk cache will be periodically garbage collected to"
+ + " remove entries older than this age. If set in conjunction with"
+ + " --experimental_disk_cache_gc_max_size, both criteria are applied. Garbage"
+              + " collection occurs in the background once the server has become idle, as"
+ + " determined by the --experimental_disk_cache_gc_idle_delay flag.")
+ public Duration diskCacheGcMaxAge;
+
@Option(
name = "experimental_guard_against_concurrent_changes",
defaultValue = "false",
diff --git a/src/test/java/com/google/devtools/build/lib/remote/RemoteModuleTest.java b/src/test/java/com/google/devtools/build/lib/remote/RemoteModuleTest.java
index 8d260d8ad9a507..7adcb9b9e240c6 100644
--- a/src/test/java/com/google/devtools/build/lib/remote/RemoteModuleTest.java
+++ b/src/test/java/com/google/devtools/build/lib/remote/RemoteModuleTest.java
@@ -43,6 +43,8 @@
import com.google.devtools.build.lib.exec.ExecutionOptions;
import com.google.devtools.build.lib.pkgcache.PackageOptions;
import com.google.devtools.build.lib.remote.circuitbreaker.FailureCircuitBreaker;
+import com.google.devtools.build.lib.remote.disk.DiskCacheGarbageCollector.CollectionPolicy;
+import com.google.devtools.build.lib.remote.disk.DiskCacheGarbageCollectorIdleTask;
import com.google.devtools.build.lib.remote.downloader.GrpcRemoteDownloader;
import com.google.devtools.build.lib.remote.options.RemoteOptions;
import com.google.devtools.build.lib.runtime.BlazeRuntime;
@@ -56,6 +58,7 @@
import com.google.devtools.build.lib.runtime.CommonCommandOptions;
import com.google.devtools.build.lib.runtime.commands.BuildCommand;
import com.google.devtools.build.lib.testutil.Scratch;
+import com.google.devtools.build.lib.testutil.TestUtils;
import com.google.devtools.build.lib.util.AbruptExitException;
import com.google.devtools.build.lib.vfs.DigestHashFunction;
import com.google.devtools.build.lib.vfs.FileSystem;
@@ -63,6 +66,7 @@
import com.google.devtools.common.options.Options;
import com.google.devtools.common.options.OptionsParser;
import com.google.devtools.common.options.OptionsParsingResult;
+import com.google.errorprone.annotations.CanIgnoreReturnValue;
import io.grpc.BindableService;
import io.grpc.Server;
import io.grpc.ServerInterceptors;
@@ -74,6 +78,7 @@
import java.net.URI;
import java.time.Duration;
import java.util.ArrayList;
+import java.util.Optional;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -529,10 +534,42 @@ public void bazelOutputService_noRemoteCache_exit() throws Exception {
}
}
- private void beforeCommand() throws IOException, AbruptExitException {
+ @Test
+ public void diskCacheGarbageCollectionIdleTask_disabled() throws Exception {
+ var diskCacheDir = TestUtils.createUniqueTmpDir(null);
+ remoteOptions.diskCache = diskCacheDir.asFragment();
+
+ var env = beforeCommand();
+
+ assertThat(env.getIdleTasks()).isEmpty();
+ }
+
+ @Test
+ public void diskCacheGarbageCollectionIdleTask_enabled() throws Exception {
+ var diskCacheDir = TestUtils.createUniqueTmpDir(null);
+ remoteOptions.diskCache = diskCacheDir.asFragment();
+ remoteOptions.diskCacheGcIdleDelay = Duration.ofMinutes(2);
+ remoteOptions.diskCacheGcMaxSize = 1234567890L;
+ remoteOptions.diskCacheGcMaxAge = Duration.ofDays(7);
+
+ var env = beforeCommand();
+
+ assertThat(env.getIdleTasks()).hasSize(1);
+ assertThat(env.getIdleTasks().get(0)).isInstanceOf(DiskCacheGarbageCollectorIdleTask.class);
+ var idleTask = (DiskCacheGarbageCollectorIdleTask) env.getIdleTasks().get(0);
+ assertThat(idleTask.delay()).isEqualTo(Duration.ofMinutes(2));
+ assertThat(idleTask.getGarbageCollector().getRoot().getPathString())
+ .isEqualTo(diskCacheDir.getPathString());
+ assertThat(idleTask.getGarbageCollector().getPolicy())
+ .isEqualTo(new CollectionPolicy(Optional.of(1234567890L), Optional.of(Duration.ofDays(7))));
+ }
+
+ @CanIgnoreReturnValue
+ private CommandEnvironment beforeCommand() throws IOException, AbruptExitException {
CommandEnvironment env = createTestCommandEnvironment(remoteModule, remoteOptions);
remoteModule.beforeCommand(env);
env.throwPendingException();
+ return env;
}
private void assertCircuitBreakerInstance() {
diff --git a/src/test/java/com/google/devtools/build/lib/remote/disk/BUILD b/src/test/java/com/google/devtools/build/lib/remote/disk/BUILD
index e437a4e125ed42..d4d949f2a76d7f 100644
--- a/src/test/java/com/google/devtools/build/lib/remote/disk/BUILD
+++ b/src/test/java/com/google/devtools/build/lib/remote/disk/BUILD
@@ -13,9 +13,22 @@ filegroup(
visibility = ["//src:__subpackages__"],
)
+java_binary(
+ name = "external_lock_helper",
+ testonly = True,
+ srcs = ["ExternalLockHelper.java"],
+ jvm_flags = [
+        # Prevent the JVM from polluting stdout and interfering with communication with the parent.
+ "-Xlog:disable",
+ "-Xlog:all=warning:stderr",
+ ],
+ main_class = "com.google.devtools.build.lib.remote.disk.ExternalLockHelper",
+)
+
java_test(
name = "disk",
srcs = glob(["*.java"]),
+ data = [":external_lock_helper"],
test_class = "com.google.devtools.build.lib.AllTests",
deps = [
"//src/main/java/com/google/devtools/build/lib/actions",
@@ -25,6 +38,8 @@ java_test(
"//src/main/java/com/google/devtools/build/lib/remote/common:cache_not_found_exception",
"//src/main/java/com/google/devtools/build/lib/remote/disk",
"//src/main/java/com/google/devtools/build/lib/remote/util",
+ "//src/main/java/com/google/devtools/build/lib/shell",
+ "//src/main/java/com/google/devtools/build/lib/util:os",
"//src/main/java/com/google/devtools/build/lib/vfs",
"//src/main/java/com/google/devtools/build/lib/vfs/bazel",
"//src/main/java/com/google/devtools/build/lib/vfs/inmemoryfs",
@@ -36,6 +51,7 @@ java_test(
"//third_party:mockito",
"//third_party:truth",
"//third_party/protobuf:protobuf_java",
+ "@bazel_tools//tools/java/runfiles",
"@remoteapis//:build_bazel_remote_execution_v2_remote_execution_java_proto",
],
)
diff --git a/src/test/java/com/google/devtools/build/lib/remote/disk/DiskCacheGarbageCollectorTest.java b/src/test/java/com/google/devtools/build/lib/remote/disk/DiskCacheGarbageCollectorTest.java
new file mode 100644
index 00000000000000..41b778aacb3d98
--- /dev/null
+++ b/src/test/java/com/google/devtools/build/lib/remote/disk/DiskCacheGarbageCollectorTest.java
@@ -0,0 +1,244 @@
+// Copyright 2024 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.google.devtools.build.lib.remote.disk;
+
+import static com.google.common.truth.Truth.assertThat;
+import static com.google.common.truth.Truth.assertWithMessage;
+import static org.junit.Assert.assertThrows;
+
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.devtools.build.lib.remote.disk.DiskCacheGarbageCollector.CollectionStats;
+import com.google.devtools.build.lib.testutil.TestUtils;
+import com.google.devtools.build.lib.vfs.Path;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Optional;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Tests for {@link DiskCacheGarbageCollector}. */
+@RunWith(JUnit4.class)
+public final class DiskCacheGarbageCollectorTest {
+
+ private final ExecutorService executorService =
+ MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
+
+ private Path rootDir;
+
+ record Entry(String path, long size, Instant mtime) {
+ static Entry of(String path, long size, Instant mtime) {
+ return new Entry(path, size, mtime);
+ }
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ rootDir = TestUtils.createUniqueTmpDir(null);
+ }
+
+ @Test
+ public void sizePolicy_noCollection() throws Exception {
+ writeFiles(
+ Entry.of("ac/123", kbytes(1), Instant.now()),
+ Entry.of("cas/456", kbytes(1), Instant.now()));
+
+ CollectionStats stats = runGarbageCollector(Optional.of(kbytes(2)), Optional.empty());
+
+ assertThat(stats).isEqualTo(new CollectionStats(2, kbytes(2), 0, 0, false));
+ assertFilesExist("ac/123", "cas/456");
+ }
+
+ @Test
+ public void sizePolicy_collectsOldest() throws Exception {
+ writeFiles(
+ Entry.of("ac/123", kbytes(1), daysAgo(1)),
+ Entry.of("cas/456", kbytes(1), daysAgo(2)),
+ Entry.of("ac/abc", kbytes(1), daysAgo(3)),
+ Entry.of("cas/def", kbytes(1), daysAgo(4)));
+
+ CollectionStats stats = runGarbageCollector(Optional.of(kbytes(2)), Optional.empty());
+
+ assertThat(stats).isEqualTo(new CollectionStats(4, kbytes(4), 2, kbytes(2), false));
+ assertFilesExist("ac/123", "cas/456");
+ assertFilesDoNotExist("ac/abc", "cas/def");
+ }
+
+ @Test
+ public void sizePolicy_tieBreakByPath() throws Exception {
+ writeFiles(
+ Entry.of("ac/123", kbytes(1), daysAgo(1)),
+ Entry.of("cas/456", kbytes(1), daysAgo(1)),
+ Entry.of("ac/abc", kbytes(1), daysAgo(1)),
+ Entry.of("cas/def", kbytes(1), daysAgo(1)));
+
+ CollectionStats stats = runGarbageCollector(Optional.of(kbytes(2)), Optional.empty());
+
+ assertThat(stats).isEqualTo(new CollectionStats(4, kbytes(4), 2, kbytes(2), false));
+ assertFilesExist("cas/456", "cas/def");
+ assertFilesDoNotExist("ac/123", "ac/abc");
+ }
+
+ @Test
+ public void agePolicy_noCollection() throws Exception {
+ writeFiles(
+ Entry.of("ac/123", kbytes(1), Instant.now()),
+ Entry.of("cas/456", kbytes(1), Instant.now()));
+
+ CollectionStats stats = runGarbageCollector(Optional.empty(), Optional.of(days(3)));
+
+ assertThat(stats).isEqualTo(new CollectionStats(2, kbytes(2), 0, 0, false));
+ assertFilesExist("ac/123", "cas/456");
+ }
+
+ @Test
+ public void agePolicy_collectsOldest() throws Exception {
+ writeFiles(
+ Entry.of("ac/123", kbytes(1), daysAgo(1)),
+ Entry.of("cas/456", kbytes(1), daysAgo(2)),
+ Entry.of("ac/abc", kbytes(1), daysAgo(4)),
+ Entry.of("cas/def", kbytes(1), daysAgo(5)));
+
+ CollectionStats stats = runGarbageCollector(Optional.empty(), Optional.of(Duration.ofDays(3)));
+
+ assertThat(stats).isEqualTo(new CollectionStats(4, kbytes(4), 2, kbytes(2), false));
+ assertFilesExist("ac/123", "cas/456");
+ assertFilesDoNotExist("ac/abc", "cas/def");
+ }
+
+ @Test
+ public void sizeAndAgePolicy_noCollection() throws Exception {
+ writeFiles(
+ Entry.of("ac/123", kbytes(1), Instant.now()),
+ Entry.of("cas/456", kbytes(1), Instant.now()));
+
+ CollectionStats stats = runGarbageCollector(Optional.of(kbytes(2)), Optional.of(days(1)));
+
+ assertThat(stats).isEqualTo(new CollectionStats(2, kbytes(2), 0, 0, false));
+ assertFilesExist("ac/123", "cas/456");
+ }
+
+ @Test
+ public void sizeAndAgePolicy_sizeMoreRestrictiveThanAge_collectsOldest() throws Exception {
+ writeFiles(
+ Entry.of("ac/123", kbytes(1), daysAgo(1)),
+ Entry.of("cas/456", kbytes(1), daysAgo(2)),
+ Entry.of("ac/abc", kbytes(1), daysAgo(3)),
+ Entry.of("cas/def", kbytes(1), daysAgo(4)));
+
+ CollectionStats stats = runGarbageCollector(Optional.of(kbytes(2)), Optional.of(days(4)));
+
+ assertThat(stats).isEqualTo(new CollectionStats(4, kbytes(4), 2, kbytes(2), false));
+ assertFilesExist("ac/123", "cas/456");
+ assertFilesDoNotExist("ac/abc", "cas/def");
+ }
+
+ @Test
+ public void sizeAndAgePolicy_ageMoreRestrictiveThanSize_collectsOldest() throws Exception {
+ writeFiles(
+ Entry.of("ac/123", kbytes(1), daysAgo(1)),
+ Entry.of("cas/456", kbytes(1), daysAgo(2)),
+ Entry.of("ac/abc", kbytes(1), daysAgo(3)),
+ Entry.of("cas/def", kbytes(1), daysAgo(4)));
+
+ CollectionStats stats = runGarbageCollector(Optional.of(kbytes(3)), Optional.of(days(3)));
+
+ assertThat(stats).isEqualTo(new CollectionStats(4, kbytes(4), 2, kbytes(2), false));
+ assertFilesExist("ac/123", "cas/456");
+ assertFilesDoNotExist("ac/abc", "cas/def");
+ }
+
+ @Test
+ public void ignoresTmpAndGcSubdirectories() throws Exception {
+ writeFiles(
+ Entry.of("gc/foo", kbytes(1), daysAgo(1)), Entry.of("tmp/foo", kbytes(1), daysAgo(1)));
+
+ CollectionStats stats = runGarbageCollector(Optional.of(1L), Optional.of(days(1)));
+
+ assertThat(stats).isEqualTo(new CollectionStats(0, 0, 0, 0, false));
+ assertFilesExist("gc/foo", "tmp/foo");
+ }
+
+ @Test
+ public void failsWhenLockIsAlreadyHeld() throws Exception {
+ try (var externalLock = ExternalLock.getShared(rootDir.getRelative("gc/lock"))) {
+ Exception e =
+ assertThrows(
+ Exception.class, () -> runGarbageCollector(Optional.of(1L), Optional.empty()));
+ assertThat(e).isInstanceOf(IOException.class);
+ assertThat(e).hasMessageThat().contains("failed to acquire exclusive disk cache lock");
+ }
+ }
+
+ private void assertFilesExist(String... relativePaths) throws IOException {
+ for (String relativePath : relativePaths) {
+ Path path = rootDir.getRelative(relativePath);
+ assertWithMessage("expected %s to exist".formatted(relativePath))
+ .that(path.exists())
+ .isTrue();
+ }
+ }
+
+ private void assertFilesDoNotExist(String... relativePaths) throws IOException {
+ for (String relativePath : relativePaths) {
+ Path path = rootDir.getRelative(relativePath);
+ assertWithMessage("expected %s to not exist".formatted(relativePath))
+ .that(path.exists())
+ .isFalse();
+ }
+ }
+
+ private CollectionStats runGarbageCollector(
+ Optional maxSizeBytes, Optional maxAge)
+ throws IOException, InterruptedException {
+ var gc =
+ new DiskCacheGarbageCollector(
+ rootDir,
+ executorService,
+ new DiskCacheGarbageCollector.CollectionPolicy(maxSizeBytes, maxAge));
+ return gc.run();
+ }
+
+ private void writeFiles(Entry... entries) throws IOException {
+ for (Entry entry : entries) {
+ writeFile(entry.path(), entry.size(), entry.mtime());
+ }
+ }
+
+ private void writeFile(String relativePath, long size, Instant mtime) throws IOException {
+ Path path = rootDir.getRelative(relativePath);
+ path.getParentDirectory().createDirectoryAndParents();
+ try (OutputStream out = path.getOutputStream()) {
+ out.write(new byte[(int) size]);
+ }
+ path.setLastModifiedTime(mtime.toEpochMilli());
+ }
+
+ private static Instant daysAgo(int days) {
+ return Instant.now().minus(Duration.ofDays(days));
+ }
+
+ private static Duration days(int days) {
+ return Duration.ofDays(days);
+ }
+
+ private static long kbytes(int kbytes) {
+ return kbytes * 1024L;
+ }
+}
diff --git a/src/test/java/com/google/devtools/build/lib/remote/disk/DiskCacheLockTest.java b/src/test/java/com/google/devtools/build/lib/remote/disk/DiskCacheLockTest.java
new file mode 100644
index 00000000000000..bd7223dd9e96c6
--- /dev/null
+++ b/src/test/java/com/google/devtools/build/lib/remote/disk/DiskCacheLockTest.java
@@ -0,0 +1,84 @@
+// Copyright 2024 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.google.devtools.build.lib.remote.disk;
+
+import static com.google.common.truth.Truth.assertThat;
+import static org.junit.Assert.assertThrows;
+
+import com.google.devtools.build.lib.testutil.TestUtils;
+import com.google.devtools.build.lib.vfs.Path;
+import java.io.IOException;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/** Tests for {@link DiskCacheLock}. */
+@RunWith(JUnit4.class)
+public final class DiskCacheLockTest {
+
+ private Path lockPath;
+
+ @Before
+ public void setUp() throws Exception {
+ var rootDir = TestUtils.createUniqueTmpDir(null);
+ lockPath = rootDir.getRelative("subdir/lock");
+ }
+
+ @Test
+ public void getShared_whenNotLocked_succeeds() throws Exception {
+ try (var lock = DiskCacheLock.getShared(lockPath)) {
+ assertThat(lock.isShared()).isTrue();
+ }
+ }
+
+ @Test
+ public void getShared_whenLockedForSharedUse_succeeds() throws Exception {
+ try (var externalLock = ExternalLock.getShared(lockPath);
+ var lock = DiskCacheLock.getShared(lockPath)) {
+ assertThat(lock.isShared()).isTrue();
+ }
+ }
+
+ @Test
+ public void getShared_whenLockedForExclusiveUse_fails() throws Exception {
+ try (var externalLock = ExternalLock.getExclusive(lockPath)) {
+ IOException e = assertThrows(IOException.class, () -> DiskCacheLock.getShared(lockPath));
+ assertThat(e).hasMessageThat().contains("failed to acquire shared disk cache lock");
+ }
+ }
+
+ @Test
+ public void getExclusive_whenNotLocked_succeeds() throws Exception {
+ try (var lock = DiskCacheLock.getExclusive(lockPath)) {
+ assertThat(lock.isExclusive()).isTrue();
+ }
+ }
+
+ @Test
+ public void getExclusive_whenLockedForSharedUse_fails() throws Exception {
+ try (var externalLock = ExternalLock.getShared(lockPath)) {
+ IOException e = assertThrows(IOException.class, () -> DiskCacheLock.getExclusive(lockPath));
+ assertThat(e).hasMessageThat().contains("failed to acquire exclusive disk cache lock");
+ }
+ }
+
+ @Test
+ public void getExclusive_whenLockedForExclusiveUse_fails() throws Exception {
+ try (var lock = ExternalLock.getExclusive(lockPath)) {
+ IOException e = assertThrows(IOException.class, () -> DiskCacheLock.getExclusive(lockPath));
+ assertThat(e).hasMessageThat().contains("failed to acquire exclusive disk cache lock");
+ }
+ }
+}
diff --git a/src/test/java/com/google/devtools/build/lib/remote/disk/ExternalLock.java b/src/test/java/com/google/devtools/build/lib/remote/disk/ExternalLock.java
new file mode 100644
index 00000000000000..7136104d014d7a
--- /dev/null
+++ b/src/test/java/com/google/devtools/build/lib/remote/disk/ExternalLock.java
@@ -0,0 +1,65 @@
+// Copyright 2024 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.google.devtools.build.lib.remote.disk;
+
+import com.google.common.collect.ImmutableList;
+import com.google.devtools.build.lib.shell.Subprocess;
+import com.google.devtools.build.lib.shell.SubprocessBuilder;
+import com.google.devtools.build.lib.util.OS;
+import com.google.devtools.build.lib.vfs.Path;
+import com.google.devtools.build.runfiles.Runfiles;
+import java.io.IOException;
+
+/**
+ * Runs an external process that holds a shared or exclusive lock on a file.
+ *
+ * This is needed for testing because the JVM does not allow overlapping locks.
+ */
+public class ExternalLock implements AutoCloseable {
+ private static final String HELPER_PATH =
+ "io_bazel/src/test/java/com/google/devtools/build/lib/remote/disk/external_lock_helper"
+ + (OS.getCurrent() == OS.WINDOWS ? ".exe" : "");
+
+ private final Subprocess subprocess;
+
+ static ExternalLock getShared(Path lockPath) throws IOException {
+ return new ExternalLock(lockPath, true);
+ }
+
+ static ExternalLock getExclusive(Path lockPath) throws IOException {
+ return new ExternalLock(lockPath, false);
+ }
+
+ ExternalLock(Path lockPath, boolean shared) throws IOException {
+ String binaryPath = Runfiles.preload().withSourceRepository("").rlocation(HELPER_PATH);
+ this.subprocess =
+ new SubprocessBuilder()
+ .setArgv(
+ ImmutableList.of(
+ binaryPath, lockPath.getPathString(), shared ? "shared" : "exclusive"))
+ .start();
+ // Wait for child to report that the lock has been acquired.
+ // We could read the entire stdout/stderr here to obtain additional debugging information,
+ // but for some reason that hangs forever on Windows, even if we close them on the child side.
+ if (subprocess.getInputStream().read() != '!') {
+ throw new IOException("external helper process failed");
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ // Wait for process to exit and release the lock.
+ subprocess.destroyAndWait();
+ }
+}
diff --git a/src/test/java/com/google/devtools/build/lib/remote/disk/ExternalLockHelper.java b/src/test/java/com/google/devtools/build/lib/remote/disk/ExternalLockHelper.java
new file mode 100644
index 00000000000000..f85e0db927f374
--- /dev/null
+++ b/src/test/java/com/google/devtools/build/lib/remote/disk/ExternalLockHelper.java
@@ -0,0 +1,49 @@
+// Copyright 2024 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.google.devtools.build.lib.remote.disk;
+
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+import java.nio.channels.FileLock;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+
+/** A helper binary that holds a shared or exclusive lock on a file. */
+public final class ExternalLockHelper {
+ private ExternalLockHelper() {}
+
+ public static void main(String[] args) throws IOException, InterruptedException {
+ if (args.length != 2) {
+ throw new IOException("bad arguments");
+ }
+ Path path = Path.of(args[0]).toAbsolutePath();
+ Files.createDirectories(path.getParent());
+ boolean shared = args[1].equals("shared");
+ try (FileChannel channel =
+ FileChannel.open(
+ path,
+ StandardOpenOption.READ,
+ StandardOpenOption.WRITE,
+ StandardOpenOption.CREATE);
+ FileLock lock = channel.lock(0, Long.MAX_VALUE, shared)) {
+ // Signal parent that the lock is held.
+ System.out.println("!");
+ // Block until killed by parent.
+ while (true) {
+ Thread.sleep(1000);
+ }
+ }
+ }
+}
diff --git a/src/test/shell/bazel/disk_cache_test.sh b/src/test/shell/bazel/disk_cache_test.sh
index f4f3c3012e9034..3b015ba291f9a2 100755
--- a/src/test/shell/bazel/disk_cache_test.sh
+++ b/src/test/shell/bazel/disk_cache_test.sh
@@ -127,4 +127,43 @@ EOF
expect_log "(cached) PASSED"
}
+function test_garbage_collection() {
+ local -r CACHE_DIR="${TEST_TMPDIR}/cache"
+ rm -rf "$CACHE_DIR"
+
+ mkdir -p a
+ touch a/BUILD
+
+ # Populate the disk cache with some fake entries totalling 4 MB in size.
+ create_file_with_size_and_mtime "${CACHE_DIR}/cas/123" 1M "202401010100"
+ create_file_with_size_and_mtime "${CACHE_DIR}/ac/456" 1M "202401010200"
+ create_file_with_size_and_mtime "${CACHE_DIR}/cas/abc" 1M "202401010300"
+ create_file_with_size_and_mtime "${CACHE_DIR}/ac/def" 1M "202401010400"
+
+ # Run a build and request an immediate garbage collection.
+ # Note that this build doesn't write anything to the disk cache.
+ bazel build --disk_cache="$CACHE_DIR" \
+ --experimental_disk_cache_gc_max_size=2M \
+ --experimental_disk_cache_gc_idle_delay=0 \
+ //a:BUILD >& $TEST_log || fail "Expected build to succeed"
+
+ # Give the idle task a bit of time to run.
+ sleep 1
+
+ # Expect the two oldest entries to have been deleted to reduce size to 2 MB.
+ assert_not_exists "${CACHE_DIR}/cas/123"
+ assert_not_exists "${CACHE_DIR}/ac/456"
+ assert_exists "${CACHE_DIR}/cas/abc"
+ assert_exists "${CACHE_DIR}/ac/def"
+}
+
+function create_file_with_size_and_mtime() {
+ local -r path=$1
+ local -r size=$2
+ local -r mtime=$3
+ mkdir -p "$(dirname "$path")"
+ dd if=/dev/zero of="$path" bs="$size" count=1
+ touch -t "$mtime" "$path"
+}
+
run_suite "disk cache test"
diff --git a/src/test/shell/integration/validation_actions_test.sh b/src/test/shell/integration/validation_actions_test.sh
index a17c279b0cf8a0..1dc9ff05ae5eb7 100755
--- a/src/test/shell/integration/validation_actions_test.sh
+++ b/src/test/shell/integration/validation_actions_test.sh
@@ -221,14 +221,6 @@ EOF
chmod +x validation_actions/validation_tool
}
-function assert_exists() {
- path="$1"
- [ -f "$path" ] && return 0
-
- fail "Expected file '$path' to exist, but it did not"
- return 1
-}
-
#### Tests #####################################################################
function test_validation_actions() {
diff --git a/src/test/shell/unittest.bash b/src/test/shell/unittest.bash
index c88ba2ceb85493..438e21896f4bfd 100644
--- a/src/test/shell/unittest.bash
+++ b/src/test/shell/unittest.bash
@@ -519,6 +519,32 @@ function assert_contains_n() {
return 1
}
+# Usage: assert_exists <file> [error-message]
+# Asserts that the file exists.
+function assert_exists() {
+ local file=$1
+ local message=${2:-"Expected '$file' to exist"}
+ if [[ -f "$file" ]]; then
+ return 0
+ fi
+
+ fail "$message"
+ return 1
+}
+
+# Usage: assert_not_exists <file> [error-message]
+# Asserts that the file does not exist.
+function assert_not_exists() {
+ local file=$1
+ local message=${2:-"Expected '$file' to not exist"}
+ if ! [[ -f "$file" ]]; then
+ return 0
+ fi
+
+ fail "$message"
+ return 1
+}
+
# Updates the global variables TESTS if
# sharding is enabled, i.e. ($TEST_TOTAL_SHARDS > 0).
function __update_shards() {
diff --git a/src/tools/diskcache/BUILD b/src/tools/diskcache/BUILD
new file mode 100644
index 00000000000000..90bcf16d69921b
--- /dev/null
+++ b/src/tools/diskcache/BUILD
@@ -0,0 +1,29 @@
+load("@rules_java//java:defs.bzl", "java_binary")
+
+package(
+ default_applicable_licenses = ["//:license"],
+ default_visibility = ["//:__pkg__"],
+)
+
+filegroup(
+ name = "srcs",
+ srcs = glob(["**"]),
+ visibility = ["//src:__subpackages__"],
+)
+
+java_binary(
+ name = "gc",
+ srcs = ["Gc.java"],
+ main_class = "diskcache.Gc",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//src/main/java/com/google/devtools/build/lib/remote/disk",
+ "//src/main/java/com/google/devtools/build/lib/remote/util",
+ "//src/main/java/com/google/devtools/build/lib/unix",
+ "//src/main/java/com/google/devtools/build/lib/util:os",
+ "//src/main/java/com/google/devtools/build/lib/vfs",
+ "//src/main/java/com/google/devtools/build/lib/windows",
+ "//src/main/java/com/google/devtools/common/options",
+ "//third_party:guava",
+ ],
+)
diff --git a/src/tools/diskcache/Gc.java b/src/tools/diskcache/Gc.java
new file mode 100644
index 00000000000000..c678e5bfeb9258
--- /dev/null
+++ b/src/tools/diskcache/Gc.java
@@ -0,0 +1,125 @@
+// Copyright 2024 The Bazel Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package diskcache;
+
+import static java.lang.Math.min;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.devtools.build.lib.remote.disk.DiskCacheGarbageCollector;
+import com.google.devtools.build.lib.remote.disk.DiskCacheGarbageCollector.CollectionPolicy;
+import com.google.devtools.build.lib.remote.disk.DiskCacheGarbageCollector.CollectionStats;
+import com.google.devtools.build.lib.unix.UnixFileSystem;
+import com.google.devtools.build.lib.util.OS;
+import com.google.devtools.build.lib.vfs.DigestHashFunction;
+import com.google.devtools.build.lib.vfs.FileSystem;
+import com.google.devtools.build.lib.windows.WindowsFileSystem;
+import com.google.devtools.common.options.Converters.ByteSizeConverter;
+import com.google.devtools.common.options.Option;
+import com.google.devtools.common.options.OptionDocumentationCategory;
+import com.google.devtools.common.options.OptionEffectTag;
+import com.google.devtools.common.options.OptionsBase;
+import com.google.devtools.common.options.OptionsParser;
+import java.time.Duration;
+import java.util.Optional;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+/** Standalone disk cache garbage collection utility. */
+public final class Gc {
+
+ private Gc() {}
+
+ /** Command line options. */
+ public static final class Options extends OptionsBase {
+
+ @Option(
+ name = "disk_cache",
+ defaultValue = "null",
+ documentationCategory = OptionDocumentationCategory.UNCATEGORIZED,
+ effectTags = {OptionEffectTag.UNKNOWN},
+ help = "Path to disk cache.")
+ public String diskCache;
+
+ @Option(
+ name = "max_size",
+ defaultValue = "0",
+ converter = ByteSizeConverter.class,
+ documentationCategory = OptionDocumentationCategory.UNCATEGORIZED,
+ effectTags = {OptionEffectTag.UNKNOWN},
+ help =
+ "The target size for the disk cache. If set to a positive value, older entries will be"
+ + " deleted as required to reach this size.")
+ public long maxSize;
+
+ @Option(
+ name = "max_age",
+ defaultValue = "0",
+ documentationCategory = OptionDocumentationCategory.UNCATEGORIZED,
+ effectTags = {OptionEffectTag.UNKNOWN},
+ help =
+ "The target age for the disk cache. If set to a positive value, entries exceeding this"
+ + " age will be deleted.")
+ public Duration maxAge;
+ }
+
+ private static final ExecutorService executorService =
+ Executors.newFixedThreadPool(
+ min(4, Runtime.getRuntime().availableProcessors()),
+ new ThreadFactoryBuilder().setNameFormat("disk-cache-gc-%d").build());
+
+ public static void main(String[] args) throws Exception {
+ OptionsParser op = OptionsParser.builder().optionsClasses(Options.class).build();
+ op.parseAndExitUponError(args);
+
+ Options options = op.getOptions(Options.class);
+
+ if (options.diskCache == null) {
+ System.err.println("--disk_cache must be specified.");
+ System.exit(1);
+ }
+
+ if (options.maxSize <= 0 && options.maxAge.isZero()) {
+ System.err.println(
+ "At least one of --max_size or --max_age must be set to a positive value.");
+ System.exit(1);
+ }
+
+ var root = getFileSystem().getPath(options.diskCache);
+ if (!root.isDirectory()) {
+ System.err.println("Expected --disk_cache to exist and be a directory.");
+ System.exit(1);
+ }
+
+ var policy =
+ new CollectionPolicy(
+ options.maxSize == 0 ? Optional.empty() : Optional.of(options.maxSize),
+ options.maxAge.isZero() ? Optional.empty() : Optional.of(options.maxAge));
+
+ var gc = new DiskCacheGarbageCollector(root, executorService, policy);
+
+ CollectionStats stats = gc.run();
+
+ System.out.println(stats.displayString());
+ System.exit(0);
+ }
+
+ private static FileSystem getFileSystem() {
+ // Note: the digest function is irrelevant, as the garbage collector scans the entire disk cache
+ // and never computes digests.
+ if (OS.getCurrent() == OS.WINDOWS) {
+ return new WindowsFileSystem(DigestHashFunction.SHA256, false);
+ }
+ return new UnixFileSystem(DigestHashFunction.SHA256, "");
+ }
+}
diff --git a/src/tools/diskcache/README.md b/src/tools/diskcache/README.md
new file mode 100644
index 00000000000000..4f50023b9f0fb2
--- /dev/null
+++ b/src/tools/diskcache/README.md
@@ -0,0 +1,5 @@
+# Standalone disk cache garbage collection utility
+
+This utility may be used to manually run a garbage collection on a disk cache,
+if more control over when garbage collection runs is desired than is afforded
+by the automatic garbage collection built into Bazel.