diff --git a/src/EvenireDB.Benchmark/EventsProviderBenckmarks.cs b/src/EvenireDB.Benchmark/EventsProviderBenckmarks.cs
index 7aca598..474f2cc 100644
--- a/src/EvenireDB.Benchmark/EventsProviderBenckmarks.cs
+++ b/src/EvenireDB.Benchmark/EventsProviderBenckmarks.cs
@@ -3,6 +3,7 @@
 using EvenireDB;
 using EvenireDB.Common;
 using EvenireDB.Utils;
+using Microsoft.Extensions.Logging.Abstractions;
 using System.Threading.Channels;
 
 public class EventsProviderBenckmarks
@@ -30,10 +31,11 @@ public void GlobalSetup()
         var repo = new FileEventsRepository(repoConfig, factory);
         var cache = new LRUCache<Guid, CachedEvents>(this.EventsCount);
+        var logger = new NullLogger<EventsProvider>();
         var channel = Channel.CreateUnbounded<IncomingEventsGroup>();
-        _sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer);
+        _sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer, logger);
 
         var events = Enumerable.Range(0, (int)this.EventsCount).Select(i => factory.Create(Guid.NewGuid(), "lorem", _data)).ToArray();
         Task.WaitAll(_sut.AppendAsync(_streamId, events).AsTask());
diff --git a/src/EvenireDB.Server/IncomingEventsPersistenceWorker.cs b/src/EvenireDB.Server/IncomingEventsPersistenceWorker.cs
index 909e7d9..c0315b9 100644
--- a/src/EvenireDB.Server/IncomingEventsPersistenceWorker.cs
+++ b/src/EvenireDB.Server/IncomingEventsPersistenceWorker.cs
@@ -1,47 +1,44 @@
-using EvenireDB;
-using System.Threading.Channels;
+using System.Threading.Channels;
 
-public class IncomingEventsPersistenceWorker : BackgroundService
+namespace EvenireDB.Server
 {
-    private readonly ChannelReader<IncomingEventsGroup> _reader;
-    private readonly IEventsRepository _repo;
-    private readonly ILogger<IncomingEventsPersistenceWorker> _logger;
-
-    public IncomingEventsPersistenceWorker(ChannelReader<IncomingEventsGroup> reader, IEventsRepository repo, ILogger<IncomingEventsPersistenceWorker> logger)
+    public class IncomingEventsPersistenceWorker : BackgroundService
     {
-        _reader = reader ?? throw new ArgumentNullException(nameof(reader));
-        _repo = repo ?? throw new ArgumentNullException(nameof(repo));
-        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
-    }
+        private readonly ChannelReader<IncomingEventsGroup> _reader;
+        private readonly IEventsRepository _repo;
+        private readonly ILogger<IncomingEventsPersistenceWorker> _logger;
 
-    protected override async Task ExecuteAsync(CancellationToken cancellationToken)
-    {
-        await Task.Factory.StartNew(async () =>
+        public IncomingEventsPersistenceWorker(ChannelReader<IncomingEventsGroup> reader, IEventsRepository repo, ILogger<IncomingEventsPersistenceWorker> logger)
         {
-            await this.ExecuteAsyncCore(cancellationToken).ConfigureAwait(false);
-        }, cancellationToken);
-    }
+            _reader = reader ?? throw new ArgumentNullException(nameof(reader));
+            _repo = repo ?? throw new ArgumentNullException(nameof(repo));
+            _logger = logger ?? throw new ArgumentNullException(nameof(logger));
+        }
 
-    private async Task ExecuteAsyncCore(CancellationToken cancellationToken)
-    {
-        while (!cancellationToken.IsCancellationRequested || await _reader.WaitToReadAsync(cancellationToken))
+        protected override async Task ExecuteAsync(CancellationToken cancellationToken)
         {
-            while (_reader.TryRead(out IncomingEventsGroup? group) && group is not null)
+            await Task.Factory.StartNew(async () =>
             {
-                try
-                {
-                    await _repo.AppendAsync(group.AggregateId, group.Events, cancellationToken)
-                        .ConfigureAwait(false);
-                }
-                catch (Exception ex)
+                await ExecuteAsyncCore(cancellationToken).ConfigureAwait(false);
+            }, cancellationToken);
+        }
+
+        private async Task ExecuteAsyncCore(CancellationToken cancellationToken)
+        {
+            while (!cancellationToken.IsCancellationRequested || await _reader.WaitToReadAsync(cancellationToken))
+            {
+                while (_reader.TryRead(out IncomingEventsGroup? group) && group is not null)
                 {
-                    _logger.LogError(
-                        ex,
-                        "an error has occurred while persisting events group for aggregate {AggregateId}: {Error}",
-                        group.AggregateId,
-                        ex.Message);
+                    try
+                    {
+                        await _repo.AppendAsync(group.AggregateId, group.Events, cancellationToken)
+                            .ConfigureAwait(false);
+                    }
+                    catch (Exception ex)
+                    {
+                        _logger.EventsGroupPersistenceError(group.AggregateId, ex.Message);
+                    }
                 }
-            }
             }
         }
     }
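For context, the worker above drains a ChannelReader<IncomingEventsGroup> that the rest of the server writes to. The registration of the channel and of the worker is not part of this changeset; a minimal sketch of how they are presumably wired together (service names assumed, repository registration omitted):

    // illustrative sketch only -- the channel endpoints and the worker are shared through DI
    var builder = WebApplication.CreateBuilder(args);
    var channel = Channel.CreateUnbounded<IncomingEventsGroup>();
    builder.Services
        .AddSingleton(channel.Reader)   // consumed by IncomingEventsPersistenceWorker
        .AddSingleton(channel.Writer)   // consumed by EventsProvider
        .AddHostedService<IncomingEventsPersistenceWorker>();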
diff --git a/src/EvenireDB.Server/LogMessages.cs b/src/EvenireDB.Server/LogMessages.cs
new file mode 100644
index 0000000..53db424
--- /dev/null
+++ b/src/EvenireDB.Server/LogMessages.cs
@@ -0,0 +1,23 @@
+namespace EvenireDB.Server
+{
+    public static partial class LogMessages
+    {
+        [LoggerMessage(
+            EventId = 1,
+            Level = LogLevel.Warning,
+            Message = "Memory usage is {MemoryUsage} bytes, more than the allowed value: {MaxAllowedAllocatedBytes}. Dropping some cached streams")]
+        public static partial void HighMemoryUsageDetected(this ILogger logger, long memoryUsage, long maxAllowedAllocatedBytes);
+
+        [LoggerMessage(
+            EventId = 2,
+            Level = LogLevel.Debug,
+            Message = "Memory usage is {MemoryUsage} / {MaxAllowedAllocatedBytes} bytes")]
+        public static partial void MemoryUsageBelowTreshold(this ILogger logger, long memoryUsage, long maxAllowedAllocatedBytes);
+
+        [LoggerMessage(
+            EventId = 3,
+            Level = LogLevel.Error,
+            Message = "an error has occurred while persisting events group for stream {StreamId}: {Error}")]
+        public static partial void EventsGroupPersistenceError(this ILogger logger, Guid streamId, string error);
+    }
+}
\ No newline at end of file
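The three methods above are declarations only: the [LoggerMessage] attribute drives the Microsoft.Extensions.Logging source generator, which emits the method bodies at compile time, so call sites invoke them as ordinary ILogger extension methods. A small illustrative sketch, not part of this changeset (NullLogger is used only to keep the snippet self-contained):

    using EvenireDB.Server;
    using Microsoft.Extensions.Logging;
    using Microsoft.Extensions.Logging.Abstractions;

    ILogger logger = NullLogger.Instance;                              // any ILogger works
    logger.HighMemoryUsageDetected(2_000_000_000, 1_000_000_000);      // EventId 1, Warning
    logger.EventsGroupPersistenceError(Guid.NewGuid(), "disk full");   // EventId 3, Error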
diff --git a/src/EvenireDB.Server/MemoryWatcher.cs b/src/EvenireDB.Server/MemoryWatcher.cs
new file mode 100644
index 0000000..2d4d4bf
--- /dev/null
+++ b/src/EvenireDB.Server/MemoryWatcher.cs
@@ -0,0 +1,51 @@
+using EvenireDB.Utils;
+using System.Diagnostics;
+
+namespace EvenireDB.Server
+{
+    public record MemoryWatcherSettings(
+        TimeSpan Interval,
+        long MaxAllowedAllocatedBytes);
+
+    public class MemoryWatcher : BackgroundService
+    {
+        private readonly MemoryWatcherSettings _settings;
+        private readonly ILogger<MemoryWatcher> _logger;
+        private readonly IServiceProvider _sp;
+
+        public MemoryWatcher(MemoryWatcherSettings settings, ILogger<MemoryWatcher> logger, IServiceProvider sp)
+        {
+            _settings = settings;
+            _logger = logger;
+            _sp = sp;
+        }
+
+        protected override async Task ExecuteAsync(CancellationToken stoppingToken)
+        {
+            while (!stoppingToken.IsCancellationRequested)
+            {
+                using var process = Process.GetCurrentProcess();
+
+                bool needDrop = process.PrivateMemorySize64 > _settings.MaxAllowedAllocatedBytes;
+                if (needDrop)
+                {
+                    _logger.HighMemoryUsageDetected(process.PrivateMemorySize64, _settings.MaxAllowedAllocatedBytes);
+
+                    using var scope = _sp.CreateScope();
+                    var cache = scope.ServiceProvider.GetRequiredService<ICache<Guid, CachedEvents>>();
+
+                    var dropCount = cache.Count / 3;
+                    cache.DropOldest(dropCount);
+
+                    GC.Collect();
+                }
+                else
+                {
+                    _logger.MemoryUsageBelowTreshold(process.PrivateMemorySize64, _settings.MaxAllowedAllocatedBytes);
+                }
+
+                await Task.Delay(_settings.Interval, stoppingToken);
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/EvenireDB.Server/Program.cs b/src/EvenireDB.Server/Program.cs
index a36e6fe..fc8498c 100644
--- a/src/EvenireDB.Server/Program.cs
+++ b/src/EvenireDB.Server/Program.cs
@@ -1,4 +1,5 @@
 using EvenireDB;
+using EvenireDB.Server;
 using EvenireDB.Server.Routes;
 using EvenireDB.Utils;
 using Microsoft.AspNetCore.Server.Kestrel.Core;
@@ -40,7 +41,7 @@
     .AddSingleton<ICache<Guid, CachedEvents>>(ctx =>
     {
         var serverConfig = ctx.GetRequiredService<IOptions<ServerConfig>>().Value;
-        return new LRUCache<Guid, CachedEvents>(serverConfig.CacheCapacity);
+        return new LRUCache<Guid, CachedEvents>(serverConfig.MaxInMemoryStreamsCount);
     })
     .AddSingleton(ctx =>
     {
@@ -72,7 +73,12 @@
         return new FileEventsRepositoryConfig(dataPath, serverConfig.MaxEventsPageSizeFromDisk);
     })
     .AddSingleton()
-    .AddHostedService<IncomingEventsPersistenceWorker>();
+    .AddHostedService<IncomingEventsPersistenceWorker>()
+    .AddSingleton(ctx =>
+    {
+        var serverConfig = ctx.GetRequiredService<IOptions<ServerConfig>>().Value;
+        return new MemoryWatcherSettings(serverConfig.MemoryWatcherInterval, serverConfig.MaxAllowedAllocatedBytes);
+    }).AddHostedService<MemoryWatcher>();
 
 var version = Assembly.GetExecutingAssembly().GetName().Version;
diff --git a/src/EvenireDB.Server/ServerConfig.cs b/src/EvenireDB.Server/ServerConfig.cs
index 5828cab..a74994f 100644
--- a/src/EvenireDB.Server/ServerConfig.cs
+++ b/src/EvenireDB.Server/ServerConfig.cs
@@ -1,10 +1,5 @@
 internal record class ServerConfig
 {
-    /// <summary>
-    /// max number of streams to cache in memory
-    /// </summary>
-    public uint CacheCapacity { get; init; } = 1000;
-
     /// <summary>
     /// max page size returned to clients
     /// </summary>
@@ -24,4 +19,17 @@ internal record class ServerConfig
     public int HttpPort { get; init; } = 16281;
 
     public int GrpcPort { get; init; } = 16282;
+
+    /// <summary>
+    /// max number of streams to cache in memory
+    /// </summary>
+    public uint MaxInMemoryStreamsCount { get; init; } = 1000;
+
+    /// <summary>
+    /// max allowed memory allocated by the process. If exceeded, the system will try to recover
+    /// some memory by dropping some cached streams
+    /// </summary>
+    public long MaxAllowedAllocatedBytes { get; init; } = 1_000_000_000; // TODO: consider making this a function of max allowed streams count and max event data size
+
+    public TimeSpan MemoryWatcherInterval { get; init; } = TimeSpan.FromMinutes(2);
 }
\ No newline at end of file
diff --git a/src/EvenireDB.Server/appsettings.Development.json b/src/EvenireDB.Server/appsettings.Development.json
index ba56948..25d45d6 100644
--- a/src/EvenireDB.Server/appsettings.Development.json
+++ b/src/EvenireDB.Server/appsettings.Development.json
@@ -1,12 +1,17 @@
 {
   "Logging": {
+    "Console": {
+      "IncludeScopes": true
+    },
     "LogLevel": {
-      "Default": "Information",
-      "Microsoft.AspNetCore": "Warning"
+      "Default": "Warning",
+      "EvenireDB": "Trace"
     }
   },
   "Server": {
     "HttpPort": 5001,
-    "GrpcPort": 5002
+    "GrpcPort": 5002,
+    "MemoryWatcherInterval": "0:00:10",
+    "MaxAllowedAllocatedBytes": 50000000
   }
 }
diff --git a/src/EvenireDB.Server/appsettings.json b/src/EvenireDB.Server/appsettings.json
index 1f78df5..90b36f4 100644
--- a/src/EvenireDB.Server/appsettings.json
+++ b/src/EvenireDB.Server/appsettings.json
@@ -1,8 +1,8 @@
 {
   "Logging": {
     "LogLevel": {
-      "Default": "Information",
-      "Microsoft.AspNetCore": "Warning"
+      "Default": "Warning",
+      "EvenireDB": "Error"
     }
   },
   "AllowedHosts": "*",
@@ -11,6 +11,6 @@
     "MaxPageSizeToClient": 100,
     "MaxEventsPageSizeFromDisk": 100,
     "MaxEventDataSize": 500000,
-    "DataFolder": "./data"
+    "DataFolder": "./data"
   }
 }
diff --git a/src/EvenireDB/EvenireDB.csproj b/src/EvenireDB/EvenireDB.csproj
index 9ea733e..37e3f77 100644
--- a/src/EvenireDB/EvenireDB.csproj
+++ b/src/EvenireDB/EvenireDB.csproj
@@ -8,6 +8,10 @@
     true
+
+
+
+
diff --git a/src/EvenireDB/EventsProvider.cs b/src/EvenireDB/EventsProvider.cs
index e42736b..9641fd1 100644
--- a/src/EvenireDB/EventsProvider.cs
+++ b/src/EvenireDB/EventsProvider.cs
@@ -1,5 +1,6 @@
 using EvenireDB.Common;
 using EvenireDB.Utils;
+using Microsoft.Extensions.Logging;
 using System.Runtime.CompilerServices;
 using System.Threading.Channels;
 
@@ -15,21 +16,26 @@ public class EventsProvider
     private readonly EventsProviderConfig _config;
     private readonly ChannelWriter<IncomingEventsGroup> _writer;
     private readonly IEventsRepository _repo;
+    private readonly ILogger<EventsProvider> _logger;
 
     public EventsProvider(
-        EventsProviderConfig config,
-        IEventsRepository repo,
-        ICache<Guid, CachedEvents> cache,
-        ChannelWriter<IncomingEventsGroup> writer)
+        EventsProviderConfig config,
+        IEventsRepository repo,
+        ICache<Guid, CachedEvents> cache,
+        ChannelWriter<IncomingEventsGroup> writer,
+        ILogger<EventsProvider> logger)
     {
         _cache = cache ?? throw new ArgumentNullException(nameof(cache));
         _config = config ?? throw new ArgumentNullException(nameof(config));
         _writer = writer ?? throw new ArgumentNullException(nameof(writer));
         _repo = repo ?? throw new ArgumentNullException(nameof(repo));
+        _logger = logger ?? throw new ArgumentNullException(nameof(logger));
     }
 
     private async ValueTask<CachedEvents> EventsFactory(Guid streamId, CancellationToken cancellationToken)
     {
+        _logger.ReadingStreamFromRepository(streamId);
+
         var persistedEvents = new List<IEvent>();
         await foreach (var @event in _repo.ReadAsync(streamId, cancellationToken))
             persistedEvents.Add(@event);
@@ -119,7 +125,7 @@ public async ValueTask AppendAsync(Guid streamId, IEnumerable<
     private void UpdateCache(Guid streamId, IEnumerable<IEvent> incomingEvents, CachedEvents entry)
     {
         entry.Events.AddRange(incomingEvents);
-        _cache.Update(streamId, entry);
+        _cache.AddOrUpdate(streamId, entry);
     }
 
     private static bool HasDuplicateEvent(IEnumerable<IEvent> incomingEvents, CachedEvents entry, out IEvent? duplicate)
diff --git a/src/EvenireDB/LogMessages.cs b/src/EvenireDB/LogMessages.cs
new file mode 100644
index 0000000..30cb4dc
--- /dev/null
+++ b/src/EvenireDB/LogMessages.cs
@@ -0,0 +1,13 @@
+using Microsoft.Extensions.Logging;
+
+namespace EvenireDB
+{
+    public static partial class LogMessages
+    {
+        [LoggerMessage(
+            EventId = 0,
+            Level = LogLevel.Warning,
+            Message = "Reading stream '{StreamId}' from repository")]
+        public static partial void ReadingStreamFromRepository(this ILogger logger, Guid streamId);
+    }
+}
\ No newline at end of file
diff --git a/src/EvenireDB/Utils/ICache.cs b/src/EvenireDB/Utils/ICache.cs
index ff961a8..d933237 100644
--- a/src/EvenireDB/Utils/ICache.cs
+++ b/src/EvenireDB/Utils/ICache.cs
@@ -3,6 +3,9 @@
     public interface ICache<TKey, TValue> where TKey : notnull
     {
         ValueTask<TValue> GetOrAddAsync(TKey key, Func<TKey, CancellationToken, ValueTask<TValue>> valueFactory, CancellationToken cancellationToken = default);
-        void Update(TKey key, TValue value);
+        void AddOrUpdate(TKey key, TValue value);
+        void DropOldest(uint maxCount);
+
+        public uint Count { get; }
     }
 }
\ No newline at end of file
diff --git a/src/EvenireDB/Utils/LRUCache.cs b/src/EvenireDB/Utils/LRUCache.cs
index ca8b4d5..95758a0 100644
--- a/src/EvenireDB/Utils/LRUCache.cs
+++ b/src/EvenireDB/Utils/LRUCache.cs
@@ -1,3 +1,5 @@
+using System.Threading;
+
 namespace EvenireDB.Utils
 {
     // TODO: drop entries if memory consumption is approaching a threshold
@@ -17,7 +19,8 @@ private class Node
         private Node? _head;
         private Node? _tail;
 
-        private object _moveToHeadLock = new();
+        private object _moveToHeadLock = new();
+        private object _dropLock = new();
         private object _getSemaphoresLock = new();
 
         private readonly Dictionary<TKey, SemaphoreSlim> _semaphores;
@@ -31,19 +34,55 @@ public LRUCache(uint capacity)
             _semaphores = new Dictionary<TKey, SemaphoreSlim>((int)capacity);
         }
 
+        public void DropOldest(uint maxCount)
+        {
+            if (this.Count == 0)
+                return;
+
+            uint countToRemove = Math.Min(maxCount, this.Count);
+
+            lock (_dropLock)
+            {
+                if (countToRemove == this.Count)
+                {
+                    _head = null;
+                    _tail = null;
+                    _cache.Clear();
+                    _semaphores.Clear();
+                    return;
+                }
+
+                var curr = _tail;
+                while (countToRemove > 0 && curr != null)
+                {
+                    _cache.Remove(curr.Key);
+                    _semaphores.Remove(curr.Key);
+
+                    curr = curr.Previous;
+                    countToRemove--;
+                }
+
+                curr.Next = null;
+            }
+        }
+
         public bool ContainsKey(TKey key) => _cache.ContainsKey(key);
 
-        public void Update(TKey key, TValue value)
+        public void AddOrUpdate(TKey key, TValue value)
         {
-            if (!_cache.TryGetValue(key, out var node))
-                throw new KeyNotFoundException($"invalid key: {key}");
-
             SemaphoreSlim semaphore = GetSemaphore(key);
             semaphore.Wait();
 
-            node.Value = value;
-            MoveToHead(node);
+            if (_cache.TryGetValue(key, out var node))
+            {
+                node.Value = value;
+                MoveToHead(node);
+            }
+            else
+            {
+                Add(key, value);
+            }
 
             semaphore.Release();
         }
@@ -71,12 +110,17 @@ public async ValueTask GetOrAddAsync(
             return node.Value;
         }
 
-        private async ValueTask<LRUCache<TKey, TValue>.Node?> AddAsync(
+        private async ValueTask<Node> AddAsync(
            TKey key,
            Func<TKey, CancellationToken, ValueTask<TValue>> valueFactory,
            CancellationToken cancellationToken)
        {
            var value = await valueFactory(key, cancellationToken).ConfigureAwait(false);
+            return Add(key, value);
+        }
+
+        private Node Add(TKey key, TValue value)
+        {
            if (_cache.Count == _capacity)
            {
                _cache.Remove(_tail.Key);
@@ -142,7 +186,7 @@ private void MoveToHead(Node node)
             }
         }
 
-        public int Count => _cache.Count;
+        public uint Count => (uint)_cache.Count;
 
         protected virtual void Dispose(bool disposing)
         {
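Compared to the old Update method, AddOrUpdate no longer throws on a missing key, and DropOldest trims entries starting from the least recently used end of the list. A short illustrative sketch of the new semantics, not part of this changeset (string/int type arguments assumed):

    var cache = new LRUCache<string, int>(capacity: 4);
    cache.AddOrUpdate("a", 1);    // key missing -> added
    cache.AddOrUpdate("b", 2);
    cache.AddOrUpdate("a", 10);   // key present -> value updated and "a" moved to the head
    cache.DropOldest(1);          // removes the least recently used entry, i.e. "b"
    // cache.Count is now 1 and cache.ContainsKey("b") is false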
diff --git a/tests/EvenireDB.Tests/EventsProviderTests.cs b/tests/EvenireDB.Tests/EventsProviderTests.cs
index ed5e090..b9b5d65 100644
--- a/tests/EvenireDB.Tests/EventsProviderTests.cs
+++ b/tests/EvenireDB.Tests/EventsProviderTests.cs
@@ -1,4 +1,5 @@
 using EvenireDB.Common;
+using Microsoft.Extensions.Logging;
 using System.Threading.Channels;
 
 namespace EvenireDB.Tests
@@ -10,10 +11,11 @@ public class EventsProviderTests
         [Fact]
         public async Task ReadAsync_should_return_empty_collection_when_data_not_available()
         {
-            var repo = Substitute.For<IEventsRepository>();
+            var repo = Substitute.For<IEventsRepository>();
             var cache = Substitute.For<ICache<Guid, CachedEvents>>();
             var channel = Channel.CreateUnbounded<IncomingEventsGroup>();
-            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer);
+            var logger = Substitute.For<ILogger<EventsProvider>>();
+            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer, logger);
 
             var events = await sut.ReadAsync(Guid.NewGuid(), StreamPosition.Start)
                 .ToListAsync();
@@ -35,7 +37,8 @@ public async Task ReadAsync_should_pull_data_from_repo_on_cache_miss()
             var repo = Substitute.For<IEventsRepository>();
             var channel = Channel.CreateUnbounded<IncomingEventsGroup>();
 
-            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer);
+            var logger = Substitute.For<ILogger<EventsProvider>>();
+            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer, logger);
 
             var events = await sut.ReadAsync(streamId, StreamPosition.Start)
                 .ToListAsync();
@@ -59,7 +62,8 @@ public async Task ReadAsync_should_be_able_to_read_backwards()
             var repo = Substitute.For<IEventsRepository>();
             var channel = Channel.CreateUnbounded<IncomingEventsGroup>();
 
-            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer);
+            var logger = Substitute.For<ILogger<EventsProvider>>();
+            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer, logger);
 
             var expectedEvents = sourceEvents.Skip(142).ToArray().Reverse();
@@ -85,7 +89,8 @@ public async Task ReadAsync_should_be_able_to_read_backwards_from_position()
             var repo = Substitute.For<IEventsRepository>();
             var channel = Channel.CreateUnbounded<IncomingEventsGroup>();
 
-            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer);
+            var logger = Substitute.For<ILogger<EventsProvider>>();
+            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer, logger);
 
             var offset = 11;
             StreamPosition startPosition = (uint)(sourceEvents.Count - offset);
@@ -115,7 +120,8 @@ public async Task ReadAsync_should_be_able_to_read_last_page_backwards_from_posi
             var repo = Substitute.For<IEventsRepository>();
             var channel = Channel.CreateUnbounded<IncomingEventsGroup>();
 
-            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer);
+            var logger = Substitute.For<ILogger<EventsProvider>>();
+            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer, logger);
 
             var startPosition = EventsProviderConfig.Default.MaxPageSize / 2;
@@ -143,7 +149,8 @@ public async Task ReadAsync_should_be_able_to_read_forward()
             var repo = Substitute.For<IEventsRepository>();
             var channel = Channel.CreateUnbounded<IncomingEventsGroup>();
 
-            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer);
+            var logger = Substitute.For<ILogger<EventsProvider>>();
+            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer, logger);
 
             var expectedEvents = sourceEvents.Take((int)EventsProviderConfig.Default.MaxPageSize);
@@ -169,7 +176,8 @@ public async Task ReadAsync_should_be_able_to_read_forward_from_position()
             var repo = Substitute.For<IEventsRepository>();
             var channel = Channel.CreateUnbounded<IncomingEventsGroup>();
 
-            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer);
+            var logger = Substitute.For<ILogger<EventsProvider>>();
+            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer, logger);
 
             StreamPosition startPosition = 11;
             var expectedEvents = sourceEvents.Skip(11).Take((int)EventsProviderConfig.Default.MaxPageSize);
@@ -193,7 +201,8 @@ public async Task AppendAsync_should_fail_when_events_duplicated()
             var repo = Substitute.For<IEventsRepository>();
             var cache = new LRUCache<Guid, CachedEvents>(1000);
             var channel = Channel.CreateUnbounded<IncomingEventsGroup>();
-            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer);
+            var logger = Substitute.For<ILogger<EventsProvider>>();
+            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer, logger);
             await sut.AppendAsync(streamId, expectedEvents);
             var result = await sut.AppendAsync(streamId, new[] { expectedEvents[0] });
             result.Should().BeOfType();
@@ -215,7 +224,8 @@ public async Task AppendAsync_should_succeed_when_events_valid()
             var repo = Substitute.For<IEventsRepository>();
             var cache = new LRUCache<Guid, CachedEvents>(1000);
             var channel = Channel.CreateUnbounded<IncomingEventsGroup>();
-            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer);
+            var logger = Substitute.For<ILogger<EventsProvider>>();
+            var sut = new EventsProvider(EventsProviderConfig.Default, repo, cache, channel.Writer, logger);
 
             var result = await sut.AppendAsync(streamId, expectedEvents);
             result.Should().BeOfType();
diff --git a/tests/EvenireDB.Tests/LRUCacheTests.cs b/tests/EvenireDB.Tests/LRUCacheTests.cs
index 582cda3..bf588a7 100644
--- a/tests/EvenireDB.Tests/LRUCacheTests.cs
+++ b/tests/EvenireDB.Tests/LRUCacheTests.cs
@@ -86,47 +86,84 @@ public async Task GetOrAdd_should_put_item_on_front()
         }
 
         [Fact]
-        public void Update_should_throw_when_key_not_existing()
+        public async Task Upsert_should_update_existing_value()
         {
-            var sut = new LRUCache<string, int>(1);
-            Assert.Throws<KeyNotFoundException>(() => sut.Update("key", 1));
-        }
-
-        [Fact]
-        public async Task Update_should_update_value()
-        {
-            var sut = new LRUCache<string, int>(1);
+            var sut = new LRUCache<string, int>(10);
             await sut.GetOrAddAsync("key", (_,_) => ValueTask.FromResult(1));
 
-            sut.Update("key", 2);
+            sut.AddOrUpdate("key", 2);
 
             var result = await sut.GetOrAddAsync("key", (_, _) => ValueTask.FromResult(1));
             result.Should().Be(2);
         }
 
         [Fact]
-        public async Task Update_should_not_increase_size()
+        public async Task Upsert_should_not_increase_size_when_key_exists()
         {
-            var sut = new LRUCache<string, int>(1);
+            var sut = new LRUCache<string, int>(10);
             await sut.GetOrAddAsync("key", (_,_) => ValueTask.FromResult(1));
 
-            sut.Update("key", 2);
+            sut.AddOrUpdate("key", 2);
             sut.Count.Should().Be(1);
 
-            sut.Update("key", 2);
+            sut.AddOrUpdate("key", 2);
             sut.Count.Should().Be(1);
         }
 
         [Fact]
-        public async Task Update_should_put_item_on_front()
+        public void Upsert_should_add_when_item_does_not_exist()
+        {
+            var sut = new LRUCache<string, int>(10);
+
+            sut.AddOrUpdate("key1", 71);
+            sut.Count.Should().Be(1);
+
+            sut.AddOrUpdate("key2", 42);
+            sut.Count.Should().Be(2);
+        }
+
+        [Fact]
+        public void Upsert_should_put_item_on_front()
         {
             var sut = new LRUCache<string, int>(2);
-            await sut.GetOrAddAsync("key1", (_, _) => ValueTask.FromResult(1));
-            await sut.GetOrAddAsync("key2", (_, _) => ValueTask.FromResult(2));
+
+            sut.AddOrUpdate("key1", 1);
+            sut.AddOrUpdate("key2", 2);
 
-            sut.Update("key1", 42);
+            sut.AddOrUpdate("key1", 42);
 
-            await sut.GetOrAddAsync("key3", (_, _) => ValueTask.FromResult(3));
+            sut.AddOrUpdate("key3", 3);
+
             sut.ContainsKey("key2").Should().BeFalse();
         }
+
+        [Fact]
+        public async Task DropOldest_should_not_exceed_count()
+        {
+            uint capacity = 10;
+            var sut = new LRUCache<string, int>(capacity);
+
+            for(int i=0;i<capacity/2;i++)
+                await sut.GetOrAddAsync(i.ToString(), (_, _) => ValueTask.FromResult(i));
+
+            sut.Count.Should().Be(capacity / 2);
+
+            sut.DropOldest(sut.Count + 1);
+
+            sut.Count.Should().Be(0);
+        }
+
+        [Fact]
+        public async Task DropOldest_should_remove_count_item()
+        {
+            uint capacity = 10;
+            var sut = new LRUCache<string, int>(capacity);
+
+            for (int i = 0; i < capacity; i++)
+                await sut.GetOrAddAsync(i.ToString(), (_, _) => ValueTask.FromResult(i));
+
+            sut.DropOldest(4);
+
+            sut.Count.Should().Be(6);
+        }
     }
 }
\ No newline at end of file