diff --git a/cache/cache.go b/cache/cache.go index a368b45af..24c65c5be 100644 --- a/cache/cache.go +++ b/cache/cache.go @@ -25,7 +25,7 @@ import ( "path/filepath" "sync" - "github.com/containerd/stargz-snapshotter/util/lrucache" + "github.com/containerd/stargz-snapshotter/util/cacheutil" "github.com/containerd/stargz-snapshotter/util/namedmutex" "github.com/hashicorp/go-multierror" "github.com/pkg/errors" @@ -51,11 +51,11 @@ type DirectoryCacheConfig struct { // DataCache is an on-memory cache of the data. // OnEvicted will be overridden and replaced for internal use. - DataCache *lrucache.Cache + DataCache *cacheutil.LRUCache // FdCache is a cache for opened file descriptors. // OnEvicted will be overridden and replaced for internal use. - FdCache *lrucache.Cache + FdCache *cacheutil.LRUCache // BufPool will be used for pooling bytes.Buffer. BufPool *sync.Pool @@ -130,7 +130,7 @@ func NewDirectoryCache(directory string, config DirectoryCacheConfig) (BlobCache if maxEntry == 0 { maxEntry = defaultMaxLRUCacheEntry } - dataCache = lrucache.New(maxEntry) + dataCache = cacheutil.NewLRUCache(maxEntry) dataCache.OnEvicted = func(key string, value interface{}) { value.(*bytes.Buffer).Reset() bufPool.Put(value) @@ -142,7 +142,7 @@ func NewDirectoryCache(directory string, config DirectoryCacheConfig) (BlobCache if maxEntry == 0 { maxEntry = defaultMaxCacheFds } - fdCache = lrucache.New(maxEntry) + fdCache = cacheutil.NewLRUCache(maxEntry) fdCache.OnEvicted = func(key string, value interface{}) { value.(*os.File).Close() } @@ -169,8 +169,8 @@ func NewDirectoryCache(directory string, config DirectoryCacheConfig) (BlobCache // directoryCache is a cache implementation which backend is a directory. 
type directoryCache struct { - cache *lrucache.Cache - fileCache *lrucache.Cache + cache *cacheutil.LRUCache + fileCache *cacheutil.LRUCache wipDirectory string directory string wipLock *namedmutex.NamedMutex diff --git a/fs/config/config.go b/fs/config/config.go index 65c3387f8..5550c2fd5 100644 --- a/fs/config/config.go +++ b/fs/config/config.go @@ -34,18 +34,21 @@ const ( ) type Config struct { - HTTPCacheType string `toml:"http_cache_type"` - FSCacheType string `toml:"filesystem_cache_type"` - ResolveResultEntry int `toml:"resolve_result_entry"` - PrefetchSize int64 `toml:"prefetch_size"` - PrefetchTimeoutSec int64 `toml:"prefetch_timeout_sec"` - NoPrefetch bool `toml:"noprefetch"` - NoBackgroundFetch bool `toml:"no_background_fetch"` - Debug bool `toml:"debug"` - AllowNoVerification bool `toml:"allow_no_verification"` - DisableVerification bool `toml:"disable_verification"` - MaxConcurrency int64 `toml:"max_concurrency"` - NoPrometheus bool `toml:"no_prometheus"` + HTTPCacheType string `toml:"http_cache_type"` + FSCacheType string `toml:"filesystem_cache_type"` + // ResolveResultEntryTTLSec is TTL (in sec) to cache resolved layers for + // future use. (default 120s) + ResolveResultEntryTTLSec int `toml:"resolve_result_entry_ttl_sec"` + ResolveResultEntry int `toml:"resolve_result_entry"` // deprecated + PrefetchSize int64 `toml:"prefetch_size"` + PrefetchTimeoutSec int64 `toml:"prefetch_timeout_sec"` + NoPrefetch bool `toml:"noprefetch"` + NoBackgroundFetch bool `toml:"no_background_fetch"` + Debug bool `toml:"debug"` + AllowNoVerification bool `toml:"allow_no_verification"` + DisableVerification bool `toml:"disable_verification"` + MaxConcurrency int64 `toml:"max_concurrency"` + NoPrometheus bool `toml:"no_prometheus"` // BlobConfig is config for layer blob management. 
BlobConfig `toml:"blob"` diff --git a/fs/layer/layer.go b/fs/layer/layer.go index a42b7f420..fa0e5ccf2 100644 --- a/fs/layer/layer.go +++ b/fs/layer/layer.go @@ -45,7 +45,7 @@ import ( "github.com/containerd/stargz-snapshotter/fs/source" "github.com/containerd/stargz-snapshotter/metadata" "github.com/containerd/stargz-snapshotter/task" - "github.com/containerd/stargz-snapshotter/util/lrucache" + "github.com/containerd/stargz-snapshotter/util/cacheutil" "github.com/containerd/stargz-snapshotter/util/namedmutex" fusefs "github.com/hanwen/go-fuse/v2/fs" digest "github.com/opencontainers/go-digest" @@ -55,11 +55,11 @@ import ( ) const ( - defaultResolveResultEntry = 30 - defaultMaxLRUCacheEntry = 10 - defaultMaxCacheFds = 10 - defaultPrefetchTimeoutSec = 10 - memoryCacheType = "memory" + defaultResolveResultEntryTTLSec = 120 + defaultMaxLRUCacheEntry = 10 + defaultMaxCacheFds = 10 + defaultPrefetchTimeoutSec = 10 + memoryCacheType = "memory" ) // Layer represents a layer. @@ -117,9 +117,9 @@ type Resolver struct { rootDir string resolver *remote.Resolver prefetchTimeout time.Duration - layerCache *lrucache.Cache + layerCache *cacheutil.TTLCache layerCacheMu sync.Mutex - blobCache *lrucache.Cache + blobCache *cacheutil.TTLCache blobCacheMu sync.Mutex backgroundTaskManager *task.BackgroundTaskManager resolveLock *namedmutex.NamedMutex @@ -129,9 +129,9 @@ type Resolver struct { // NewResolver returns a new layer resolver. 
func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager, cfg config.Config, resolveHandlers map[string]remote.Handler, metadataStore metadata.Store) (*Resolver, error) { - resolveResultEntry := cfg.ResolveResultEntry - if resolveResultEntry == 0 { - resolveResultEntry = defaultResolveResultEntry + resolveResultEntryTTL := time.Duration(cfg.ResolveResultEntryTTLSec) * time.Second + if resolveResultEntryTTL == 0 { + resolveResultEntryTTL = defaultResolveResultEntryTTLSec * time.Second } prefetchTimeout := time.Duration(cfg.PrefetchTimeoutSec) * time.Second if prefetchTimeout == 0 { @@ -141,7 +141,7 @@ func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager, // layerCache caches resolved layers for future use. This is useful in a use-case where // the filesystem resolves and caches all layers in an image (not only queried one) in parallel, // before they are actually queried. - layerCache := lrucache.New(resolveResultEntry) + layerCache := cacheutil.NewTTLCache(resolveResultEntryTTL) layerCache.OnEvicted = func(key string, value interface{}) { if err := value.(*layer).close(); err != nil { logrus.WithField("key", key).WithError(err).Warnf("failed to clean up layer") @@ -152,7 +152,7 @@ func NewResolver(root string, backgroundTaskManager *task.BackgroundTaskManager, // blobCache caches resolved blobs for futural use. This is especially useful when a layer // isn't eStargz/stargz (the *layer object won't be created/cached in this case). 
- blobCache := lrucache.New(resolveResultEntry) + blobCache := cacheutil.NewTTLCache(resolveResultEntryTTL) blobCache.OnEvicted = func(key string, value interface{}) { if err := value.(remote.Blob).Close(); err != nil { logrus.WithField("key", key).WithError(err).Warnf("failed to clean up blob") @@ -198,7 +198,7 @@ func newCache(root string, cacheType string, cfg config.Config) (cache.BlobCache return new(bytes.Buffer) }, } - dCache, fCache := lrucache.New(maxDataEntry), lrucache.New(maxFdEntry) + dCache, fCache := cacheutil.NewLRUCache(maxDataEntry), cacheutil.NewLRUCache(maxFdEntry) dCache.OnEvicted = func(key string, value interface{}) { value.(*bytes.Buffer).Reset() bufPool.Put(value) @@ -231,13 +231,13 @@ func (r *Resolver) Resolve(ctx context.Context, hosts source.RegistryHosts, refs name := refspec.String() + "/" + desc.Digest.String() // Wait if resolving this layer is already running. The result - // can hopefully get from the LRU cache. + // can hopefully get from the cache. r.resolveLock.Lock(name) defer r.resolveLock.Unlock(name) ctx = log.WithLogger(ctx, log.G(ctx).WithField("src", name)) - // First, try to retrieve this layer from the underlying LRU cache. + // First, try to retrieve this layer from the underlying cache. r.layerCacheMu.Lock() c, done, ok := r.layerCache.Get(name) r.layerCacheMu.Unlock() @@ -324,7 +324,7 @@ func (r *Resolver) Resolve(ctx context.Context, hosts source.RegistryHosts, refs func (r *Resolver) resolveBlob(ctx context.Context, hosts source.RegistryHosts, refspec reference.Spec, desc ocispec.Descriptor) (_ *blobRef, retErr error) { name := refspec.String() + "/" + desc.Digest.String() - // Try to retrieve the blob from the underlying LRU cache. + // Try to retrieve the blob from the underlying cache. 
r.blobCacheMu.Lock() c, done, ok := r.blobCache.Get(name) r.blobCacheMu.Unlock() diff --git a/store/refs.go b/store/refs.go index fba122edc..8e91be4f7 100644 --- a/store/refs.go +++ b/store/refs.go @@ -33,8 +33,8 @@ import ( "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/containerd/stargz-snapshotter/fs/source" + "github.com/containerd/stargz-snapshotter/util/cacheutil" "github.com/containerd/stargz-snapshotter/util/containerdutil" - "github.com/containerd/stargz-snapshotter/util/lrucache" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -54,7 +54,7 @@ func newRefPool(ctx context.Context, root string, hosts source.RegistryHosts) (* hosts: hosts, refcounter: make(map[string]*releaser), } - p.cache = lrucache.New(refCacheEntry) + p.cache = cacheutil.NewLRUCache(refCacheEntry) p.cache.OnEvicted = func(key string, value interface{}) { refspec := value.(reference.Spec) if err := os.RemoveAll(p.metadataDir(refspec)); err != nil { @@ -71,7 +71,7 @@ type refPool struct { hosts source.RegistryHosts refcounter map[string]*releaser - cache *lrucache.Cache + cache *cacheutil.LRUCache mu sync.Mutex } diff --git a/util/lrucache/lrucache.go b/util/cacheutil/lrucache.go similarity index 86% rename from util/lrucache/lrucache.go rename to util/cacheutil/lrucache.go index 8bec36417..b43b49194 100644 --- a/util/lrucache/lrucache.go +++ b/util/cacheutil/lrucache.go @@ -14,8 +14,7 @@ limitations under the License. */ -// Package lrucache provides reference-count-aware lru cache. -package lrucache +package cacheutil import ( "sync" @@ -23,10 +22,10 @@ import ( "github.com/golang/groupcache/lru" ) -// Cache is "groupcache/lru"-like cache. The difference is that "groupcache/lru" immediately +// LRUCache is "groupcache/lru"-like cache. 
The difference is that "groupcache/lru" immediately // finalizes theevicted contents using OnEvicted callback but our version strictly tracks the // reference counts of contents and calls OnEvicted when nobody refers to the evicted contents. -type Cache struct { +type LRUCache struct { cache *lru.Cache mu sync.Mutex @@ -35,15 +34,15 @@ type Cache struct { OnEvicted func(key string, value interface{}) } -// New creates new cache. -func New(maxEntries int) *Cache { +// NewLRUCache creates new lru cache. +func NewLRUCache(maxEntries int) *LRUCache { inner := lru.New(maxEntries) inner.OnEvicted = func(key lru.Key, value interface{}) { // Decrease the ref count incremented in Add(). // When nobody refers to this value, this value will be finalized via refCounter. value.(*refCounter).finalize() } - return &Cache{ + return &LRUCache{ cache: inner, } } @@ -51,7 +50,7 @@ func New(maxEntries int) *Cache { // Get retrieves the specified object from the cache and increments the reference counter of the // target content. Client must call `done` callback to decrease the reference count when the value // will no longer be used. -func (c *Cache) Get(key string) (value interface{}, done func(), ok bool) { +func (c *LRUCache) Get(key string) (value interface{}, done func(), ok bool) { c.mu.Lock() defer c.mu.Unlock() o, ok := c.cache.Get(key) @@ -67,7 +66,7 @@ func (c *Cache) Get(key string) (value interface{}, done func(), ok bool) { // If the specified content already exists in the cache, this sets `added` to false and returns // "already cached" content (i.e. doesn't replace the content with the new one). Client must call // `done` callback to decrease the counter when the value will no longer be used. 
-func (c *Cache) Add(key string, value interface{}) (cachedValue interface{}, done func(), added bool) { +func (c *LRUCache) Add(key string, value interface{}) (cachedValue interface{}, done func(), added bool) { c.mu.Lock() defer c.mu.Unlock() if o, ok := c.cache.Get(key); ok { @@ -88,13 +87,13 @@ func (c *Cache) Add(key string, value interface{}) (cachedValue interface{}, don // Remove removes the specified contents from the cache. OnEvicted callback will be called when // nobody refers to the removed content. -func (c *Cache) Remove(key string) { +func (c *LRUCache) Remove(key string) { c.mu.Lock() defer c.mu.Unlock() c.cache.Remove(key) } -func (c *Cache) decreaseOnceFunc(rc *refCounter) func() { +func (c *LRUCache) decreaseOnceFunc(rc *refCounter) func() { var once sync.Once return func() { c.mu.Lock() diff --git a/util/lrucache/lrucache_test.go b/util/cacheutil/lrucache_test.go similarity index 61% rename from util/lrucache/lrucache_test.go rename to util/cacheutil/lrucache_test.go index 677be5d2d..76b203d57 100644 --- a/util/lrucache/lrucache_test.go +++ b/util/cacheutil/lrucache_test.go @@ -14,60 +14,57 @@ limitations under the License. 
*/ -package lrucache +package cacheutil import ( "fmt" "testing" ) -func TestAdd(t *testing.T) { - c := New(10) +// TestLRUAdd tests Add API +func TestLRUAdd(t *testing.T) { + c := NewLRUCache(10) key, value := "key1", "abcd" v, _, added := c.Add(key, value) if !added { - t.Errorf("failed to add %q", key) - return + t.Fatalf("failed to add %q", key) } else if v.(string) != value { - t.Errorf("returned different object for %q; want %q; got %q", key, value, v.(string)) - return + t.Fatalf("returned different object for %q; want %q; got %q", key, value, v.(string)) } key, newvalue := "key1", "dummy" v, _, added = c.Add(key, newvalue) if added || v.(string) != value { - t.Errorf("%q must be originally stored one; want %q; got %q (added:%v)", + t.Fatalf("%q must be originally stored one; want %q; got %q (added:%v)", key, value, v.(string), added) } } -func TestGet(t *testing.T) { - c := New(10) +// TestLRUGet tests Get API +func TestLRUGet(t *testing.T) { + c := NewLRUCache(10) key, value := "key1", "abcd" v, _, added := c.Add(key, value) if !added { - t.Errorf("failed to add %q", key) - return + t.Fatalf("failed to add %q", key) } else if v.(string) != value { - t.Errorf("returned different object for %q; want %q; got %q", key, value, v.(string)) - return + t.Fatalf("returned different object for %q; want %q; got %q", key, value, v.(string)) } v, _, ok := c.Get(key) if !ok { - t.Errorf("failed to get obj %q (%q)", key, value) - return + t.Fatalf("failed to get obj %q (%q)", key, value) } else if v.(string) != value { - t.Errorf("unexpected object for %q; want %q; got %q", key, value, v.(string)) - return + t.Fatalf("unexpected object for %q; want %q; got %q", key, value, v.(string)) } } -func TestRemove(t *testing.T) { +// TestLRURemoe tests Remove API +func TestLRURemove(t *testing.T) { var evicted []string - c := New(2) + c := NewLRUCache(2) c.OnEvicted = func(key string, value interface{}) { evicted = append(evicted, key) } @@ -77,30 +74,27 @@ func TestRemove(t 
*testing.T) { c.Remove(key1) if len(evicted) != 0 { - t.Errorf("no content must be evicted after remove") - return + t.Fatalf("no content must be evicted after remove") } done1() if len(evicted) != 0 { - t.Errorf("no content must be evicted until all reference are discarded") - return + t.Fatalf("no content must be evicted until all reference are discarded") } done12() if len(evicted) != 1 { - t.Errorf("content must be evicted") - return + t.Fatalf("content must be evicted") } if evicted[0] != key1 { - t.Errorf("1st content %q must be evicted but got %q", key1, evicted[0]) - return + t.Fatalf("1st content %q must be evicted but got %q", key1, evicted[0]) } } -func TestEviction(t *testing.T) { +// TestLRUEviction tests that eviction occurs when the overflow happens. +func TestLRUEviction(t *testing.T) { var evicted []string - c := New(2) + c := NewLRUCache(2) c.OnEvicted = func(key string, value interface{}) { evicted = append(evicted, key) } @@ -111,42 +105,35 @@ func TestEviction(t *testing.T) { _, done22, _ := c.Get(key2) if len(evicted) != 0 { - t.Errorf("no content must be evicted after addition") - return + t.Fatalf("no content must be evicted after addition") } for i := 0; i < 2; i++ { c.Add(fmt.Sprintf("key-add-%d", i), fmt.Sprintf("abcd-add-%d", i)) } if len(evicted) != 0 { - t.Errorf("no content must be evicted after overflow") - return + t.Fatalf("no content must be evicted after overflow") } done1() if len(evicted) != 1 { - t.Errorf("1 content must be evicted") - return + t.Fatalf("1 content must be evicted") } if evicted[0] != key1 { - t.Errorf("1st content %q must be evicted but got %q", key1, evicted[0]) - return + t.Fatalf("1st content %q must be evicted but got %q", key1, evicted[0]) } done2() // effective done2() // ignored done2() // ignored if len(evicted) != 1 { - t.Errorf("only 1 content must be evicted") - return + t.Fatalf("only 1 content must be evicted") } done22() if len(evicted) != 2 { - t.Errorf("2 contents must be evicted") - return + 
t.Fatalf("2 contents must be evicted") } if evicted[1] != key2 { - t.Errorf("2nd content %q must be evicted but got %q", key2, evicted[1]) - return + t.Fatalf("2nd content %q must be evicted but got %q", key2, evicted[1]) } } diff --git a/util/cacheutil/ttlcache.go b/util/cacheutil/ttlcache.go new file mode 100644 index 000000000..92954dc70 --- /dev/null +++ b/util/cacheutil/ttlcache.go @@ -0,0 +1,115 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cacheutil + +import ( + "sync" + "time" +) + +// TTLCache is a ttl-based cache with reference counters. +// Each element is deleted as soon as the configured ttl expires. +type TTLCache struct { + m map[string]*refCounterWithTimer + mu sync.Mutex + ttl time.Duration + + // OnEvicted optionally specifies a callback function to be + // executed when an entry is purged from the cache. + OnEvicted func(key string, value interface{}) +} + +// NewTTLCache creates a new ttl-based cache. +func NewTTLCache(ttl time.Duration) *TTLCache { + return &TTLCache{ + m: make(map[string]*refCounterWithTimer), + ttl: ttl, + } +} + +// Get retrieves the specified object from the cache and increments the reference counter of the +// target content. Client must call `done` callback to decrease the reference count when the value +// will no longer be used.
+func (c *TTLCache) Get(key string) (value interface{}, done func(), ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + rc, ok := c.m[key] + if !ok { + return nil, nil, false + } + rc.inc() + return rc.v, c.decreaseOnceFunc(rc), true +} + +// Add adds object to the cache and returns the cached contents with incrementing the reference count. +// If the specified content already exists in the cache, this sets `added` to false and returns +// "already cached" content (i.e. doesn't replace the content with the new one). Client must call +// `done` callback to decrease the counter when the value will no longer be used. +func (c *TTLCache) Add(key string, value interface{}) (cachedValue interface{}, done func(), added bool) { + c.mu.Lock() + defer c.mu.Unlock() + if rc, ok := c.m[key]; ok { + rc.inc() + return rc.v, c.decreaseOnceFunc(rc), false + } + rc := &refCounterWithTimer{ + refCounter: &refCounter{ + key: key, + v: value, + onEvicted: c.OnEvicted, + }, + } + rc.initialize() // Keep this object having at least 1 ref count (will be decreased in OnEviction) + rc.inc() // The client references this object (will be decreased on "done") + rc.t = time.AfterFunc(c.ttl, func() { + c.mu.Lock() + defer c.mu.Unlock() + c.evictLocked(key) + }) + c.m[key] = rc + return rc.v, c.decreaseOnceFunc(rc), true +} + +// Remove removes the specified contents from the cache. OnEvicted callback will be called when +// nobody refers to the removed content. 
+func (c *TTLCache) Remove(key string) { + c.mu.Lock() + defer c.mu.Unlock() + c.evictLocked(key) +} + +func (c *TTLCache) evictLocked(key string) { + if rc, ok := c.m[key]; ok { + delete(c.m, key) + rc.t.Stop() // stop the timer so that this content is no longer garbage-collected + rc.finalize() + } +} + +func (c *TTLCache) decreaseOnceFunc(rc *refCounterWithTimer) func() { + var once sync.Once + return func() { + c.mu.Lock() + defer c.mu.Unlock() + once.Do(func() { rc.dec() }) + } +} + +type refCounterWithTimer struct { + *refCounter + t *time.Timer +} diff --git a/util/cacheutil/ttlcache_test.go b/util/cacheutil/ttlcache_test.go new file mode 100644 index 000000000..0d7eaa5c6 --- /dev/null +++ b/util/cacheutil/ttlcache_test.go @@ -0,0 +1,178 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+*/ + +package cacheutil + +import ( + "sync" + "testing" + "time" +) + +// TestTTLAdd tests Add API +func TestTTLAdd(t *testing.T) { + c := NewTTLCache(time.Hour) + + key, value := "key1", "abcd" + v, _, added := c.Add(key, value) + if !added { + t.Fatalf("failed to add %q", key) + } else if v.(string) != value { + t.Fatalf("returned different object for %q; want %q; got %q", key, value, v.(string)) + } + + key, newvalue := "key1", "dummy" + v, _, added = c.Add(key, newvalue) + if added || v.(string) != value { + t.Fatalf("%q must be originally stored one; want %q; got %q (added:%v)", + key, value, v.(string), added) + } +} + +// TestTTLGet tests Get API +func TestTTLGet(t *testing.T) { + c := NewTTLCache(time.Hour) + + key, value := "key1", "abcd" + v, _, added := c.Add(key, value) + if !added { + t.Fatalf("failed to add %q", key) + } else if v.(string) != value { + t.Fatalf("returned different object for %q; want %q; got %q", key, value, v.(string)) + } + + v, _, ok := c.Get(key) + if !ok { + t.Fatalf("failed to get obj %q (%q)", key, value) + } else if v.(string) != value { + t.Fatalf("unexpected object for %q; want %q; got %q", key, value, v.(string)) + } +} + +// TestTTLRemove tests Remove API +func TestTTLRemove(t *testing.T) { + var evicted []string + c := NewTTLCache(time.Hour) + c.OnEvicted = func(key string, value interface{}) { + evicted = append(evicted, key) + } + key1, value1 := "key1", "abcd1" + _, done1, _ := c.Add(key1, value1) + _, done12, _ := c.Get(key1) + + c.Remove(key1) + if len(evicted) != 0 { + t.Fatalf("no content must be evicted after remove") + } + + done1() + if len(evicted) != 0 { + t.Fatalf("no content must be evicted until all reference are discarded") + } + + done12() + if len(evicted) != 1 { + t.Fatalf("content must be evicted") + } + if evicted[0] != key1 { + t.Fatalf("1st content %q must be evicted but got %q", key1, evicted[0]) + } +} + +// TestTTLRemoveOverwritten tests old gc doesn't affect overwritten content +func 
TestTTLRemoveOverwritten(t *testing.T) { + var evicted []string + c := NewTTLCache(3 * time.Second) + c.OnEvicted = func(key string, value interface{}) { + evicted = append(evicted, key) + } + key1, value1 := "key1", "abcd1" + _, done1, _ := c.Add(key1, value1) + done1() + c.Remove(key1) // remove key1 as soon as possible + + // add another content with a new key + time.Sleep(2 * time.Second) + value12 := value1 + "!" + _, done12, _ := c.Add(key1, value12) + time.Sleep(2 * time.Second) + // spent 4 sec (larger than ttl) since the previous key1 was added. + // but the *newly-added* key1 hasn't been expired yet so key1 must remain. + v1, done122, getOK := c.Get(key1) + if !getOK { + t.Fatalf("unexpected eviction") + } + if s1, ok := v1.(string); !ok || s1 != value12 { + t.Fatalf("unexpected content %q(%v) != %q", s1, ok, value12) + } + + time.Sleep(2 * time.Second) + done122() + done12() + // spent 4 sec since the new key1 was added. This should be expired. + if _, _, ok := c.Get(key1); ok { + t.Fatalf("%q must be expired but remaining", key1) + } +} + +// TestTTLEviction tests contents are evicted after TTL without remaining reference.
+func TestTTLEviction(t *testing.T) { + var ( + evicted []string + evictedMu sync.Mutex + ) + c := NewTTLCache(time.Second) + c.OnEvicted = func(key string, value interface{}) { + evictedMu.Lock() + evicted = append(evicted, key) + evictedMu.Unlock() + } + key1, value1 := "key1", "abcd1" + key2, value2 := "key2", "abcd2" + _, done1, _ := c.Add(key1, value1) + done1() // evict key1 on expiering ttl + _, done2, _ := c.Add(key2, value2) + _, done22, _ := c.Get(key2) // hold reference of key2 to prevent eviction + time.Sleep(3 * time.Second) // wait until elements reach ttl + + evictedMu.Lock() + if len(evicted) != 1 { + t.Fatalf("1 content must be removed") + } + if evicted[0] != key1 { + t.Fatalf("1st content %q must be evicted but got %q", key1, evicted[0]) + } + evictedMu.Unlock() + + done2() // effective + done2() // ignored + done2() // ignored + evictedMu.Lock() + if len(evicted) != 1 { + t.Fatalf("only 1 content must be evicted") + } + evictedMu.Unlock() + + done22() + evictedMu.Lock() + if len(evicted) != 2 { + t.Fatalf("2 contents must be evicted") + } + if evicted[1] != key2 { + t.Fatalf("2nd content %q must be evicted but got %q", key2, evicted[1]) + } + evictedMu.Unlock() +}