diff --git a/go.mod b/go.mod
index 0dc0fef192e0..b426e9cf9978 100644
--- a/go.mod
+++ b/go.mod
@@ -35,6 +35,7 @@ require (
 	github.com/coinbase/waas-client-library-go v1.0.8
 	github.com/couchbase/gocb/v2 v2.9.2
 	github.com/crewjam/rfc5424 v0.1.0
+	github.com/dgraph-io/ristretto v1.0.0
 	github.com/dustin/go-humanize v1.0.1
 	github.com/elastic/go-elasticsearch/v8 v8.15.0
 	github.com/envoyproxy/protoc-gen-validate v1.1.0
diff --git a/go.sum b/go.sum
index 3f2a7ec38e6c..93936a6bf40d 100644
--- a/go.sum
+++ b/go.sum
@@ -220,6 +220,10 @@ github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnG
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgraph-io/ristretto v1.0.0 h1:SYG07bONKMlFDUYu5pEu3DGAh8c2OFNzKm6G9J4Si84=
+github.com/dgraph-io/ristretto v1.0.0/go.mod h1:jTi2FiYEhQ1NsMmA7DeBykizjOuY88NhKBkepyu1jPc=
+github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
+github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
 github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
diff --git a/pkg/cache/decorator.go b/pkg/cache/decorator.go
new file mode 100644
index 000000000000..5740dec7eb0b
--- /dev/null
+++ b/pkg/cache/decorator.go
@@ -0,0 +1,64 @@
+package cache
+
+// WithMetrics is a decorator that adds metrics collection to any Cache implementation.
+type WithMetrics[T any] struct {
+	wrapped   Cache[T]
+	metrics   BaseMetricsCollector
+	cacheName string
+}
+
+// NewCacheWithMetrics creates a new WithMetrics decorator that wraps the provided Cache
+// and collects metrics using the provided BaseMetricsCollector.
+// The cacheName parameter is used to identify the cache in the collected metrics.
+func NewCacheWithMetrics[T any](wrapped Cache[T], metrics BaseMetricsCollector, cacheName string) *WithMetrics[T] {
+	return &WithMetrics[T]{
+		wrapped:   wrapped,
+		metrics:   metrics,
+		cacheName: cacheName,
+	}
+}
+
+// Set sets the value for the given key in the cache. It also records a set metric
+// for the cache using the provided metrics collector and cache name.
+func (c *WithMetrics[T]) Set(key string, val T) {
+	c.metrics.RecordSet(c.cacheName)
+	c.wrapped.Set(key, val)
+}
+
+// Get retrieves the value for the given key from the underlying cache. It also records
+// a hit or miss metric for the cache using the provided metrics collector and cache name.
+func (c *WithMetrics[T]) Get(key string) (T, bool) {
+	val, found := c.wrapped.Get(key)
+	if found {
+		c.metrics.RecordHit(c.cacheName)
+	} else {
+		c.metrics.RecordMiss(c.cacheName)
+	}
+	return val, found
+}
+
+// Exists checks if the given key exists in the cache. It records a hit or miss metric
+// for the cache using the provided metrics collector and cache name.
+func (c *WithMetrics[T]) Exists(key string) bool {
+	found := c.wrapped.Exists(key)
+	if found {
+		c.metrics.RecordHit(c.cacheName)
+	} else {
+		c.metrics.RecordMiss(c.cacheName)
+	}
+	return found
+}
+
+// Delete removes the value for the given key from the cache. It also records a delete metric
+// for the cache using the provided metrics collector and cache name.
+func (c *WithMetrics[T]) Delete(key string) {
+	c.wrapped.Delete(key)
+	c.metrics.RecordDelete(c.cacheName)
+}
+
+// Clear removes all entries from the cache. It also records a clear metric
+// for the cache using the provided metrics collector and cache name.
+func (c *WithMetrics[T]) Clear() {
+	c.wrapped.Clear()
+	c.metrics.RecordClear(c.cacheName)
+}
diff --git a/pkg/cache/metrics.go b/pkg/cache/metrics.go
new file mode 100644
index 000000000000..7ea11f4c740c
--- /dev/null
+++ b/pkg/cache/metrics.go
@@ -0,0 +1,107 @@
+package cache
+
+import (
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+)
+
+// BaseMetricsCollector defines the interface for recording cache metrics.
+// Each method corresponds to a specific cache-related operation.
+type BaseMetricsCollector interface {
+	RecordHit(cacheName string)
+	RecordMiss(cacheName string)
+	RecordSet(cacheName string)
+	RecordDelete(cacheName string)
+	RecordClear(cacheName string)
+}
+
+// MetricsCollector encapsulates all Prometheus metrics with labels.
+// It holds Prometheus counters for cache operations, which help track
+// the performance and usage of the cache.
+type MetricsCollector struct {
+	// Base metrics.
+	hits    *prometheus.CounterVec
+	misses  *prometheus.CounterVec
+	sets    *prometheus.CounterVec
+	deletes *prometheus.CounterVec
+	clears  *prometheus.CounterVec
+}
+
+var (
+	collectorOnce sync.Once // Ensures that the collector is initialized only once.
+	collector     *MetricsCollector
+)
+
+// InitializeMetrics initializes the singleton MetricsCollector.
+// It sets up Prometheus counters for cache operations (hits, misses, sets, deletes, clears).
+// Should be called once at the start of the application.
+func InitializeMetrics(namespace, subsystem string) {
+	collectorOnce.Do(func() {
+		collector = &MetricsCollector{
+			hits: promauto.NewCounterVec(prometheus.CounterOpts{
+				Namespace: namespace,
+				Subsystem: subsystem,
+				Name:      "hits_total",
+				Help:      "Total number of cache hits.",
+			}, []string{"cache_name"}),
+
+			misses: promauto.NewCounterVec(prometheus.CounterOpts{
+				Namespace: namespace,
+				Subsystem: subsystem,
+				Name:      "misses_total",
+				Help:      "Total number of cache misses.",
+			}, []string{"cache_name"}),
+
+			sets: promauto.NewCounterVec(prometheus.CounterOpts{
+				Namespace: namespace,
+				Subsystem: subsystem,
+				Name:      "sets_total",
+				Help:      "Total number of cache set operations.",
+			}, []string{"cache_name"}),
+
+			deletes: promauto.NewCounterVec(prometheus.CounterOpts{
+				Namespace: namespace,
+				Subsystem: subsystem,
+				Name:      "deletes_total",
+				Help:      "Total number of cache delete operations.",
+			}, []string{"cache_name"}),
+
+			clears: promauto.NewCounterVec(prometheus.CounterOpts{
+				Namespace: namespace,
+				Subsystem: subsystem,
+				Name:      "clears_total",
+				Help:      "Total number of cache clear operations.",
+			}, []string{"cache_name"}),
+		}
+	})
+}
+
+// GetMetricsCollector returns the singleton MetricsCollector instance.
+// It panics if InitializeMetrics has not been called to ensure metrics are properly initialized.
+// Must be called after InitializeMetrics to avoid runtime issues.
+// Calling it any earlier results in a panic.
+func GetMetricsCollector() *MetricsCollector {
+	if collector == nil {
+		panic("MetricsCollector not initialized. Call InitializeMetrics first.")
+	}
+	return collector
+}
+
+// Implement BaseMetricsCollector interface methods.
+
+// RecordHit increments the counter for cache hits, tracking how often cache lookups succeed.
+func (m *MetricsCollector) RecordHit(cacheName string) { m.hits.WithLabelValues(cacheName).Inc() }
+
+// RecordMiss increments the counter for cache misses, tracking how often cache lookups fail.
+func (m *MetricsCollector) RecordMiss(cacheName string) { m.misses.WithLabelValues(cacheName).Inc() }
+
+// RecordSet increments the counter for cache set operations, tracking how often items are added/updated.
+func (m *MetricsCollector) RecordSet(cacheName string) { m.sets.WithLabelValues(cacheName).Inc() }
+
+// RecordDelete increments the counter for cache delete operations, tracking how often items are removed.
+func (m *MetricsCollector) RecordDelete(cacheName string) { m.deletes.WithLabelValues(cacheName).Inc() }
+
+// RecordClear increments the counter for cache clear operations, tracking how often the cache is completely cleared.
+func (m *MetricsCollector) RecordClear(cacheName string) { m.clears.WithLabelValues(cacheName).Inc() }
diff --git a/pkg/cache/sizedlru/metrics.go b/pkg/cache/sizedlru/metrics.go
new file mode 100644
index 000000000000..d5d64e26eb04
--- /dev/null
+++ b/pkg/cache/sizedlru/metrics.go
@@ -0,0 +1,69 @@
+package sizedlru
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/trufflesecurity/trufflehog/v3/pkg/cache"
+)
+
+// MetricsCollector should implement the collector interface.
+var _ collector = (*MetricsCollector)(nil)
+
+// MetricsCollector extends the BaseMetricsCollector with sized-LRU-specific metrics.
+// It provides methods to record cache hits, misses, and evictions.
+type MetricsCollector struct {
+	// BaseMetricsCollector is embedded to provide the base metrics functionality.
+	cache.BaseMetricsCollector
+
+	totalHits   *prometheus.GaugeVec
+	totalMisses *prometheus.GaugeVec
+	totalEvicts *prometheus.GaugeVec
+}
+
+// NewSizedLRUMetricsCollector initializes a new MetricsCollector with the provided namespace and subsystem.
+func NewSizedLRUMetricsCollector(namespace, subsystem string) *MetricsCollector {
+	base := cache.GetMetricsCollector()
+
+	totalHits := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: namespace,
+		Subsystem: subsystem,
+		Name:      "total_hits",
+		Help:      "Total number of cache hits.",
+	}, []string{"cache_name"})
+
+	totalMisses := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: namespace,
+		Subsystem: subsystem,
+		Name:      "total_misses",
+		Help:      "Total number of cache misses.",
+	}, []string{"cache_name"})
+
+	totalEvicts := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: namespace,
+		Subsystem: subsystem,
+		Name:      "total_evicts",
+		Help:      "Total number of cache evictions.",
+	}, []string{"cache_name"})
+
+	return &MetricsCollector{
+		BaseMetricsCollector: base,
+		totalHits:            totalHits,
+		totalMisses:          totalMisses,
+		totalEvicts:          totalEvicts,
+	}
+}
+
+// RecordHits updates the total hits metric for the given cache name.
+func (c *MetricsCollector) RecordHits(cacheName string, hits uint64) {
+	c.totalHits.WithLabelValues(cacheName).Set(float64(hits))
+}
+
+// RecordMisses updates the total misses metric for the given cache name.
+func (c *MetricsCollector) RecordMisses(cacheName string, misses uint64) {
+	c.totalMisses.WithLabelValues(cacheName).Set(float64(misses))
+}
+
+// RecordEvictions updates the total evictions metric for the given cache name.
+func (c *MetricsCollector) RecordEvictions(cacheName string, evictions uint64) {
+	c.totalEvicts.WithLabelValues(cacheName).Set(float64(evictions))
+}
diff --git a/pkg/cache/sizedlru/sizedlru.go b/pkg/cache/sizedlru/sizedlru.go
new file mode 100644
index 000000000000..824042ee7793
--- /dev/null
+++ b/pkg/cache/sizedlru/sizedlru.go
@@ -0,0 +1,173 @@
+// Package sizedlru provides a generic, size-limited LRU (Least Recently Used) cache with optional
+// metrics collection and reporting. It wraps the Ristretto caching library, adding support for custom
+// metrics tracking cache hits, misses, evictions, and other cache operations.
+//
+// This package supports configuring key aspects of cache behavior, including maximum cache size,
+// custom metrics collection, and the interval for metrics reporting.
+package sizedlru
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/dgraph-io/ristretto"
+
+	"github.com/trufflesecurity/trufflehog/v3/pkg/cache"
+)
+
+// collector is an interface that extends cache.BaseMetricsCollector
+// and adds methods for recording cache hits, misses, and evictions.
+type collector interface {
+	cache.BaseMetricsCollector
+
+	RecordHits(cacheName string, hits uint64)
+	RecordMisses(cacheName string, misses uint64)
+	RecordEvictions(cacheName string, evictions uint64)
+}
+
+// Cache is a generic, size-limited LRU cache that stores key-value pairs up to a configurable maximum cost.
+// It wraps the Ristretto cache and provides additional metrics collection functionality.
+type Cache[T any] struct {
+	cache *ristretto.Cache[string, T]
+
+	cacheName      string
+	metrics        collector
+	updateInterval time.Duration
+}
+
+// Option defines a functional option for configuring the Cache.
+// It is a function that takes a Ristretto config and a Cache and modifies them.
+type Option[T any] func(*ristretto.Config[string, T], *Cache[T])
+
+// WithMaxCost is a functional option to set the maximum cost of the cache.
+// It sets the MaxCost field of the Ristretto config.
+// See https://github.com/dgraph-io/ristretto/blob/ce5561f7d9a89363e9d889095428fa9ef84f9c76/cache.go#L102-L110 for more details.
+func WithMaxCost[T any](maxCost int64) Option[T] {
+	return func(config *ristretto.Config[string, T], _ *Cache[T]) {
+		config.MaxCost = maxCost
+	}
+}
+
+// WithMetricsCollector is a functional option to set a custom metrics collector.
+// It sets the metrics field of the Cache.
+func WithMetricsCollector[T any](collector collector) Option[T] {
+	return func(_ *ristretto.Config[string, T], lc *Cache[T]) {
+		lc.metrics = collector
+	}
+}
+
+// WithMetricsEnabled is a functional option to enable/disable metrics in the cache itself.
+// If enabled, expect a performance hit due to the periodic metrics collection as well as
+// the overhead of metrics collection from Ristretto.
+// See https://github.com/dgraph-io/ristretto/blob/ce5561f7d9a89363e9d889095428fa9ef84f9c76/cache.go#L116-L120 for more details.
+func WithMetricsEnabled[T any](enabled bool, updateInterval time.Duration) Option[T] {
+	return func(config *ristretto.Config[string, T], lc *Cache[T]) {
+		lc.updateInterval = updateInterval
+		config.Metrics = enabled
+	}
+}
+
+// NewCache creates a new Cache with optional configuration parameters.
+// It takes a cache name and a variadic list of options.
+func NewCache[T any](cacheName string, opts ...Option[T]) (*Cache[T], error) {
+	const (
+		// Default values for Ristretto cache configuration.
+		defaultNumCounters = 1e7
+		defaultMaxCost     = 1 << 30
+		defaultBufferItems = 64
+		defaultInterval    = 10 * time.Second
+	)
+
+	config := &ristretto.Config[string, T]{
+		NumCounters: defaultNumCounters, // number of keys to track frequency of (10M).
+		MaxCost:     defaultMaxCost,     // maximum cost of cache (1GB).
+		BufferItems: defaultBufferItems, // number of keys per Get buffer.
+	}
+
+	// Cache instance with default nil metrics collector.
+	lru := &Cache[T]{
+		metrics:        nil, // No metrics collector by default.
+		cacheName:      cacheName,
+		updateInterval: defaultInterval,
+	}
+
+	for _, opt := range opts {
+		opt(config, lru)
+	}
+
+	c, err := ristretto.NewCache[string, T](config)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create Ristretto cache: %w", err)
+	}
+
+	lru.cache = c
+
+	// Initialize metrics if enabled and a collector is set.
+	if config.Metrics && lru.metrics != nil {
+		lru.initMetrics()
+	}
+
+	return lru, nil
+}
+
+// initMetrics initializes Prometheus metrics based on Ristretto's internal metrics.
+// It periodically updates custom metrics based on Ristretto's metrics.
+// TODO (ahrav): Make context-aware.
+func (lc *Cache[T]) initMetrics() {
+	// Periodically update custom metrics based on Ristretto's metrics.
+	go func() {
+		ticker := time.NewTicker(lc.updateInterval)
+		defer ticker.Stop()
+		for range ticker.C {
+			stats := lc.cache.Metrics
+
+			lc.metrics.RecordHits(lc.cacheName, stats.Hits())
+			lc.metrics.RecordMisses(lc.cacheName, stats.Misses())
+			lc.metrics.RecordEvictions(lc.cacheName, stats.KeysEvicted())
+
+			// Record additional sized LRU specific metrics. (e.g. cost, keys, etc.)
+		}
+	}()
+}
+
+// Set adds a key-value pair to the cache.
+// It assumes each entry has a cost of 1.
+func (lc *Cache[T]) Set(key string, val T) {
+	lc.cache.Set(key, val, 1)
+	// Wait for the buffered write to be applied so the value is visible to subsequent reads.
+	lc.cache.Wait()
+	lc.metrics.RecordSet(lc.cacheName)
+}
+
+// Get retrieves a value from the cache by key.
+func (lc *Cache[T]) Get(key string) (T, bool) {
+	value, found := lc.cache.Get(key)
+	if found {
+		lc.metrics.RecordHit(lc.cacheName)
+		return value, true
+	}
+	lc.metrics.RecordMiss(lc.cacheName)
+	var zero T
+	return zero, false
+}
+
+// Exists checks if a key exists in the cache.
+func (lc *Cache[T]) Exists(key string) bool {
+	_, found := lc.cache.Get(key)
+	if found {
+		lc.metrics.RecordHit(lc.cacheName)
+	} else {
+		lc.metrics.RecordMiss(lc.cacheName)
+	}
+	return found
+}
+
+// Delete removes a key from the cache.
+func (lc *Cache[T]) Delete(key string) {
+	lc.cache.Del(key)
+	lc.metrics.RecordDelete(lc.cacheName)
+}
+
+// Clear removes all keys from the cache.
+func (lc *Cache[T]) Clear() {
+	lc.cache.Clear()
+	lc.metrics.RecordClear(lc.cacheName)
+}
diff --git a/pkg/cache/sizedlru/sizedlru_test.go b/pkg/cache/sizedlru/sizedlru_test.go
new file mode 100644
index 000000000000..bb140e9cae68
--- /dev/null
+++ b/pkg/cache/sizedlru/sizedlru_test.go
@@ -0,0 +1,198 @@
+package sizedlru
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+)
+
+type mockCollector struct{ mock.Mock }
+
+func (m *mockCollector) RecordHits(cacheName string, hits uint64) { m.Called(cacheName, hits) }
+
+func (m *mockCollector) RecordMisses(cacheName string, misses uint64) { m.Called(cacheName, misses) }
+
+func (m *mockCollector) RecordEvictions(cacheName string, evictions uint64) {
+	m.Called(cacheName, evictions)
+}
+
+func (m *mockCollector) RecordSet(cacheName string) { m.Called(cacheName) }
+
+func (m *mockCollector) RecordHit(cacheName string) { m.Called(cacheName) }
+
+func (m *mockCollector) RecordMiss(cacheName string) { m.Called(cacheName) }
+
+func (m *mockCollector) RecordDelete(cacheName string) { m.Called(cacheName) }
+
+func (m *mockCollector) RecordClear(cacheName string) { m.Called(cacheName) }
+
+func TestNewLRUCache(t *testing.T) {
+	t.Parallel()
+
+	t.Run("default configuration", func(t *testing.T) {
+		t.Parallel()
+		cache, err := NewCache[int]("test_cache")
+		assert.NoError(t, err)
+		assert.NotNil(t, cache)
+		assert.Equal(t, "test_cache", cache.cacheName)
+		assert.Nil(t, cache.metrics)
+	})
+
+	t.Run("with custom max cost", func(t *testing.T) {
+		t.Parallel()
+		cache, err := NewCache[int]("test_cache", WithMaxCost[int](1000))
+		assert.NoError(t, err)
+		assert.NotNil(t, cache)
+	})
+
+	t.Run("with metrics collector", func(t *testing.T) {
+		t.Parallel()
+		collector := &mockCollector{}
+		cache, err := NewCache[int](
+			"test_cache",
+			WithMetricsCollector[int](collector),
+			WithMetricsEnabled[int](true, 200*time.Millisecond),
+		)
+		assert.NoError(t, err)
+		assert.NotNil(t, cache)
+		assert.Equal(t, collector, cache.metrics)
+	})
+}
+
+func TestCacheSet(t *testing.T) {
+	t.Parallel()
+
+	collector := new(mockCollector)
+	collector.On("RecordSet", "test_cache").Return()
+	collector.On("RecordHit", "test_cache").Return()
+
+	cache, err := NewCache[string]("test_cache", WithMetricsCollector[string](collector))
+	assert.NoError(t, err)
+
+	cache.Set("key", "value")
+	collector.AssertCalled(t, "RecordSet", "test_cache")
+
+	got, found := cache.Get("key")
+	assert.True(t, found)
+	assert.Equal(t, "value", got)
+}
+
+func TestCacheGet(t *testing.T) {
+	t.Parallel()
+
+	collector := new(mockCollector)
+	collector.On("RecordSet", "test_cache").Return()
+	collector.On("RecordHit", "test_cache").Return()
+	collector.On("RecordMiss", "test_cache").Return()
+
+	cache, err := NewCache[string]("test_cache", WithMetricsCollector[string](collector))
+	assert.NoError(t, err)
+
+	cache.Set("key", "value")
+	collector.AssertCalled(t, "RecordSet", "test_cache")
+
+	value, found := cache.Get("key")
+	assert.True(t, found)
+	assert.Equal(t, "value", value)
+	collector.AssertCalled(t, "RecordHit", "test_cache")
+
+	_, found = cache.Get("non_existent")
+	assert.False(t, found)
+	collector.AssertCalled(t, "RecordMiss", "test_cache")
+}
+
+func TestCache_Exists(t *testing.T) {
+	t.Parallel()
+
+	collector := new(mockCollector)
+	collector.On("RecordSet", "test_cache").Return()
+	collector.On("RecordHit", "test_cache").Return()
+	collector.On("RecordMiss", "test_cache").Return()
"test_cache").Return() + + cache, err := NewCache[string]("test_cache", WithMetricsCollector[string](collector)) + assert.NoError(t, err) + + cache.Set("key", "value") + collector.AssertCalled(t, "RecordSet", "test_cache") + + exists := cache.Exists("key") + assert.True(t, exists) + collector.AssertCalled(t, "RecordHit", "test_cache") + + exists = cache.Exists("non_existent") + assert.False(t, exists) + collector.AssertCalled(t, "RecordMiss", "test_cache") +} + +func TestCache_Delete(t *testing.T) { + t.Parallel() + + collector := new(mockCollector) + collector.On("RecordSet", "test_cache").Return() + collector.On("RecordDelete", "test_cache").Return() + collector.On("RecordMiss", "test_cache").Return() + + cache, err := NewCache[string]("test_cache", WithMetricsCollector[string](collector)) + assert.NoError(t, err) + + cache.Set("key", "value") + collector.AssertCalled(t, "RecordSet", "test_cache") + + cache.Delete("key") + collector.AssertCalled(t, "RecordDelete", "test_cache") + + _, found := cache.Get("key") + assert.False(t, found) + collector.AssertCalled(t, "RecordMiss", "test_cache") +} + +func TestCache_Clear(t *testing.T) { + t.Parallel() + + collector := new(mockCollector) + collector.On("RecordSet", "test_cache").Return() + collector.On("RecordClear", "test_cache").Return() + collector.On("RecordMiss", "test_cache").Return() + + cache, err := NewCache[string]("test_cache", WithMetricsCollector[string](collector)) + assert.NoError(t, err) + + cache.Set("key1", "value1") + cache.Set("key2", "value2") + collector.AssertNumberOfCalls(t, "RecordSet", 2) + + cache.Clear() + collector.AssertCalled(t, "RecordClear", "test_cache") + + _, found1 := cache.Get("key1") + _, found2 := cache.Get("key2") + assert.False(t, found1) + assert.False(t, found2) + collector.AssertNumberOfCalls(t, "RecordMiss", 2) +} + +func TestCache_Metrics(t *testing.T) { + t.Parallel() + + collector := new(mockCollector) + collector.On("RecordHits", "test_cache", mock.AnythingOfType("uint64")).Return() + collector.On("RecordMisses", "test_cache", mock.AnythingOfType("uint64")).Return() + collector.On("RecordEvictions", "test_cache", mock.AnythingOfType("uint64")).Return() + + _, err := NewCache[string]( + "test_cache", + WithMetricsCollector[string](collector), + WithMetricsEnabled[string](true, 200*time.Millisecond), + ) + assert.NoError(t, err) + + // Wait for at least one metrics update cycle. + time.Sleep(1 * time.Second) + + collector.AssertCalled(t, "RecordHits", "test_cache", mock.AnythingOfType("uint64")) + collector.AssertCalled(t, "RecordMisses", "test_cache", mock.AnythingOfType("uint64")) + collector.AssertCalled(t, "RecordEvictions", "test_cache", mock.AnythingOfType("uint64")) +}