Remove core restriction in cache and turn it into an active/standby restriction instead #3849

Merged: 7 commits, Jan 26, 2018
Changes from 3 commits
91 changes: 35 additions & 56 deletions physical/cache.go
@@ -2,29 +2,27 @@ package physical

 import (
 	"context"
-	"strings"

-	iradix "github.com/hashicorp/go-immutable-radix"
 	"github.com/hashicorp/golang-lru"
 	"github.com/hashicorp/vault/helper/locksutil"
 	log "github.com/mgutz/logxi/v1"
 )

 const (
 	// DefaultCacheSize is used if no cache size is specified for NewCache
-	DefaultCacheSize = 32 * 1024
+	DefaultCacheSize = 128 * 1024
 )

 // Cache is used to wrap an underlying physical backend
 // and provide an LRU cache layer on top. Most of the reads done by
 // Vault are for policy objects so there is a large read reduction
 // by using a simple write-through cache.
 type Cache struct {
-	backend    Backend
-	lru        *lru.TwoQueueCache
-	locks      []*locksutil.LockEntry
-	exceptions *iradix.Tree
-	logger     log.Logger
+	backend Backend
+	lru     *lru.TwoQueueCache
+	locks   []*locksutil.LockEntry
+	logger  log.Logger
+	enabled bool
 }

 // TransactionalCache is a Cache that wraps the physical that is transactional
@@ -34,49 +32,43 @@ type TransactionalCache struct {
 }

 // Verify Cache satisfies the correct interfaces
-var _ Purgable = (*Cache)(nil)
+var _ ToggleablePurgemonster = (*Cache)(nil)
+var _ ToggleablePurgemonster = (*TransactionalCache)(nil)
 var _ Backend = (*Cache)(nil)
 var _ Transactional = (*TransactionalCache)(nil)
-var _ Purgable = (*TransactionalCache)(nil)

 // NewCache returns a physical cache of the given size.
 // If no size is provided, the default size is used.
-func NewCache(b Backend, size int, coreExceptions []string, logger log.Logger) *Cache {
+func NewCache(b Backend, size int, logger log.Logger) *Cache {
 	if logger.IsTrace() {
 		logger.Trace("physical/cache: creating LRU cache", "size", size)
 	}
 	if size <= 0 {
 		size = DefaultCacheSize
 	}
-	cacheExceptions := iradix.New()
-	for _, key := range coreExceptions {
-		cacheValue := true
-		if strings.HasPrefix(key, "!") {
-			key = strings.TrimPrefix(key, "!")
-			cacheValue = false
-		}
-		cacheExceptions, _, _ = cacheExceptions.Insert([]byte(key), cacheValue)
-	}

 	cache, _ := lru.New2Q(size)
 	c := &Cache{
-		backend:    b,
-		lru:        cache,
-		locks:      locksutil.CreateLocks(),
-		exceptions: cacheExceptions,
-		logger:     logger,
+		backend: b,
+		lru:     cache,
+		locks:   locksutil.CreateLocks(),
+		logger:  logger,
[Inline review thread on the line above]

Contributor: enabled seems to be false by default (meaning no cache), is this intentional?

Member (author): Yep. Fail safe.

Contributor: Ahh, in that case perhaps adding a comment on the need to call SetEnabled before the Cache really starts caching would be helpful.

Member (author): Done

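For clarity (this note and the sketch below are not part of the PR): because `enabled` defaults to false, a freshly constructed Cache passes every Put/Get/Delete straight through to the underlying backend until something calls `SetEnabled(true)`. A minimal sketch using only the API visible in this diff — the `example` package, the `demo` helper, and the pre-built backend `b` are illustrative assumptions:

```go
package example

import (
	"context"

	"github.com/hashicorp/vault/physical"
	log "github.com/mgutz/logxi/v1"
)

// demo assumes some physical.Backend has already been constructed elsewhere
// (e.g. Vault's in-memory backend); only NewCache, SetEnabled, Put, Get and
// Entry are taken from this diff.
func demo(b physical.Backend, logger log.Logger) error {
	cache := physical.NewCache(b, 0, logger) // size <= 0 falls back to DefaultCacheSize
	ctx := context.Background()

	// enabled is false by default (the fail-safe discussed above), so this
	// Put is forwarded to the backend and nothing lands in the LRU.
	if err := cache.Put(ctx, &physical.Entry{Key: "foo", Value: []byte("bar")}); err != nil {
		return err
	}

	// Once the caller flips the switch (e.g. on becoming the active node),
	// the Cache behaves as the write-through LRU described in its comment.
	cache.SetEnabled(true)

	// This Get misses the LRU, falls through to the backend, and caches the
	// result; subsequent Gets for the same key are served from the LRU.
	entry, err := cache.Get(ctx, "foo")
	if err != nil {
		return err
	}
	_ = entry
	return nil
}
```

Defaulting to disabled means a caller that never wires up `SetEnabled` gets correct (if slower) pass-through behavior rather than a cache that could serve stale data — the "fail safe" the author refers to above.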
 	}
 	return c
 }

-func NewTransactionalCache(b Backend, size int, coreExceptions []string, logger log.Logger) *TransactionalCache {
+func NewTransactionalCache(b Backend, size int, logger log.Logger) *TransactionalCache {
 	c := &TransactionalCache{
-		Cache:         NewCache(b, size, coreExceptions, logger),
+		Cache:         NewCache(b, size, logger),
 		Transactional: b.(Transactional),
 	}
 	return c
 }
+
+func (c *Cache) SetEnabled(enabled bool) {
+	c.enabled = enabled
+}

 // Purge is used to clear the cache
 func (c *Cache) Purge(ctx context.Context) {
 	// Lock the world
@@ -89,30 +81,30 @@ func (c *Cache) Purge(ctx context.Context) {
 }

 func (c *Cache) Put(ctx context.Context, entry *Entry) error {
+	if !c.enabled {
+		return c.backend.Put(ctx, entry)
+	}
+
 	lock := locksutil.LockForKey(c.locks, entry.Key)
 	lock.Lock()
 	defer lock.Unlock()

 	err := c.backend.Put(ctx, entry)
-	if err == nil && c.shouldCache(entry.Key) {
+	if err == nil {
 		c.lru.Add(entry.Key, entry)
 	}
 	return err
 }

 func (c *Cache) Get(ctx context.Context, key string) (*Entry, error) {
+	if !c.enabled {
+		return c.backend.Get(ctx, key)
+	}
+
 	lock := locksutil.LockForKey(c.locks, key)
 	lock.RLock()
 	defer lock.RUnlock()

-	// We do NOT cache negative results for keys in the 'core/' prefix
-	// otherwise we risk certain race conditions upstream. The primary issue is
-	// with the HA mode, we could potentially negatively cache the leader entry
-	// and cause leader discovery to fail.
-	if !c.shouldCache(key) {
-		return c.backend.Get(ctx, key)
-	}
-
 	// Check the LRU first
 	if raw, ok := c.lru.Get(key); ok {
 		if raw == nil {
@@ -136,12 +128,16 @@ func (c *Cache) Get(ctx context.Context, key string) (*Entry, error) {
 }

 func (c *Cache) Delete(ctx context.Context, key string) error {
+	if !c.enabled {
+		return c.backend.Delete(ctx, key)
+	}
+
 	lock := locksutil.LockForKey(c.locks, key)
 	lock.Lock()
 	defer lock.Unlock()

 	err := c.backend.Delete(ctx, key)
-	if err == nil && c.shouldCache(key) {
+	if err == nil {
 		c.lru.Remove(key)
 	}
 	return err
@@ -170,8 +166,8 @@ func (c *TransactionalCache) Transaction(ctx context.Context, txns []*TxnEntry)
 		return err
 	}

-	for _, txn := range txns {
-		if c.shouldCache(txn.Entry.Key) {
+	if c.enabled {
+		for _, txn := range txns {
 			switch txn.Operation {
 			case PutOperation:
 				c.lru.Add(txn.Entry.Key, txn.Entry)
@@ -183,20 +179,3 @@ func (c *TransactionalCache) Transaction(ctx context.Context, txns []*TxnEntry)

 	return nil
 }
-
-// shouldCache checks for any cache exceptions
-func (c *Cache) shouldCache(key string) bool {
-	// prefix match if nested under core/
-	if strings.HasPrefix(key, "core/") {
-		if prefix, val, found := c.exceptions.Root().LongestPrefix([]byte(key)); found {
-			strPrefix := string(prefix)
-			if strings.HasSuffix(strPrefix, "/") || strPrefix == key {
-				return val.(bool)
-			}
-		}
-		// default for core/ values is false
-		return false
-	}
-	// default is true
-	return true
-}
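A note on the interface assertions near the top of the diff: `ToggleablePurgemonster` is defined elsewhere in the `physical` package and its definition is not shown in this PR. Judging only from the methods the Cache exposes here (`Purge` and the new `SetEnabled`), its shape is presumably roughly the following — treat this as an assumption, not the PR's code:

```go
package physical

import "context"

// ToggleablePurgemonster — assumed shape only, inferred from the
// `var _ ToggleablePurgemonster = (*Cache)(nil)` assertion above; the real
// definition lives elsewhere in the package.
type ToggleablePurgemonster interface {
	// Purge clears any cached state (the pre-existing behavior).
	Purge(ctx context.Context)
	// SetEnabled turns the caching layer on or off at runtime.
	SetEnabled(bool)
}
```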
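The PR title replaces the old core/-path exception list with an active/standby restriction, but the caller-side wiring lives in Vault core and is outside this file. A hedged sketch of how such a caller might drive the new switch — `onStandby` and `onBecomeActive` are invented names for illustration only:

```go
package example

import (
	"context"

	"github.com/hashicorp/vault/physical"
)

// Illustrative only: how a consumer of the cache might toggle it around HA
// transitions; the actual core changes are not part of physical/cache.go.
func onStandby(cache *physical.Cache) {
	// While standby, every read should go to the real storage backend so this
	// node never serves an entry the active node has since changed.
	cache.SetEnabled(false)
}

func onBecomeActive(ctx context.Context, cache *physical.Cache) {
	// Drop anything cached while this node was standby, then start caching
	// again now that all writes flow through this node.
	cache.Purge(ctx)
	cache.SetEnabled(true)
}
```

Compared with the removed `shouldCache` prefix checks, this pushes the correctness decision to the HA state machine instead of hard-coding which storage prefixes are safe to cache.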