Callback for the EOS UID cache to retry user fetch for failed keys on expiry #2266

Merged · 2 commits · Nov 17, 2021
Changes from 1 commit
Callback for the EOS UID cache to retry user fetch for failed keys on expiry
ishank011 committed Nov 11, 2021
commit 51512eb9c39274aea7e0a6e449aa4af186fcb322
3 changes: 3 additions & 0 deletions changelog/unreleased/eos-uid-cache-expire-cb.md
@@ -0,0 +1,3 @@
+Enhancement: Callback for the EOS UID cache to retry fetch for failed keys
+
+https://github.com/cs3org/reva/pull/2266
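The enhancement builds on the expiration-reason callback offered by github.com/ReneKroon/ttlcache/v2, which the diff below wires up. Here is a minimal standalone sketch of that mechanism, assuming the ttlcache v2 API as used in this PR; the key names and the retry print are illustrative only, not part of the change:

```go
package main

import (
	"fmt"
	"time"

	"github.com/ReneKroon/ttlcache/v2"
)

func main() {
	cache := ttlcache.NewCache()
	defer cache.Close()

	cache.SetCacheSizeLimit(100)

	// The reason argument distinguishes TTL expiry (ttlcache.Expired) from
	// size-based eviction (ttlcache.EvictedSize) and explicit removal
	// (ttlcache.Removed).
	cache.SetExpirationReasonCallback(func(key string, reason ttlcache.EvictionReason, value interface{}) {
		if reason == ttlcache.Expired {
			fmt.Printf("key %q expired, retrying the fetch\n", key)
		}
	})

	// Mirror the PR's pattern: only a failed lookup is stored with a TTL,
	// a successful one is cached without an expiry.
	_ = cache.SetWithTTL("unresolved-uid", struct{}{}, time.Second)
	_ = cache.Set("resolved-uid", struct{}{})

	time.Sleep(2 * time.Second) // give the expiry loop time to fire the callback
}
```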
20 changes: 14 additions & 6 deletions pkg/storage/utils/eosfs/eosfs.go
@@ -31,6 +31,7 @@ import (
"strings"
"time"

"github.com/ReneKroon/ttlcache/v2"
"github.com/bluele/gcache"
grouppb "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1"
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
@@ -140,7 +141,7 @@ type eosfs struct {
 	conf           *Config
 	chunkHandler   *chunking.ChunkHandler
 	singleUserAuth eosclient.Authorization
-	userIDCache    gcache.Cache
+	userIDCache    *ttlcache.Cache
 	tokenCache     gcache.Cache
 }

@@ -205,10 +206,17 @@ func NewEOSFS(c *Config) (storage.FS, error) {
 		c:            eosClient,
 		conf:         c,
 		chunkHandler: chunking.NewChunkHandler(c.CacheDirectory),
-		userIDCache:  gcache.New(c.UserIDCacheSize).LFU().Build(),
+		userIDCache:  ttlcache.NewCache(),
 		tokenCache:   gcache.New(c.UserIDCacheSize).LFU().Build(),
 	}

+	eosfs.userIDCache.SetCacheSizeLimit(c.UserIDCacheSize)
+	eosfs.userIDCache.SetExpirationReasonCallback(func(key string, reason ttlcache.EvictionReason, value interface{}) {
+		// Only the keys we failed to resolve last time are set with a TTL;
+		// when they expire, contact the userprovider service again to retry the lookup.
+		_, _ = eosfs.getUserIDGateway(context.Background(), key)
+	})
+
 	go eosfs.userIDcacheWarmup()

 	return eosfs, nil
@@ -1811,11 +1819,11 @@ func (fs *eosfs) getUIDGateway(ctx context.Context, u *userpb.UserId) (eosclient
 		UserId: u,
 	})
 	if err != nil {
-		_ = fs.userIDCache.SetWithExpire(u.OpaqueId, &userpb.User{}, 12*time.Hour)
+		_ = fs.userIDCache.SetWithTTL(u.OpaqueId, &userpb.User{}, 12*time.Hour)
 		return eosclient.Authorization{}, errors.Wrap(err, "eosfs: error getting user")
 	}
 	if getUserResp.Status.Code != rpc.Code_CODE_OK {
-		_ = fs.userIDCache.SetWithExpire(u.OpaqueId, &userpb.User{}, 12*time.Hour)
+		_ = fs.userIDCache.SetWithTTL(u.OpaqueId, &userpb.User{}, 12*time.Hour)
 		return eosclient.Authorization{}, status.NewErrorFromCode(getUserResp.Status.Code, "eosfs")
 	}

@@ -1847,13 +1855,13 @@ func (fs *eosfs) getUserIDGateway(ctx context.Context, uid string) (*userpb.User
 	if err != nil {
 		// Insert an empty object in the cache so that we don't make another call
 		// for a specific amount of time
-		_ = fs.userIDCache.SetWithExpire(uid, &userpb.UserId{}, 12*time.Hour)
+		_ = fs.userIDCache.SetWithTTL(uid, &userpb.UserId{}, 12*time.Hour)
 		return nil, errors.Wrap(err, "eosfs: error getting user")
 	}
 	if getUserResp.Status.Code != rpc.Code_CODE_OK {
 		// Insert an empty object in the cache so that we don't make another call
 		// for a specific amount of time
-		_ = fs.userIDCache.SetWithExpire(uid, &userpb.UserId{}, 12*time.Hour)
+		_ = fs.userIDCache.SetWithTTL(uid, &userpb.UserId{}, 12*time.Hour)
 		return nil, status.NewErrorFromCode(getUserResp.Status.Code, "eosfs")
 	}
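
Design note: failed lookups are negatively cached for 12 hours so the userprovider service is not hit repeatedly for ids that could not be resolved. With the previous gcache-based cache those negative entries simply vanished on expiry, so the next request paid the full miss; with the ttlcache expiration callback, expiry now triggers a background re-fetch that repopulates the cache proactively. One caveat, if I read ttlcache v2's semantics correctly: the same callback also fires for size-based evictions (reason ttlcache.EvictedSize), so under cache pressure the retry can fire for entries pushed out by SetCacheSizeLimit as well. A variant restricting the retry to genuine TTL expirations (a sketch, not part of this PR) could look like:

```go
eosfs.userIDCache.SetExpirationReasonCallback(func(key string, reason ttlcache.EvictionReason, value interface{}) {
	// Retry only when the negative entry's TTL ran out, not when the entry
	// was evicted by the size limit or removed explicitly.
	if reason == ttlcache.Expired {
		_, _ = eosfs.getUserIDGateway(context.Background(), key)
	}
})
```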