This repository has been archived by the owner on Apr 26, 2024. It is now read-only.

Optimise caches for single key #2185

Merged · 2 commits · May 5, 2017
4 changes: 2 additions & 2 deletions synapse/storage/roommember.py
@@ -147,7 +147,7 @@ def get_hosts_in_room(self, room_id, cache_context):
         hosts = frozenset(get_domain_from_id(user_id) for user_id in user_ids)
         defer.returnValue(hosts)
 
-    @cached(max_entries=500000, iterable=True)
+    @cached(max_entries=100000, iterable=True)
Contributor:
Is there a reason this number is going down at the same time as changing the cache structure?

Member Author:
Because that's how this PR started, and then I sort of got sidetracked.

     def get_users_in_room(self, room_id):
         def f(txn):
             sql = (
@@ -160,7 +160,7 @@ def f(txn):
             )
 
             txn.execute(sql, (room_id, Membership.JOIN,))
-            return [r[0] for r in txn]
+            return [to_ascii(r[0]) for r in txn]
         return self.runInteraction("get_users_in_room", f)
 
     @cached()
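Note on the to_ascii call added above: the helper comes from synapse/util/stringutils.py and exists to shrink large caches such as get_users_in_room, since the database driver hands back unicode strings even when the content is pure ASCII, and on Python 2 an ASCII byte string is noticeably smaller than the equivalent unicode object. A minimal sketch of such a helper, assuming Python 2 string semantics (illustrative only, not necessarily the exact implementation in the repository):

def to_ascii(s):
    """Return `s` as an ASCII `str` if it is pure ASCII, otherwise return it unchanged.

    On Python 2 an ASCII byte string is cheaper to store than the equivalent
    unicode object, which adds up across large caches.
    """
    try:
        return s.encode("ascii")
    except UnicodeError:
        # Non-ASCII content: keep the original string.
        return s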
42 changes: 33 additions & 9 deletions synapse/util/caches/descriptors.py
@@ -18,6 +18,7 @@
 from synapse.util import unwrapFirstError, logcontext
 from synapse.util.caches.lrucache import LruCache
 from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry
+from synapse.util.stringutils import to_ascii
 
 from . import register_cache
 
@@ -163,10 +164,6 @@ def prefill(self, key, value, callback=None):
 
     def invalidate(self, key):
         self.check_thread()
-        if not isinstance(key, tuple):
-            raise TypeError(
-                "The cache key must be a tuple not %r" % (type(key),)
-            )
 
         # Increment the sequence number so that any SELECT statements that
         # raced with the INSERT don't update the cache (SYN-369)
@@ -312,7 +309,7 @@ def __get__(self, obj, objtype=None):
             iterable=self.iterable,
         )
 
-        def get_cache_key(args, kwargs):
+        def get_cache_key_gen(args, kwargs):
            """Given some args/kwargs return a generator that resolves into
            the cache_key.
 
@@ -330,13 +327,29 @@ def get_cache_key(args, kwargs):
                 else:
                     yield self.arg_defaults[nm]
 
+        # By default our cache key is a tuple, but if there is only one item
+        # then don't bother wrapping in a tuple. This is to save memory.
+        if self.num_args == 1:
+            nm = self.arg_names[0]
+
+            def get_cache_key(args, kwargs):
+                if nm in kwargs:
+                    return kwargs[nm]
+                elif len(args):
+                    return args[0]
+                else:
+                    return self.arg_defaults[nm]
+        else:
+            def get_cache_key(args, kwargs):
+                return tuple(get_cache_key_gen(args, kwargs))
+
         @functools.wraps(self.orig)
         def wrapped(*args, **kwargs):
             # If we're passed a cache_context then we'll want to call its invalidate()
             # whenever we are invalidated
             invalidate_callback = kwargs.pop("on_invalidate", None)
 
-            cache_key = tuple(get_cache_key(args, kwargs))
+            cache_key = get_cache_key(args, kwargs)
 
             # Add our own `cache_context` to argument list if the wrapped function
             # has asked for one
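The net effect of the block above is that a function cached on a single argument is keyed on the bare value rather than a 1-tuple, saving one tuple allocation (several dozen bytes per entry on a 64-bit CPython) in large caches. A rough illustration of the two key shapes, written against a hypothetical make_key helper rather than the real descriptor machinery:

# Illustration only: mimics the shape of the keys produced by get_cache_key.
def make_key(num_args, args):
    if num_args == 1:
        # Single-argument caches use the bare value as the key.
        return args[0]
    # Multi-argument caches keep using a tuple.
    return tuple(args)

print(make_key(1, ("!room:example.org",)))
# '!room:example.org'
print(make_key(2, ("!room:example.org", "@alice:example.org")))
# ('!room:example.org', '@alice:example.org')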
@@ -363,6 +376,11 @@ def onErr(f):
 
                 ret.addErrback(onErr)
 
+                # If our cache_key is a string, try to convert to ascii to save
+                # a bit of space in large caches
+                if isinstance(cache_key, basestring):
+                    cache_key = to_ascii(cache_key)
+
                 result_d = ObservableDeferred(ret, consumeErrors=True)
                 cache.set(cache_key, result_d, callback=invalidate_callback)
                 observer = result_d.observe()
@@ -372,10 +390,16 @@ def onErr(f):
             else:
                 return observer
 
-        wrapped.invalidate = cache.invalidate
+        if self.num_args == 1:
+            wrapped.invalidate = lambda key: cache.invalidate(key[0])
+            wrapped.prefill = lambda key, val: cache.prefill(key[0], val)
+        else:
+            wrapped.invalidate = cache.invalidate
+            wrapped.invalidate_all = cache.invalidate_all
+            wrapped.invalidate_many = cache.invalidate_many
+            wrapped.prefill = cache.prefill
+
         wrapped.invalidate_all = cache.invalidate_all
-        wrapped.invalidate_many = cache.invalidate_many
-        wrapped.prefill = cache.prefill
         wrapped.cache = cache
 
         obj.__dict__[self.orig.__name__] = wrapped
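One subtlety worth calling out: call sites elsewhere in the codebase keep invalidating and prefilling these caches with 1-tuples (something like self.get_users_in_room.invalidate((room_id,))), so for single-argument caches the wrapper accepts the tuple form and unwraps it before touching the underlying cache, which now stores the bare key; this is also why the tuple-only check in Cache.invalidate is dropped above. A simplified sketch of that shim, using a plain dict in place of the real Cache/LruCache (FakeCache and attach_helpers are illustrative names, not the actual API):

class FakeCache(object):
    """Stand-in for synapse's Cache, just enough to show the key handling."""

    def __init__(self):
        self.data = {}

    def prefill(self, key, value):
        self.data[key] = value

    def invalidate(self, key):
        self.data.pop(key, None)


def attach_helpers(cache, num_args):
    """Mimics how the descriptor wires invalidate/prefill onto the wrapper."""
    if num_args == 1:
        # Callers still pass 1-tuples; unwrap to the bare key used internally.
        invalidate = lambda key: cache.invalidate(key[0])
        prefill = lambda key, val: cache.prefill(key[0], val)
    else:
        invalidate = cache.invalidate
        prefill = cache.prefill
    return invalidate, prefill


cache = FakeCache()
invalidate, prefill = attach_helpers(cache, num_args=1)

prefill(("!room:example.org",), ["@alice:example.org"])
assert "!room:example.org" in cache.data       # stored under the bare string
invalidate(("!room:example.org",))
assert "!room:example.org" not in cache.data   # tuple-style call still works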