diff --git a/CHANGELOG.md b/CHANGELOG.md
index dcb518b3a..1b67c3ded 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@
   frame's instruction address needs to be adjusted for symbolication. ([#948](https://github.com/getsentry/symbolicator/pull/948))
 - Added offline mode and caching to `symbolicli`. ([#967](https://github.com/getsentry/symbolicator/pull/967), [#968](https://github.com/getsentry/symbolicator/pull/968))
 - Support PortablePDB embedded sources. ([#996](https://github.com/getsentry/symbolicator/pull/996))
+- Use `moka` as an in-memory `Cacher` implementation. ([#979](https://github.com/getsentry/symbolicator/pull/979))
 
 ### Internal
 
diff --git a/crates/symbolicator-service/src/services/cache_key.rs b/crates/symbolicator-service/src/services/cache_key.rs
index e1872e2a1..10158a1d2 100644
--- a/crates/symbolicator-service/src/services/cache_key.rs
+++ b/crates/symbolicator-service/src/services/cache_key.rs
@@ -5,7 +5,7 @@ use symbolicator_sources::RemoteFile;
 
 use crate::types::Scope;
 
-#[derive(Debug, Clone, Eq, Ord, PartialEq, PartialOrd)]
+#[derive(Debug, Clone, Eq, Ord, PartialEq, PartialOrd, Hash)]
 pub struct CacheKey {
     pub cache_key: String,
 }
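The `Hash` derive above is what lets `CacheKey` serve as the key type of the new in-memory cache introduced in the next file: `moka::future::Cache` requires its keys to be `Hash + Eq` (plus `Send + Sync + 'static`). A minimal sketch of that usage, with an illustrative key type and values rather than the PR's actual code:

```rust
use std::time::Duration;

// Illustrative key type mirroring `CacheKey`: moka needs `Hash + Eq`,
// which is exactly what the added derive provides.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct Key(String);

#[tokio::main]
async fn main() {
    let cache: moka::future::Cache<Key, String> = moka::future::Cache::builder()
        .max_capacity(1024)
        .time_to_live(Duration::from_secs(60))
        .build();

    // `get_with` either returns the cached value or runs the init future;
    // concurrent calls for the same key are coalesced into a single run.
    let value = cache
        .get_with(Key("scope/abc123".into()), async { "computed".to_string() })
        .await;
    assert_eq!(value, "computed");
}
```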
diff --git a/crates/symbolicator-service/src/services/cacher.rs b/crates/symbolicator-service/src/services/cacher.rs
index dd6a96e9a..063c13784 100644
--- a/crates/symbolicator-service/src/services/cacher.rs
+++ b/crates/symbolicator-service/src/services/cacher.rs
@@ -1,12 +1,10 @@
-use std::collections::BTreeMap;
-use std::io::{self, Error, ErrorKind};
-use std::path::PathBuf;
+use std::collections::HashSet;
+use std::path::{Path, PathBuf};
 use std::sync::atomic::Ordering;
 use std::sync::Arc;
+use std::time::Duration;
 
-use futures::channel::oneshot;
-use futures::future::{BoxFuture, Shared};
-use futures::{FutureExt, TryFutureExt};
+use futures::future::BoxFuture;
 use parking_lot::Mutex;
 use sentry::{Hub, SentryFutureExt};
 use symbolic::common::ByteView;
@@ -18,8 +16,7 @@ use crate::cache::{Cache, CacheEntry, CacheError, ExpirationTime};
 use crate::services::shared_cache::{CacheStoreReason, SharedCacheKey, SharedCacheRef};
 use crate::utils::futures::CallOnDrop;
 
-type ComputationChannel<T> = Shared<oneshot::Receiver<CacheEntry<T>>>;
-type ComputationMap<T> = Arc<Mutex<BTreeMap<CacheKey, ComputationChannel<T>>>>;
+type InMemoryCache<T> = moka::future::Cache<CacheKey, CacheEntry<T>>;
 
 /// Manages a filesystem cache of any kind of data that can be serialized into bytes and read from
 /// it:
@@ -36,19 +33,25 @@ type ComputationMap<T> = Arc<Mutex<BTreeMap<CacheKey, ComputationChannel<T>>>>;
 pub struct Cacher<T: CacheItemRequest> {
     config: Cache,
 
-    /// Used for deduplicating cache lookups.
-    current_computations: ComputationMap<T::Item>,
+    /// An in-memory cache for items, which also coalesces concurrent requests for the same item.
+    cache: InMemoryCache<T::Item>,
+
+    /// A [`HashSet`] of currently running cache refreshes.
+    refreshes: Arc<Mutex<HashSet<CacheKey>>>,
 
     /// A service used to communicate with the shared cache.
     shared_cache: SharedCacheRef,
 }
 
+// FIXME(swatinem): This is currently ~216 bytes that we copy around when spawning computations.
+// The different cache actors already have this behind an `Arc`; maybe we should move that `Arc` inside.
 impl<T: CacheItemRequest> Clone for Cacher<T> {
     fn clone(&self) -> Self {
         // https://github.com/rust-lang/rust/issues/26925
         Cacher {
             config: self.config.clone(),
-            current_computations: self.current_computations.clone(),
+            cache: self.cache.clone(),
+            refreshes: Arc::clone(&self.refreshes),
             shared_cache: Arc::clone(&self.shared_cache),
         }
     }
@@ -56,10 +59,18 @@ impl<T: CacheItemRequest> Clone for Cacher<T> {
 
 impl<T: CacheItemRequest> Cacher<T> {
     pub fn new(config: Cache, shared_cache: SharedCacheRef) -> Self {
+        // TODO: eventually hook up configuration to this
+        // A max capacity of 0 and a TTL of 0 make sure we are not (yet) reusing the in-memory
+        // cache for anything except request coalescing.
+        let cache = InMemoryCache::builder()
+            .max_capacity(0)
+            .time_to_live(Duration::ZERO)
+            .build();
         Cacher {
             config,
+            cache,
+            refreshes: Default::default(),
             shared_cache,
-            current_computations: Arc::new(Mutex::new(BTreeMap::new())),
         }
     }
@@ -120,12 +131,10 @@ impl<T: CacheItemRequest> Cacher<T> {
     fn lookup_local_cache(
         &self,
         request: &T,
+        cache_dir: &Path,
         key: &CacheKey,
         version: u32,
     ) -> CacheEntry<CacheEntry<T::Item>> {
-        let Some(cache_dir) = self.config.cache_dir() else {
-            return Err(CacheError::NotFound);
-        };
         let name = self.config.name();
         let item_path = key.cache_path(cache_dir, version);
         tracing::trace!("Trying {} cache at path {}", name, item_path.display());
@@ -191,15 +200,7 @@ impl<T: CacheItemRequest> Cacher<T> {
     ///
     /// This method does not take care of ensuring the computation only happens once even
     /// for concurrent requests, see the public [`Cacher::compute_memoized`] for this.
-    async fn compute(self, request: T, key: CacheKey, is_refresh: bool) -> CacheEntry<T::Item> {
-        // We do another cache lookup here. `compute_memoized` has a fast-path that does a cache
-        // lookup without going through the deduplication/channel creation logic. This creates a
-        // small opportunity of invoking compute another time after a fresh cache has just been
-        // computed. To avoid duplicated work in that case, we will check the cache here again.
-        if let Ok(item) = self.lookup_local_cache(&request, &key, T::VERSIONS.current) {
-            return item;
-        }
-
+    async fn compute(&self, request: T, key: &CacheKey, is_refresh: bool) -> CacheEntry<T::Item> {
         let mut temp_file = self.tempfile()?;
         let shared_cache_key = SharedCacheKey {
             name: self.config.name(),
@@ -303,122 +304,6 @@ impl<T: CacheItemRequest> Cacher<T> {
         // sentry::capture_error(&*err);
     }
 
-    /// Creates a shareable channel that computes an item.
-    ///
-    /// In case the `is_refresh` flag is set, the computation request will count towards the configured
-    /// `max_lazy_refreshes`, and will return immediately with an error if the threshold was reached.
-    fn create_channel<F>(
-        &self,
-        key: CacheKey,
-        computation: F,
-        is_refresh: bool,
-    ) -> ComputationChannel<T::Item>
-    where
-        F: std::future::Future<Output = CacheEntry<T::Item>> + Send + 'static,
-    {
-        let (sender, receiver) = oneshot::channel();
-
-        let max_lazy_refreshes = self.config.max_lazy_refreshes();
-        let current_computations = self.current_computations.clone();
-        let remove_computation_token = CallOnDrop::new(move || {
-            if is_refresh {
-                max_lazy_refreshes.fetch_add(1, Ordering::Relaxed);
-            }
-            current_computations.lock().remove(&key);
-        });
-
-        // Run the computation and wrap the result in Arcs to make them clonable.
-        let channel = async move {
-            // only start an independent transaction if this is a "background" task,
-            // otherwise it will not "outlive" its parent span, so attach it to the parent transaction.
-            let transaction = if is_refresh {
-                let span = sentry::configure_scope(|scope| scope.get_span());
-                let ctx = sentry::TransactionContext::continue_from_span(
-                    "Lazy Cache Computation",
-                    "spawn_computation",
-                    span,
-                );
-                let transaction = sentry::start_transaction(ctx);
-                sentry::configure_scope(|scope| scope.set_span(Some(transaction.clone().into())));
-                Some(transaction)
-            } else {
-                None
-            };
-            let result = computation.await;
-            // Drop the token first to evict from the map. This ensures that callers either
-            // get a channel that will receive data, or they create a new channel.
-            drop(remove_computation_token);
-            if let Some(transaction) = transaction {
-                transaction.finish();
-            }
-            sender.send(result).ok();
-        }
-        .bind_hub(Hub::new_from_top(Hub::current()));
-
-        // These computations are spawned on the current runtime, which in all cases is the CPU-pool.
-        tokio::spawn(channel);
-
-        receiver.shared()
-    }
-
-    /// Spawns the computation as a separate task.
-    ///
-    /// This does deduplication, by keeping track of the running computations based on their [`CacheKey`].
-    ///
-    /// NOTE: This function itself is *not* `async`, because it should eagerly spawn the computation
-    /// on an executor, even if you don't explicitly `await` its results.
-    fn spawn_computation(
-        &self,
-        request: T,
-        cache_key: CacheKey,
-        is_refresh: bool,
-    ) -> BoxFuture<'static, CacheEntry<T::Item>> {
-        let name = self.config.name();
-
-        let channel = {
-            let mut current_computations = self.current_computations.lock();
-            if let Some(channel) = current_computations.get(&cache_key) {
-                // A concurrent cache lookup was deduplicated.
-                metric!(counter(&format!("caches.{name}.channel.hit")) += 1);
-                channel.clone()
-            } else {
-                // A concurrent cache lookup is considered new. This does not imply a cache miss.
-                metric!(counter(&format!("caches.{name}.channel.miss")) += 1);
-
-                // We count down towards zero, and if we reach or surpass it, we will short circuit here.
-                // Doing the short-circuiting here means we don't create a channel at all, and don't
-                // put it into `current_computations`.
-                let max_lazy_refreshes = self.config.max_lazy_refreshes();
-                if is_refresh && max_lazy_refreshes.fetch_sub(1, Ordering::Relaxed) <= 0 {
-                    max_lazy_refreshes.fetch_add(1, Ordering::Relaxed);
-
-                    metric!(counter("caches.lazy_limit_hit") += 1, "cache" => name.as_ref());
-                    // This error is purely here to satisfy the return type, it should not show
-                    // up anywhere, as lazy computation will not unwrap the error.
-                    let result = Err(Error::new(
-                        ErrorKind::Other,
-                        "maximum number of lazy recomputations reached; aborting cache computation",
-                    )
-                    .into());
-                    return Box::pin(async { result });
-                }
-
-                let computation = self.clone().compute(request, cache_key.clone(), is_refresh);
-                let channel = self.create_channel(cache_key.clone(), computation, is_refresh);
-                let evicted = current_computations.insert(cache_key, channel.clone());
-                debug_assert!(evicted.is_none());
-                channel
-            }
-        };
-
-        let future = channel.unwrap_or_else(move |_cancelled_error| {
-            let message = format!("{name} computation channel dropped");
-            Err(std::io::Error::new(std::io::ErrorKind::Interrupted, message).into())
-        });
-
-        Box::pin(future)
-    }
-
     /// Computes an item by loading from or populating the cache.
     ///
     /// The actual computation is deduplicated between concurrent requests. Finally, the result is
@@ -434,52 +319,131 @@ impl<T: CacheItemRequest> Cacher<T> {
     pub async fn compute_memoized(&self, request: T, cache_key: CacheKey) -> CacheEntry<T::Item> {
         let name = self.config.name();
 
-        // cache_path is None when caching is disabled.
-        if let Some(cache_dir) = self.config.cache_dir() {
-            let versions =
-                std::iter::once(T::VERSIONS.current).chain(T::VERSIONS.fallbacks.iter().copied());
-
-            for version in versions {
-                let item = match self.lookup_local_cache(&request, &cache_key, version) {
-                    Err(CacheError::NotFound) => continue,
-                    Err(err) => return Err(err),
-                    Ok(item) => item,
-                };
-
-                if version != T::VERSIONS.current {
-                    // we have found an outdated cache that we will use right away,
-                    // and we will kick off a recomputation for the `current` cache version
-                    // in a deduplicated background task, which we will not await
-                    tracing::trace!(
-                        "Spawning deduplicated {} computation for path {:?}",
-                        name,
-                        cache_key
-                            .cache_path(cache_dir, T::VERSIONS.current)
-                            .display()
-                    );
-                    metric!(
-                        counter(&format!("caches.{name}.file.fallback")) += 1,
-                        "version" => &version.to_string(),
-                    );
-                    let _not_awaiting_future = self.spawn_computation(request, cache_key, true);
+        // We log quite a few metrics directly:
+        // - caches.X.access
+        // - caches.X.file.hit (emitted inside `lookup_local_cache`)
+        // - caches.X.file.miss
+        // - caches.X.file.write (emitted inside `compute`)
+        // - caches.X.file.fallback
+        // - services.shared_cache.fetch (tagged with `hit`)
+        // Plus some others:
+        // - caches.X.file.size
+        // - services.shared_cache.store
+        // - services.shared_cache.store.bytes
+        // From these we can infer other metrics:
+        // - computations (aka in-memory misses):
+        //   `file.hit + file.miss`, which should be the same as `file.write` and `shared_cache.fetch`,
+        //   depending on errors and shared cache usage.
+        // - ^ Well actually, eager `computations` should be `file.hit + file.miss - file.fallback`,
+        //   as a cache fallback also triggers a background computation.
+        // - in-memory hits: access - computations
+        // FIXME: is it better to use a different metrics key or tags for the cache name?
+        metric!(counter(&format!("caches.{name}.access")) += 1);
+
+        let compute = Box::pin(async {
+            // cache_path is None when caching is disabled.
+            if let Some(cache_dir) = self.config.cache_dir() {
+                let versions = std::iter::once(T::VERSIONS.current)
+                    .chain(T::VERSIONS.fallbacks.iter().copied());
+
+                for version in versions {
+                    let item =
+                        match self.lookup_local_cache(&request, cache_dir, &cache_key, version) {
+                            Err(CacheError::NotFound) => continue,
+                            Err(err) => return Err(err),
+                            Ok(item) => item,
+                        };
+
+                    if version != T::VERSIONS.current {
+                        // we have found an outdated cache that we will use right away,
+                        // and we will kick off a recomputation for the `current` cache version
+                        // in a deduplicated background task, which we will not await
+                        metric!(
+                            counter(&format!("caches.{name}.file.fallback")) += 1,
+                            "version" => &version.to_string(),
+                        );
+                        self.spawn_refresh(cache_key.clone(), request);
+                    }
+
+                    return item;
                 }
-
-                return item;
             }
+
+            // A file was not found. If this spikes, it's possible that the filesystem cache
+            // just got pruned.
+            metric!(counter(&format!("caches.{name}.file.miss")) += 1);
+
+            self.compute(request, &cache_key, false).await
+        });
+
+        self.cache.get_with_by_ref(&cache_key, compute).await
+    }
+
+    fn spawn_refresh(&self, cache_key: CacheKey, request: T) {
+        let Some(cache_dir) = self.config.cache_dir() else { return };
+        let name = self.config.name();
+
+        let mut refreshes = self.refreshes.lock();
+        if refreshes.contains(&cache_key) {
+            return;
         }
-        // A file was not found. If this spikes, it's possible that the filesystem cache
-        // just got pruned.
-        metric!(counter(&format!("caches.{name}.file.miss")) += 1);
 
+        // We count down towards zero, and if we reach or surpass it, we will stop here.
+        let max_lazy_refreshes = self.config.max_lazy_refreshes();
+        if max_lazy_refreshes.fetch_sub(1, Ordering::Relaxed) <= 0 {
+            max_lazy_refreshes.fetch_add(1, Ordering::Relaxed);
+
+            metric!(counter("caches.lazy_limit_hit") += 1, "cache" => name.as_ref());
+            return;
+        }
+
+        let done_token = {
+            let key = cache_key.clone();
+            let refreshes = Arc::clone(&self.refreshes);
+            CallOnDrop::new(move || {
+                max_lazy_refreshes.fetch_add(1, Ordering::Relaxed);
+                refreshes.lock().remove(&key);
+            })
+        };
+        refreshes.insert(cache_key.clone());
+        drop(refreshes);
+
+        tracing::trace!(
+            "Spawning deduplicated {} computation for path {:?}",
+            name,
+            cache_key
+                .cache_path(cache_dir, T::VERSIONS.current)
+                .display()
+        );
+
+        let this = self.clone();
+        let task = async move {
+            let _done_token = done_token; // move into the future
+
+            let span = sentry::configure_scope(|scope| scope.get_span());
+            let ctx = sentry::TransactionContext::continue_from_span(
+                "Lazy Cache Computation",
+                "spawn_computation",
+                span,
+            );
+            let transaction = sentry::start_transaction(ctx);
+            sentry::configure_scope(|scope| scope.set_span(Some(transaction.clone().into())));
+
+            let result = this.compute(request, &cache_key, true).await;
 
-        self.spawn_computation(request, cache_key, false).await
+            // refresh the memory cache with the newly refreshed result
+            this.cache.insert(cache_key, result).await;
+
+            transaction.finish();
+        };
+        tokio::spawn(task.bind_hub(Hub::new_from_top(Hub::current())));
     }
 }
 
 async fn persist_tempfile(
     mut temp_file: NamedTempFile,
     cache_path: PathBuf,
-) -> io::Result<std::fs::File> {
+) -> std::io::Result<std::fs::File> {
     let parent = cache_path.parent().ok_or_else(|| {
         std::io::Error::new(
             std::io::ErrorKind::Other,
@@ -576,6 +540,30 @@ mod tests {
         }
     }
 
+    /// Tests that the size of the `compute_memoized` future does not grow out of bounds.
+    /// See for one of the main issues here.
+    /// The size assertion will naturally change with compiler, dependency and code changes.
+    #[tokio::test]
+    async fn future_size() {
+        test::setup();
+
+        let cache = Cache::from_config(
+            CacheName::Objects,
+            None,
+            None,
+            CacheConfig::from(CacheConfigs::default().derived),
+            Arc::new(AtomicIsize::new(1)),
+        )
+        .unwrap();
+        let cacher = Cacher::new(cache, Default::default());
+        let key = CacheKey {
+            cache_key: "foo".into(),
+        };
+        let fut = cacher.compute_memoized(TestCacheItem::new(), key);
+        let size = dbg!(std::mem::size_of_val(&fut));
+        assert!(size > 750 && size < 800);
+    }
+
     /// This test asserts that the cache is served from outdated cache files, and that a computation
     /// is being kicked off (and deduplicated) in the background
     #[tokio::test]
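A note on the builder configuration in `Cacher::new` above: with `max_capacity(0)` and a zero time-to-live, the cache is not meant to retain finished results; it exists purely for the request coalescing that `moka` performs on in-flight lookups of the same key. A sketch of that coalescing, assuming a moka version that accepts this zero-sized configuration as the PR's builder call implies (names and values are illustrative):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;

#[tokio::main]
async fn main() {
    static COMPUTATIONS: AtomicUsize = AtomicUsize::new(0);

    // Mirrors the `Cacher::new` configuration: nothing is meant to be
    // retained; the cache is only used to coalesce concurrent lookups.
    let cache: moka::future::Cache<u32, u32> = moka::future::Cache::builder()
        .max_capacity(0)
        .time_to_live(Duration::ZERO)
        .build();

    let compute = || async {
        COMPUTATIONS.fetch_add(1, Ordering::Relaxed);
        42
    };

    // Two concurrent lookups for the same key: only one init future runs;
    // the other call waits for and shares its result.
    let (a, b) = tokio::join!(cache.get_with(1, compute()), cache.get_with(1, compute()));
    assert_eq!((a, b), (42, 42));
    assert_eq!(COMPUTATIONS.load(Ordering::Relaxed), 1);
}
```

Note that eviction of finished entries is not necessarily synchronous, which is presumably why the object-cache tests further down create fresh actor instances instead of relying on an immediate in-memory miss.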
diff --git a/crates/symbolicator-service/src/services/download/sentry.rs b/crates/symbolicator-service/src/services/download/sentry.rs
index 2af0c53ac..cdae5581a 100644
--- a/crates/symbolicator-service/src/services/download/sentry.rs
+++ b/crates/symbolicator-service/src/services/download/sentry.rs
@@ -98,7 +98,7 @@ impl SentryDownloader {
     /// If there are cached search results this skips the actual search.
     async fn cached_sentry_search(&self, query: SearchQuery) -> CacheEntry<Vec<SearchResult>> {
         let query_ = query.clone();
-        let init_future = async {
+        let init_future = Box::pin(async {
             tracing::debug!(
                 "Fetching list of Sentry debug files from {}",
                 &query_.index_url
             );
@@ -112,7 +112,7 @@ impl SentryDownloader {
             CancelOnDrop::new(self.runtime.spawn(future.bind_hub(sentry::Hub::current())));
 
             future.await.map_err(|_| CacheError::InternalError)?
-        };
+        });
 
         self.index_cache
             .get_with_if(query, init_future, |entry| entry.is_err())
             .await
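The `get_with_if` call above keeps the request coalescing but refuses to serve a cached error: the third argument tells `moka` to re-run the init future whenever the predicate matches the stored value. A sketch of those semantics, with illustrative types and values rather than the actual `SearchQuery`/`SearchResult` code:

```rust
use std::time::Duration;

// A cached entry that may be an error, like the Sentry index lookups above.
type Entry = Result<Vec<String>, String>;

#[tokio::main]
async fn main() {
    let index_cache: moka::future::Cache<String, Entry> = moka::future::Cache::builder()
        .max_capacity(100)
        .time_to_live(Duration::from_secs(60))
        .build();

    let query = "debug-files?id=abc".to_string();

    // First call stores an error (e.g. the upstream request failed).
    let first = index_cache
        .get_with_if(query.clone(), async { Err("timeout".to_string()) }, |e| e.is_err())
        .await;
    assert!(first.is_err());

    // Because `replace_if` returns true for errors, the next lookup re-runs
    // the init future instead of serving the cached `Err`.
    let second = index_cache
        .get_with_if(query, async { Ok(vec!["file1".to_string()]) }, |e| e.is_err())
        .await;
    assert!(second.is_ok());
}
```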
diff --git a/crates/symbolicator-service/src/services/objects/data_cache.rs b/crates/symbolicator-service/src/services/objects/data_cache.rs
index fc9c9a61c..423313aa6 100644
--- a/crates/symbolicator-service/src/services/objects/data_cache.rs
+++ b/crates/symbolicator-service/src/services/objects/data_cache.rs
@@ -264,7 +264,7 @@ mod tests {
     use symbolic::common::DebugId;
     use tempfile::TempDir;
 
-    async fn objects_actor(tempdir: &TempDir) -> ObjectsActor {
+    async fn make_objects_actor(tempdir: &TempDir) -> ObjectsActor {
         let meta_cache = Cache::from_config(
             CacheName::ObjectMeta,
             Some(tempdir.path().join("meta")),
@@ -299,7 +299,7 @@ mod tests {
         let hitcounter = test::Server::new();
         let cachedir = tempdir();
-        let objects_actor = objects_actor(&cachedir).await;
+        let objects_actor = make_objects_actor(&cachedir).await;
 
         let find_object = FindObject {
             filetypes: &[FileType::MachCode],
@@ -331,6 +331,9 @@ mod tests {
             CacheError::DownloadError("500 Internal Server Error".into())
         );
         assert_eq!(hitcounter.accesses(), 3); // up to 3 tries on failure
+
+        // NOTE: creating a fresh instance to avoid in-memory cache
+        let objects_actor = make_objects_actor(&cachedir).await;
         let result = objects_actor
             .find(find_object.clone())
             .await
@@ -351,7 +354,7 @@ mod tests {
         let hitcounter = test::Server::new();
         let cachedir = tempdir();
-        let objects_actor = objects_actor(&cachedir).await;
+        let objects_actor = make_objects_actor(&cachedir).await;
 
         let find_object = FindObject {
             filetypes: &[FileType::MachCode],
@@ -380,6 +383,9 @@ mod tests {
             .unwrap_err();
         assert_eq!(result, CacheError::NotFound);
         assert_eq!(hitcounter.accesses(), 1);
+
+        // NOTE: creating a fresh instance to avoid in-memory cache
+        let objects_actor = make_objects_actor(&cachedir).await;
         let result = objects_actor
             .find(find_object.clone())
             .await
@@ -397,7 +403,7 @@ mod tests {
         let hitcounter = test::Server::new();
         let cachedir = tempdir();
-        let objects_actor = objects_actor(&cachedir).await;
+        let objects_actor = make_objects_actor(&cachedir).await;
 
         let find_object = FindObject {
             filetypes: &[FileType::MachCode],
@@ -428,6 +434,9 @@ mod tests {
         assert_eq!(result, err);
         // XXX: why are we not trying this 3 times?
         assert_eq!(hitcounter.accesses(), 1);
+
+        // NOTE: creating a fresh instance to avoid in-memory cache
+        let objects_actor = make_objects_actor(&cachedir).await;
         let result = objects_actor
             .find(find_object.clone())
             .await
@@ -445,7 +454,7 @@ mod tests {
         let hitcounter = test::Server::new();
         let cachedir = tempdir();
-        let objects_actor = objects_actor(&cachedir).await;
+        let objects_actor = make_objects_actor(&cachedir).await;
 
         let find_object = FindObject {
             filetypes: &[FileType::MachCode],
@@ -475,6 +484,9 @@ mod tests {
             .unwrap_err();
         assert_eq!(result, err);
         assert_eq!(hitcounter.accesses(), 1);
+
+        // NOTE: creating a fresh instance to avoid in-memory cache
+        let objects_actor = make_objects_actor(&cachedir).await;
         let result = objects_actor
             .find(find_object.clone())
             .await
diff --git a/crates/symbolicator-service/src/services/symbolication/process_minidump.rs b/crates/symbolicator-service/src/services/symbolication/process_minidump.rs
index 5cb467aa5..4b5a80597 100644
--- a/crates/symbolicator-service/src/services/symbolication/process_minidump.rs
+++ b/crates/symbolicator-service/src/services/symbolication/process_minidump.rs
@@ -173,36 +173,35 @@ impl SymbolicatorSymbolProvider {
     /// Fetches CFI for the given module, parses it into a `SymbolFile`, and stores it internally.
     async fn load_cfi_module(&self, module: &(dyn Module + Sync)) -> FetchedCfiCache {
         let key = LookupKey::new(module);
-        self.cficaches
-            .get_with_by_ref(&key, async {
-                let sources = self.sources.clone();
-                let scope = self.scope.clone();
-
-                let identifier = ObjectId {
-                    code_id: key.code_id.clone(),
-                    code_file: Some(module.code_file().into_owned()),
-                    debug_id: key.debug_id,
-                    debug_file: module
-                        .debug_file()
-                        .map(|debug_file| debug_file.into_owned()),
+        let load = Box::pin(async {
+            let sources = self.sources.clone();
+            let scope = self.scope.clone();
+
+            let identifier = ObjectId {
+                code_id: key.code_id.clone(),
+                code_file: Some(module.code_file().into_owned()),
+                debug_id: key.debug_id,
+                debug_file: module
+                    .debug_file()
+                    .map(|debug_file| debug_file.into_owned()),
+                object_type: self.object_type,
+            };
+
+            self.cficache_actor
+                .fetch(FetchCfiCache {
                     object_type: self.object_type,
-                };
-
-                self.cficache_actor
-                    .fetch(FetchCfiCache {
-                        object_type: self.object_type,
-                        identifier,
-                        sources,
-                        scope,
-                    })
-                    // NOTE: this `bind_hub` is important!
-                    // `load_cfi_module` is being called concurrently from `rust-minidump` via
-                    // `join_all`. We do need proper isolation of any async task that might
-                    // manipulate any Sentry scope.
-                    .bind_hub(Hub::new_from_top(Hub::current()))
-                    .await
-            })
-            .await
+                    identifier,
+                    sources,
+                    scope,
+                })
+                // NOTE: this `bind_hub` is important!
+                // `load_cfi_module` is being called concurrently from `rust-minidump` via
+                // `join_all`. We do need proper isolation of any async task that might
+                // manipulate any Sentry scope.
+                .bind_hub(Hub::new_from_top(Hub::current()))
+                .await
+        });
+        self.cficaches.get_with_by_ref(&key, load).await
     }
 }
@@ -518,3 +517,74 @@ fn normalize_minidump_os_name(os: Os) -> &'static str {
         Os::Unknown(_) => "",
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use crate::services::create_service;
+    use crate::services::objects::{FindObject, ObjectPurpose};
+    use crate::services::ppdb_caches::FetchPortablePdbCache;
+    use crate::services::symcaches::FetchSymCache;
+
+    use super::*;
+
+    /// Tests that the size of the `compute_memoized` future does not grow out of bounds.
+    /// See for one of the main issues here.
+    /// The size assertion will naturally change with compiler, dependency and code changes.
+    #[tokio::test]
+    async fn future_size() {
+        let (sym, obj) =
+            create_service(&Default::default(), tokio::runtime::Handle::current()).unwrap();
+
+        let provider = SymbolicatorSymbolProvider::new(
+            Scope::Global,
+            Arc::from_iter([]),
+            sym.cficaches.clone(),
+            Default::default(),
+        );
+
+        let module = ("foo", DebugId::nil());
+        let fut = provider.load_cfi_module(&module);
+        let size = dbg!(std::mem::size_of_val(&fut));
+        assert!(size > 850 && size < 900);
+
+        let req = FindObject {
+            filetypes: &[],
+            purpose: ObjectPurpose::Debug,
+            identifier: Default::default(),
+            sources: Arc::from_iter([]),
+            scope: Scope::Global,
+        };
+        let fut = obj.find(req);
+        let size = dbg!(std::mem::size_of_val(&fut));
+        assert!(size > 4800 && size < 4900);
+
+        let req = FetchCfiCache {
+            object_type: Default::default(),
+            identifier: Default::default(),
+            sources: Arc::from_iter([]),
+            scope: Scope::Global,
+        };
+        let fut = sym.cficaches.fetch(req);
+        let size = dbg!(std::mem::size_of_val(&fut));
+        assert!(size > 5200 && size < 5300);
+
+        let req = FetchPortablePdbCache {
+            identifier: Default::default(),
+            sources: Arc::from_iter([]),
+            scope: Scope::Global,
+        };
+        let fut = sym.ppdb_caches.fetch(req);
+        let size = dbg!(std::mem::size_of_val(&fut));
+        assert!(size > 5200 && size < 5300);
+
+        let req = FetchSymCache {
+            object_type: Default::default(),
+            identifier: Default::default(),
+            sources: Arc::from_iter([]),
+            scope: Scope::Global,
+        };
+        let fut = sym.symcaches.fetch(req);
+        let size = dbg!(std::mem::size_of_val(&fut));
+        assert!(size > 11200 && size < 11300);
+    }
+}
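The `future_size` tests above (and the one added to `cacher.rs`) rely on the fact that `std::mem::size_of_val` applied to a not-yet-awaited future reports the size of the compiler-generated state machine. That is also why this PR wraps the large init futures (`compute`, `init_future`, `load`) in `Box::pin`: boxing moves the state machine to the heap so the enclosing future stays small. A self-contained illustration; the sizes here are illustrative, not the PR's asserted ranges:

```rust
// A future whose state machine must keep a large buffer alive across an
// await point, so its measured size is at least that of the buffer.
async fn big() -> [u8; 1024] {
    let buf = [0u8; 1024];
    std::future::ready(()).await; // `buf` stays live across this await point
    buf
}

fn main() {
    let fut = big();
    assert!(std::mem::size_of_val(&fut) >= 1024);

    // `Box::pin` reduces the caller-visible size to a single pointer,
    // which is what keeps the outer `compute_memoized` futures small.
    let boxed = Box::pin(big());
    assert_eq!(std::mem::size_of_val(&boxed), std::mem::size_of::<usize>());
}
```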
diff --git a/crates/symbolicator-service/src/services/symcaches/mod.rs b/crates/symbolicator-service/src/services/symcaches/mod.rs
index 8a3148ba1..c3168a25b 100644
--- a/crates/symbolicator-service/src/services/symcaches/mod.rs
+++ b/crates/symbolicator-service/src/services/symcaches/mod.rs
@@ -443,20 +443,19 @@ mod tests {
 
         // Create the symcache for the first time. Since the bcsymbolmap is not available, names in the
         // symcache will be obfuscated.
-        let owned_symcache = symcache_actor
+        let symcache = symcache_actor
             .fetch(fetch_symcache.clone())
             .await
             .cache
-            .ok()
             .unwrap();
 
-        let symcache = owned_symcache.get();
-        let sl = symcache.lookup(0x5a75).next().unwrap();
+        let sl = symcache.get().lookup(0x5a75).next().unwrap();
         assert_eq!(
             sl.file().unwrap().full_path(),
             "__hidden#41_/__hidden#41_/__hidden#42_"
         );
         assert_eq!(sl.function().name(), "__hidden#0_");
+        drop(symcache);
 
         // Copy the plist and bcsymbolmap to the temporary symbol directory so that the SymCacheActor can find them.
         fs::copy(
             "tests/fixtures/2d10c42f-591d-3265-b147-78ba0868073f.plist",
             symbol_dir.path().join("2d10c42f-591d-3265-b147-78ba0868073f.plist"),
         )
         .unwrap();
         fs::copy(
             "tests/fixtures/c8374b6d-6e96-34d8-ae38-efaa5fec424f.bcsymbolmap",
             symbol_dir.path().join("c8374b6d-6e96-34d8-ae38-efaa5fec424f.bcsymbolmap"),
         )
         .unwrap();
 
         // Create the symcache for the second time. Even though the bcsymbolmap is now available, its absence should
-        // still be cached and the SymcacheActor should make no attempt to download it. Therefore, the names should
+        // still be cached and the SymCacheActor should make no attempt to download it. Therefore, the names should
         // be obfuscated like before.
-        let owned_symcache = symcache_actor
+        let symcache = symcache_actor
             .fetch(fetch_symcache.clone())
             .await
             .cache
-            .ok()
             .unwrap();
 
-        let symcache = owned_symcache.get();
-        let sl = symcache.lookup(0x5a75).next().unwrap();
+        let sl = symcache.get().lookup(0x5a75).next().unwrap();
         assert_eq!(
             sl.file().unwrap().full_path(),
             "__hidden#41_/__hidden#41_/__hidden#42_"
         );
         assert_eq!(sl.function().name(), "__hidden#0_");
+        drop(symcache);
 
         // Sleep long enough for the negative cache entry to become invalid.
         std::thread::sleep(TIMEOUT);
 
         // Create the symcache for the third time. This time, the bcsymbolmap is downloaded and the names in the
         // symcache are unobfuscated.
-        let owned_symcache = symcache_actor
-            .fetch(fetch_symcache.clone())
-            .await
-            .cache
-            .ok()
-            .unwrap();
+        let symcache = symcache_actor.fetch(fetch_symcache).await.cache.unwrap();
 
-        let symcache = owned_symcache.get();
-        let sl = symcache.lookup(0x5a75).next().unwrap();
+        let sl = symcache.get().lookup(0x5a75).next().unwrap();
         assert_eq!(
             sl.file().unwrap().full_path(),
             "/Users/philipphofmann/git-repos/sentry-cocoa/Sources/Sentry/SentryMessage.m"
diff --git a/crates/symbolicator-service/src/types/mod.rs b/crates/symbolicator-service/src/types/mod.rs
index 99de670fa..2acb032eb 100644
--- a/crates/symbolicator-service/src/types/mod.rs
+++ b/crates/symbolicator-service/src/types/mod.rs
@@ -31,7 +31,7 @@ pub struct Signal(pub u32);
 /// Based on scopes, access to debug files that have been cached is determined. If a file comes from
 /// a public source, it can be used for any symbolication request. Otherwise, the symbolication
 /// request must match the scope of a file.
-#[derive(Debug, Clone, Deserialize, Serialize, Eq, Ord, PartialEq, PartialOrd)]
+#[derive(Debug, Clone, Deserialize, Serialize, Eq, Ord, PartialEq, PartialOrd, Hash)]
 #[serde(untagged)]
 #[derive(Default)]
 pub enum Scope {
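Finally, a note on the bookkeeping in `spawn_refresh`: the `max_lazy_refreshes` budget is taken with a decrement-then-check on an atomic counter, and both the budget and the `refreshes` set are restored by a guard that runs a closure on drop, so the state unwinds even if the spawned task fails early. A simplified stand-in for symbolicator's `CallOnDrop` utility (the real one lives in `crate::utils::futures`), demonstrating the pattern with illustrative names:

```rust
use std::collections::HashSet;
use std::sync::atomic::{AtomicIsize, Ordering};
use std::sync::{Arc, Mutex};

/// Simplified stand-in for `CallOnDrop`: runs a closure when dropped.
struct CallOnDrop<F: FnMut()>(F);

impl<F: FnMut()> Drop for CallOnDrop<F> {
    fn drop(&mut self) {
        (self.0)()
    }
}

fn main() {
    let budget = Arc::new(AtomicIsize::new(20));
    let refreshes: Arc<Mutex<HashSet<String>>> = Default::default();
    let key = "some-cache-key".to_string();

    // Count down towards zero; give the slot back if none was available.
    if budget.fetch_sub(1, Ordering::Relaxed) <= 0 {
        budget.fetch_add(1, Ordering::Relaxed);
        return; // over budget: skip the refresh entirely
    }

    refreshes.lock().unwrap().insert(key.clone());

    let done_token = CallOnDrop({
        let budget = Arc::clone(&budget);
        let refreshes = Arc::clone(&refreshes);
        let key = key.clone();
        move || {
            // Restore the budget and deduplication state, no matter how
            // the refresh task ends.
            budget.fetch_add(1, Ordering::Relaxed);
            refreshes.lock().unwrap().remove(&key);
        }
    });

    // ... perform the refresh; dropping the token restores both pieces of state.
    drop(done_token);
    assert_eq!(budget.load(Ordering::Relaxed), 20);
    assert!(refreshes.lock().unwrap().is_empty());
}
```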