diff --git a/CHANGELOG.md b/CHANGELOG.md index 1faac9b32af..1ce9cd19acb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,11 +2,11 @@ ### Enhancements * (PR [#????](https://github.com/realm/realm-core/pull/????)) -* None. +* Improve the performance on iOS of interprocess mutexes which do not need to support reader-writer locking. The primary beneficiary of this is beginning and ending read transactions, which are now almost as fast as they were before v13.0.0 ([PR #6258](https://github.com/realm/realm-core/pull/6258)). ### Fixed * ([#????](https://github.com/realm/realm-core/issues/????), since v?.?.?) -* None. +* Sharing Realm files between a Catalyst app and Realm Studio did not properly synchronize access to the Realm file ([PR #6258](https://github.com/realm/realm-core/pull/6258), since v6.21.0). ### Breaking changes * None. diff --git a/src/realm/db.cpp b/src/realm/db.cpp index d30cdbe5b4d..9b15cc3b0c9 100644 --- a/src/realm/db.cpp +++ b/src/realm/db.cpp @@ -511,18 +511,29 @@ class DB::VersionManager { : m_file(file) , m_mutex(mutex) { - std::lock_guard lock(m_mutex); - size_t size = static_cast<size_t>(m_file.get_size()); - m_reader_map.map(m_file, File::access_ReadWrite, size, File::map_NoSync); - m_info = m_reader_map.get_addr(); - m_local_max_entry = m_info->readers.capacity(); - REALM_ASSERT(sizeof(SharedInfo) + m_info->readers.compute_required_space(m_local_max_entry) == size); + size_t size = 0, required_size = sizeof(SharedInfo); + while (size < required_size) { + // Map the file without the lock held. This could result in the + // mapping being too small and having to remap if the file is grown + // concurrently, but if this is the case we should always see a bigger + // size the next time. + auto new_size = static_cast<size_t>(m_file.get_size()); + REALM_ASSERT(new_size > size); + size = new_size; + m_reader_map.remap(m_file, File::access_ReadWrite, size, File::map_NoSync); + m_info = m_reader_map.get_addr(); + + std::lock_guard lock(m_mutex); + m_local_max_entry = m_info->readers.capacity(); + required_size = sizeof(SharedInfo) + m_info->readers.compute_required_space(m_local_max_entry); + REALM_ASSERT(required_size >= size); + } } void cleanup_versions(uint64_t& oldest_live_version, TopRefMap& top_refs, bool& any_new_unreachables) { std::lock_guard lock(m_mutex); - ensure_full_reader_mapping(); + ensure_reader_mapping(); m_info->readers.purge_versions(oldest_live_version, top_refs, any_new_unreachables); } @@ -533,9 +544,25 @@ class DB::VersionManager { VersionID get_version_id_of_latest_snapshot() { + { + // First check the local cache. This is an unlocked read, so it may + // race with adding a new version. If this happens we'll either see + // a stale value (acceptable for a racing write on one thread and + // a read on another), or a new value which is guaranteed to not + // be an active index in the local cache. 
+ std::lock_guard lock(m_local_mutex); + auto index = m_info->readers.newest.load(); + if (index < m_local_readers.size()) { + auto& r = m_local_readers[index]; + if (r.is_active()) { + return {r.version, index}; + } + } + } + std::lock_guard lock(m_mutex); - ensure_full_reader_mapping(); - uint_fast32_t index = m_info->readers.newest; + auto index = m_info->readers.newest.load(); + ensure_reader_mapping(index); return {m_info->readers.get(index).version, index}; } @@ -569,34 +596,39 @@ class DB::VersionManager { if (try_grab_local_read_lock(read_lock, type, version_id)) return read_lock; - const bool pick_specific = version_id.version != VersionID().version; - - std::lock_guard lock(m_mutex); - auto newest = m_info->readers.newest.load(); - REALM_ASSERT(newest != VersionList::nil); - read_lock.m_reader_idx = pick_specific ? version_id.index : newest; - ensure_full_reader_mapping(); - bool picked_newest = read_lock.m_reader_idx == (unsigned)newest; - auto& r = m_info->readers.get(read_lock.m_reader_idx); - if (pick_specific && version_id.version != r.version) - throw BadVersion(); - if (!picked_newest) { - if (type == ReadLockInfo::Frozen && r.count_frozen == 0 && r.count_live == 0) - throw BadVersion(); - if (type != ReadLockInfo::Frozen && r.count_live == 0) + { + const bool pick_specific = version_id.version != VersionID().version; + std::lock_guard lock(m_mutex); + auto newest = m_info->readers.newest.load(); + REALM_ASSERT(newest != VersionList::nil); + read_lock.m_reader_idx = pick_specific ? version_id.index : newest; + ensure_reader_mapping((unsigned int)read_lock.m_reader_idx); + bool picked_newest = read_lock.m_reader_idx == (unsigned)newest; + auto& r = m_info->readers.get(read_lock.m_reader_idx); + if (pick_specific && version_id.version != r.version) throw BadVersion(); + if (!picked_newest) { + if (type == ReadLockInfo::Frozen && r.count_frozen == 0 && r.count_live == 0) + throw BadVersion(); + if (type != ReadLockInfo::Frozen && r.count_live == 0) + throw BadVersion(); + } + populate_read_lock(read_lock, r, type); } - populate_read_lock(read_lock, r, type); - std::lock_guard local_lock(m_local_mutex); - grow_local_cache(read_lock.m_reader_idx + 1); - auto& r2 = m_local_readers[read_lock.m_reader_idx]; - if (!r2.is_active()) { - r2 = r; - r2.count_full = r2.count_live = r2.count_frozen = 0; + { + std::lock_guard local_lock(m_local_mutex); + grow_local_cache(read_lock.m_reader_idx + 1); + auto& r2 = m_local_readers[read_lock.m_reader_idx]; + if (!r2.is_active()) { + r2.version = read_lock.m_version; + r2.filesize = read_lock.m_file_size; + r2.current_top = read_lock.m_top_ref; + r2.count_full = r2.count_live = r2.count_frozen = 0; + } + REALM_ASSERT(field_for_type(r2, type) == 0); + field_for_type(r2, type) = 1; } - REALM_ASSERT(field_for_type(r2, type) == 0); - field_for_type(r2, type) = 1; return read_lock; } @@ -604,7 +636,7 @@ class DB::VersionManager { void add_version(ref_type new_top_ref, size_t new_file_size, uint64_t new_version) { std::lock_guard lock(m_mutex); - ensure_full_reader_mapping(); + ensure_reader_mapping(); if (m_info->readers.try_allocate_entry(new_top_ref, new_file_size, new_version)) { return; } @@ -628,15 +660,17 @@ class DB::VersionManager { } private: - void ensure_full_reader_mapping() + void ensure_reader_mapping(unsigned int required = -1) { using _impl::SimulatedFailure; SimulatedFailure::trigger(SimulatedFailure::shared_group__grow_reader_mapping); // Throws - auto index = m_info->readers.capacity() - 1; - if (index >= m_local_max_entry) { + if 
(required < m_local_max_entry) + return; + + auto new_max_entry = m_info->readers.capacity(); + if (new_max_entry > m_local_max_entry) { // handle mapping expansion if required - auto new_max_entry = m_info->readers.capacity(); size_t info_size = sizeof(DB::SharedInfo) + m_info->readers.compute_required_space(new_max_entry); m_reader_map.remap(m_file, util::File::access_ReadWrite, info_size); // Throws m_local_max_entry = new_max_entry; @@ -787,7 +821,7 @@ void DB::open(const std::string& path, bool no_create_file, const DBOptions& opt File::CloseGuard fcg(m_file); m_file.set_fifo_path(coordination_dir, "lock.fifo"); - if (m_file.try_lock_exclusive()) { // Throws + if (m_file.try_rw_lock_exclusive()) { // Throws File::UnlockGuard ulg(m_file); // We're alone in the world, and it is Ok to initialize the @@ -819,12 +853,14 @@ void DB::open(const std::string& path, bool no_create_file, const DBOptions& opt // macOS has a bug which can cause a hang waiting to obtain a lock, even // if the lock is already open in shared mode, so we work around it by // busy waiting. This should occur only briefly during session initialization. - while (!m_file.try_lock_shared()) { + while (!m_file.try_rw_lock_shared()) { sched_yield(); } #else - m_file.lock_shared(); // Throws + m_file.rw_lock_shared(); // Throws #endif + File::UnlockGuard ulg(m_file); + // The coordination/management dir is created as a side effect of the lock // operation above if needed for lock emulation. But it may also be needed // for other purposes, so make sure it exists. @@ -1234,6 +1270,7 @@ void DB::open(const std::string& path, bool no_create_file, const DBOptions& opt fug_1.release(); // Do not unmap fcg.release(); // Do not close } + ulg.release(); // Do not release shared lock break; } @@ -1594,7 +1631,7 @@ void DB::close_internal(std::unique_lock lock, bool allow_ope // interleave which is not permitted on Windows. It is permitted on *nix. m_file_map.unmap(); m_version_manager.reset(); - m_file.unlock(); + m_file.rw_unlock(); // info->~SharedInfo(); // DO NOT Call destructor m_file.close(); } @@ -2315,7 +2352,7 @@ bool DB::call_with_lock(const std::string& realm_path, CallbackWithLock&& callba lockfile.open(lockfile_path, File::access_ReadWrite, File::create_Auto, 0); // Throws File::CloseGuard fcg(lockfile); lockfile.set_fifo_path(realm_path + ".management", "lock.fifo"); - if (lockfile.try_lock_exclusive()) { // Throws + if (lockfile.try_rw_lock_exclusive()) { // Throws callback(realm_path); return true; } diff --git a/src/realm/util/features.h b/src/realm/util/features.h index 71ec8ec975f..7bb1bc4c8c6 100644 --- a/src/realm/util/features.h +++ b/src/realm/util/features.h @@ -240,8 +240,10 @@ /* Device (iPhone or iPad) or simulator. */ #define REALM_IOS 1 #define REALM_APPLE_DEVICE !TARGET_OS_SIMULATOR +#define REALM_MACCATALYST TARGET_OS_MACCATALYST #else #define REALM_IOS 0 +#define REALM_MACCATALYST 0 #endif #if TARGET_OS_WATCH == 1 /* Device (Apple Watch) or simulator. 
*/ @@ -259,6 +261,7 @@ #endif #else #define REALM_PLATFORM_APPLE 0 +#define REALM_MACCATALYST 0 #define REALM_IOS 0 #define REALM_WATCHOS 0 #define REALM_TVOS 0 diff --git a/src/realm/util/file.cpp b/src/realm/util/file.cpp index 551d1aed7fa..67ca3a7eb3c 100644 --- a/src/realm/util/file.cpp +++ b/src/realm/util/file.cpp @@ -1063,41 +1063,15 @@ static void _unlock(int m_fd) } #endif -bool File::lock(bool exclusive, bool non_blocking) +bool File::rw_lock(bool exclusive, bool non_blocking) { - REALM_ASSERT_RELEASE(is_attached()); - -#ifdef _WIN32 // Windows version - - REALM_ASSERT_RELEASE(!m_have_lock); - - // Under Windows a file lock must be explicitely released before - // the file is closed. It will eventually be released by the - // system, but there is no guarantees on the timing. - - DWORD flags = 0; - if (exclusive) - flags |= LOCKFILE_EXCLUSIVE_LOCK; - if (non_blocking) - flags |= LOCKFILE_FAIL_IMMEDIATELY; - OVERLAPPED overlapped; - memset(&overlapped, 0, sizeof overlapped); - overlapped.Offset = 0; // Just for clarity - overlapped.OffsetHigh = 0; // Just for clarity - if (LockFileEx(m_fd, flags, 0, 1, 0, &overlapped)) { - m_have_lock = true; - return true; - } - DWORD err = GetLastError(); // Eliminate any risk of clobbering - if (err == ERROR_LOCK_VIOLATION) - return false; - throw std::system_error(err, std::system_category(), "LockFileEx() failed"); + // exclusive blocking rw locks not implemented for emulation + REALM_ASSERT(!exclusive || non_blocking); +#ifndef REALM_FILELOCK_EMULATION + return lock(exclusive, non_blocking); #else -#ifdef REALM_FILELOCK_EMULATION - // If we already have any form of lock, release it before trying to acquire a new - if (m_has_exclusive_lock || has_shared_lock()) - unlock(); + REALM_ASSERT(!m_has_exclusive_lock && !has_shared_lock()); // First obtain an exclusive lock on the file proper int operation = LOCK_EX; @@ -1113,6 +1087,10 @@ bool File::lock(bool exclusive, bool non_blocking) throw std::system_error(errno, std::system_category(), "flock() failed"); m_has_exclusive_lock = true; + // Every path through this function except for successfully acquiring an + // exclusive lock needs to release the flock() before returning. + UnlockGuard ulg(*this); + // now use a named pipe to emulate locking in conjunction with using exclusive lock // on the file proper. // exclusively locked: we can't sucessfully write to the pipe. @@ -1124,72 +1102,94 @@ bool File::lock(bool exclusive, bool non_blocking) REALM_ASSERT_RELEASE(m_pipe_fd == -1); if (m_fifo_path.empty()) m_fifo_path = m_path + ".fifo"; - status = mkfifo(m_fifo_path.c_str(), 0666); - if (status) { + + // Due to a bug in iOS 10-12 we need to open in read-write mode for shared + // or the OS will deadlock when un-suspending the app. + int mode = exclusive ? O_WRONLY | O_NONBLOCK : O_RDWR | O_NONBLOCK; + + // Optimistically try to open the fifo. This may fail due to the fifo not + // existing, but since it usually exists this is faster than trying to create + // it first. + int fd = ::open(m_fifo_path.c_str(), mode); + if (fd == -1) { int err = errno; - REALM_ASSERT_EX(status == -1, status); - if (err == ENOENT) { - // The management directory doesn't exist, so there's clearly no - // readers. 
This can happen when calling DB::call_with_lock() or - // if the management directory has been removed by DB::call_with_lock() - if (exclusive) { + if (exclusive) { + if (err == ENOENT || err == ENXIO) { + // If the fifo either doesn't exist or there's no readers with the + // other end of the pipe open (ENXIO) then we have an exclusive lock + // and are done. + ulg.release(); return true; } - // open shared: - // We need the fifo in order to make a shared lock. If we have it - // in a management directory, we may need to create that first: + + // Otherwise we got an unexpected error + throw std::system_error(err, std::system_category(), "opening lock fifo for writing failed"); + } + + if (err == ENOENT) { + // The fifo doesn't exist and we're opening in shared mode, so we + // need to create it. if (!m_fifo_dir_path.empty()) try_make_dir(m_fifo_dir_path); - // now we can try creating the FIFO again status = mkfifo(m_fifo_path.c_str(), 0666); - if (status) { - // If we fail it must be because it already exists - err = errno; - REALM_ASSERT_EX(err == EEXIST, err); - } - } - else { - // if we failed to create the fifo and not because dir is missing, - // it must be because the fifo already exists! - REALM_ASSERT_EX(err == EEXIST, err); + if (status != 0) + throw std::system_error(errno, std::system_category(), "creating lock fifo for reading failed"); + + // Try again to open the fifo now that it exists + fd = ::open(m_fifo_path.c_str(), mode); + err = errno; } + + if (fd == -1) + throw std::system_error(err, std::system_category(), "opening lock fifo for reading failed"); } + + // We successfully opened the pipe. If we're trying to acquire an exclusive + // lock that means there's a reader (aka a shared lock) and we've failed. + // Release the exclusive lock and back out. if (exclusive) { - // check if any shared locks are already taken by trying to open the pipe for writing - // shared locks are indicated by one or more readers already having opened the pipe - int fd; - do { - fd = ::open(m_fifo_path.c_str(), O_WRONLY | O_NONBLOCK); - } while (fd == -1 && errno == EINTR); - if (fd == -1 && errno != ENXIO) - throw std::system_error(errno, std::system_category(), "opening lock fifo for writing failed"); - if (fd == -1) { - // No reader was present so we have the exclusive lock! - return true; - } - else { - ::close(fd); - // shared locks exist. Back out. Release exclusive lock on file - unlock(); - return false; - } - } - else { - // lock shared. We need to release the real exclusive lock on the file, - // but first we must mark the lock as shared by opening the pipe for reading - // Open pipe for reading! - // Due to a bug in iOS 10-12 we need to open in read-write mode or the OS - // will deadlock when un-suspending the app. - int fd = ::open(m_fifo_path.c_str(), O_RDWR | O_NONBLOCK); - if (fd == -1) - throw std::system_error(errno, std::system_category(), "opening lock fifo for reading failed"); - unlock(); - m_pipe_fd = fd; - return true; + ::close(fd); + return false; } + // We're in shared mode, so opening the fifo means we've successfully acquired + // a shared lock and are done. + ulg.release(); + rw_unlock(); + m_pipe_fd = fd; + return true; +#endif // REALM_FILELOCK_EMULATION +} -#else // BSD / Linux flock() +bool File::lock(bool exclusive, bool non_blocking) +{ + REALM_ASSERT_RELEASE(is_attached()); + +#ifdef _WIN32 // Windows version + REALM_ASSERT_RELEASE(!m_have_lock); + + // Under Windows a file lock must be explicitely released before + // the file is closed. 
It will eventually be released by the + // system, but there is no guarantees on the timing. + + DWORD flags = 0; + if (exclusive) + flags |= LOCKFILE_EXCLUSIVE_LOCK; + if (non_blocking) + flags |= LOCKFILE_FAIL_IMMEDIATELY; + OVERLAPPED overlapped; + memset(&overlapped, 0, sizeof overlapped); + overlapped.Offset = 0; // Just for clarity + overlapped.OffsetHigh = 0; // Just for clarity + if (LockFileEx(m_fd, flags, 0, 1, 0, &overlapped)) { + m_have_lock = true; + return true; + } + DWORD err = GetLastError(); // Eliminate any risk of clobbering + if (err == ERROR_LOCK_VIOLATION) + return false; + throw std::system_error(err, std::system_category(), "LockFileEx() failed"); +#else // _WIN32 // NOTE: It would probably have been more portable to use fcntl() // based POSIX locks, however these locks are not recursive within // a single process, and since a second attempt to acquire such a @@ -1207,7 +1207,6 @@ bool File::lock(bool exclusive, bool non_blocking) // // Fortunately, on both Linux and Darwin, flock() does not suffer // from this 'spurious unlocking issue'. - int operation = exclusive ? LOCK_EX : LOCK_SH; if (non_blocking) operation |= LOCK_NB; @@ -1219,16 +1218,12 @@ bool File::lock(bool exclusive, bool non_blocking) if (err == EWOULDBLOCK) return false; throw std::system_error(err, std::system_category(), "flock() failed"); - -#endif #endif } - void File::unlock() noexcept { #ifdef _WIN32 // Windows version - if (!m_have_lock) return; @@ -1241,10 +1236,20 @@ void File::unlock() noexcept REALM_ASSERT_RELEASE(r); m_have_lock = false; - #else -#ifdef REALM_FILELOCK_EMULATION + // The Linux man page for flock() does not state explicitely that + // unlocking is idempotent, however, we will assume it since there + // is no mention of the error that would be reported if a + // non-locked file were unlocked. + _unlock(m_fd); +#endif +} +void File::rw_unlock() noexcept +{ +#ifndef REALM_FILELOCK_EMULATION + unlock(); +#else // Coming here with an exclusive lock, we must release that lock. // Coming here with a shared lock, we must close the pipe that we have opened for reading. // - we have to do that under the protection of a proper exclusive lock to serialize @@ -1260,22 +1265,14 @@ void File::unlock() noexcept ::close(m_pipe_fd); m_pipe_fd = -1; } + else { + REALM_ASSERT(m_has_exclusive_lock); + } _unlock(m_fd); m_has_exclusive_lock = false; - -#else // BSD / Linux flock() - - // The Linux man page for flock() does not state explicitely that - // unlocking is idempotent, however, we will assume it since there - // is no mention of the error that would be reported if a - // non-locked file were unlocked. - _unlock(m_fd); - -#endif -#endif +#endif // REALM_FILELOCK_EMULATION } - void* File::map(AccessMode a, size_t size, int /*map_flags*/, size_t offset) const { return realm::util::mmap({m_fd, m_path, a, m_encryption_key.get()}, size, offset); diff --git a/src/realm/util/file.hpp b/src/realm/util/file.hpp index b27fd03f3cb..d1321ef1cdf 100644 --- a/src/realm/util/file.hpp +++ b/src/realm/util/file.hpp @@ -51,12 +51,11 @@ #define REALM_HAVE_STD_FILESYSTEM 0 #endif -#if REALM_APPLE_DEVICE && !REALM_TVOS +#if REALM_APPLE_DEVICE && !REALM_TVOS && !REALM_MACCATALYST #define REALM_FILELOCK_EMULATION #endif -namespace realm { -namespace util { +namespace realm::util { class EncryptedFileMapping; @@ -343,10 +342,34 @@ class File { /// Calling this function on an instance that is not attached to /// an open file, or on an instance that is already locked has /// undefined behavior. 
- void lock_exclusive(); + void lock(); + + /// Non-blocking version of `lock()`. Returns true if the lock was acquired + /// and false otherwise. + bool try_lock(); + + /// Release a lock on this file which was previously acquired with + /// `lock()` or `try_lock()`. Calling this without holding the lock or + /// while holding a lock acquired with one of the `rw` functions is + /// undefined behavior. + void unlock() noexcept; /// Place an shared lock on this file. This blocks the caller - /// until all other exclusive locks have been released. + /// until all other locks have been released. + /// + /// Locks acquired on distinct File instances have fully recursive + /// behavior, even if they are acquired in the same process (or + /// thread) and are attached to the same underlying file. + /// + /// Calling this function on an instance that is not attached to an open + /// file, on an instance that is already locked, or on a file on which + /// `lock()` (rather than `try_rw_lock_exclusive()`) has been called has + /// undefined behavior. + void rw_lock_shared(); + + /// Attempt to place an exclusive lock on this file. Returns true if the + /// lock could be acquired, and false if an exclusive or shared lock exists + /// for the file. /// /// Locks acquired on distinct File instances have fully recursive /// behavior, even if they are acquired in the same process (or @@ -355,19 +378,17 @@ class File { /// Calling this function on an instance that is not attached to /// an open file, or on an instance that is already locked has /// undefined behavior. - void lock_shared(); - - /// Non-blocking version of lock_exclusive(). Returns true iff it - /// succeeds. - bool try_lock_exclusive(); + bool try_rw_lock_exclusive(); /// Non-blocking version of lock_shared(). Returns true iff it /// succeeds. - bool try_lock_shared(); + bool try_rw_lock_shared(); - /// Release a previously acquired lock on this file. This function - /// is idempotent. - void unlock() noexcept; + /// Release a read-write lock on this file previously acquired + /// with `rw_lock_shared()`, `try_rw_lock_exclusive()` or + /// `try_rw_lock_shared()`. Calling this after a call to `lock()` or + /// without holding the lock is undefined behavior. + void rw_unlock() noexcept; /// Set the encryption key used for this file. Must be called before any /// mappings are created or any data is read from or written to the file. @@ -599,9 +620,6 @@ class File { // Return false if the file doesn't exist. Otherwise uid will be set. static bool get_unique_id(const std::string& path, UniqueID& uid); - class ExclusiveLock; - class SharedLock; - template <class T> class Map; @@ -634,6 +652,7 @@ class File { std::string m_path; bool lock(bool exclusive, bool non_blocking); + bool rw_lock(bool exclusive, bool non_blocking); void open_internal(const std::string& path, AccessMode, CreateMode, int flags, bool* success); #ifdef REALM_FILELOCK_EMULATION @@ -651,7 +670,7 @@ class File { FileDesc m_fd; AccessMode m_access_mode = access_ReadOnly; - MapBase() noexcept; + MapBase() noexcept = default; ~MapBase() noexcept; // Disable copying. Copying an opened MapBase will create a scenario @@ -692,45 +711,6 @@ class File { }; -class File::ExclusiveLock { -public: - ExclusiveLock(File& f) - : m_file(f) - { - f.lock_exclusive(); - } - ~ExclusiveLock() noexcept - { - m_file.unlock(); - } - // Disable copying. It is not how this class should be used. 
- ExclusiveLock(const ExclusiveLock&) = delete; - ExclusiveLock& operator=(const ExclusiveLock&) = delete; - -private: - File& m_file; -}; - -class File::SharedLock { -public: - SharedLock(File& f) - : m_file(f) - { - f.lock_shared(); - } - ~SharedLock() noexcept - { - m_file.unlock(); - } - // Disable copying. It is not how this class should be used. - SharedLock(const SharedLock&) = delete; - SharedLock& operator=(const SharedLock&) = delete; - -private: - File& m_file; -}; - - /// This class provides a RAII abstraction over the concept of a /// memory mapped file. /// @@ -807,10 +787,10 @@ class File::Map : private MapBase { /// See File::remap(). /// - /// Calling this function on a Map instance that is not currently - /// attached to a memory mapped file has undefined behavior. The - /// returned pointer is the same as what will subsequently be - /// returned by get_addr(). + /// Calling this function on a Map instance that is not currently attached + /// to a memory mapped file is equivalent to calling map(). The returned + /// pointer is the same as what will subsequently be returned by + /// get_addr(). T* remap(const File&, AccessMode = access_ReadOnly, size_t size = sizeof(T), int map_flags = 0); /// Try to extend the existing mapping to a given size @@ -900,7 +880,7 @@ class File::UnlockGuard { ~UnlockGuard() noexcept { if (m_file) - m_file->unlock(); + m_file->rw_unlock(); } void release() noexcept { @@ -1151,30 +1131,29 @@ inline bool File::is_attached() const noexcept #endif } -inline void File::lock_exclusive() +inline void File::rw_lock_shared() { - lock(true, false); + rw_lock(false, false); } -inline void File::lock_shared() +inline bool File::try_rw_lock_exclusive() { - lock(false, false); + return rw_lock(true, true); } -inline bool File::try_lock_exclusive() +inline bool File::try_rw_lock_shared() { - return lock(true, true); + return rw_lock(false, true); } -inline bool File::try_lock_shared() +inline void File::lock() { - return lock(false, true); + lock(true, false); } -inline File::MapBase::MapBase() noexcept +inline bool File::try_lock() { - m_addr = nullptr; - m_size = 0; + return lock(true, true); } inline File::MapBase::~MapBase() noexcept @@ -1400,7 +1379,6 @@ inline bool operator>=(const File::UniqueID& lhs, const File::UniqueID& rhs) return !(lhs < rhs); } -} // namespace util -} // namespace realm +} // namespace realm::util #endif // REALM_UTIL_FILE_HPP diff --git a/src/realm/util/interprocess_mutex.hpp b/src/realm/util/interprocess_mutex.hpp index 1e2f7130f13..3ce2869ff92 100644 --- a/src/realm/util/interprocess_mutex.hpp +++ b/src/realm/util/interprocess_mutex.hpp @@ -326,7 +326,7 @@ inline void InterprocessMutex::lock() { #if REALM_ROBUST_MUTEX_EMULATION std::unique_lock mutex_lock(m_lock_info->m_local_mutex); - m_lock_info->m_file.lock_exclusive(); + m_lock_info->m_file.lock(); mutex_lock.release(); #else @@ -347,7 +347,7 @@ inline bool InterprocessMutex::try_lock() if (!mutex_lock.owns_lock()) { return false; } - bool success = m_lock_info->m_file.try_lock_exclusive(); + bool success = m_lock_info->m_file.try_lock(); if (success) { mutex_lock.release(); return true; diff --git a/test/test_file_locks.cpp b/test/test_file_locks.cpp index bb4aa87d3cb..96412d269b0 100644 --- a/test/test_file_locks.cpp +++ b/test/test_file_locks.cpp @@ -128,9 +128,9 @@ TEST(File_NoSpuriousTryLockFailures) try { File file(path, File::mode_Write); for (int i = 0; i != num_rounds; ++i) { - bool good_lock = file.try_lock_exclusive(); + bool good_lock = 
file.try_rw_lock_exclusive(); if (good_lock) - file.unlock(); + file.rw_unlock(); { LockGuard l(mutex); if (good_lock) @@ -208,7 +208,7 @@ TEST_IF(File_NoSpuriousTryLockFailures2, !(running_with_valgrind || running_with } // All threads race for the lock - bool owns_lock = file.try_lock_exclusive(); + bool owns_lock = file.try_rw_lock_exclusive(); barrier_2 = 0; @@ -226,7 +226,7 @@ TEST_IF(File_NoSpuriousTryLockFailures2, !(running_with_valgrind || running_with CHECK_EQUAL(lock_taken.load(), size_t(1)); if(owns_lock) { - file.unlock(); + file.rw_unlock(); } barrier_1 = 0; diff --git a/test/test_shared.cpp b/test/test_shared.cpp index 2ef908cd154..d322585108a 100644 --- a/test/test_shared.cpp +++ b/test/test_shared.cpp @@ -2960,7 +2960,7 @@ TEST(Shared_LockFileInitSpinsOnZeroSize) Thread t; auto do_async = [&]() { File f(path.get_lock_path(), File::mode_Write); - f.lock_shared(); + f.rw_lock_shared(); File::UnlockGuard ug(f); CHECK(f.is_attached()); @@ -3007,7 +3007,7 @@ TEST(Shared_LockFileSpinsOnInitComplete) Thread t; auto do_async = [&]() { File f(path.get_lock_path(), File::mode_Write); - f.lock_shared(); + f.rw_lock_shared(); File::UnlockGuard ug(f); CHECK(f.is_attached()); @@ -3059,7 +3059,7 @@ TEST(Shared_LockFileOfWrongSizeThrows) auto do_async = [&]() { File f(path.get_lock_path(), File::mode_Write); f.set_fifo_path(std::string(path) + ".management", "lock.fifo"); - f.lock_shared(); + f.rw_lock_shared(); File::UnlockGuard ug(f); CHECK(f.is_attached()); @@ -3125,7 +3125,7 @@ TEST(Shared_LockFileOfWrongVersionThrows) f.open(path.get_lock_path(), File::access_ReadWrite, File::create_Auto, 0); // Throws f.set_fifo_path(std::string(path) + ".management", "lock.fifo"); - f.lock_shared(); + f.rw_lock_shared(); File::UnlockGuard ug(f); CHECK(f.is_attached()); @@ -3177,7 +3177,7 @@ TEST(Shared_LockFileOfWrongMutexSizeThrows) File f; f.open(path.get_lock_path(), File::access_ReadWrite, File::create_Auto, 0); // Throws f.set_fifo_path(std::string(path) + ".management", "lock.fifo"); - f.lock_shared(); + f.rw_lock_shared(); File::UnlockGuard ug(f); CHECK(f.is_attached()); @@ -3231,7 +3231,7 @@ TEST(Shared_LockFileOfWrongCondvarSizeThrows) File f; f.open(path.get_lock_path(), File::access_ReadWrite, File::create_Auto, 0); // Throws f.set_fifo_path(std::string(path) + ".management", "lock.fifo"); - f.lock_shared(); + f.rw_lock_shared(); File::UnlockGuard ug(f); CHECK(f.is_attached()); diff --git a/test/test_util_file.cpp b/test/test_util_file.cpp index 3e86f277649..ab2661b90c3 100644 --- a/test/test_util_file.cpp +++ b/test/test_util_file.cpp @@ -294,4 +294,18 @@ TEST(Utils_File_ForEach) } } +TEST(Utils_File_Lock) +{ + TEST_DIR(dir); + util::try_make_dir(dir); + auto file = File::resolve("test", dir); + File f1(file, File::mode_Write); + File f2(file); + CHECK(f1.try_rw_lock_exclusive()); + CHECK_NOT(f2.try_rw_lock_shared()); + f1.rw_unlock(); + CHECK(f1.try_rw_lock_shared()); + CHECK_NOT(f2.try_rw_lock_exclusive()); +} + #endif
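
For reference, here is a small illustrative sketch (not part of the patch) of how the two lock families separated by this change are intended to be used: the plain `lock()`/`try_lock()`/`unlock()` family that `InterprocessMutex` now calls, and the `rw_lock_shared()`/`try_rw_lock_exclusive()`/`rw_unlock()` family that `DB::open()` uses for the session lock. The `example()` wrapper and the lock-file path are hypothetical; see `test/test_util_file.cpp` above for the real test.

```cpp
// Illustrative sketch only, not part of the patch. The lock-file path and the
// example() wrapper are hypothetical; the File API calls are the ones renamed
// by this change.
#include <realm/util/file.hpp>

#include <string>

using realm::util::File;

void example(const std::string& lock_path)
{
    File f(lock_path, File::mode_Write);

    // Reader-writer ("rw") session locks. On most platforms these map to
    // flock(); on iOS (but no longer on Mac Catalyst) they are emulated with
    // an exclusive flock() plus a named fifo. This is what DB::open() uses on
    // the .lock file.
    if (f.try_rw_lock_exclusive()) {
        // We are alone: initialize shared state, then drop the exclusive lock.
        f.rw_unlock();
    }
    f.rw_lock_shared();      // join the session as a reader
    File::UnlockGuard ug(f); // calls rw_unlock() on scope exit

    // Plain exclusive locks (always flock()/LockFileEx(), never emulated) are
    // what InterprocessMutex now uses. They must not be mixed with the rw
    // functions on the same File instance:
    //
    //     other_file.lock();
    //     ...
    //     other_file.unlock();
}
```

The split matters because only the session lock needs shared/exclusive semantics; by moving `InterprocessMutex` onto the plain exclusive lock, the iOS path avoids the fifo-based emulation entirely, which is where the performance and Catalyst fixes described in the changelog come from.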