From b1d191db308c5739c804081072e29d40b47658e6 Mon Sep 17 00:00:00 2001
From: Sean Quah
Date: Fri, 11 Nov 2022 18:44:20 +0000
Subject: [PATCH 1/4] Fix broken upserts to `receipts_linearized` and `receipts_graph`

As part of the database migration to support threaded receipts, there is
a possible window in between `73/08thread_receipts_non_null.sql.postgres`
removing the original unique constraints on `receipts_linearized` and
`receipts_graph` and the `receipts_linearized_unique_index` and
`receipts_graph_unique_index` background updates from
`72/08thread_receipts.sql` completing, during which the unique
constraints on `receipts_linearized` and `receipts_graph` are missing.
Any emulated upserts on these tables must therefore be performed with a
lock held, otherwise duplicate rows can end up in the tables when there
are concurrent emulated upserts.

Note that emulated upserts no longer happen on sqlite by default, since
the minimum supported version of sqlite now supports native upserts.

Signed-off-by: Sean Quah
---
 synapse/storage/databases/main/receipts.py | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index 64519587f8dc..787f7499c541 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -702,9 +702,6 @@ def _insert_linearized_receipt_txn(
                 "data": json_encoder.encode(data),
             },
             where_clause=where_clause,
-            # receipts_linearized has a unique constraint on
-            # (user_id, room_id, receipt_type), so no need to lock
-            lock=False,
         )
 
         return rx_ts
@@ -862,9 +859,6 @@ def _insert_graph_receipt_txn(
             "data": json_encoder.encode(data),
             },
             where_clause=where_clause,
-            # receipts_graph has a unique constraint on
-            # (user_id, room_id, receipt_type), so no need to lock
-            lock=False,
         )

From 580a4160ecc577550df300a8a6feaff895f8a516 Mon Sep 17 00:00:00 2001
From: Sean Quah
Date: Fri, 11 Nov 2022 21:29:14 +0000
Subject: [PATCH 2/4] Clean up duplicate receipts when creating unique indexes

Before creating the `receipts_graph_unique_index` and
`receipts_linearized_unique_index` unique indexes, we have to clean up
any duplicate receipts that may have crept in due to
https://github.com/matrix-org/synapse/issues/14406.
Signed-off-by: Sean Quah --- synapse/storage/databases/main/receipts.py | 109 +++++++++-- tests/storage/databases/main/test_receipts.py | 169 ++++++++++++++++++ 2 files changed, 260 insertions(+), 18 deletions(-) create mode 100644 tests/storage/databases/main/test_receipts.py diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 787f7499c541..f89adacadbe0 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -113,24 +113,6 @@ def __init__( prefilled_cache=receipts_stream_prefill, ) - self.db_pool.updates.register_background_index_update( - "receipts_linearized_unique_index", - index_name="receipts_linearized_unique_index", - table="receipts_linearized", - columns=["room_id", "receipt_type", "user_id"], - where_clause="thread_id IS NULL", - unique=True, - ) - - self.db_pool.updates.register_background_index_update( - "receipts_graph_unique_index", - index_name="receipts_graph_unique_index", - table="receipts_graph", - columns=["room_id", "receipt_type", "user_id"], - where_clause="thread_id IS NULL", - unique=True, - ) - def get_max_receipt_stream_id(self) -> int: """Get the current max stream ID for receipts stream""" return self._receipts_id_gen.get_current_token() @@ -864,6 +846,8 @@ def _insert_graph_receipt_txn( class ReceiptsBackgroundUpdateStore(SQLBaseStore): POPULATE_RECEIPT_EVENT_STREAM_ORDERING = "populate_event_stream_ordering" + RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME = "receipts_linearized_unique_index" + RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME = "receipts_graph_unique_index" def __init__( self, @@ -877,6 +861,14 @@ def __init__( self.POPULATE_RECEIPT_EVENT_STREAM_ORDERING, self._populate_receipt_event_stream_ordering, ) + self.db_pool.updates.register_background_update_handler( + self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME, + self._background_receipts_linearized_unique_index, + ) + self.db_pool.updates.register_background_update_handler( + self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME, + self._background_receipts_graph_unique_index, + ) async def _populate_receipt_event_stream_ordering( self, progress: JsonDict, batch_size: int @@ -932,6 +924,87 @@ def _populate_receipt_event_stream_ordering_txn( return batch_size + async def _background_receipts_unique_index( + self, update_name: str, index_name: str, table: str + ) -> int: + """Adds a unique index on `(room_id, receipt_type, user_id)` to the given + receipts table, for non-thread receipts. + """ + + def _receipts_unique_index_txn(txn: LoggingTransaction) -> None: + # Identify any duplicate receipts arising from + # https://github.com/matrix-org/synapse/issues/14406. + # We expect the following query to use the per-thread receipt index and take + # less than a minute. + sql = f""" + SELECT room_id, receipt_type, user_id FROM {table} + WHERE thread_id IS NULL + GROUP BY room_id, receipt_type, user_id + HAVING COUNT(*) > 1 + """ + txn.execute(sql) + duplicate_keys = cast(List[Tuple[str, str, str]], list(txn)) + + # Then remove all duplicate receipts. + # We could be clever and try to keep the latest receipt out of every set of + # duplicates, but it's far simpler to remove them all. + for room_id, receipt_type, user_id in duplicate_keys: + sql = f""" + DELETE FROM {table} + WHERE + room_id = ? AND + receipt_type = ? AND + user_id = ? AND + thread_id IS NULL + """ + txn.execute(sql, (room_id, receipt_type, user_id)) + + # Now that the duplicates are gone, we can create the index. 
+ concurrently = ( + "CONCURRENTLY" + if isinstance(self.database_engine, PostgresEngine) + else "" + ) + sql = f""" + CREATE UNIQUE INDEX {concurrently} {index_name} + ON {table}(room_id, receipt_type, user_id) + WHERE thread_id IS NULL + """ + txn.execute(sql) + + await self.db_pool.runInteraction( + update_name, + _receipts_unique_index_txn, + ) + + await self.db_pool.updates._end_background_update(update_name) + + return 1 + + async def _background_receipts_linearized_unique_index( + self, progress: dict, batch_size: int + ) -> int: + """Adds a unique index on `(room_id, receipt_type, user_id)` to + `receipts_linearized`, for non-thread receipts. + """ + return await self._background_receipts_unique_index( + self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME, + "receipts_linearized_unique_index", + "receipts_linearized", + ) + + async def _background_receipts_graph_unique_index( + self, progress: dict, batch_size: int + ) -> int: + """Adds a unique index on `(room_id, receipt_type, user_id)` to + `receipts_graph`, for non-thread receipts. + """ + return await self._background_receipts_unique_index( + self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME, + "receipts_graph_unique_index", + "receipts_graph", + ) + class ReceiptsStore(ReceiptsWorkerStore, ReceiptsBackgroundUpdateStore): pass diff --git a/tests/storage/databases/main/test_receipts.py b/tests/storage/databases/main/test_receipts.py new file mode 100644 index 000000000000..c90bdee772b8 --- /dev/null +++ b/tests/storage/databases/main/test_receipts.py @@ -0,0 +1,169 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Dict + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.rest import admin +from synapse.rest.client import login, room +from synapse.server import HomeServer +from synapse.storage.database import LoggingTransaction +from synapse.util import Clock + +from tests.unittest import HomeserverTestCase + + +class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase): + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): + self.store = hs.get_datastores().main + self.user_id = self.register_user("foo", "pass") + self.token = self.login("foo", "pass") + self.room_id = self.helper.create_room_as(self.user_id, tok=self.token) + self.other_room_id = self.helper.create_room_as(self.user_id, tok=self.token) + + def _test_background_receipts_unique_index( + self, + update_name: str, + index_name: str, + table: str, + values: Dict[str, Any], + ): + """Test that the background update to uniqueify non-thread receipts in + the given receipts table works properly. + """ + # First, undo the background update. 
+ def drop_receipts_unique_index(txn: LoggingTransaction) -> None: + txn.execute(f"DROP INDEX IF EXISTS {index_name}") + + self.get_success( + self.store.db_pool.runInteraction( + "drop_receipts_unique_index", + drop_receipts_unique_index, + ) + ) + + # Add duplicate receipts for `room_id`. + for _ in range(2): + self.get_success( + self.store.db_pool.simple_insert( + table, + { + "room_id": self.room_id, + "receipt_type": "m.read", + "user_id": self.user_id, + "thread_id": None, + "data": "{}", + **values, + }, + ) + ) + + # Add a unique receipt for `other_room_id`. + self.get_success( + self.store.db_pool.simple_insert( + table, + { + "room_id": self.other_room_id, + "receipt_type": "m.read", + "user_id": self.user_id, + "thread_id": None, + "data": "{}", + **values, + }, + ) + ) + + # Insert and run the background update. + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": update_name, + "progress_json": "{}", + }, + ) + ) + + self.store.db_pool.updates._all_done = False + + self.wait_for_background_updates() + + # Check that the background task deleted the duplicate receipts. + res = self.get_success( + self.store.db_pool.simple_select_onecol( + table=table, + keyvalues={ + "room_id": self.room_id, + "receipt_type": "m.read", + "user_id": self.user_id, + # `simple_select_onecol` does not support NULL filters, + # so skip the filter on `thread_id`. + }, + retcol="room_id", + desc="get_receipt", + ) + ) + self.assertEqual(0, len(res)) + + # Check that the background task did not delete the unique receipts. + res = self.get_success( + self.store.db_pool.simple_select_onecol( + table=table, + keyvalues={ + "room_id": self.other_room_id, + "receipt_type": "m.read", + "user_id": self.user_id, + # `simple_select_onecol` does not support NULL filters, + # so skip the filter on `thread_id`. + }, + retcol="room_id", + desc="get_receipt", + ) + ) + self.assertEqual(1, len(res)) + + def test_background_receipts_linearized_unique_index(self): + """Test that the background update to uniqueify non-thread receipts in + `receipts_linearized` works properly. + """ + self._test_background_receipts_unique_index( + "receipts_linearized_unique_index", + "receipts_linearized_unique_index", + "receipts_linearized", + { + "stream_id": 5, + "event_id": "$some_event", + }, + ) + + def test_background_receipts_graph_unique_index(self): + """Test that the background update to uniqueify non-thread receipts in + `receipts_graph` works properly. + """ + self._test_background_receipts_unique_index( + "receipts_graph_unique_index", + "receipts_graph_unique_index", + "receipts_graph", + { + "event_ids": '["$some_event"]', + }, + ) From dfe9946fbf8bf9689a0132f7b8376e1c93710e1d Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Tue, 15 Nov 2022 12:44:22 +0000 Subject: [PATCH 3/4] Add newsfile Signed-off-by: Sean Quah --- changelog.d/14453.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/14453.bugfix diff --git a/changelog.d/14453.bugfix b/changelog.d/14453.bugfix new file mode 100644 index 000000000000..4969e5450c3f --- /dev/null +++ b/changelog.d/14453.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. 
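The race that let these duplicates in (issue 14406, closed by the first patch) is easiest to see in code. An emulated upsert is an UPDATE followed by a conditional INSERT, and once `73/08thread_receipts_non_null.sql.postgres` has dropped the unique constraint, nothing stops two concurrent writers from both taking the INSERT branch. The sketch below is a minimal standalone illustration using Python's sqlite3 module, not Synapse's actual `simple_upsert_txn_emulated` helper; the `receipts` table and its column set are simplified for the example.

    import sqlite3

    def emulated_upsert(
        conn: sqlite3.Connection,
        room_id: str,
        receipt_type: str,
        user_id: str,
        data: str,
    ) -> None:
        # Emulated upsert: try an UPDATE first, INSERT if nothing matched.
        cur = conn.execute(
            "UPDATE receipts SET data = ?"
            " WHERE room_id = ? AND receipt_type = ? AND user_id = ?",
            (data, room_id, receipt_type, user_id),
        )
        if cur.rowcount == 0:
            # Race window: a concurrent writer may also have seen zero
            # updated rows and be about to INSERT. Without a unique
            # constraint on (room_id, receipt_type, user_id) to reject the
            # second row, both INSERTs succeed and the table accumulates
            # duplicates.
            conn.execute(
                "INSERT INTO receipts (room_id, receipt_type, user_id, data)"
                " VALUES (?, ?, ?, ?)",
                (room_id, receipt_type, user_id, data),
            )

Removing `lock=False` in the first patch makes Synapse take a table lock before running this UPDATE/INSERT sequence, serializing concurrent upserts and closing the window until the unique indexes exist again.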
From e654f8c2740c7edbbcc84a1f89d945def5c95adf Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Tue, 15 Nov 2022 21:22:03 +0000 Subject: [PATCH 4/4] Fix background update on postgres and keep last linearized receipt --- synapse/storage/databases/main/receipts.py | 152 ++++++++++----- tests/storage/databases/main/test_receipts.py | 176 +++++++++++------- 2 files changed, 212 insertions(+), 116 deletions(-) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index f89adacadbe0..a580e4bddaf2 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -924,87 +924,143 @@ def _populate_receipt_event_stream_ordering_txn( return batch_size - async def _background_receipts_unique_index( - self, update_name: str, index_name: str, table: str - ) -> int: + async def _create_receipts_index(self, index_name: str, table: str) -> None: """Adds a unique index on `(room_id, receipt_type, user_id)` to the given - receipts table, for non-thread receipts. - """ + receipts table, for non-thread receipts.""" + + def _create_index(conn: LoggingDatabaseConnection) -> None: + conn.rollback() + + # we have to set autocommit, because postgres refuses to + # CREATE INDEX CONCURRENTLY without it. + if isinstance(self.database_engine, PostgresEngine): + conn.set_session(autocommit=True) + + try: + c = conn.cursor() + + # Now that the duplicates are gone, we can create the index. + concurrently = ( + "CONCURRENTLY" + if isinstance(self.database_engine, PostgresEngine) + else "" + ) + sql = f""" + CREATE UNIQUE INDEX {concurrently} {index_name} + ON {table}(room_id, receipt_type, user_id) + WHERE thread_id IS NULL + """ + c.execute(sql) + finally: + if isinstance(self.database_engine, PostgresEngine): + conn.set_session(autocommit=False) + + await self.db_pool.runWithConnection(_create_index) - def _receipts_unique_index_txn(txn: LoggingTransaction) -> None: + async def _background_receipts_linearized_unique_index( + self, progress: dict, batch_size: int + ) -> int: + """Removes duplicate receipts and adds a unique index on + `(room_id, receipt_type, user_id)` to `receipts_linearized`, for non-thread + receipts.""" + + def _remote_duplicate_receipts_txn(txn: LoggingTransaction) -> None: # Identify any duplicate receipts arising from # https://github.com/matrix-org/synapse/issues/14406. # We expect the following query to use the per-thread receipt index and take # less than a minute. - sql = f""" - SELECT room_id, receipt_type, user_id FROM {table} + sql = """ + SELECT MAX(stream_id), room_id, receipt_type, user_id + FROM receipts_linearized WHERE thread_id IS NULL GROUP BY room_id, receipt_type, user_id HAVING COUNT(*) > 1 """ txn.execute(sql) - duplicate_keys = cast(List[Tuple[str, str, str]], list(txn)) + duplicate_keys = cast(List[Tuple[int, str, str, str]], list(txn)) - # Then remove all duplicate receipts. - # We could be clever and try to keep the latest receipt out of every set of - # duplicates, but it's far simpler to remove them all. - for room_id, receipt_type, user_id in duplicate_keys: - sql = f""" - DELETE FROM {table} + # Then remove duplicate receipts, keeping the one with the highest + # `stream_id`. There should only be a single receipt with any given + # `stream_id`. + for max_stream_id, room_id, receipt_type, user_id in duplicate_keys: + sql = """ + DELETE FROM receipts_linearized WHERE room_id = ? AND receipt_type = ? AND user_id = ? AND - thread_id IS NULL + thread_id IS NULL AND + stream_id < ? 
""" - txn.execute(sql, (room_id, receipt_type, user_id)) - - # Now that the duplicates are gone, we can create the index. - concurrently = ( - "CONCURRENTLY" - if isinstance(self.database_engine, PostgresEngine) - else "" - ) - sql = f""" - CREATE UNIQUE INDEX {concurrently} {index_name} - ON {table}(room_id, receipt_type, user_id) - WHERE thread_id IS NULL - """ - txn.execute(sql) + txn.execute(sql, (room_id, receipt_type, user_id, max_stream_id)) await self.db_pool.runInteraction( - update_name, - _receipts_unique_index_txn, + self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME, + _remote_duplicate_receipts_txn, ) - await self.db_pool.updates._end_background_update(update_name) - - return 1 - - async def _background_receipts_linearized_unique_index( - self, progress: dict, batch_size: int - ) -> int: - """Adds a unique index on `(room_id, receipt_type, user_id)` to - `receipts_linearized`, for non-thread receipts. - """ - return await self._background_receipts_unique_index( - self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME, + await self._create_receipts_index( "receipts_linearized_unique_index", "receipts_linearized", ) + await self.db_pool.updates._end_background_update( + self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME + ) + + return 1 + async def _background_receipts_graph_unique_index( self, progress: dict, batch_size: int ) -> int: - """Adds a unique index on `(room_id, receipt_type, user_id)` to - `receipts_graph`, for non-thread receipts. - """ - return await self._background_receipts_unique_index( + """Removes duplicate receipts and adds a unique index on + `(room_id, receipt_type, user_id)` to `receipts_graph`, for non-thread + receipts.""" + + def _remote_duplicate_receipts_txn(txn: LoggingTransaction) -> None: + # Identify any duplicate receipts arising from + # https://github.com/matrix-org/synapse/issues/14406. + # We expect the following query to use the per-thread receipt index and take + # less than a minute. + sql = """ + SELECT room_id, receipt_type, user_id FROM receipts_graph + WHERE thread_id IS NULL + GROUP BY room_id, receipt_type, user_id + HAVING COUNT(*) > 1 + """ + txn.execute(sql) + duplicate_keys = cast(List[Tuple[str, str, str]], list(txn)) + + # Then remove all duplicate receipts. + # We could be clever and try to keep the latest receipt out of every set of + # duplicates, but it's far simpler to remove them all. + for room_id, receipt_type, user_id in duplicate_keys: + sql = """ + DELETE FROM receipts_graph + WHERE + room_id = ? AND + receipt_type = ? AND + user_id = ? AND + thread_id IS NULL + """ + txn.execute(sql, (room_id, receipt_type, user_id)) + + await self.db_pool.runInteraction( self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME, + _remote_duplicate_receipts_txn, + ) + + await self._create_receipts_index( "receipts_graph_unique_index", "receipts_graph", ) + await self.db_pool.updates._end_background_update( + self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME + ) + + return 1 + class ReceiptsStore(ReceiptsWorkerStore, ReceiptsBackgroundUpdateStore): pass diff --git a/tests/storage/databases/main/test_receipts.py b/tests/storage/databases/main/test_receipts.py index c90bdee772b8..c4f12d81d73a 100644 --- a/tests/storage/databases/main/test_receipts.py +++ b/tests/storage/databases/main/test_receipts.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Dict +from typing import Any, Dict, Optional, Sequence, Tuple from twisted.test.proto_helpers import MemoryReactor @@ -45,10 +45,22 @@ def _test_background_receipts_unique_index( update_name: str, index_name: str, table: str, - values: Dict[str, Any], + receipts: Dict[Tuple[str, str, str], Sequence[Dict[str, Any]]], + expected_unique_receipts: Dict[Tuple[str, str, str], Optional[Dict[str, Any]]], ): """Test that the background update to uniqueify non-thread receipts in the given receipts table works properly. + + Args: + update_name: The name of the background update to test. + index_name: The name of the index that the background update creates. + table: The table of receipts that the background update fixes. + receipts: The test data containing duplicate receipts. + A list of receipt rows to insert, grouped by + `(room_id, receipt_type, user_id)`. + expected_unique_receipts: A dictionary of `(room_id, receipt_type, user_id)` + keys and expected receipt key-values after duplicate receipts have been + removed. """ # First, undo the background update. def drop_receipts_unique_index(txn: LoggingTransaction) -> None: @@ -61,36 +73,22 @@ def drop_receipts_unique_index(txn: LoggingTransaction) -> None: ) ) - # Add duplicate receipts for `room_id`. - for _ in range(2): - self.get_success( - self.store.db_pool.simple_insert( - table, - { - "room_id": self.room_id, - "receipt_type": "m.read", - "user_id": self.user_id, - "thread_id": None, - "data": "{}", - **values, - }, + # Populate the receipts table, including duplicates. + for (room_id, receipt_type, user_id), rows in receipts.items(): + for row in rows: + self.get_success( + self.store.db_pool.simple_insert( + table, + { + "room_id": room_id, + "receipt_type": receipt_type, + "user_id": user_id, + "thread_id": None, + "data": "{}", + **row, + }, + ) ) - ) - - # Add a unique receipt for `other_room_id`. - self.get_success( - self.store.db_pool.simple_insert( - table, - { - "room_id": self.other_room_id, - "receipt_type": "m.read", - "user_id": self.user_id, - "thread_id": None, - "data": "{}", - **values, - }, - ) - ) # Insert and run the background update. self.get_success( @@ -107,39 +105,54 @@ def drop_receipts_unique_index(txn: LoggingTransaction) -> None: self.wait_for_background_updates() - # Check that the background task deleted the duplicate receipts. - res = self.get_success( - self.store.db_pool.simple_select_onecol( - table=table, - keyvalues={ - "room_id": self.room_id, - "receipt_type": "m.read", - "user_id": self.user_id, - # `simple_select_onecol` does not support NULL filters, - # so skip the filter on `thread_id`. - }, - retcol="room_id", - desc="get_receipt", - ) - ) - self.assertEqual(0, len(res)) - - # Check that the background task did not delete the unique receipts. - res = self.get_success( - self.store.db_pool.simple_select_onecol( - table=table, - keyvalues={ - "room_id": self.other_room_id, - "receipt_type": "m.read", - "user_id": self.user_id, - # `simple_select_onecol` does not support NULL filters, - # so skip the filter on `thread_id`. - }, - retcol="room_id", - desc="get_receipt", + # Check that the remaining receipts match expectations. + for ( + room_id, + receipt_type, + user_id, + ), expected_row in expected_unique_receipts.items(): + # Include the receipt key in the returned columns, for more informative + # assertion messages. 
+            columns = ["room_id", "receipt_type", "user_id"]
+            if expected_row is not None:
+                columns += expected_row.keys()
+
+            rows = self.get_success(
+                self.store.db_pool.simple_select_list(
+                    table=table,
+                    keyvalues={
+                        "room_id": room_id,
+                        "receipt_type": receipt_type,
+                        "user_id": user_id,
+                        # `simple_select_list` does not support NULL filters,
+                        # so skip the filter on `thread_id`.
+                    },
+                    retcols=columns,
+                    desc="get_receipt",
+                )
             )
-        )
-        self.assertEqual(1, len(res))
+
+            if expected_row is not None:
+                self.assertEqual(
+                    len(rows),
+                    1,
+                    f"Background update did not leave behind latest receipt in {table}",
+                )
+                self.assertEqual(
+                    rows[0],
+                    {
+                        "room_id": room_id,
+                        "receipt_type": receipt_type,
+                        "user_id": user_id,
+                        **expected_row,
+                    },
+                )
+            else:
+                self.assertEqual(
+                    len(rows),
+                    0,
+                    f"Background update did not remove all duplicate receipts from {table}",
+                )
 
     def test_background_receipts_linearized_unique_index(self):
         """Test that the background update to uniqueify non-thread receipts in
@@ -149,9 +162,18 @@ def test_background_receipts_linearized_unique_index(self):
             "receipts_linearized_unique_index",
             "receipts_linearized_unique_index",
             "receipts_linearized",
-            {
-                "stream_id": 5,
-                "event_id": "$some_event",
+            receipts={
+                (self.room_id, "m.read", self.user_id): [
+                    {"stream_id": 5, "event_id": "$some_event"},
+                    {"stream_id": 6, "event_id": "$some_event"},
+                ],
+                (self.other_room_id, "m.read", self.user_id): [
+                    {"stream_id": 7, "event_id": "$some_event"}
+                ],
+            },
+            expected_unique_receipts={
+                (self.room_id, "m.read", self.user_id): {"stream_id": 6},
+                (self.other_room_id, "m.read", self.user_id): {"stream_id": 7},
             },
         )
 
@@ -163,7 +185,25 @@ def test_background_receipts_graph_unique_index(self):
             "receipts_graph_unique_index",
             "receipts_graph_unique_index",
             "receipts_graph",
-            {
-                "event_ids": '["$some_event"]',
+            receipts={
+                (self.room_id, "m.read", self.user_id): [
+                    {
+                        "event_ids": '["$some_event"]',
+                    },
+                    {
+                        "event_ids": '["$some_event"]',
+                    },
+                ],
+                (self.other_room_id, "m.read", self.user_id): [
+                    {
+                        "event_ids": '["$some_event"]',
+                    }
+                ],
+            },
+            expected_unique_receipts={
+                (self.room_id, "m.read", self.user_id): None,
+                (self.other_room_id, "m.read", self.user_id): {
+                    "event_ids": '["$some_event"]'
+                },
             },
         )
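A final note on `_create_receipts_index`, since it is the subtle part of the last patch: Postgres refuses to execute `CREATE INDEX CONCURRENTLY` inside a transaction block, failing with "CREATE INDEX CONCURRENTLY cannot run inside a transaction block". That is why the helper rolls back the connection's open transaction and switches the session to autocommit before creating the index. The same pattern in standalone form, using psycopg2 directly with an assumed DSN and the index definition from the patch:

    import psycopg2

    def create_receipts_index(dsn: str) -> None:
        # The DSN is an assumption for the example, e.g. "dbname=synapse".
        conn = psycopg2.connect(dsn)
        try:
            # CONCURRENTLY cannot run inside a transaction block, so the
            # connection must be in autocommit mode before we execute it.
            conn.autocommit = True
            with conn.cursor() as cur:
                cur.execute(
                    "CREATE UNIQUE INDEX CONCURRENTLY"
                    " receipts_linearized_unique_index"
                    " ON receipts_linearized (room_id, receipt_type, user_id)"
                    " WHERE thread_id IS NULL"
                )
        finally:
            conn.close()

The trade-off of `CONCURRENTLY` is that the build does not take a lock that blocks concurrent writes, but it is slower than a plain CREATE INDEX and, if it fails partway through, leaves an invalid index behind that must be dropped before retrying. On sqlite the keyword is simply omitted, as the background update in the patch does.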