Merge remote-tracking branch 'upstream/develop' into websocket
krombel committed Mar 6, 2018
2 parents 0721d44 + 8ffaacb commit cfb001b
Showing 53 changed files with 2,159 additions and 1,922 deletions.
11 changes: 9 additions & 2 deletions docs/admin_api/purge_history_api.rst
@@ -8,9 +8,9 @@ Depending on the amount of history being purged a call to the API may take
several minutes or longer. During this period users will not be able to
paginate further back in the room from the point being purged from.

The API is simply:
The API is:

``POST /_matrix/client/r0/admin/purge_history/<room_id>/<event_id>``
``POST /_matrix/client/r0/admin/purge_history/<room_id>[/<event_id>]``

including an ``access_token`` of a server admin.

@@ -25,3 +25,10 @@ To delete local events as well, set ``delete_local_events`` in the body:
{
"delete_local_events": true
}
The caller must specify the point in the room to purge up to. This can be
specified by including an event_id in the URI, or by setting a
``purge_up_to_event_id`` or ``purge_up_to_ts`` in the request body. If an event
id is given, that event (and others at the same graph depth) will be retained.
If ``purge_up_to_ts`` is given, it should be a timestamp since the unix epoch,
in milliseconds.
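
For illustration, a minimal sketch of a purge request using the new ``purge_up_to_ts`` body parameter (the homeserver URL, room ID and admin token below are placeholders, not values from this commit):

    import time

    import requests  # assumed available; any HTTP client works

    HOMESERVER = "https://homeserver.example.com"
    ADMIN_TOKEN = "<server admin access_token>"
    ROOM_ID = "!someroom:example.com"

    # Purge everything older than 30 days, as milliseconds since the unix epoch.
    cutoff_ms = int((time.time() - 30 * 24 * 60 * 60) * 1000)

    resp = requests.post(
        "%s/_matrix/client/r0/admin/purge_history/%s" % (HOMESERVER, ROOM_ID),
        params={"access_token": ADMIN_TOKEN},
        json={
            "delete_local_events": True,
            "purge_up_to_ts": cutoff_ms,
        },
    )
    resp.raise_for_status()
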
8 changes: 8 additions & 0 deletions synapse/app/event_creator.py
@@ -27,10 +27,14 @@
from synapse.http.site import SynapseSite
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.tcp.client import ReplicationClientHandler
@@ -48,6 +52,10 @@


class EventCreatorSlavedStore(
SlavedAccountDataStore,
SlavedPusherStore,
SlavedReceiptsStore,
SlavedPushRuleStore,
SlavedDeviceStore,
SlavedClientIpStore,
SlavedApplicationServiceStore,
5 changes: 0 additions & 5 deletions synapse/app/pusher.py
@@ -32,7 +32,6 @@
from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.storage.engines import create_engine
from synapse.storage.roommember import RoomMemberStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
@@ -75,10 +74,6 @@ class PusherSlaveStore(
DataStore.get_profile_displayname.__func__
)

who_forgot_in_room = (
RoomMemberStore.__dict__["who_forgot_in_room"]
)


class PusherServer(HomeServer):
def setup(self):
8 changes: 2 additions & 6 deletions synapse/app/synchrotron.py
@@ -62,8 +62,6 @@


class SynchrotronSlavedStore(
SlavedPushRuleStore,
SlavedEventStore,
SlavedReceiptsStore,
SlavedAccountDataStore,
SlavedApplicationServiceStore,
@@ -73,14 +71,12 @@ class SynchrotronSlavedStore(
SlavedGroupServerStore,
SlavedDeviceInboxStore,
SlavedDeviceStore,
SlavedPushRuleStore,
SlavedEventStore,
SlavedClientIpStore,
RoomStore,
BaseSlavedStore,
):
who_forgot_in_room = (
RoomMemberStore.__dict__["who_forgot_in_room"]
)

did_forget = (
RoomMemberStore.__dict__["did_forget"]
)
2 changes: 0 additions & 2 deletions synapse/handlers/__init__.py
@@ -17,7 +17,6 @@
from .room import (
RoomCreationHandler, RoomContextHandler,
)
from .room_member import RoomMemberHandler
from .message import MessageHandler
from .federation import FederationHandler
from .directory import DirectoryHandler
@@ -49,7 +48,6 @@ def __init__(self, hs):
self.registration_handler = RegistrationHandler(hs)
self.message_handler = MessageHandler(hs)
self.room_creation_handler = RoomCreationHandler(hs)
self.room_member_handler = RoomMemberHandler(hs)
self.federation_handler = FederationHandler(hs)
self.directory_handler = DirectoryHandler(hs)
self.admin_handler = AdminHandler(hs)
2 changes: 1 addition & 1 deletion synapse/handlers/_base.py
@@ -158,7 +158,7 @@ def kick_guest_users(self, current_state):
# homeserver.
requester = synapse.types.create_requester(
target_user, is_guest=True)
handler = self.hs.get_handlers().room_member_handler
handler = self.hs.get_room_member_handler()
yield handler.update_membership(
requester,
target_user,
6 changes: 4 additions & 2 deletions synapse/handlers/auth.py
@@ -863,8 +863,10 @@ def validate_hash(self, password, stored_hash):
"""

def _do_validate_hash():
return bcrypt.hashpw(password.encode('utf8') + self.hs.config.password_pepper,
stored_hash.encode('utf8')) == stored_hash
return bcrypt.checkpw(
password.encode('utf8') + self.hs.config.password_pepper,
stored_hash.encode('utf8')
)

if stored_hash:
return make_deferred_yieldable(threads.deferToThread(_do_validate_hash))
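
For context on the ``bcrypt.checkpw`` change above, a minimal standalone sketch of peppered password hashing and verification with the ``bcrypt`` library (the pepper value and helper names are illustrative, not Synapse's):

    import bcrypt

    PEPPER = "example-pepper"

    def hash_password(password):
        # hashpw embeds the generated salt in the returned hash
        return bcrypt.hashpw((password + PEPPER).encode('utf8'), bcrypt.gensalt())

    def verify_password(password, stored_hash):
        # checkpw re-derives the hash from the salt embedded in stored_hash
        # and compares the result safely, instead of comparing hashpw() output
        # against the stored value by hand
        return bcrypt.checkpw((password + PEPPER).encode('utf8'), stored_hash)

    stored = hash_password("s3cret")
    assert verify_password("s3cret", stored)
    assert not verify_password("wrong", stored)
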
30 changes: 19 additions & 11 deletions synapse/handlers/federation.py
@@ -1447,16 +1447,24 @@ def _handle_new_event(self, origin, event, state=None, auth_events=None,
auth_events=auth_events,
)

if not event.internal_metadata.is_outlier() and not backfilled:
yield self.action_generator.handle_push_actions_for_event(
event, context
)
try:
if not event.internal_metadata.is_outlier() and not backfilled:
yield self.action_generator.handle_push_actions_for_event(
event, context
)

event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
backfilled=backfilled,
)
event_stream_id, max_stream_id = yield self.store.persist_event(
event,
context=context,
backfilled=backfilled,
)
except: # noqa: E722, as we reraise the exception this is fine.
# Ensure that we actually remove the entries in the push actions
# staging area
logcontext.preserve_fn(
self.store.remove_push_actions_from_staging
)(event.event_id)
raise

if not backfilled:
# this intentionally does not yield: we don't care about the result
@@ -2145,7 +2153,7 @@ def exchange_third_party_invite(
raise e

yield self._check_signature(event, context)
member_handler = self.hs.get_handlers().room_member_handler
member_handler = self.hs.get_room_member_handler()
yield member_handler.send_membership_event(None, event, context)
else:
destinations = set(x.split(":", 1)[-1] for x in (sender_user_id, room_id))
@@ -2189,7 +2197,7 @@ def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
# TODO: Make sure the signatures actually are correct.
event.signatures.update(returned_invite.signatures)

member_handler = self.hs.get_handlers().room_member_handler
member_handler = self.hs.get_room_member_handler()
yield member_handler.send_membership_event(None, event, context)

@defer.inlineCallbacks
108 changes: 71 additions & 37 deletions synapse/handlers/message.py
@@ -52,16 +52,12 @@ def __init__(self, hs):
self.pagination_lock = ReadWriteLock()

@defer.inlineCallbacks
def purge_history(self, room_id, event_id, delete_local_events=False):
event = yield self.store.get_event(event_id)

if event.room_id != room_id:
raise SynapseError(400, "Event is for wrong room.")

depth = event.depth

def purge_history(self, room_id, topological_ordering,
delete_local_events=False):
with (yield self.pagination_lock.write(room_id)):
yield self.store.purge_history(room_id, depth, delete_local_events)
yield self.store.purge_history(
room_id, topological_ordering, delete_local_events,
)

@defer.inlineCallbacks
def get_messages(self, requester, room_id=None, pagin_config=None,
@@ -553,24 +549,21 @@ def handle_new_client_event(
event,
context,
ratelimit=True,
extra_users=[]
extra_users=[],
):
# We now need to go and hit out to wherever we need to hit out to.

# If we're a worker we need to hit out to the master.
if self.config.worker_app:
yield send_event_to_master(
self.http_client,
host=self.config.worker_replication_host,
port=self.config.worker_replication_http_port,
requester=requester,
event=event,
context=context,
)
return
"""Processes a new event. This includes checking auth, persisting it,
notifying users, sending to remote servers, etc.
if ratelimit:
yield self.base_handler.ratelimit(requester)
If called from a worker will hit out to the master process for final
processing.
Args:
requester (Requester)
event (FrozenEvent)
context (EventContext)
ratelimit (bool)
extra_users (list(str)): Any extra users to notify about event
"""

try:
yield self.auth.check_from_context(event, context)
@@ -586,6 +579,57 @@
logger.exception("Failed to encode content: %r", event.content)
raise

yield self.action_generator.handle_push_actions_for_event(
event, context
)

try:
# If we're a worker we need to hit out to the master.
if self.config.worker_app:
yield send_event_to_master(
self.http_client,
host=self.config.worker_replication_host,
port=self.config.worker_replication_http_port,
requester=requester,
event=event,
context=context,
ratelimit=ratelimit,
extra_users=extra_users,
)
return

yield self.persist_and_notify_client_event(
requester,
event,
context,
ratelimit=ratelimit,
extra_users=extra_users,
)
except: # noqa: E722, as we reraise the exception this is fine.
# Ensure that we actually remove the entries in the push actions
# staging area, if we calculated them.
preserve_fn(self.store.remove_push_actions_from_staging)(event.event_id)
raise

@defer.inlineCallbacks
def persist_and_notify_client_event(
self,
requester,
event,
context,
ratelimit=True,
extra_users=[],
):
"""Called when we have fully built the event, have already
calculated the push actions for the event, and checked auth.
This should only be run on master.
"""
assert not self.config.worker_app

if ratelimit:
yield self.base_handler.ratelimit(requester)

yield self.base_handler.maybe_kick_guest_users(event, context)

if event.type == EventTypes.CanonicalAlias:
@@ -679,20 +723,10 @@ def is_inviter_member_event(e):
"Changing the room create event is forbidden",
)

yield self.action_generator.handle_push_actions_for_event(
event, context
(event_stream_id, max_stream_id) = yield self.store.persist_event(
event, context=context
)

try:
(event_stream_id, max_stream_id) = yield self.store.persist_event(
event, context=context
)
except: # noqa: E722, as we reraise the exception this is fine.
# Ensure that we actually remove the entries in the push actions
# staging area
preserve_fn(self.store.remove_push_actions_from_staging)(event.event_id)
raise

# this intentionally does not yield: we don't care about the result
# and don't need to wait for it.
preserve_fn(self.pusher_pool.on_new_notifications)(
2 changes: 1 addition & 1 deletion synapse/handlers/profile.py
@@ -233,7 +233,7 @@ def _update_join_states(self, requester, target_user):
)

for room_id in room_ids:
handler = self.hs.get_handlers().room_member_handler
handler = self.hs.get_room_member_handler()
try:
# Assume the target_user isn't a guest,
# because we don't let guests set profile or avatar data.
6 changes: 3 additions & 3 deletions synapse/handlers/read_marker.py
@@ -41,9 +41,9 @@ def received_client_read_marker(self, room_id, user_id, event_id):
"""

with (yield self.read_marker_linearizer.queue((room_id, user_id))):
account_data = yield self.store.get_account_data_for_room(user_id, room_id)

existing_read_marker = account_data.get("m.fully_read", None)
existing_read_marker = yield self.store.get_account_data_for_room_and_type(
user_id, room_id, "m.fully_read",
)

should_update = True

13 changes: 5 additions & 8 deletions synapse/handlers/room.py
@@ -165,7 +165,7 @@ def create_room(self, requester, config, ratelimit=True):

creation_content = config.get("creation_content", {})

room_member_handler = self.hs.get_handlers().room_member_handler
room_member_handler = self.hs.get_room_member_handler()

yield self._send_events_for_new_room(
requester,
@@ -224,7 +224,7 @@ def create_room(self, requester, config, ratelimit=True):
id_server = invite_3pid["id_server"]
address = invite_3pid["address"]
medium = invite_3pid["medium"]
yield self.hs.get_handlers().room_member_handler.do_3pid_invite(
yield self.hs.get_room_member_handler().do_3pid_invite(
room_id,
requester.user,
medium,
@@ -475,12 +475,9 @@ def get_new_events(
user.to_string()
)
if app_service:
events, end_key = yield self.store.get_appservice_room_stream(
service=app_service,
from_key=from_key,
to_key=to_key,
limit=limit,
)
# We no longer support AS users using /sync directly.
# See https://github.com/matrix-org/matrix-doc/issues/1144
raise NotImplementedError()
else:
room_events = yield self.store.get_membership_changes_for_user(
user.to_string(), from_key, to_key