Commit 921a3f8a authored by Erik Johnston, committed by GitHub

Fix not sending events over federation when using sharded event persisters (#8536)

* Fix outbound federation with multiple event persisters.

We incorrectly notified federation senders that the minimum persisted
stream position had advanced when we got an `RDATA` from an event
persister.

The federation senders are already notified correctly in the notifier, so we
just delete the offending line.

* Change some interfaces to use RoomStreamToken.

By enforcing use of `RoomStreamToken` we make it less likely that callers
pass in arbitrary ints obtained from the wrong place.
Parent commit: 3ee97a27
Fix not sending events over federation when using sharded event writers.
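
For orientation, a minimal sketch of the token type the changed interfaces now accept. This is a simplification for illustration only: the real `RoomStreamToken` lives in `synapse.types` and carries more than the two fields visible in this diff, but the `RoomStreamToken(None, 0)` construction and the `.stream` attribute below match how the new code uses it.

    from dataclasses import dataclass
    from typing import Optional

    @dataclass(frozen=True)
    class RoomStreamToken:
        # Simplified stand-in for synapse.types.RoomStreamToken.
        topological: Optional[int]  # None for "live" (stream-ordered) tokens
        stream: int                 # minimum stream ordering persisted by all writers

    # Callers hand over the whole token; callees that only need a plain integer
    # position take .stream themselves:
    max_token = RoomStreamToken(None, 42)
    current_id = max_token.stream  # 42

Passing the structured token rather than a bare int makes it much harder to feed in an unrelated integer by accident, and it leaves room for consumers that later want the full per-writer information.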
@@ -790,10 +790,6 @@ class FederationSenderHandler:
             send_queue.process_rows_for_federation(self.federation_sender, rows)
             await self.update_token(token)
 
-        # We also need to poke the federation sender when new events happen
-        elif stream_name == "events":
-            self.federation_sender.notify_new_events(token)
-
         # ... and when new receipts happen
         elif stream_name == ReceiptsStream.NAME:
             await self._on_new_receipts(rows)
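
Why the removed handler was a problem (an illustration with made-up numbers, not the actual replication code): when event persistence is sharded, the position carried by an `RDATA` row from one event persister only says how far that single writer has got, whereas `notify_new_events` treats its argument as the minimum position that is fully persisted across all writers.

    # Hypothetical per-writer positions seen on the events replication stream.
    positions = {"event_persister_1": 45, "event_persister_2": 40}

    safe_minimum = min(positions.values())          # 40: everything up to here exists
    latest_rdata = positions["event_persister_1"]   # 45: what the deleted line passed on

Poking the federation sender with 45 lets it advance past stream positions 41-45 even though event_persister_2 may not have written its events in that range yet, so those events may never be sent. The notifier already computes the safe minimum token and notifies the federation sender itself (see the `Notifier` hunk below), which is why the worker-side call is simply deleted rather than fixed up.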
@@ -188,7 +188,7 @@ class FederationRemoteSendQueue:
         for key in keys[:i]:
             del self.edus[key]
 
-    def notify_new_events(self, current_id):
+    def notify_new_events(self, max_token):
         """As per FederationSender"""
         # We don't need to replicate this as it gets sent down a different
         # stream.
@@ -40,7 +40,7 @@ from synapse.metrics import (
     events_processed_counter,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.types import ReadReceipt
+from synapse.types import ReadReceipt, RoomStreamToken
 from synapse.util.metrics import Measure, measure_func
 
 logger = logging.getLogger(__name__)
@@ -154,10 +154,15 @@ class FederationSender:
             self._per_destination_queues[destination] = queue
         return queue
 
-    def notify_new_events(self, current_id: int) -> None:
+    def notify_new_events(self, max_token: RoomStreamToken) -> None:
         """This gets called when we have some new events we might want to
         send out to other servers.
         """
+        # We just use the minimum stream ordering and ignore the vector clock
+        # component. This is safe to do as long as we *always* ignore the vector
+        # clock components.
+        current_id = max_token.stream
+
         self._last_poked_id = max(current_id, self._last_poked_id)
 
         if self._is_processing:
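
The repeated comment about ignoring the vector clock component is worth a short illustration (a sketch under assumed field names, not Synapse's actual token class): with sharded persisters the token conceptually carries a per-writer vector clock alongside the guaranteed minimum, and `.stream` collapses it to that minimum. That is only safe while every consumer does the same, so that the plain integers being compared always mean "fully persisted up to here".

    from dataclasses import dataclass, field
    from typing import Dict, Optional

    @dataclass(frozen=True)
    class ShardedToken:
        # Illustrative only: a live token plus a per-writer vector clock.
        topological: Optional[int]
        stream: int                                   # minimum across all writers
        writer_positions: Dict[str, int] = field(default_factory=dict)

    token = ShardedToken(None, 40, {"persister_1": 45, "persister_2": 40})

    # FederationSender tracks a single integer, so it consistently takes the
    # guaranteed-complete minimum rather than any one writer's position:
    current_id = token.stream  # 40, never 45

Comparing a minimum taken in one place against a per-writer position taken in another would reintroduce the same class of bug this commit fixes, which is why the comment stresses *always* ignoring the vector clock components.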
@@ -27,6 +27,7 @@ from synapse.metrics import (
     event_processing_loop_room_count,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.types import RoomStreamToken
 from synapse.util.metrics import Measure
 
 logger = logging.getLogger(__name__)
@@ -47,15 +48,17 @@ class ApplicationServicesHandler:
         self.current_max = 0
         self.is_processing = False
 
-    async def notify_interested_services(self, current_id):
+    async def notify_interested_services(self, max_token: RoomStreamToken):
         """Notifies (pushes) all application services interested in this event.
 
         Pushing is done asynchronously, so this method won't block for any
         prolonged length of time.
-
-        Args:
-            current_id(int): The current maximum ID.
         """
+        # We just use the minimum stream ordering and ignore the vector clock
+        # component. This is safe to do as long as we *always* ignore the vector
+        # clock components.
+        current_id = max_token.stream
+
         services = self.store.get_app_services()
         if not services or not self.notify_appservices:
             return
@@ -319,19 +319,19 @@ class Notifier:
         )
 
         if self.federation_sender:
-            self.federation_sender.notify_new_events(max_room_stream_token.stream)
+            self.federation_sender.notify_new_events(max_room_stream_token)
 
     async def _notify_app_services(self, max_room_stream_token: RoomStreamToken):
         try:
             await self.appservice_handler.notify_interested_services(
-                max_room_stream_token.stream
+                max_room_stream_token
             )
         except Exception:
             logger.exception("Error notifying application services of event")
 
     async def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken):
         try:
-            await self._pusher_pool.on_new_notifications(max_room_stream_token.stream)
+            await self._pusher_pool.on_new_notifications(max_room_stream_token)
         except Exception:
             logger.exception("Error pusher pool of event")
@@ -18,6 +18,7 @@ import logging
 
 from twisted.internet.error import AlreadyCalled, AlreadyCancelled
 
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.types import RoomStreamToken
 
 logger = logging.getLogger(__name__)
@@ -91,7 +92,12 @@ class EmailPusher:
                 pass
             self.timed_call = None
 
-    def on_new_notifications(self, max_stream_ordering):
+    def on_new_notifications(self, max_token: RoomStreamToken):
+        # We just use the minimum stream ordering and ignore the vector clock
+        # component. This is safe to do as long as we *always* ignore the vector
+        # clock components.
+        max_stream_ordering = max_token.stream
+
         if self.max_stream_ordering:
             self.max_stream_ordering = max(
                 max_stream_ordering, self.max_stream_ordering
@@ -23,6 +23,7 @@ from synapse.api.constants import EventTypes
 from synapse.logging import opentracing
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.push import PusherConfigException
+from synapse.types import RoomStreamToken
 
 from . import push_rule_evaluator, push_tools
@@ -114,7 +115,12 @@ class HttpPusher:
         if should_check_for_notifs:
             self._start_processing()
 
-    def on_new_notifications(self, max_stream_ordering):
+    def on_new_notifications(self, max_token: RoomStreamToken):
+        # We just use the minimum stream ordering and ignore the vector clock
+        # component. This is safe to do as long as we *always* ignore the vector
+        # clock components.
+        max_stream_ordering = max_token.stream
+
         self.max_stream_ordering = max(
             max_stream_ordering, self.max_stream_ordering or 0
         )
@@ -24,6 +24,7 @@ from synapse.push import PusherConfigException
 from synapse.push.emailpusher import EmailPusher
 from synapse.push.httppusher import HttpPusher
 from synapse.push.pusher import PusherFactory
+from synapse.types import RoomStreamToken
 from synapse.util.async_helpers import concurrently_execute
 
 if TYPE_CHECKING:
@@ -186,11 +187,16 @@ class PusherPool:
                 )
                 await self.remove_pusher(p["app_id"], p["pushkey"], p["user_name"])
 
-    async def on_new_notifications(self, max_stream_id: int):
+    async def on_new_notifications(self, max_token: RoomStreamToken):
         if not self.pushers:
             # nothing to do here.
             return
 
+        # We just use the minimum stream ordering and ignore the vector clock
+        # component. This is safe to do as long as we *always* ignore the vector
+        # clock components.
+        max_stream_id = max_token.stream
+
         if max_stream_id < self._last_room_stream_id_seen:
             # Nothing to do
             return
@@ -214,7 +220,7 @@ class PusherPool:
                 if u in self.pushers:
                     for p in self.pushers[u].values():
-                        p.on_new_notifications(max_stream_id)
+                        p.on_new_notifications(max_token)
 
         except Exception:
             logger.exception("Exception in pusher on_new_notifications")
@@ -18,6 +18,7 @@ from mock import Mock
 from twisted.internet import defer
 
 from synapse.handlers.appservice import ApplicationServicesHandler
+from synapse.types import RoomStreamToken
 
 from tests.test_utils import make_awaitable
 from tests.utils import MockClock
@@ -61,7 +62,9 @@ class AppServiceHandlerTestCase(unittest.TestCase):
             defer.succeed((0, [event])),
             defer.succeed((0, [])),
         ]
-        yield defer.ensureDeferred(self.handler.notify_interested_services(0))
+        yield defer.ensureDeferred(
+            self.handler.notify_interested_services(RoomStreamToken(None, 0))
+        )
         self.mock_scheduler.submit_event_for_as.assert_called_once_with(
             interested_service, event
         )
@@ -80,7 +83,9 @@ class AppServiceHandlerTestCase(unittest.TestCase):
             defer.succeed((0, [event])),
             defer.succeed((0, [])),
         ]
-        yield defer.ensureDeferred(self.handler.notify_interested_services(0))
+        yield defer.ensureDeferred(
+            self.handler.notify_interested_services(RoomStreamToken(None, 0))
+        )
         self.mock_as_api.query_user.assert_called_once_with(services[0], user_id)
 
     @defer.inlineCallbacks
@@ -97,7 +102,9 @@ class AppServiceHandlerTestCase(unittest.TestCase):
             defer.succeed((0, [event])),
             defer.succeed((0, [])),
         ]
-        yield defer.ensureDeferred(self.handler.notify_interested_services(0))
+        yield defer.ensureDeferred(
+            self.handler.notify_interested_services(RoomStreamToken(None, 0))
+        )
         self.assertFalse(
             self.mock_as_api.query_user.called,
             "query_user called when it shouldn't have been.",