use std::{
cmp::{self, Ordering},
collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
time::Duration,
};

use axum::extract::State;
use conduit::{
debug, error,
utils::math::{ruma_from_u64, ruma_from_usize, usize_from_ruma, usize_from_u64_truncated},
warn, Err, PduCount,
};
use ruma::{
api::client::{
error::ErrorKind,
filter::{FilterDefinition, LazyLoadOptions},
sync::sync_events::{
self,
v3::{
Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, LeftRoom, Presence,
RoomAccountData, RoomSummary, Rooms, State as RoomState, Timeline, ToDevice,
},
v4::{SlidingOp, SlidingSyncRoomHero},
DeviceLists, UnreadNotificationsCount,
},
uiaa::UiaaResponse,
},
events::{
presence::PresenceEvent,
room::member::{MembershipState, RoomMemberEventContent},
AnyRawAccountDataEvent, StateEventType, TimelineEventType,
},
room::RoomType,
serde::Raw,
state_res::Event,
uint, DeviceId, EventId, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedUserId, RoomId, UInt, UserId,
};
use tracing::{Instrument as _, Span};
use crate::{
service::{pdu::EventHash, Services},
utils, Error, PduEvent, Result, Ruma, RumaResponse,
};
/// Fallback `conn_id` for sliding sync requests that do not supply one.
const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync";
/// Event types that, in sliding sync, bump a room's `timestamp` ordering when
/// they appear in the timeline.
const DEFAULT_BUMP_TYPES: &[TimelineEventType] = &[
TimelineEventType::Message,
TimelineEventType::Encrypted,
TimelineEventType::Sticker,
TimelineEventType::CallInvite,
TimelineEventType::PollStart,
TimelineEventType::Beacon,
];
macro_rules! extract_variant {
($e:expr, $variant:path) => {
match $e {
$variant(value) => Some(value),
_ => None,
}
};
}
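// A hypothetical call site, mirroring how the macro is used below: keep only
// the `Global` variant of raw account-data events.
//
// let global_events: Vec<_> = all_events
//     .into_iter()
//     .filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global))
//     .collect();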
/// # `GET /_matrix/client/r0/sync`
///
/// Synchronize the client's state with the latest state on the server.
///
/// - This endpoint takes a `since` parameter which should be the `next_batch`
/// value from a previous request for incremental syncs.
///
/// Calling this endpoint without a `since` parameter returns:
/// - Some of the most recent events of each timeline
/// - Notification counts for each room
/// - Joined and invited member counts, heroes
/// - All state events
///
/// Calling this endpoint with a `since` parameter from a previous `next_batch`
/// returns, for joined rooms:
/// - Some of the most recent events of each timeline that happened after `since`
/// - If the user joined the room after `since`: all state events (unless lazy
/// loading is activated) and all device list updates in that room
/// - If the user was already in the room: a list of all events that are in the
/// state now, but were not in the state at `since`
/// - If the state we send contains a member event: joined and invited member
/// counts, heroes
/// - Device list updates that happened after `since`
/// - If there are events in the timeline we send or the user sent an updated
/// read marker: notification counts
/// - EDUs that are active now (read receipts, typing updates, presence)
/// - TODO: Allow multiple sync streams to support Pantalaimon
/// - If the user was invited after `since`: A subset of the state of the room
/// at the point of the invite
/// - If the user left after `since`: `prev_batch` token, empty state (TODO:
/// subset of the state at the point of the leave)
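///
/// A rough sketch of the client flow (the token is illustrative, not real
/// output of this server):
///
/// ```text
/// GET /_matrix/client/r0/sync                    -> { "next_batch": "s100", ... }
/// GET /_matrix/client/r0/sync?since=s100&timeout=30000
/// ```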
pub(crate) async fn sync_events_route(
State(services): State<crate::State>, body: Ruma<sync_events::v3::Request>,
) -> Result<sync_events::v3::Response, RumaResponse<UiaaResponse>> {
let sender_user = body.sender_user.expect("user is authenticated");
let sender_device = body.sender_device.expect("user is authenticated");
let body = body.body;
// Presence update
if services.globals.allow_local_presence() {
services
.presence
.ping_presence(&sender_user, &body.set_presence)?;
}
// Set up watchers, so if there's no response, we can wait for them
let watcher = services.globals.watch(&sender_user, &sender_device);
let next_batch = services.globals.current_count()?;
let next_batchcount = PduCount::Normal(next_batch);
let next_batch_string = next_batch.to_string();
// Load filter
let filter = match body.filter {
None => FilterDefinition::default(),
Some(Filter::FilterDefinition(filter)) => filter,
Some(Filter::FilterId(filter_id)) => services
.users
.get_filter(&sender_user, &filter_id)?
.unwrap_or_default(),
};
// Some clients, at least Element, seem to require knowledge of redundant
// members for "inline" profiles on the timeline to work properly
let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options {
LazyLoadOptions::Enabled {
include_redundant_members,
} => (true, include_redundant_members),
LazyLoadOptions::Disabled => (false, cfg!(feature = "element_hacks")),
};
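// For reference, a client opts in to lazy loading through the filter JSON;
// the values here are illustrative:
//
// { "room": { "state": { "lazy_load_members": true,
//                        "include_redundant_members": false } } }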
let full_state = body.full_state;
let mut joined_rooms = BTreeMap::new();
let since = body
.since
.as_ref()
.and_then(|string| string.parse().ok())
.unwrap_or(0);
let sincecount = PduCount::Normal(since);
let mut presence_updates = HashMap::new();
let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
let mut device_list_updates = HashSet::new();
let mut device_list_left = HashSet::new();
// Look for device list updates of this account
device_list_updates.extend(
services
.users
.keys_changed(sender_user.as_ref(), since, None)
.filter_map(Result::ok),
);
process_presence_updates(&services, &mut presence_updates, since, &sender_user).await?;
let all_joined_rooms = services
.rooms
.state_cache
.rooms_joined(&sender_user)
.collect::<Vec<_>>();
// Coalesce database writes for the remainder of this scope.
for room_id in all_joined_rooms {
let room_id = room_id?;
if let Ok(joined_room) = load_joined_room(
&services,
&sender_user,
&sender_device,
&room_id,
since,
sincecount,
next_batch,
next_batchcount,
lazy_load_enabled,
lazy_load_send_redundant,
full_state,
&mut device_list_updates,
&mut left_encrypted_users,
)
.await
{
if !joined_room.is_empty() {
joined_rooms.insert(room_id.clone(), joined_room);
}
}
}
let mut left_rooms = BTreeMap::new();
let all_left_rooms: Vec<_> = services
.rooms
.state_cache
.rooms_left(&sender_user)
.collect();
for result in all_left_rooms {
handle_left_room(
&services,
since,
&result?.0,
&sender_user,
&mut left_rooms,
&next_batch_string,
full_state,
lazy_load_enabled,
)
.instrument(Span::current())
.await?;
}
let mut invited_rooms = BTreeMap::new();
let all_invited_rooms: Vec<_> = services
.rooms
.state_cache
.rooms_invited(&sender_user)
.collect();
for result in all_invited_rooms {
let (room_id, invite_state_events) = result?;
// Get and drop the lock to wait for remaining operations to finish
let insert_lock = services.rooms.timeline.mutex_insert.lock(&room_id).await;
drop(insert_lock);

let invite_count = services
.rooms
.state_cache
.get_invite_count(&room_id, &sender_user)?;
// Invited before last sync
if Some(since) >= invite_count {
continue;
}
invited_rooms.insert(
room_id.clone(),
InvitedRoom {
invite_state: InviteState {
events: invite_state_events,
},
},
);
}
for user_id in left_encrypted_users {
let dont_share_encrypted_room = services
.rooms
.user
.get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
.filter_map(Result::ok)
.filter_map(|other_room_id| {
Some(
services
.rooms
.state_accessor
.room_state_get(&other_room_id, &StateEventType::RoomEncryption, "")
.ok()?
.is_some(),
)
})
.all(|encrypted| !encrypted);
// If the user doesn't share an encrypted room with the target anymore, we need
// to tell them
if dont_share_encrypted_room {
device_list_left.insert(user_id);
}
}
// Remove all to-device events the device received *last time*
services
.users
.remove_to_device_events(&sender_user, &sender_device, since)?;
let response = sync_events::v3::Response {
next_batch: next_batch_string,
rooms: Rooms {
leave: left_rooms,
join: joined_rooms,
invite: invited_rooms,
knock: BTreeMap::new(), // TODO
},
presence: Presence {
events: presence_updates
.into_values()
.map(|v| Raw::new(&v).expect("PresenceEvent always serializes successfully"))
.collect(),
},
account_data: GlobalAccountData {
events: services
.account_data
.changes_since(None, &sender_user, since)?
.into_iter()
.filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global))
.collect(),
},
device_lists: DeviceLists {
changed: device_list_updates.into_iter().collect(),
left: device_list_left.into_iter().collect(),
},
device_one_time_keys_count: services
.users
.count_one_time_keys(&sender_user, &sender_device)?,
to_device: ToDevice {
events: services
.users
.get_to_device_events(&sender_user, &sender_device)?,
},
// Fallback keys are not yet supported
device_unused_fallback_key_types: None,
};
// TODO: Retry the endpoint instead of returning
if !full_state
&& response.rooms.is_empty()
&& response.presence.is_empty()
&& response.account_data.is_empty()
&& response.device_lists.is_empty()
&& response.to_device.is_empty()
{
// Hang a few seconds so requests are not spammed
// Stop hanging if new info arrives
let default = Duration::from_secs(30);
let duration = cmp::min(body.timeout.unwrap_or(default), default);
_ = tokio::time::timeout(duration, watcher).await;
}

Ok(response)
}
#[tracing::instrument(skip_all, fields(user_id = %sender_user, room_id = %room_id), name = "left_room")]
async fn handle_left_room(
services: &Services, since: u64, room_id: &RoomId, sender_user: &UserId,
left_rooms: &mut BTreeMap<OwnedRoomId, LeftRoom>, next_batch_string: &str, full_state: bool,
lazy_load_enabled: bool,
) -> Result<()> {
// Get and drop the lock to wait for remaining operations to finish
let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await;
drop(insert_lock);

let left_count = services
.rooms
.state_cache
.get_left_count(room_id, sender_user)?;
// Left before last sync
if Some(since) >= left_count {
return Ok(());
}
if !services.rooms.metadata.exists(room_id)? {
// This is just a rejected invite, not a room we know
// Insert a leave event anyways
let event = PduEvent {
event_id: EventId::new(services.globals.server_name()).into(),
sender: sender_user.to_owned(),
origin: None,
origin_server_ts: utils::millis_since_unix_epoch()
.try_into()
.expect("Timestamp is valid js_int value"),
kind: TimelineEventType::RoomMember,
content: serde_json::from_str(r#"{"membership":"leave"}"#).expect("this is valid JSON"),
state_key: Some(sender_user.to_string()),
unsigned: None,
// The following keys are dropped on conversion
room_id: room_id.to_owned(),
prev_events: vec![],
depth: uint!(1),
auth_events: vec![],
redacts: None,
hashes: EventHash {
sha256: String::new(),
},
signatures: None,
};
left_rooms.insert(
room_id.to_owned(),
LeftRoom {
account_data: RoomAccountData {
events: Vec::new(),
},
timeline: Timeline {
limited: false,
prev_batch: Some(next_batch_string.to_owned()),
events: Vec::new(),
},
state: RoomState {
events: vec![event.to_sync_state_event()],
},
},
);
return Ok(());
}
let mut left_state_events = Vec::new();
let since_shortstatehash = services
.rooms
.user
.get_token_shortstatehash(room_id, since)?;
let since_state_ids = match since_shortstatehash {
Some(s) => services.rooms.state_accessor.state_full_ids(s).await?,
None => HashMap::new(),
};
let Some(left_event_id) =
services
.rooms
.state_accessor
.room_state_get_id(room_id, &StateEventType::RoomMember, sender_user.as_str())?
else {
error!("Left room but no left state event");
return Ok(());
};
let Some(left_shortstatehash) = services
.rooms
.state_accessor
.pdu_shortstatehash(&left_event_id)?
else {
error!(event_id = %left_event_id, "Leave event has no state");
return Ok(());
};
let mut left_state_ids = services
.rooms
.state_accessor
.state_full_ids(left_shortstatehash)
.await?;
let leave_shortstatekey = services
.rooms
.short
.get_or_create_shortstatekey(&StateEventType::RoomMember, sender_user.as_str())?;
left_state_ids.insert(leave_shortstatekey, left_event_id);
let mut i: u8 = 0;
for (key, id) in left_state_ids {
if full_state || since_state_ids.get(&key) != Some(&id) {
let (event_type, state_key) = services.rooms.short.get_statekey_from_short(key)?;
if !lazy_load_enabled
|| event_type != StateEventType::RoomMember
|| full_state
// TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565
|| (cfg!(feature = "element_hacks") && *sender_user == state_key)
{
let Some(pdu) = services.rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};
left_state_events.push(pdu.to_sync_state_event());

i = i.wrapping_add(1);
if i % 100 == 0 {
tokio::task::yield_now().await;
}
}
}
}
left_rooms.insert(
room_id.to_owned(),
LeftRoom {
account_data: RoomAccountData {
events: Vec::new(),
},
timeline: Timeline {
limited: false,
prev_batch: Some(next_batch_string.to_owned()),
events: Vec::new(),
},
state: RoomState {
events: left_state_events,
},
},
);
Ok(())
}
async fn process_presence_updates(
services: &Services, presence_updates: &mut HashMap<OwnedUserId, PresenceEvent>, since: u64, syncing_user: &UserId,
) -> Result<()> {
for (user_id, _, presence_bytes) in services.presence.presence_since(since) {
if !services
.rooms
.state_cache
.user_sees_user(syncing_user, &user_id)?
{
continue;
}
let presence_event = services
.presence
.from_json_bytes_to_event(&presence_bytes, &user_id)?;
match presence_updates.entry(user_id) {
Entry::Vacant(slot) => {
slot.insert(presence_event);
},
Entry::Occupied(mut slot) => {
let curr_event = slot.get_mut();
let curr_content = &mut curr_event.content;
let new_content = presence_event.content;
// Update existing presence event with more info
curr_content.presence = new_content.presence;
curr_content.status_msg = new_content
.status_msg
.or_else(|| curr_content.status_msg.take());
curr_content.last_active_ago = new_content.last_active_ago.or(curr_content.last_active_ago);
curr_content.displayname = new_content
.displayname
.or_else(|| curr_content.displayname.take());
curr_content.avatar_url = new_content
.avatar_url
.or_else(|| curr_content.avatar_url.take());
curr_content.currently_active = new_content
.currently_active
.or(curr_content.currently_active);
},
}
}

Ok(())
}
#[allow(clippy::too_many_arguments)]
async fn load_joined_room(
services: &Services, sender_user: &UserId, sender_device: &DeviceId, room_id: &RoomId, since: u64,
sincecount: PduCount, next_batch: u64, next_batchcount: PduCount, lazy_load_enabled: bool,
lazy_load_send_redundant: bool, full_state: bool, device_list_updates: &mut HashSet<OwnedUserId>,
left_encrypted_users: &mut HashSet<OwnedUserId>,
) -> Result<JoinedRoom> {
// Get and drop the lock to wait for remaining operations to finish
// This will make sure that we have all events up to next_batch
let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await;
drop(insert_lock);
let (timeline_pdus, limited) = load_timeline(services, sender_user, room_id, sincecount, 10)?;
let send_notification_counts = !timeline_pdus.is_empty()
|| services
.rooms
.user
.last_notification_read(sender_user, room_id)?
> since;
let mut timeline_users = HashSet::new();
for (_, event) in &timeline_pdus {
timeline_users.insert(event.sender.as_str().to_owned());
}
services
.rooms
.lazy_loading
.lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount)
.await?;
let Some(current_shortstatehash) = services.rooms.state.get_room_shortstatehash(room_id)? else {
return Err!(Database(error!("Room {room_id} has no state")));
};

let since_shortstatehash = services
.rooms
.user
.get_token_shortstatehash(room_id, since)?;
let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) =
if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
// No state changes
(Vec::new(), None, None, false, Vec::new())
} else {
// Calculates joined_member_count, invited_member_count and heroes
let calculate_counts = || {
let joined_member_count = services
.rooms
.state_cache
.room_joined_count(room_id)?
.unwrap_or(0);
let invited_member_count = services
.rooms
.state_cache
.room_invited_count(room_id)?
.unwrap_or(0);
let mut heroes: Vec<OwnedUserId> = Vec::with_capacity(5);
if joined_member_count.saturating_add(invited_member_count) <= 5 {
// Go through all PDUs and for each member event, check if the user is still
// joined or invited until we have 5 or we reach the end
for hero in services
.rooms
.timeline
.all_pdus(sender_user, room_id)?
.filter_map(Result::ok) // Ignore all broken pdus
.filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember)
.map(|(_, pdu)| {
let content: RoomMemberEventContent = serde_json::from_str(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid member event in database."))?;
if let Some(state_key) = &pdu.state_key {
let user_id = UserId::parse(state_key.clone())
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
// The membership was and still is invite or join
if matches!(content.membership, MembershipState::Join | MembershipState::Invite)
&& (services.rooms.state_cache.is_joined(&user_id, room_id)?
|| services.rooms.state_cache.is_invited(&user_id, room_id)?)
{
Ok::<_, Error>(Some(user_id))
} else {
Ok(None)
}
} else {
Ok(None)
}
})
.filter_map(Result::ok)
// Filter for possible heroes
.flatten()
{
if heroes.contains(&hero) || hero == sender_user {
continue;
}

heroes.push(hero);
}
}

Ok::<_, Error>((Some(joined_member_count), Some(invited_member_count), heroes))
};
let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash
.and_then(|shortstatehash| {
services
.rooms
.state_accessor
.state_get(shortstatehash, &StateEventType::RoomMember, sender_user.as_str())
.transpose()
})
.transpose()?
.and_then(|pdu| {
serde_json::from_str(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid PDU in database."))
.ok()
});
let joined_since_last_sync =
since_sender_member.map_or(true, |member| member.membership != MembershipState::Join);
if since_shortstatehash.is_none() || joined_since_last_sync {
// Probably since = 0, we will do an initial sync
let (joined_member_count, invited_member_count, heroes) = calculate_counts()?;
let current_state_ids = services
.rooms
.state_accessor
.state_full_ids(current_shortstatehash)
.await?;
let mut state_events = Vec::new();
let mut lazy_loaded = HashSet::new();
let mut i: u8 = 0;
for (shortstatekey, id) in current_state_ids {
let (event_type, state_key) = services
.rooms
.short
.get_statekey_from_short(shortstatekey)?;

if event_type != StateEventType::RoomMember {
let Some(pdu) = services.rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};
state_events.push(pdu);

i = i.wrapping_add(1);
if i % 100 == 0 {
tokio::task::yield_now().await;
}
} else if !lazy_load_enabled
|| full_state
|| timeline_users.contains(&state_key)
// TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565
|| (cfg!(feature = "element_hacks") && *sender_user == state_key)
{
let Some(pdu) = services.rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};

// This check is in case a bad user ID made it into the database
if let Ok(uid) = UserId::parse(&state_key) {
lazy_loaded.insert(uid);
}
state_events.push(pdu);

i = i.wrapping_add(1);
if i % 100 == 0 {
tokio::task::yield_now().await;
}
}
}

// Reset lazy loading because this is an initial sync
services
.rooms
.lazy_loading
.lazy_load_reset(sender_user, sender_device, room_id)?;
// The state_events above should contain all timeline_users, let's mark them as
// lazy loaded.
services
.rooms
.lazy_loading
.lazy_load_mark_sent(sender_user, sender_device, room_id, lazy_loaded, next_batchcount)
.await;
(heroes, joined_member_count, invited_member_count, true, state_events)
} else {
// Incremental /sync
let since_shortstatehash = since_shortstatehash.unwrap();
let mut delta_state_events = Vec::new();
let current_state_ids = services
.rooms
.state_accessor
.state_full_ids(current_shortstatehash)
.await?;
let since_state_ids = services
.rooms
.state_accessor
.state_full_ids(since_shortstatehash)
.await?;
for (key, id) in current_state_ids {
if full_state || since_state_ids.get(&key) != Some(&id) {
let Some(pdu) = services.rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};
delta_state_events.push(pdu);
tokio::task::yield_now().await;
}
}

let encrypted_room = services
.rooms
.state_accessor
.state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")?
.is_some();
let since_encryption = services.rooms.state_accessor.state_get(
since_shortstatehash,
&StateEventType::RoomEncryption,
"",
)?;
// Calculations:
let new_encrypted_room = encrypted_room && since_encryption.is_none();
let send_member_count = delta_state_events
.iter()
.any(|event| event.kind == TimelineEventType::RoomMember);
for state_event in &delta_state_events {
if let Some(state_key) = &state_event.state_key {
let user_id = UserId::parse(state_key.clone())
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
if user_id == sender_user {
continue;
}
let new_membership =
serde_json::from_str::<RoomMemberEventContent>(state_event.content.get())
.map_err(|_| Error::bad_database("Invalid PDU in database."))?
.membership;
match new_membership {
MembershipState::Join => {
// A new user joined an encrypted room
if !share_encrypted_room(services, sender_user, &user_id, room_id)? {
device_list_updates.insert(user_id);
}
},
MembershipState::Leave => {
// Write down users that have left encrypted rooms we are in
left_encrypted_users.insert(user_id);
},
_ => {},
}
}
}

if joined_since_last_sync && encrypted_room || new_encrypted_room {
// If the user is in a new encrypted room, give them all joined users
device_list_updates.extend(
services
.rooms
.state_cache
.room_members(room_id)
.flatten()
.filter(|user_id| {
// Don't send key updates from the sender to the sender
sender_user != user_id
})
.filter(|user_id| {
// Only send keys if the sender doesn't share an encrypted room with the target
// already
!share_encrypted_room(services, sender_user, user_id, room_id).unwrap_or(false)
}),
);
}

let (joined_member_count, invited_member_count, heroes) = if send_member_count {
calculate_counts()?
} else {
(None, None, Vec::new())
};
let mut state_events = delta_state_events;
let mut lazy_loaded = HashSet::new();
// Mark all member events we're returning as lazy-loaded
for pdu in &state_events {
if pdu.kind == TimelineEventType::RoomMember {
match UserId::parse(
pdu.state_key
.as_ref()
.expect("State event has state key")
.clone(),
) {
Ok(state_key_userid) => {
lazy_loaded.insert(state_key_userid);
},
Err(e) => error!("Invalid state key for member event: {}", e),
}
}
}
// Fetch contextual member state events for events from the timeline, and
// mark them as lazy-loaded as well.
for (_, event) in &timeline_pdus {
if lazy_loaded.contains(&event.sender) {
continue;
}
if !services.rooms.lazy_loading.lazy_load_was_sent_before(
sender_user,
sender_device,
room_id,
&event.sender,
)? || lazy_load_send_redundant
{
if let Some(member_event) = services.rooms.state_accessor.room_state_get(
room_id,
&StateEventType::RoomMember,
event.sender.as_str(),
)? {
lazy_loaded.insert(event.sender.clone());
state_events.push(member_event);
}
}
}
services
.rooms
.lazy_loading
.lazy_load_mark_sent(sender_user, sender_device, room_id, lazy_loaded, next_batchcount)
.await;
(
heroes,
joined_member_count,
invited_member_count,
joined_since_last_sync,
state_events,
)
}
};
// Look for device list updates in this room
device_list_updates.extend(
services
.users
.keys_changed(room_id.as_ref(), since, None)
.filter_map(Result::ok),
);
let notification_count = if send_notification_counts {
Some(
services
.rooms
.user
.notification_count(sender_user, room_id)?
.try_into()
.expect("notification count can't go that high"),
)
} else {
None
};
let highlight_count = if send_notification_counts {
Some(
services
.rooms
.user
.highlight_count(sender_user, room_id)?
.try_into()
.expect("highlight count can't go that high"),
)
} else {
None
};
let prev_batch = timeline_pdus
.first()
.map_or(Ok::<_, Error>(None), |(pdu_count, _)| {
Ok(Some(match pdu_count {
PduCount::Backfilled(_) => {
error!("timeline in backfill state?!");
"0".to_owned()
},
PduCount::Normal(c) => c.to_string(),
}))
})?;
let room_events: Vec<_> = timeline_pdus
.iter()
.map(|(_, pdu)| pdu.to_sync_room_event())
.collect();
let mut edus: Vec<_> = services
.rooms
.read_receipt
.readreceipts_since(room_id, since)
.filter_map(Result::ok) // Filter out buggy events
.map(|(_, _, v)| v)
.collect();
if services.rooms.typing.last_typing_update(room_id).await? > since {
edus.push(
serde_json::from_str(
&serde_json::to_string(&services.rooms.typing.typings_all(room_id).await?)
.expect("event is valid, we just created it"),
)
.expect("event is valid, we just created it"),
);
}
// Save the state after this sync so we can send the correct state diff next
// sync
services
.rooms
.user
.associate_token_shortstatehash(room_id, next_batch, current_shortstatehash)?;
Ok(JoinedRoom {
account_data: RoomAccountData {
events: services
.account_data
.changes_since(Some(room_id), sender_user, since)?
.into_iter()
.filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
.collect(),
},
summary: RoomSummary {
heroes,
joined_member_count: joined_member_count.map(ruma_from_u64),
invited_member_count: invited_member_count.map(ruma_from_u64),
},
unread_notifications: UnreadNotificationsCount {
highlight_count,
notification_count,
},
timeline: Timeline {
limited: limited || joined_since_last_sync,
prev_batch,
events: room_events,
},
state: RoomState {
events: state_events
.iter()
.map(|pdu| pdu.to_sync_state_event())
.collect(),
},
ephemeral: Ephemeral {
events: edus,
},
unread_thread_notifications: BTreeMap::new(),
})
}

fn load_timeline(
services: &Services, sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, limit: u64,
) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> {
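// Illustrative contract (names from the signature above): with
// `roomsincecount = PduCount::Normal(0)` and `limit = 10`, this returns up to
// 10 of the newest PDUs after that count, in chronological order, plus a
// `limited` flag indicating whether older events were cut off.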
let timeline_pdus;
let limited;
if services
.rooms
.timeline
.last_timeline_count(sender_user, room_id)?
> roomsincecount
{
let mut non_timeline_pdus = services
.rooms
.timeline
.pdus_until(sender_user, room_id, PduCount::max())?
.filter_map(|r| {
// Filter out buggy events
if r.is_err() {
error!("Bad pdu in pdus_since: {:?}", r);
}
r.ok()
})
.take_while(|(pducount, _)| pducount > &roomsincecount);
// Take the last events for the timeline
timeline_pdus = non_timeline_pdus
.by_ref()
.take(usize_from_u64_truncated(limit))
.collect::<Vec<_>>()
.into_iter()
.rev()
.collect::<Vec<_>>();
// The /sync response doesn't always return all messages, so we say the output
// is limited unless there are events in non_timeline_pdus
limited = non_timeline_pdus.next().is_some();
} else {
timeline_pdus = Vec::new();
limited = false;
}

Ok((timeline_pdus, limited))
}

fn share_encrypted_room(
services: &Services, sender_user: &UserId, user_id: &UserId, ignore_room: &RoomId,
) -> Result<bool> {
Ok(services
.rooms
.user
.get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])?
.filter_map(Result::ok)
.filter(|room_id| room_id != ignore_room)
.filter_map(|other_room_id| {
Some(
services
.rooms
.state_accessor
.room_state_get(&other_room_id, &StateEventType::RoomEncryption, "")
.ok()?
.is_some(),
)
})
.any(|encrypted| encrypted))
}
/// POST `/_matrix/client/unstable/org.matrix.msc3575/sync`
///
/// Sliding Sync endpoint (future endpoint: `/_matrix/client/v4/sync`)
pub(crate) async fn sync_events_v4_route(
State(services): State<crate::State>, body: Ruma<sync_events::v4::Request>,
) -> Result<sync_events::v4::Response> {
let sender_user = body.sender_user.expect("user is authenticated");
let sender_device = body.sender_device.expect("user is authenticated");
let mut body = body.body;
// Set up watchers, so if there's no response, we can wait for them
let watcher = services.globals.watch(&sender_user, &sender_device);
let next_batch = services.globals.next_count()?;
let conn_id = body
.conn_id
.clone()
.unwrap_or_else(|| SINGLE_CONNECTION_SYNC.to_owned());
let globalsince = body
.pos
.as_ref()
.and_then(|string| string.parse().ok())
.unwrap_or(0);
if globalsince != 0
&& !services
.users
.remembered(sender_user.clone(), sender_device.clone(), conn_id.clone())
{
debug!("Restarting sync stream because it was gone from the database");
return Err(Error::Request(
ErrorKind::UnknownPos,
"Connection data lost since last time".into(),
http::StatusCode::BAD_REQUEST,
));
}
// Client requested an initial sync: forget cached sync-connection state
if globalsince == 0 {
services
.users
.forget_sync_request_connection(sender_user.clone(), sender_device.clone(), conn_id.clone());
}
// Get sticky parameters from cache
let known_rooms = services
.users
.update_sync_request_with_cache(sender_user.clone(), sender_device.clone(), &mut body);
let all_joined_rooms = services
.rooms
.state_cache
.rooms_joined(&sender_user)
.filter_map(Result::ok)
.collect::<Vec<_>>();
let all_invited_rooms = services
.rooms
.state_cache
.rooms_invited(&sender_user)
.filter_map(Result::ok)
.map(|r| r.0)
.collect::<Vec<_>>();
let all_rooms = all_joined_rooms
.iter()
.cloned()
.chain(all_invited_rooms.clone())
.collect::<Vec<_>>();
if body.extensions.to_device.enabled.unwrap_or(false) {
services
.users
.remove_to_device_events(&sender_user, &sender_device, globalsince)?;
}
let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
let mut device_list_changes = HashSet::new();
let mut device_list_left = HashSet::new();
let mut account_data = sync_events::v4::AccountData {
global: Vec::new(),
rooms: BTreeMap::new(),
};
if body.extensions.account_data.enabled.unwrap_or(false) {
account_data.global = services
.account_data
.changes_since(None, &sender_user, globalsince)?
.into_iter()
.filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global))
.collect();
if let Some(rooms) = body.extensions.account_data.rooms {
for room in rooms {
account_data.rooms.insert(
room.clone(),
services
.account_data
.changes_since(Some(&room), &sender_user, globalsince)?
.into_iter()
.filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
.collect(),
);
}
}
}
if body.extensions.e2ee.enabled.unwrap_or(false) {
// Look for device list updates of this account
device_list_changes.extend(
services
.users
.keys_changed(sender_user.as_ref(), globalsince, None)
.filter_map(Result::ok),
);
for room_id in &all_joined_rooms {
let Some(current_shortstatehash) = services.rooms.state.get_room_shortstatehash(room_id)? else {
error!("Room {} has no state", room_id);
continue;
};
let since_shortstatehash = services
.rooms
.user
.get_token_shortstatehash(room_id, globalsince)?;
let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash
.and_then(|shortstatehash| {
.rooms
.state_accessor
.state_get(shortstatehash, &StateEventType::RoomMember, sender_user.as_str())
.transpose()
})
.transpose()?
.and_then(|pdu| {
serde_json::from_str(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid PDU in database."))
.ok()
});
let encrypted_room = services
.rooms
.state_accessor
.state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")?
.is_some();
if let Some(since_shortstatehash) = since_shortstatehash {
// Skip if there are only timeline changes
if since_shortstatehash == current_shortstatehash {
continue;
}
let since_encryption = services.rooms.state_accessor.state_get(
since_shortstatehash,
&StateEventType::RoomEncryption,
"",
)?;
let joined_since_last_sync =
since_sender_member.map_or(true, |member| member.membership != MembershipState::Join);
let new_encrypted_room = encrypted_room && since_encryption.is_none();
if encrypted_room {
let current_state_ids = services
.rooms
.state_accessor
.state_full_ids(current_shortstatehash)
.await?;
let since_state_ids = services
.rooms
.state_accessor
.state_full_ids(since_shortstatehash)
.await?;
for (key, id) in current_state_ids {
if since_state_ids.get(&key) != Some(&id) {
let Some(pdu) = services.rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};
if pdu.kind == TimelineEventType::RoomMember {
if let Some(state_key) = &pdu.state_key {
let user_id = UserId::parse(state_key.clone())
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
if user_id == sender_user {
continue;
}
let new_membership =
serde_json::from_str::<RoomMemberEventContent>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid PDU in database."))?
.membership;
match new_membership {
MembershipState::Join => {
// A new user joined an encrypted room
if !share_encrypted_room(&services, &sender_user, &user_id, room_id)? {
device_list_changes.insert(user_id);
}
},
MembershipState::Leave => {
// Write down users that have left encrypted rooms we are in
left_encrypted_users.insert(user_id);
},
_ => {},
}
}
}
}
}
if joined_since_last_sync || new_encrypted_room {
// If the user is in a new encrypted room, give them all joined users
device_list_changes.extend(
services
.rooms
.state_cache
.room_members(room_id)
.flatten()
.filter(|user_id| {
// Don't send key updates from the sender to the sender
&sender_user != user_id
})
.filter(|user_id| {
// Only send keys if the sender doesn't share an encrypted room with the target
// already
!share_encrypted_room(&services, &sender_user, user_id, room_id).unwrap_or(false)
}),
);
}
}
}
// Look for device list updates in this room
device_list_changes.extend(
services
.users
.keys_changed(room_id.as_ref(), globalsince, None)
.filter_map(Result::ok),
);
}
for user_id in left_encrypted_users {
let dont_share_encrypted_room = services
.rooms
.user
.get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
.filter_map(Result::ok)
.filter_map(|other_room_id| {
Some(
services
.rooms
.state_accessor
.room_state_get(&other_room_id, &StateEventType::RoomEncryption, "")
.ok()?
.is_some(),
)
})
.all(|encrypted| !encrypted);
// If the user doesn't share an encrypted room with the target anymore, we need
// to tell them
if dont_share_encrypted_room {
device_list_left.insert(user_id);
}
}
}
let mut lists = BTreeMap::new();
let mut todo_rooms = BTreeMap::new(); // room_id -> (required state keys, timeline limit, last known since-count)
for (list_id, list) in body.lists {
let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) {
Some(true) => &all_invited_rooms,
Some(false) => &all_joined_rooms,
None => &all_rooms,
};
let active_rooms = match list.filters.clone().map(|f| f.not_room_types) {
Some(filter) if filter.is_empty() => active_rooms.clone(),
Some(value) => filter_rooms(active_rooms, State(services), &value, true),
None => active_rooms.clone(),
};
let active_rooms = match list.filters.clone().map(|f| f.room_types) {
Some(filter) if filter.is_empty() => active_rooms.clone(),
Some(value) => filter_rooms(&active_rooms, State(services), &value, false),
None => active_rooms,
};
let mut new_known_rooms = BTreeSet::new();
lists.insert(
list_id.clone(),
sync_events::v4::SyncList {
ops: list
.ranges
.into_iter()
.map(|mut r| {
r.0 = r.0.clamp(
uint!(0),
UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX),
);
r.1 =
r.1.clamp(r.0, UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX));
let room_ids = if !active_rooms.is_empty() {
active_rooms[usize_from_ruma(r.0)..=usize_from_ruma(r.1)].to_vec()
} else {
Vec::new()
};
new_known_rooms.extend(room_ids.iter().cloned());
for room_id in &room_ids {
let todo_room = todo_rooms
.entry(room_id.clone())
.or_insert((BTreeSet::new(), 0, u64::MAX));
let limit = list
.room_details
.timeline_limit
.map_or(10, u64::from)
.min(100);
todo_room
.0
.extend(list.room_details.required_state.iter().cloned());
todo_room.1 = todo_room.1.max(limit);
// 0 means unknown because it got out of date
todo_room.2 = todo_room.2.min(
known_rooms
.get(&list_id)
.and_then(|k| k.get(room_id))
.copied()
.unwrap_or(0),
);
}
sync_events::v4::SyncOp {
op: SlidingOp::Sync,
range: Some(r),
index: None,
room_ids,
room_id: None,
}
})
.collect(),
count: ruma_from_usize(active_rooms.len()),
},
);
if let Some(conn_id) = &body.conn_id {
services.users.update_sync_known_rooms(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
list_id,
new_known_rooms,
globalsince,
);
}
}
let mut known_subscription_rooms = BTreeSet::new();
for (room_id, room) in &body.room_subscriptions {
if !services.rooms.metadata.exists(room_id)? {
continue;
}
let todo_room = todo_rooms
.entry(room_id.clone())
.or_insert((BTreeSet::new(), 0, u64::MAX));
let limit = room.timeline_limit.map_or(10, u64::from).min(100);
todo_room.0.extend(room.required_state.iter().cloned());
todo_room.1 = todo_room.1.max(limit);
// 0 means unknown because it got out of date
todo_room.2 = todo_room.2.min(
known_rooms
.get("subscriptions")
.and_then(|k| k.get(room_id))
.copied()
.unwrap_or(0),
);
known_subscription_rooms.insert(room_id.clone());
}
for r in body.unsubscribe_rooms {
known_subscription_rooms.remove(&r);
body.room_subscriptions.remove(&r);
}
if let Some(conn_id) = &body.conn_id {
services.users.update_sync_known_rooms(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
"subscriptions".to_owned(),
known_subscription_rooms,
globalsince,
);
}
if let Some(conn_id) = &body.conn_id {
services.users.update_sync_subscriptions(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
body.room_subscriptions,
);
}
let mut rooms = BTreeMap::new();
for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms {
let roomsincecount = PduCount::Normal(*roomsince);
let (timeline_pdus, limited) =
load_timeline(&services, &sender_user, room_id, roomsincecount, *timeline_limit)?;
account_data.rooms.insert(
room_id.clone(),
services
.account_data
.changes_since(Some(room_id), &sender_user, *roomsince)?
.into_iter()
.filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
.collect(),
);
if roomsince != &0 && timeline_pdus.is_empty() && account_data.rooms.get(room_id).is_some_and(Vec::is_empty) {
continue;
}
let prev_batch = timeline_pdus
.first()
.map_or(Ok::<_, Error>(None), |(pdu_count, _)| {
Ok(Some(match pdu_count {
PduCount::Backfilled(_) => {
error!("timeline in backfill state?!");
"0".to_owned()
},
PduCount::Normal(c) => c.to_string(),
}))
})?
.or_else(|| {
if roomsince != &0 {
Some(roomsince.to_string())
} else {
None
}
});
let room_events: Vec<_> = timeline_pdus
.iter()
.map(|(_, pdu)| pdu.to_sync_room_event())
.collect();
let invite_state = if all_invited_rooms.contains(room_id) {
services
.rooms
.state_cache
.invite_state(&sender_user, room_id)
.unwrap_or(None)
} else {
None
};
let mut timestamp: Option<_> = None;
for (_, pdu) in timeline_pdus {
timestamp = Some(MilliSecondsSinceUnixEpoch(pdu.origin_server_ts));
if DEFAULT_BUMP_TYPES.contains(pdu.event_type()) {
break;
}
}
let required_state = required_state_request
.iter()
.map(|state| {
services
.rooms
.state_accessor
.room_state_get(room_id, &state.0, &state.1)
})
.filter_map(Result::ok)
.flatten()
.map(|state| state.to_sync_state_event())
.collect();
// Heroes
let heroes = services
.rooms
.state_cache
.room_members(room_id)
.filter_map(Result::ok)
.filter(|member| member != &sender_user)
.map(|member| {
Ok::<_, Error>(
services
.rooms
.state_accessor
.get_member(room_id, &member)?
.map(|memberevent| SlidingSyncRoomHero {
user_id: member,
name: memberevent.displayname,
avatar: memberevent.avatar_url,
}),
)
})
.filter_map(Result::ok)
.flatten()
.take(5)
.collect::<Vec<_>>();
let name = match heroes.len().cmp(&(1_usize)) {
Ordering::Greater => {
let firsts = heroes[1..]
.iter()
.map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string()))
.collect::<Vec<_>>()
.join(", ");
let last = heroes[0]
.name
.clone()
.unwrap_or_else(|| heroes[0].user_id.to_string());
Some(format!("{firsts} and {last}"))
},
Ordering::Equal => Some(
heroes[0]
.name
.clone()
.unwrap_or_else(|| heroes[0].user_id.to_string()),
),
Ordering::Less => None,
};
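// Illustrative: heroes [@a, @b, @c] (in that order) yield the name
// "B, C and A", falling back to the user ID when a hero has no display name.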
let heroes_avatar = if heroes.len() == 1 {
heroes[0].avatar.clone()
} else {
None
};
rooms.insert(
room_id.clone(),
sync_events::v4::SlidingSyncRoom {
name: services.rooms.state_accessor.get_name(room_id)?.or(name),
avatar: if let Some(heroes_avatar) = heroes_avatar {
ruma::JsOption::Some(heroes_avatar)
} else {
match services.rooms.state_accessor.get_avatar(room_id)? {
ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url),
ruma::JsOption::Null => ruma::JsOption::Null,
ruma::JsOption::Undefined => ruma::JsOption::Undefined,
}
},
initial: Some(roomsince == &0),
is_dm: None,
unread_notifications: UnreadNotificationsCount {
highlight_count: Some(
services
.rooms
.user
.highlight_count(&sender_user, room_id)?
.try_into()
.expect("notification count can't go that high"),
),
notification_count: Some(
services
.rooms
.user
.notification_count(&sender_user, room_id)?
.try_into()
.expect("notification count can't go that high"),
),
},
timeline: room_events,
required_state,
prev_batch,
limited,
joined_count: Some(
services
.rooms
.state_cache
.room_joined_count(room_id)?
.unwrap_or(0)
.try_into()
.unwrap_or_else(|_| uint!(0)),
),
invited_count: Some(
services
.rooms
.state_cache
.room_invited_count(room_id)?
.unwrap_or(0)
.try_into()
.unwrap_or_else(|_| uint!(0)),
),
num_live: None, // Count events in timeline greater than global sync counter
timestamp,
},
);
}

if rooms
.iter()
.all(|(_, r)| r.timeline.is_empty() && r.required_state.is_empty())
{
// Hang a few seconds so requests are not spammed
// Stop hanging if new info arrives
let default = Duration::from_secs(30);
let duration = cmp::min(body.timeout.unwrap_or(default), default);
_ = tokio::time::timeout(duration, watcher).await;
}
Ok(sync_events::v4::Response {
initial: globalsince == 0,
txn_id: body.txn_id.clone(),
pos: next_batch.to_string(),
lists,
rooms,
extensions: sync_events::v4::Extensions {
to_device: if body.extensions.to_device.enabled.unwrap_or(false) {
Some(sync_events::v4::ToDevice {
events: services
.users
.get_to_device_events(&sender_user, &sender_device)?,
next_batch: next_batch.to_string(),
})
} else {
None
},
e2ee: sync_events::v4::E2EE {
device_lists: DeviceLists {
changed: device_list_changes.into_iter().collect(),
left: device_list_left.into_iter().collect(),
},
device_one_time_keys_count: services
.users
.count_one_time_keys(&sender_user, &sender_device)?,
// Fallback keys are not yet supported
device_unused_fallback_key_types: None,
},
receipts: sync_events::v4::Receipts {
rooms: BTreeMap::new(),
},
typing: sync_events::v4::Typing {
rooms: BTreeMap::new(),
},
},
delta_token: None,
})
}
fn filter_rooms(
rooms: &[OwnedRoomId], State(services): State<crate::State>, filter: &[Option<RoomType>], negate: bool,
) -> Vec<OwnedRoomId> {
return rooms
.iter()
.filter(|r| {
match services.rooms.state_accessor.get_room_type(r) {
Err(e) => {
warn!("Requested room type for {}, but could not retrieve with error {}", r, e);
false
},
Ok(None) => {
// For rooms which do not have a room type, use 'null' to include them
if negate {
!filter.contains(&None)
} else {
filter.contains(&None)
}
},
Ok(Some(room_type)) => {
if negate {
!filter.contains(&Some(room_type))
} else {
filter.is_empty() || filter.contains(&Some(room_type))
}
},
}
})
.cloned()
.collect();
}
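// Illustrative: with `filter = &[Some(RoomType::Space)]` and `negate = true`,
// spaces are excluded and all other rooms (including ones without a room
// type) are kept; with `negate = false`, only spaces are kept.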