use std::{
cmp::{self, Ordering},
collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
time::Duration,
};
use conduit::{
error,
utils::math::{ruma_from_u64, ruma_from_usize, usize_from_ruma, usize_from_u64_truncated},
PduCount,
};
use ruma::{
api::client::{
filter::{FilterDefinition, LazyLoadOptions},
sync::sync_events::{
self,
v3::{
Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, LeftRoom, Presence,
RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice,
},
v4::SlidingOp,
DeviceLists, UnreadNotificationsCount,
},
uiaa::UiaaResponse,
},
events::{
presence::PresenceEvent,
room::member::{MembershipState, RoomMemberEventContent},
StateEventType, TimelineEventType,
},
serde::Raw,
uint, DeviceId, EventId, OwnedUserId, RoomId, UInt, UserId,
};
use tracing::{Instrument as _, Span};
use crate::{service::pdu::EventHash, services, utils, Error, PduEvent, Result, Ruma, RumaResponse};
/// # `GET /_matrix/client/r0/sync`
///
/// Synchronize the client's state with the latest state on the server.
///
/// - This endpoint takes a `since` parameter which should be the `next_batch`
/// value from a previous request for incremental syncs.
///
/// Calling this endpoint without a `since` parameter returns:
/// - Some of the most recent events of each timeline
/// - Notification counts for each room
/// - Joined and invited member counts, heroes
/// - All state events
///
/// Calling this endpoint with a `since` parameter from a previous `next_batch`
/// returns: For joined rooms:
/// - Some of the most recent events of each timeline that happened after since
/// - If user joined the room after since: All state events (unless lazy loading
/// is activated) and all device list updates in that room
/// - If the user was already in the room: A list of all events that are in the
/// state now, but were not in the state at `since`
/// - If the state we send contains a member event: Joined and invited member
/// counts, heroes
/// - Device list updates that happened after `since`
/// - If there are events in the timeline we send, or the user sent an updated
///   read marker: Notification counts
/// - EDUs that are active now (read receipts, typing updates, presence)
/// - TODO: Allow multiple sync streams to support Pantalaimon
/// - If the user was invited after `since`: A subset of the state of the room
/// at the point of the invite
/// - If the user left after `since`: `prev_batch` token, empty state (TODO:
/// subset of the state at the point of the leave)
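///
/// An illustrative long-polling call (the token value is an example, not a
/// format this server is guaranteed to produce):
/// `GET /_matrix/client/r0/sync?since=s72594_4483_1934&timeout=30000`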
pub(crate) async fn sync_events_route(
body: Ruma<sync_events::v3::Request>,
) -> Result<sync_events::v3::Response, RumaResponse<UiaaResponse>> {
let sender_user = body.sender_user.expect("user is authenticated");
let sender_device = body.sender_device.expect("user is authenticated");
let body = body.body;
// Presence update
if services().globals.allow_local_presence() {
services()
.presence
.ping_presence(&sender_user, &body.set_presence)?;
}
// Setup watchers, so if there's no response, we can wait for them
let watcher = services().globals.watch(&sender_user, &sender_device);
let next_batch = services().globals.current_count()?;
let next_batchcount = PduCount::Normal(next_batch);
let next_batch_string = next_batch.to_string();
// Load filter
let filter = match body.filter {
None => FilterDefinition::default(),
Some(Filter::FilterDefinition(filter)) => filter,
Some(Filter::FilterId(filter_id)) => services()
.users
.get_filter(&sender_user, &filter_id)?
.unwrap_or_default(),
};
let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options {
LazyLoadOptions::Enabled {
include_redundant_members: redundant,
} => (true, redundant),
LazyLoadOptions::Disabled => (false, false),
};
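// Lazy loading of room members (MSC1227): when enabled, m.room.member state
// events are only sent for users appearing in the returned timeline, unless
// the filter also asks for redundant members.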
let full_state = body.full_state;
let mut joined_rooms = BTreeMap::new();
let since = body
.since
.as_ref()
.and_then(|string| string.parse().ok())
.unwrap_or(0);
let sincecount = PduCount::Normal(since);
let mut presence_updates = HashMap::new();
let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
let mut device_list_updates = HashSet::new();
let mut device_list_left = HashSet::new();
// Look for device list updates of this account
device_list_updates.extend(
services()
.users
.keys_changed(sender_user.as_ref(), since, None)
.filter_map(Result::ok),
);
if services().globals.allow_local_presence() {
process_presence_updates(&mut presence_updates, since, &sender_user).await?;
}
let all_joined_rooms = services()
.rooms
.state_cache
.rooms_joined(&sender_user)
.collect::<Vec<_>>();
// Coalesce database writes for the remainder of this scope.
let _cork = services().db.cork_and_flush();
for room_id in all_joined_rooms {
let room_id = room_id?;
if let Ok(joined_room) = load_joined_room(
&sender_user,
&sender_device,
&room_id,
since,
sincecount,
next_batch,
next_batchcount,
lazy_load_enabled,
lazy_load_send_redundant,
full_state,
&mut device_list_updates,
&mut left_encrypted_users,
)
.await
{
if !joined_room.is_empty() {
joined_rooms.insert(room_id.clone(), joined_room);
}
}
}
let mut left_rooms = BTreeMap::new();
let all_left_rooms: Vec<_> = services()
.rooms
.state_cache
.rooms_left(&sender_user)
.collect();
for result in all_left_rooms {
handle_left_room(
since,
&result?.0,
&sender_user,
&mut left_rooms,
&next_batch_string,
full_state,
lazy_load_enabled,
)
.instrument(Span::current())
.await?;
}
let mut invited_rooms = BTreeMap::new();
let all_invited_rooms: Vec<_> = services()
.rooms
.state_cache
.rooms_invited(&sender_user)
.collect();
for result in all_invited_rooms {
let (room_id, invite_state_events) = result?;
// Get and drop the lock to wait for remaining operations to finish
let insert_lock = services().rooms.timeline.mutex_insert.lock(&room_id).await;
drop(insert_lock);
let invite_count = services()
.rooms
.state_cache
.get_invite_count(&room_id, &sender_user)?;
// Invited before last sync
if Some(since) >= invite_count {
continue;
}
invited_rooms.insert(
room_id.clone(),
InvitedRoom {
invite_state: InviteState {
events: invite_state_events,
},
},
);
}
for user_id in left_encrypted_users {
let dont_share_encrypted_room = services()
.rooms
.user
.get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
.filter_map(Result::ok)
.filter_map(|other_room_id| {
Some(
services()
.rooms
.state_accessor
.room_state_get(&other_room_id, &StateEventType::RoomEncryption, "")
.ok()?
.is_some(),
)
})
.all(|encrypted| !encrypted);
// If the user doesn't share an encrypted room with the target anymore, we need
// to tell them
if dont_share_encrypted_room {
device_list_left.insert(user_id);
}
}
// Remove all to-device events the device received *last time*
services()
.users
.remove_to_device_events(&sender_user, &sender_device, since)?;
let response = sync_events::v3::Response {
next_batch: next_batch_string,
rooms: Rooms {
leave: left_rooms,
join: joined_rooms,
invite: invited_rooms,
knock: BTreeMap::new(), // TODO
},
presence: Presence {
events: presence_updates
.into_values()
.map(|v| Raw::new(&v).expect("PresenceEvent always serializes successfully"))
.collect(),
},
account_data: GlobalAccountData {
events: services()
.account_data
.changes_since(None, &sender_user, since)?
.into_iter()
.filter_map(|(_, v)| {
serde_json::from_str(v.json().get())
.map_err(|_| Error::bad_database("Invalid account event in database."))
.ok()
})
.collect(),
},
device_lists: DeviceLists {
changed: device_list_updates.into_iter().collect(),
left: device_list_left.into_iter().collect(),
},
device_one_time_keys_count: services()
.users
.count_one_time_keys(&sender_user, &sender_device)?,
to_device: ToDevice {
events: services()
.users
.get_to_device_events(&sender_user, &sender_device)?,
},
// Fallback keys are not yet supported
device_unused_fallback_key_types: None,
};
// TODO: Retry the endpoint instead of returning
if !full_state
&& response.rooms.is_empty()
&& response.presence.is_empty()
&& response.account_data.is_empty()
&& response.device_lists.is_empty()
&& response.to_device.is_empty()
{
// Hang a few seconds so requests are not spammed
// Stop hanging if new info arrives
let default = Duration::from_secs(30);
let duration = cmp::min(body.timeout.unwrap_or(default), default);
_ = tokio::time::timeout(duration, watcher).await;
}

Ok(response)
}
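/// Builds the `leave` section of the sync response for a single room the user
/// has left: a synthetic leave event if the room is unknown to us (a rejected
/// invite), otherwise the state difference between `since` and the leave
/// event.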
#[tracing::instrument(skip_all, fields(user_id = %sender_user, room_id = %room_id), name = "left_room")]
async fn handle_left_room(
since: u64, room_id: &RoomId, sender_user: &UserId, left_rooms: &mut BTreeMap<ruma::OwnedRoomId, LeftRoom>,
next_batch_string: &str, full_state: bool, lazy_load_enabled: bool,
) -> Result<()> {
// Get and drop the lock to wait for remaining operations to finish
let insert_lock = services().rooms.timeline.mutex_insert.lock(room_id).await;
drop(insert_lock);
let left_count = services()
.rooms
.state_cache
.get_left_count(room_id, sender_user)?;
// Left before last sync
if Some(since) >= left_count {
return Ok(());
}
if !services().rooms.metadata.exists(room_id)? {
// This is just a rejected invite, not a room we know
// Insert a leave event anyways
let event = PduEvent {
event_id: EventId::new(services().globals.server_name()).into(),
sender: sender_user.to_owned(),
origin: None,
origin_server_ts: utils::millis_since_unix_epoch()
.try_into()
.expect("Timestamp is valid js_int value"),
kind: TimelineEventType::RoomMember,
content: serde_json::from_str(r#"{"membership":"leave"}"#).expect("this is valid JSON"),
state_key: Some(sender_user.to_string()),
unsigned: None,
// The following keys are dropped on conversion
room_id: room_id.to_owned(),
prev_events: vec![],
depth: uint!(1),
auth_events: vec![],
redacts: None,
hashes: EventHash {
sha256: String::new(),
},
signatures: None,
};
left_rooms.insert(
room_id.to_owned(),
LeftRoom {
account_data: RoomAccountData {
events: Vec::new(),
},
timeline: Timeline {
limited: false,
prev_batch: Some(next_batch_string.to_owned()),
events: Vec::new(),
},
state: State {
events: vec![event.to_sync_state_event()],
},
},
);
return Ok(());
}
let mut left_state_events = Vec::new();
let since_shortstatehash = services()
.rooms
.user
.get_token_shortstatehash(room_id, since)?;
let since_state_ids = match since_shortstatehash {
Some(s) => services().rooms.state_accessor.state_full_ids(s).await?,
None => HashMap::new(),
};
let Some(left_event_id) = services().rooms.state_accessor.room_state_get_id(
room_id,
&StateEventType::RoomMember,
sender_user.as_str(),
)?
else {
error!("Left room but no left state event");
return Ok(());
};
let Some(left_shortstatehash) = services()
.rooms
.state_accessor
.pdu_shortstatehash(&left_event_id)?
else {
error!(event_id = %left_event_id, "Leave event has no state");
return Ok(());
};
let mut left_state_ids = services()
.rooms
.state_accessor
.state_full_ids(left_shortstatehash)
.await?;
let leave_shortstatekey = services()
.rooms
.short
.get_or_create_shortstatekey(&StateEventType::RoomMember, sender_user.as_str())?;
left_state_ids.insert(leave_shortstatekey, left_event_id);
let mut i: u8 = 0;
for (key, id) in left_state_ids {
if full_state || since_state_ids.get(&key) != Some(&id) {
let (event_type, state_key) = services().rooms.short.get_statekey_from_short(key)?;
if !lazy_load_enabled
|| event_type != StateEventType::RoomMember
|| full_state
// TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565
|| (cfg!(feature = "element_hacks") && *sender_user == state_key)
{
let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};
left_state_events.push(pdu.to_sync_state_event());

i = i.wrapping_add(1);
if i % 100 == 0 {
tokio::task::yield_now().await;
}
}
}
}
left_rooms.insert(
room_id.to_owned(),
LeftRoom {
account_data: RoomAccountData {
events: Vec::new(),
},
timeline: Timeline {
limited: false,
prev_batch: Some(next_batch_string.to_owned()),
events: Vec::new(),
},
state: State {
events: left_state_events,
},
},
);
Ok(())
}
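/// Folds presence updates that happened after `since` into `presence_updates`,
/// keeping one event per visible user and overlaying newer fields onto older
/// ones.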
async fn process_presence_updates(
presence_updates: &mut HashMap<OwnedUserId, PresenceEvent>, since: u64, syncing_user: &UserId,
) -> Result<()> {
for (user_id, _, presence_bytes) in services().presence.presence_since(since) {
if !services()
.rooms
.state_cache
.user_sees_user(syncing_user, &user_id)?
{
continue;
}
let presence_event = Presence::from_json_bytes_to_event(&presence_bytes, &user_id)?;
match presence_updates.entry(user_id) {
Entry::Vacant(slot) => {
slot.insert(presence_event);
},
Entry::Occupied(mut slot) => {
let curr_event = slot.get_mut();
let curr_content = &mut curr_event.content;
let new_content = presence_event.content;
// Update existing presence event with more info
curr_content.presence = new_content.presence;
curr_content.status_msg = new_content
.status_msg
.or_else(|| curr_content.status_msg.take());
curr_content.last_active_ago = new_content.last_active_ago.or(curr_content.last_active_ago);
curr_content.displayname = new_content
.displayname
.or_else(|| curr_content.displayname.take());
curr_content.avatar_url = new_content
.avatar_url
.or_else(|| curr_content.avatar_url.take());
curr_content.currently_active = new_content
.currently_active
.or(curr_content.currently_active);
},
}
}

Ok(())
}
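/// Builds the `join` section of the sync response for a single room: timeline
/// events, the state delta since `since` (or the full state on initial sync),
/// heroes and member counts, notification counts, and ephemeral events.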
#[allow(clippy::too_many_arguments)]
async fn load_joined_room(
sender_user: &UserId, sender_device: &DeviceId, room_id: &RoomId, since: u64, sincecount: PduCount,
next_batch: u64, next_batchcount: PduCount, lazy_load_enabled: bool, lazy_load_send_redundant: bool,
full_state: bool, device_list_updates: &mut HashSet<OwnedUserId>, left_encrypted_users: &mut HashSet<OwnedUserId>,
) -> Result<JoinedRoom> {
// Get and drop the lock to wait for remaining operations to finish
// This will make sure that we have all events until next_batch
let insert_lock = services().rooms.timeline.mutex_insert.lock(room_id).await;
drop(insert_lock);
let (timeline_pdus, limited) = load_timeline(sender_user, room_id, sincecount, 10)?;
let send_notification_counts = !timeline_pdus.is_empty()
|| services()
.rooms
.user
.last_notification_read(sender_user, room_id)?
> since;
let mut timeline_users = HashSet::new();
for (_, event) in &timeline_pdus {
timeline_users.insert(event.sender.as_str().to_owned());
}
services()
.rooms
.lazy_loading
.lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount)
.await?;
let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? else {
error!("Room {} has no state", room_id);
return Err(Error::BadDatabase("Room has no state"));
};
let since_shortstatehash = services()
.rooms
.user
.get_token_shortstatehash(room_id, since)?;
let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) =
if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
// No state changes
(Vec::new(), None, None, false, Vec::new())
} else {
// Calculates joined_member_count, invited_member_count and heroes
let calculate_counts = || {
let joined_member_count = services()
.rooms
.state_cache
.room_joined_count(room_id)?
.unwrap_or(0);
let invited_member_count = services()
.rooms
.state_cache
.room_invited_count(room_id)?
.unwrap_or(0);
let mut heroes: Vec<OwnedUserId> = Vec::with_capacity(5);
if joined_member_count.saturating_add(invited_member_count) <= 5 {
// Go through all PDUs and for each member event, check if the user is still
// joined or invited until we have 5 or we reach the end
for hero in services()
.rooms
.timeline
.all_pdus(sender_user, room_id)?
.filter_map(Result::ok) // Ignore all broken pdus
.filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember)
.map(|(_, pdu)| {
let content: RoomMemberEventContent = serde_json::from_str(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid member event in database."))?;
if let Some(state_key) = &pdu.state_key {
let user_id = UserId::parse(state_key.clone())
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
// The membership was and still is invite or join
if matches!(content.membership, MembershipState::Join | MembershipState::Invite)
&& (services().rooms.state_cache.is_joined(&user_id, room_id)?
|| services().rooms.state_cache.is_invited(&user_id, room_id)?)
{
Ok::<_, Error>(Some(user_id))
} else {
Ok(None)
}
} else {
Ok(None)
}
})
.filter_map(Result::ok)
// Filter for possible heroes
.flatten()
{
if heroes.contains(&hero) || hero == sender_user {
continue;
}

heroes.push(hero);
}
}

Ok::<_, Error>((Some(joined_member_count), Some(invited_member_count), heroes))
};
let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash
.and_then(|shortstatehash| {
services()
.rooms
.state_accessor
.state_get(shortstatehash, &StateEventType::RoomMember, sender_user.as_str())
.transpose()
})
.transpose()?
.and_then(|pdu| {
serde_json::from_str(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid PDU in database."))
.ok()
});
let joined_since_last_sync =
since_sender_member.map_or(true, |member| member.membership != MembershipState::Join);
if since_shortstatehash.is_none() || joined_since_last_sync {
// Probably since = 0, we will do an initial sync
let (joined_member_count, invited_member_count, heroes) = calculate_counts()?;
let current_state_ids = services()
.rooms
.state_accessor
.state_full_ids(current_shortstatehash)
.await?;
let mut state_events = Vec::new();
let mut lazy_loaded = HashSet::new();
let mut i: u8 = 0;
for (shortstatekey, id) in current_state_ids {
let (event_type, state_key) = services()
.rooms
.short
.get_statekey_from_short(shortstatekey)?;
if event_type != StateEventType::RoomMember {
let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};
state_events.push(pdu);

i = i.wrapping_add(1);
if i % 100 == 0 {
tokio::task::yield_now().await;
}
} else if !lazy_load_enabled
|| full_state
|| timeline_users.contains(&state_key)
// TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565
|| (cfg!(feature = "element_hacks") && *sender_user == state_key)
{
let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};
// This check is in case a bad user ID made it into the database
if let Ok(uid) = UserId::parse(&state_key) {
lazy_loaded.insert(uid);
}
state_events.push(pdu);

i = i.wrapping_add(1);
if i % 100 == 0 {
tokio::task::yield_now().await;
}
}
}

// Reset lazy loading because this is an initial sync
services()
.rooms
.lazy_loading
.lazy_load_reset(sender_user, sender_device, room_id)?;
// The state_events above should contain all timeline_users, let's mark them as
// lazy loaded.
services()
.rooms
.lazy_loading
.lazy_load_mark_sent(sender_user, sender_device, room_id, lazy_loaded, next_batchcount)
.await;
(heroes, joined_member_count, invited_member_count, true, state_events)
} else {
// Incremental /sync
let since_shortstatehash = since_shortstatehash.unwrap();
let mut delta_state_events = Vec::new();
if since_shortstatehash != current_shortstatehash {
let current_state_ids = services()
.rooms
.state_accessor
.state_full_ids(current_shortstatehash)
.await?;
let since_state_ids = services()
.rooms
.state_accessor
.state_full_ids(since_shortstatehash)
.await?;
for (key, id) in current_state_ids {
if full_state || since_state_ids.get(&key) != Some(&id) {
let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};

delta_state_events.push(pdu);
tokio::task::yield_now().await;
}
}
}

let encrypted_room = services()
.rooms
.state_accessor
.state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")?
.is_some();
let since_encryption = services().rooms.state_accessor.state_get(
since_shortstatehash,
&StateEventType::RoomEncryption,
"",
)?;
// Calculations:
let new_encrypted_room = encrypted_room && since_encryption.is_none();
let send_member_count = delta_state_events
.iter()
.any(|event| event.kind == TimelineEventType::RoomMember);
for state_event in &delta_state_events {
if let Some(state_key) = &state_event.state_key {
let user_id = UserId::parse(state_key.clone())
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
if user_id == sender_user {
continue;
}
let new_membership =
serde_json::from_str::<RoomMemberEventContent>(state_event.content.get())
.map_err(|_| Error::bad_database("Invalid PDU in database."))?
.membership;
match new_membership {
MembershipState::Join => {
// A new user joined an encrypted room
if !share_encrypted_room(sender_user, &user_id, room_id)? {
device_list_updates.insert(user_id);
}
},
MembershipState::Leave => {
// Write down users that have left encrypted rooms we are in
left_encrypted_users.insert(user_id);
},
_ => {},
}
}
}

if joined_since_last_sync && encrypted_room || new_encrypted_room {
// If the user is in a new encrypted room, give them all joined users
device_list_updates.extend(
services()
.rooms
.state_cache
.room_members(room_id)
.flatten()
.filter(|user_id| {
// Don't send key updates from the sender to the sender
sender_user != user_id
})
.filter(|user_id| {
// Only send keys if the sender doesn't share an encrypted room with the target
// already
!share_encrypted_room(sender_user, user_id, room_id).unwrap_or(false)
}),
);
}
let (joined_member_count, invited_member_count, heroes) = if send_member_count {
calculate_counts()?
} else {
(None, None, Vec::new())
};
let mut state_events = delta_state_events;
let mut lazy_loaded = HashSet::new();
// Mark all member events we're returning as lazy-loaded
for pdu in &state_events {
if pdu.kind == TimelineEventType::RoomMember {
match UserId::parse(
pdu.state_key
.as_ref()
.expect("State event has state key")
.clone(),
) {
Ok(state_key_userid) => {
lazy_loaded.insert(state_key_userid);
},
Err(e) => error!("Invalid state key for member event: {}", e),
}
}
}
// Fetch contextual member state events for events from the timeline, and
// mark them as lazy-loaded as well.
for (_, event) in &timeline_pdus {
if lazy_loaded.contains(&event.sender) {
continue;
}
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
sender_user,
sender_device,
room_id,
&event.sender,
)? || lazy_load_send_redundant
{
if let Some(member_event) = services().rooms.state_accessor.room_state_get(
room_id,
&StateEventType::RoomMember,
event.sender.as_str(),
)? {
lazy_loaded.insert(event.sender.clone());
state_events.push(member_event);
}
}
}
services()
.rooms
.lazy_loading
.lazy_load_mark_sent(sender_user, sender_device, room_id, lazy_loaded, next_batchcount)
.await;
(
heroes,
joined_member_count,
invited_member_count,
joined_since_last_sync,
state_events,
)
}
};
// Look for device list updates in this room
device_list_updates.extend(
services()
.users
.keys_changed(room_id.as_ref(), since, None)
.filter_map(Result::ok),
);
let notification_count = if send_notification_counts {
Some(
services()
.rooms
.user
.notification_count(sender_user, room_id)?
.try_into()
.expect("notification count can't go that high"),
)
} else {
None
};
let highlight_count = if send_notification_counts {
Some(
services()
.rooms
.user
.highlight_count(sender_user, room_id)?
.try_into()
.expect("highlight count can't go that high"),
)
} else {
None
};
let prev_batch = timeline_pdus
.first()
.map_or(Ok::<_, Error>(None), |(pdu_count, _)| {
Ok(Some(match pdu_count {
PduCount::Backfilled(_) => {
error!("timeline in backfill state?!");
"0".to_owned()
},
PduCount::Normal(c) => c.to_string(),
}))
})?;
let room_events: Vec<_> = timeline_pdus
.iter()
.map(|(_, pdu)| pdu.to_sync_room_event())
.collect();
let mut edus: Vec<_> = services()
.rooms
.read_receipt
.readreceipts_since(room_id, since)
.filter_map(Result::ok) // Filter out buggy events
.map(|(_, _, v)| v)
.collect();

if services().rooms.typing.last_typing_update(room_id).await? > since {
edus.push(
serde_json::from_str(
&serde_json::to_string(&services().rooms.typing.typings_all(room_id).await?)
.expect("event is valid, we just created it"),
)
.expect("event is valid, we just created it"),
);
}
// Save the state after this sync so we can send the correct state diff next
// sync
services()
.rooms
.user
.associate_token_shortstatehash(room_id, next_batch, current_shortstatehash)?;
Ok(JoinedRoom {
account_data: RoomAccountData {
events: services()
.account_data
.changes_since(Some(room_id), sender_user, since)?
.into_iter()
.filter_map(|(_, v)| {
serde_json::from_str(v.json().get())
.map_err(|_| Error::bad_database("Invalid account event in database."))
.ok()
})
.collect(),
},
summary: RoomSummary {
heroes,
joined_member_count: joined_member_count.map(ruma_from_u64),
invited_member_count: invited_member_count.map(ruma_from_u64),
},
unread_notifications: UnreadNotificationsCount {
highlight_count,
notification_count,
},
timeline: Timeline {
limited: limited || joined_since_last_sync,
prev_batch,
events: room_events,
},
state: State {
events: state_events
.iter()
.map(|pdu| pdu.to_sync_state_event())
.collect(),
},
ephemeral: Ephemeral {
events: edus,
},
unread_thread_notifications: BTreeMap::new(),
})
}
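/// Loads up to `limit` timeline events for `room_id` that are newer than
/// `roomsincecount`; the boolean is `true` when older events had to be
/// dropped, i.e. the timeline is "limited".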
fn load_timeline(
sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, limit: u64,
) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> {
let timeline_pdus;
let limited;
if services()
.rooms
.timeline
.last_timeline_count(sender_user, room_id)?
> roomsincecount
{
let mut non_timeline_pdus = services()
.rooms
.timeline
.pdus_until(sender_user, room_id, PduCount::max())?
.filter_map(|r| {
// Filter out buggy events
if r.is_err() {
error!("Bad pdu in pdus_since: {:?}", r);
}
r.ok()
})
.take_while(|(pducount, _)| pducount > &roomsincecount);
// Take the last events for the timeline
timeline_pdus = non_timeline_pdus
.by_ref()
.take(usize_from_u64_truncated(limit))
.collect::<Vec<_>>()
.into_iter()
.rev()
.collect::<Vec<_>>();
// The /sync response doesn't always return all messages, so we say the output
// is limited unless there are events in non_timeline_pdus
limited = non_timeline_pdus.next().is_some();
} else {
timeline_pdus = Vec::new();
limited = false;
}

Ok((timeline_pdus, limited))
}
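/// Returns `true` if `sender_user` and `user_id` share at least one encrypted
/// room other than `ignore_room`.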
fn share_encrypted_room(sender_user: &UserId, user_id: &UserId, ignore_room: &RoomId) -> Result<bool> {
Ok(services()
.rooms
.user
.get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])?
.filter_map(Result::ok)
.filter(|room_id| room_id != ignore_room)
.filter_map(|other_room_id| {
Some(
services()
.rooms
.state_accessor
.room_state_get(&other_room_id, &StateEventType::RoomEncryption, "")
.ok()?
.is_some(),
)
})
.any(|encrypted| encrypted))
}
/// POST `/_matrix/client/unstable/org.matrix.msc3575/sync`
///
/// Sliding Sync endpoint (future endpoint: `/_matrix/client/v4/sync`)
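///
/// Clients drive this endpoint with a `pos` token (the sliding-sync analogue
/// of `since`) and window `ranges` over their room list, e.g. `[[0, 99]]` for
/// the first hundred rooms.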
pub(crate) async fn sync_events_v4_route(
body: Ruma<sync_events::v4::Request>,
) -> Result<sync_events::v4::Response, RumaResponse<UiaaResponse>> {
let sender_user = body.sender_user.expect("user is authenticated");
let sender_device = body.sender_device.expect("user is authenticated");
let mut body = body.body;
// Setup watchers, so if there's no response, we can wait for them
let watcher = services().globals.watch(&sender_user, &sender_device);
let next_batch = services().globals.next_count()?;
let globalsince = body
.pos
.as_ref()
.and_then(|string| string.parse().ok())
.unwrap_or(0);
if globalsince == 0 {
if let Some(conn_id) = &body.conn_id {
services().users.forget_sync_request_connection(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
);
}
}
// Get sticky parameters from cache
let known_rooms =
services()
.users
.update_sync_request_with_cache(sender_user.clone(), sender_device.clone(), &mut body);
let all_joined_rooms = services()
.rooms
.state_cache
.rooms_joined(&sender_user)
.filter_map(Result::ok)
.collect::<Vec<_>>();
if body.extensions.to_device.enabled.unwrap_or(false) {
services()
.users
.remove_to_device_events(&sender_user, &sender_device, globalsince)?;
}
let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
let mut device_list_changes = HashSet::new();
let mut device_list_left = HashSet::new();
if body.extensions.e2ee.enabled.unwrap_or(false) {
// Look for device list updates of this account
device_list_changes.extend(
services()
.users
.keys_changed(sender_user.as_ref(), globalsince, None)
.filter_map(Result::ok),
);
for room_id in &all_joined_rooms {
let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? else {
error!("Room {} has no state", room_id);
continue;
};
let since_shortstatehash = services()
.rooms
.user
.get_token_shortstatehash(room_id, globalsince)?;
let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash
.and_then(|shortstatehash| {
services()
.rooms
.state_accessor
.state_get(shortstatehash, &StateEventType::RoomMember, sender_user.as_str())
.transpose()
})
.transpose()?
.and_then(|pdu| {
serde_json::from_str(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid PDU in database."))
.ok()
});
let encrypted_room = services()
.rooms
.state_accessor
.state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")?
.is_some();
if let Some(since_shortstatehash) = since_shortstatehash {
// Skip if there are only timeline changes
if since_shortstatehash == current_shortstatehash {
continue;
}
let since_encryption = services().rooms.state_accessor.state_get(
since_shortstatehash,
&StateEventType::RoomEncryption,
"",
)?;
let joined_since_last_sync =
since_sender_member.map_or(true, |member| member.membership != MembershipState::Join);
let new_encrypted_room = encrypted_room && since_encryption.is_none();
if encrypted_room {
let current_state_ids = services()
.rooms
.state_accessor
.state_full_ids(current_shortstatehash)
.await?;
let since_state_ids = services()
.rooms
.state_accessor
.state_full_ids(since_shortstatehash)
.await?;
for (key, id) in current_state_ids {
if since_state_ids.get(&key) != Some(&id) {
let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else {
error!("Pdu in state not found: {}", id);
continue;
};
if pdu.kind == TimelineEventType::RoomMember {
if let Some(state_key) = &pdu.state_key {
let user_id = UserId::parse(state_key.clone())
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
if user_id == sender_user {
continue;
}
let new_membership =
serde_json::from_str::<RoomMemberEventContent>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid PDU in database."))?
.membership;
match new_membership {
MembershipState::Join => {
// A new user joined an encrypted room
if !share_encrypted_room(&sender_user, &user_id, room_id)? {
device_list_changes.insert(user_id);
}
},
MembershipState::Leave => {
// Write down users that have left encrypted rooms we are in
left_encrypted_users.insert(user_id);
},
_ => {},
}
}
}
}
}
if joined_since_last_sync || new_encrypted_room {
// If the user is in a new encrypted room, give them all joined users
device_list_changes.extend(
services()
.rooms
.state_cache
.room_members(room_id)
.flatten()
.filter(|user_id| {
// Don't send key updates from the sender to the sender
&sender_user != user_id
})
.filter(|user_id| {
// Only send keys if the sender doesn't share an encrypted room with the target
// already
!share_encrypted_room(&sender_user, user_id, room_id).unwrap_or(false)
}),
);
}
}
}
// Look for device list updates in this room
device_list_changes.extend(
services()
.users
.keys_changed(room_id.as_ref(), globalsince, None)
.filter_map(Result::ok),
);
}
for user_id in left_encrypted_users {
let dont_share_encrypted_room = services()
.rooms
.user
.get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
.filter_map(Result::ok)
.filter_map(|other_room_id| {
Some(
services()
.rooms
.state_accessor
.room_state_get(&other_room_id, &StateEventType::RoomEncryption, "")
.ok()?
.is_some(),
)
})
.all(|encrypted| !encrypted);
// If the user doesn't share an encrypted room with the target anymore, we need
// to tell them
if dont_share_encrypted_room {
device_list_left.insert(user_id);
}
}
}
let mut lists = BTreeMap::new();
let mut todo_rooms = BTreeMap::new(); // and required state
for (list_id, list) in body.lists {
if list.filters.and_then(|f| f.is_invite).unwrap_or(false) {
continue;
}
let mut new_known_rooms = BTreeSet::new();
lists.insert(
list_id.clone(),
sync_events::v4::SyncList {
ops: list
.ranges
.into_iter()
.map(|mut r| {
r.0 = r.0.clamp(
uint!(0),
UInt::try_from(all_joined_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX),
);
r.1 = r.1.clamp(
r.0,
UInt::try_from(all_joined_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX),
);
let room_ids = all_joined_rooms[usize_from_ruma(r.0)..=usize_from_ruma(r.1)].to_vec();
new_known_rooms.extend(room_ids.iter().cloned());
for room_id in &room_ids {
let todo_room = todo_rooms
.entry(room_id.clone())
.or_insert((BTreeSet::new(), 0, u64::MAX));
let limit = list
.room_details
.timeline_limit
.map_or(10, u64::from)
.min(100);
todo_room
.0
.extend(list.room_details.required_state.iter().cloned());
todo_room.1 = todo_room.1.max(limit);
// 0 means unknown because it got out of date
todo_room.2 = todo_room.2.min(
known_rooms
.get(&list_id)
.and_then(|k| k.get(room_id))
.copied()
.unwrap_or(0),
);
}
sync_events::v4::SyncOp {
op: SlidingOp::Sync,
range: Some(r),
index: None,
room_ids,
room_id: None,
}
})
.collect(),
count: ruma_from_usize(all_joined_rooms.len()),
},
);
if let Some(conn_id) = &body.conn_id {
services().users.update_sync_known_rooms(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
list_id,
new_known_rooms,
globalsince,
);
}
}
let mut known_subscription_rooms = BTreeSet::new();
for (room_id, room) in &body.room_subscriptions {
if !services().rooms.metadata.exists(room_id)? {
continue;
}
let todo_room = todo_rooms
.entry(room_id.clone())
.or_insert((BTreeSet::new(), 0, u64::MAX));
let limit = room.timeline_limit.map_or(10, u64::from).min(100);
todo_room.0.extend(room.required_state.iter().cloned());
todo_room.1 = todo_room.1.max(limit);
// 0 means unknown because it got out of date
todo_room.2 = todo_room.2.min(
known_rooms
.get("subscriptions")
.and_then(|k| k.get(room_id))
.copied()
.unwrap_or(0),
);
known_subscription_rooms.insert(room_id.clone());
}
for r in body.unsubscribe_rooms {
known_subscription_rooms.remove(&r);
body.room_subscriptions.remove(&r);
}
if let Some(conn_id) = &body.conn_id {
services().users.update_sync_known_rooms(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
"subscriptions".to_owned(),
known_subscription_rooms,
globalsince,
);
}
if let Some(conn_id) = &body.conn_id {
services().users.update_sync_subscriptions(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
body.room_subscriptions,
);
}
let mut rooms = BTreeMap::new();
for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms {
let roomsincecount = PduCount::Normal(*roomsince);
let (timeline_pdus, limited) = load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit)?;
if roomsince != &0 && timeline_pdus.is_empty() {
continue;
}
let prev_batch = timeline_pdus
.first()
.map_or(Ok::<_, Error>(None), |(pdu_count, _)| {
Ok(Some(match pdu_count {
PduCount::Backfilled(_) => {
error!("timeline in backfill state?!");
"0".to_owned()
},
PduCount::Normal(c) => c.to_string(),
}))
})?
.or_else(|| {
if roomsince != &0 {
Some(roomsince.to_string())
} else {
None
}
});
let room_events: Vec<_> = timeline_pdus
.iter()
.map(|(_, pdu)| pdu.to_sync_room_event())
.collect();
let required_state = required_state_request
.iter()
.map(|state| {
services()
.rooms
.state_accessor
.room_state_get(room_id, &state.0, &state.1)
})
.filter_map(Result::ok)
.flatten()
.map(|state| state.to_sync_state_event())
.collect();
// Heroes
let heroes = services()
.rooms
.state_cache
.room_members(room_id)
.filter_map(Result::ok)
.filter(|member| member != &sender_user)
.map(|member| {
Ok::<_, Error>(
services()
.rooms
.state_accessor
.get_member(room_id, &member)?
.map(|memberevent| {
(
memberevent
.displayname
.unwrap_or_else(|| member.to_string()),
memberevent.avatar_url,
)
}),
)
})
.filter_map(Result::ok)
.flatten()
.take(5)
.collect::<Vec<_>>();
let name = match heroes.len().cmp(&(1_usize)) {
Ordering::Greater => {
let firsts = heroes[1..]
.iter()
.map(|h| h.0.clone())
.collect::<Vec<_>>()
.join(", ");
Some(format!("{firsts} and {last}"))
},
Ordering::Equal => Some(heroes[0].0.clone()),
Ordering::Less => None,
};
let heroes_avatar = if heroes.len() == 1 {
heroes[0].1.clone()
} else {
None
};
rooms.insert(
room_id.clone(),
sync_events::v4::SlidingSyncRoom {
name: services().rooms.state_accessor.get_name(room_id)?.or(name),
avatar: if let Some(heroes_avatar) = heroes_avatar {
ruma::JsOption::Some(heroes_avatar)
} else {
match services().rooms.state_accessor.get_avatar(room_id)? {
ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url),
ruma::JsOption::Null => ruma::JsOption::Null,
ruma::JsOption::Undefined => ruma::JsOption::Undefined,
}
},
initial: Some(roomsince == &0),
is_dm: None,
invite_state: None,
unread_notifications: UnreadNotificationsCount {
highlight_count: Some(
services()
.rooms
.user
.highlight_count(&sender_user, room_id)?
.try_into()
.expect("notification count can't go that high"),
),
notification_count: Some(
services()
.rooms
.user
.notification_count(&sender_user, room_id)?
.try_into()
.expect("notification count can't go that high"),
),
},
timeline: room_events,
required_state,
prev_batch,
limited,
joined_count: Some(
services()
.rooms
.state_cache
.room_joined_count(room_id)?
.unwrap_or(0)
.try_into()
.unwrap_or_else(|_| uint!(0)),
),
invited_count: Some(
services()
.rooms
.state_cache
.room_invited_count(room_id)?
.unwrap_or(0)
.try_into()
.unwrap_or_else(|_| uint!(0)),
),
num_live: None, // Count events in timeline greater than global sync counter
timestamp: None,
},
);
}

if rooms
.iter()
.all(|(_, r)| r.timeline.is_empty() && r.required_state.is_empty())
{
// Hang a few seconds so requests are not spammed
// Stop hanging if new info arrives
let default = Duration::from_secs(30);
let duration = cmp::min(body.timeout.unwrap_or(default), default);
_ = tokio::time::timeout(duration, watcher).await;
}
Ok(sync_events::v4::Response {
initial: globalsince == 0,
txn_id: body.txn_id.clone(),
pos: next_batch.to_string(),
lists,
rooms,
extensions: sync_events::v4::Extensions {
to_device: if body.extensions.to_device.enabled.unwrap_or(false) {
Some(sync_events::v4::ToDevice {
events: services()
.users
.get_to_device_events(&sender_user, &sender_device)?,
next_batch: next_batch.to_string(),
})
} else {
None
},
e2ee: sync_events::v4::E2EE {
device_lists: DeviceLists {
changed: device_list_changes.into_iter().collect(),
left: device_list_left.into_iter().collect(),
},
device_one_time_keys_count: services()
.users
.count_one_time_keys(&sender_user, &sender_device)?,
// Fallback keys are not yet supported
device_unused_fallback_key_types: None,
},
account_data: sync_events::v4::AccountData {
global: if body.extensions.account_data.enabled.unwrap_or(false) {
services()
.account_data
.changes_since(None, &sender_user, globalsince)?
.into_iter()
.filter_map(|(_, v)| {
serde_json::from_str(v.json().get())
.map_err(|_| Error::bad_database("Invalid account event in database."))
.ok()
})
.collect()
} else {
Vec::new()
},
rooms: BTreeMap::new(),
},
receipts: sync_events::v4::Receipts {
rooms: BTreeMap::new(),
},
typing: sync_events::v4::Typing {
rooms: BTreeMap::new(),
},
},
delta_token: None,
})