diff --git a/Cargo.lock b/Cargo.lock
index 83e21a3db8a9fc5057ead1f6695c7efa256f8fb9..2a7791ce15245e8381c40cd5e354e9def4480abf 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2045,7 +2045,7 @@ dependencies = [
 [[package]]
 name = "ruma"
 version = "0.3.0"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "assign",
  "js_int",
@@ -2066,7 +2066,7 @@ dependencies = [
 [[package]]
 name = "ruma-api"
 version = "0.18.3"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "bytes",
  "http",
@@ -2082,7 +2082,7 @@ dependencies = [
 [[package]]
 name = "ruma-api-macros"
 version = "0.18.3"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "proc-macro-crate",
  "proc-macro2",
@@ -2093,7 +2093,7 @@ dependencies = [
 [[package]]
 name = "ruma-appservice-api"
 version = "0.4.0"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "ruma-api",
  "ruma-common",
@@ -2107,7 +2107,7 @@ dependencies = [
 [[package]]
 name = "ruma-client-api"
 version = "0.12.2"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "assign",
  "bytes",
@@ -2127,7 +2127,7 @@ dependencies = [
 [[package]]
 name = "ruma-common"
 version = "0.6.0"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "indexmap",
  "js_int",
@@ -2142,7 +2142,7 @@ dependencies = [
 [[package]]
 name = "ruma-events"
 version = "0.24.4"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "indoc",
  "js_int",
@@ -2158,7 +2158,7 @@ dependencies = [
 [[package]]
 name = "ruma-events-macros"
 version = "0.24.4"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "proc-macro-crate",
  "proc-macro2",
@@ -2169,7 +2169,7 @@ dependencies = [
 [[package]]
 name = "ruma-federation-api"
 version = "0.3.0"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "js_int",
  "ruma-api",
@@ -2184,7 +2184,7 @@ dependencies = [
 [[package]]
 name = "ruma-identifiers"
 version = "0.20.0"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "paste",
  "rand 0.8.4",
@@ -2198,7 +2198,7 @@ dependencies = [
 [[package]]
 name = "ruma-identifiers-macros"
 version = "0.20.0"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "quote",
  "ruma-identifiers-validation",
@@ -2208,12 +2208,15 @@ dependencies = [
 [[package]]
 name = "ruma-identifiers-validation"
 version = "0.5.0"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
+dependencies = [
+ "thiserror",
+]
 
 [[package]]
 name = "ruma-identity-service-api"
 version = "0.3.0"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "js_int",
  "ruma-api",
@@ -2226,7 +2229,7 @@ dependencies = [
 [[package]]
 name = "ruma-push-gateway-api"
 version = "0.3.0"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "js_int",
  "ruma-api",
@@ -2241,7 +2244,7 @@ dependencies = [
 [[package]]
 name = "ruma-serde"
 version = "0.5.0"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "bytes",
  "form_urlencoded",
@@ -2255,7 +2258,7 @@ dependencies = [
 [[package]]
 name = "ruma-serde-macros"
 version = "0.5.0"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "proc-macro-crate",
  "proc-macro2",
@@ -2266,7 +2269,7 @@ dependencies = [
 [[package]]
 name = "ruma-signatures"
 version = "0.9.0"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "base64 0.13.0",
  "ed25519-dalek",
@@ -2283,7 +2286,7 @@ dependencies = [
 [[package]]
 name = "ruma-state-res"
 version = "0.3.0"
-source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced"
+source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
 dependencies = [
  "itertools 0.10.1",
  "js_int",
diff --git a/Cargo.toml b/Cargo.toml
index d28e0b7984268da1081de00b033a02dba3713a7b..2593b4a81b723a2e29f531f27b1b928b13f9aa57 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -18,8 +18,8 @@ edition = "2018"
 rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests
 
 # Used for matrix spec type definitions and helpers
-ruma = { git = "https://github.com/ruma/ruma", rev = "f5ab038e22421ed338396ece977b6b2844772ced", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
-#ruma = { git = "https://github.com/timokoesters/ruma", rev = "995ccea20f5f6d4a8fb22041749ed4de22fa1b6a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
+#ruma = { git = "https://github.com/ruma/ruma", rev = "f5ab038e22421ed338396ece977b6b2844772ced", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
+ruma = { git = "https://github.com/DevinR528/ruma", rev = "2215049b60a1c3358f5a52215adf1e7bb88619a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
 #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
 
 # Used for long polling and federation sender, should be the same as rocket::tokio
diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs
index 46f4b9f30840a6573ad2f8224b0170d07f92488c..29926e3bb68a368593ee3015eca56cc5717096b4 100644
--- a/src/client_server/membership.rs
+++ b/src/client_server/membership.rs
@@ -640,23 +640,40 @@ async fn join_room_by_id_helper(
 
             db.rooms.add_pdu_outlier(&event_id, &value)?;
             if let Some(state_key) = &pdu.state_key {
-                state.insert((pdu.kind.clone(), state_key.clone()), pdu.event_id.clone());
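+                // Key the incoming state by the numeric shortstatekey instead of (EventType, state_key)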
+                let shortstatekey =
+                    db.rooms
+                        .get_or_create_shortstatekey(&pdu.kind, state_key, &db.globals)?;
+                state.insert(shortstatekey, pdu.event_id.clone());
             }
         }
 
-        state.insert(
-            (
-                pdu.kind.clone(),
-                pdu.state_key.clone().expect("join event has state key"),
-            ),
-            pdu.event_id.clone(),
-        );
+        let incoming_shortstatekey = db.rooms.get_or_create_shortstatekey(
+            &pdu.kind,
+            pdu.state_key
+                .as_ref()
+                .expect("Pdu is a membership state event"),
+            &db.globals,
+        )?;
+
+        state.insert(incoming_shortstatekey, pdu.event_id.clone());
 
-        if state.get(&(EventType::RoomCreate, "".to_owned())).is_none() {
+        let create_shortstatekey = db
+            .rooms
+            .get_shortstatekey(&EventType::RoomCreate, "")?
+            .expect("Room exists");
+
+        if state.get(&create_shortstatekey).is_none() {
             return Err(Error::BadServerResponse("State contained no create event."));
         }
 
-        db.rooms.force_state(room_id, state, &db)?;
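+        // force_state now expects the state as compressed (shortstatekey + shorteventid) events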
+        db.rooms.force_state(
+            room_id,
+            state
+                .into_iter()
+                .map(|(k, id)| db.rooms.compress_state_event(k, &id, &db.globals))
+                .collect::<Result<HashSet<_>>>()?,
+            &db,
+        )?;
 
         for result in futures::future::join_all(
             send_join_response
@@ -913,8 +930,8 @@ pub async fn invite_helper<'a>(
                 &room_version,
                 &Arc::new(pdu.clone()),
                 create_prev_event,
-                &auth_events,
                 None, // TODO: third_party_invite
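+                // The updated state-res auth_check takes a lookup closure instead of a prebuilt StateMap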
+                |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone),
             )
             .map_err(|e| {
                 error!("{:?}", e);
diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs
index c196b2a918a5592652a1dc38f5918a86ebb20488..81260479f18e2df69e8a37ac22ae2557a505e165 100644
--- a/src/client_server/sync.rs
+++ b/src/client_server/sync.rs
@@ -348,7 +348,7 @@ async fn sync_helper(
             let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
             let state_events = current_state_ids
                 .iter()
-                .map(|id| db.rooms.get_pdu(id))
+                .map(|(_, id)| db.rooms.get_pdu(id))
                 .filter_map(|r| r.ok().flatten())
                 .collect::<Vec<_>>();
 
@@ -393,18 +393,19 @@ async fn sync_helper(
             let state_events = if joined_since_last_sync {
                 current_state_ids
                     .iter()
-                    .map(|id| db.rooms.get_pdu(id))
+                    .map(|(_, id)| db.rooms.get_pdu(id))
                     .filter_map(|r| r.ok().flatten())
                     .collect::<Vec<_>>()
             } else {
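+                // Compare entries per shortstatekey instead of taking a set difference of event ids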
                 current_state_ids
-                    .difference(&since_state_ids)
-                    .filter(|id| {
+                    .iter()
+                    .filter(|(key, id)| since_state_ids.get(key) != Some(id))
+                    .filter(|(_, id)| {
                         !timeline_pdus
                             .iter()
                             .any(|(_, timeline_pdu)| timeline_pdu.event_id == **id)
                     })
-                    .map(|id| db.rooms.get_pdu(id))
+                    .map(|(_, id)| db.rooms.get_pdu(id))
                     .filter_map(|r| r.ok().flatten())
                     .collect()
             };
diff --git a/src/database.rs b/src/database.rs
index 3d1324e6876aaa83127d2dae35b1e4ef626f6b75..a6ac67f8a8ceb52e70ad536740652ef396c4fefd 100644
--- a/src/database.rs
+++ b/src/database.rs
@@ -262,8 +262,8 @@ pub async fn load_or_create(config: &Config) -> Result<Arc<TokioRwLock<Self>>> {
                 userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?,
 
                 statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?,
+                shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?,
 
-                shortroomid_roomid: builder.open_tree("shortroomid_roomid")?,
                 roomid_shortroomid: builder.open_tree("roomid_shortroomid")?,
 
                 shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?,
@@ -279,8 +279,9 @@ pub async fn load_or_create(config: &Config) -> Result<Arc<TokioRwLock<Self>>> {
                 auth_chain_cache: Mutex::new(LruCache::new(100_000)),
                 shorteventid_cache: Mutex::new(LruCache::new(1_000_000)),
                 eventidshort_cache: Mutex::new(LruCache::new(1_000_000)),
+                shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)),
                 statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)),
-                stateinfo_cache: Mutex::new(LruCache::new(50)),
+                stateinfo_cache: Mutex::new(LruCache::new(1000)),
             },
             account_data: account_data::AccountData {
                 roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?,
@@ -579,7 +580,6 @@ pub async fn load_or_create(config: &Config) -> Result<Arc<TokioRwLock<Self>>> {
                 for (room_id, _) in db.rooms.roomid_shortstatehash.iter() {
                     let shortroomid = db.globals.next_count()?.to_be_bytes();
                     db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?;
-                    db.rooms.shortroomid_roomid.insert(&shortroomid, &room_id)?;
                     println!("Migration: 8");
                 }
                 // Update pduids db layout
@@ -700,6 +700,19 @@ pub async fn load_or_create(config: &Config) -> Result<Arc<TokioRwLock<Self>>> {
 
                 println!("Migration: 8 -> 9 finished");
             }
+
+            if db.globals.database_version()? < 10 {
+                // Add other direction for shortstatekeys
+                for (statekey, shortstatekey) in db.rooms.statekey_shortstatekey.iter() {
+                    db.rooms
+                        .shortstatekey_statekey
+                        .insert(&shortstatekey, &statekey)?;
+                }
+
+                db.globals.bump_database_version(10)?;
+
+                println!("Migration: 9 -> 10 finished");
+            }
         }
 
         let guard = db.read().await;
diff --git a/src/database/rooms.rs b/src/database/rooms.rs
index 9e57c40e5569ed92f40c1e3fd0eeb465e252f0e9..8bb32fead24c7aa7158a3e65de6c3daf3acedea3 100644
--- a/src/database/rooms.rs
+++ b/src/database/rooms.rs
@@ -23,13 +23,13 @@
     uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
 };
 use std::{
-    collections::{BTreeMap, BTreeSet, HashMap, HashSet},
+    collections::{BTreeMap, HashMap, HashSet},
     convert::{TryFrom, TryInto},
     mem::size_of,
     sync::{Arc, Mutex},
 };
 use tokio::sync::MutexGuard;
-use tracing::{debug, error, warn};
+use tracing::{error, warn};
 
 use super::{abstraction::Tree, admin::AdminCommand, pusher};
 
@@ -73,8 +73,8 @@ pub struct Rooms {
     pub(super) shorteventid_shortstatehash: Arc<dyn Tree>,
     /// StateKey = EventType + StateKey, ShortStateKey = Count
     pub(super) statekey_shortstatekey: Arc<dyn Tree>,
+    pub(super) shortstatekey_statekey: Arc<dyn Tree>,
 
-    pub(super) shortroomid_roomid: Arc<dyn Tree>,
     pub(super) roomid_shortroomid: Arc<dyn Tree>,
 
     pub(super) shorteventid_eventid: Arc<dyn Tree>,
@@ -95,6 +95,7 @@ pub struct Rooms {
     pub(super) shorteventid_cache: Mutex<LruCache<u64, EventId>>,
     pub(super) eventidshort_cache: Mutex<LruCache<EventId, u64>>,
     pub(super) statekeyshort_cache: Mutex<LruCache<(EventType, String), u64>>,
+    pub(super) shortstatekey_cache: Mutex<LruCache<u64, (EventType, String)>>,
     pub(super) stateinfo_cache: Mutex<
         LruCache<
             u64,
@@ -112,7 +113,7 @@ impl Rooms {
     /// Builds a map from shortstatekey to event id by iterating over all entries
     /// of the given shortstatehash; this gives the full state for that snapshot.
     #[tracing::instrument(skip(self))]
-    pub fn state_full_ids(&self, shortstatehash: u64) -> Result<BTreeSet<EventId>> {
+    pub fn state_full_ids(&self, shortstatehash: u64) -> Result<BTreeMap<u64, EventId>> {
         let full_state = self
             .load_shortstatehash_info(shortstatehash)?
             .pop()
@@ -138,7 +139,7 @@ pub fn state_full(
             .into_iter()
             .map(|compressed| self.parse_compressed_state_event(compressed))
             .filter_map(|r| r.ok())
-            .map(|eventid| self.get_pdu(&eventid))
+            .map(|(_, eventid)| self.get_pdu(&eventid))
             .filter_map(|r| r.ok().flatten())
             .map(|pdu| {
                 Ok::<_, Error>((
@@ -176,7 +177,11 @@ pub fn state_get_id(
         Ok(full_state
             .into_iter()
             .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes()))
-            .and_then(|compressed| self.parse_compressed_state_event(compressed).ok()))
+            .and_then(|compressed| {
+                self.parse_compressed_state_event(compressed)
+                    .ok()
+                    .map(|(_, id)| id)
+            }))
     }
 
     /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`).
@@ -232,6 +237,13 @@ pub fn get_auth_events(
         state_key: Option<&str>,
         content: &serde_json::Value,
     ) -> Result<StateMap<Arc<PduEvent>>> {
+        let shortstatehash =
+            if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? {
+                current_shortstatehash
+            } else {
+                return Ok(HashMap::new());
+            };
+
         let auth_events = state_res::auth_types_for_event(
             kind,
             sender,
@@ -239,19 +251,30 @@ pub fn get_auth_events(
             content.clone(),
         );
 
-        let mut events = StateMap::new();
-        for (event_type, state_key) in auth_events {
-            if let Some(pdu) = self.room_state_get(room_id, &event_type, &state_key)? {
-                events.insert((event_type, state_key), pdu);
-            } else {
-                // This is okay because when creating a new room some events were not created yet
-                debug!(
-                    "{:?}: Could not find {} {:?} in state",
-                    content, event_type, state_key
-                );
-            }
-        }
-        Ok(events)
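+        // Map each required (event type, state key) pair to its shortstatekey, skipping keys that don't exist yet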
+        let mut sauthevents = auth_events
+            .into_iter()
+            .filter_map(|(event_type, state_key)| {
+                self.get_shortstatekey(&event_type, &state_key)
+                    .ok()
+                    .flatten()
+                    .map(|s| (s, (event_type, state_key)))
+            })
+            .collect::<HashMap<_, _>>();
+
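+        // Walk the full compressed room state once and keep only the auth events we need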
+        let full_state = self
+            .load_shortstatehash_info(shortstatehash)?
+            .pop()
+            .expect("there is always one layer")
+            .1;
+
+        Ok(full_state
+            .into_iter()
+            .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok())
+            .filter_map(|(shortstatekey, event_id)| {
+                sauthevents.remove(&shortstatekey).map(|k| (k, event_id))
+            })
+            .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu)))
+            .collect())
     }
 
     /// Generate a new StateHash.
@@ -306,32 +329,19 @@ pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result<Option<Arc<PduEvent>
     /// Force the creation of a new StateHash and insert it into the db.
     ///
     /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot.
-    #[tracing::instrument(skip(self, new_state, db))]
+    #[tracing::instrument(skip(self, new_state_ids_compressed, db))]
     pub fn force_state(
         &self,
         room_id: &RoomId,
-        new_state: HashMap<(EventType, String), EventId>,
+        new_state_ids_compressed: HashSet<CompressedStateEvent>,
         db: &Database,
     ) -> Result<()> {
         let previous_shortstatehash = self.current_shortstatehash(&room_id)?;
 
-        let new_state_ids_compressed = new_state
-            .iter()
-            .filter_map(|((event_type, state_key), event_id)| {
-                let shortstatekey = self
-                    .get_or_create_shortstatekey(event_type, state_key, &db.globals)
-                    .ok()?;
-                Some(
-                    self.compress_state_event(shortstatekey, event_id, &db.globals)
-                        .ok()?,
-                )
-            })
-            .collect::<HashSet<_>>();
-
         let state_hash = self.calculate_hash(
-            &new_state
-                .values()
-                .map(|event_id| event_id.as_bytes())
+            &new_state_ids_compressed
+                .iter()
+                .map(|bytes| &bytes[..])
                 .collect::<Vec<_>>(),
         );
 
@@ -373,10 +383,11 @@ pub fn force_state(
             )?;
         };
 
-        for event_id in statediffnew
-            .into_iter()
-            .filter_map(|new| self.parse_compressed_state_event(new).ok())
-        {
+        for event_id in statediffnew.into_iter().filter_map(|new| {
+            self.parse_compressed_state_event(new)
+                .ok()
+                .map(|(_, id)| id)
+        }) {
             if let Some(pdu) = self.get_pdu_json(&event_id)? {
                 if pdu.get("type").and_then(|val| val.as_str()) == Some("m.room.member") {
                     if let Ok(pdu) = serde_json::from_value::<PduEvent>(
@@ -504,15 +515,20 @@ pub fn compress_state_event(
         Ok(v.try_into().expect("we checked the size above"))
     }
 
+    /// Returns the (shortstatekey, event id) pair encoded in a compressed state event.
     #[tracing::instrument(skip(self, compressed_event))]
     pub fn parse_compressed_state_event(
         &self,
         compressed_event: CompressedStateEvent,
-    ) -> Result<EventId> {
-        self.get_eventid_from_short(
-            utils::u64_from_bytes(&compressed_event[size_of::<u64>()..])
+    ) -> Result<(u64, EventId)> {
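+        // A compressed event is the shortstatekey bytes followed by the shorteventid bytes (both u64, big endian)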
+        Ok((
+            utils::u64_from_bytes(&compressed_event[0..size_of::<u64>()])
                 .expect("bytes have right length"),
-        )
+            self.get_eventid_from_short(
+                utils::u64_from_bytes(&compressed_event[size_of::<u64>()..])
+                    .expect("bytes have right length"),
+            )?,
+        ))
     }
 
     /// Creates a new shortstatehash that often is just a diff to an already existing
@@ -805,6 +821,8 @@ pub fn get_or_create_shortstatekey(
                 let shortstatekey = globals.next_count()?;
                 self.statekey_shortstatekey
                     .insert(&statekey, &shortstatekey.to_be_bytes())?;
+                self.shortstatekey_statekey
+                    .insert(&shortstatekey.to_be_bytes(), &statekey)?;
                 shortstatekey
             }
         };
@@ -833,11 +851,10 @@ pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result<EventId> {
             .get(&shorteventid.to_be_bytes())?
             .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?;
 
-        let event_id =
-            EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
-                Error::bad_database("EventID in roomid_pduleaves is invalid unicode.")
-            })?)
-            .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))?;
+        let event_id = EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
+            Error::bad_database("EventID in shorteventid_eventid is invalid unicode.")
+        })?)
+        .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?;
 
         self.shorteventid_cache
             .lock()
@@ -847,6 +864,48 @@ pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result<EventId> {
         Ok(event_id)
     }
 
+    #[tracing::instrument(skip(self))]
+    pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(EventType, String)> {
+        if let Some(id) = self
+            .shortstatekey_cache
+            .lock()
+            .unwrap()
+            .get_mut(&shortstatekey)
+        {
+            return Ok(id.clone());
+        }
+
+        let bytes = self
+            .shortstatekey_statekey
+            .get(&shortstatekey.to_be_bytes())?
+            .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?;
+
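+        // The statekey is stored as the event type bytes, 0xff, then the state key bytes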
+        let mut parts = bytes.splitn(2, |&b| b == 0xff);
+        let eventtype_bytes = parts.next().expect("split always returns one entry");
+        let statekey_bytes = parts
+            .next()
+            .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?;
+
+        let event_type =
+            EventType::try_from(utils::string_from_bytes(&eventtype_bytes).map_err(|_| {
+                Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.")
+            })?)
+            .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?;
+
+        let state_key = utils::string_from_bytes(&statekey_bytes).map_err(|_| {
+            Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.")
+        })?;
+
+        let result = (event_type, state_key);
+
+        self.shortstatekey_cache
+            .lock()
+            .unwrap()
+            .insert(shortstatekey, result.clone());
+
+        Ok(result)
+    }
+
     /// Returns the full room state.
     #[tracing::instrument(skip(self))]
     pub fn room_state_full(
@@ -1106,6 +1165,17 @@ pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result<HashSet<EventId>> {
             .collect()
     }
 
+    #[tracing::instrument(skip(self, room_id, event_ids))]
+    pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> {
+        for prev in event_ids {
+            let mut key = room_id.as_bytes().to_vec();
+            key.extend_from_slice(prev.as_bytes());
+            self.referencedevents.insert(&key, &[])?;
+        }
+
+        Ok(())
+    }
+
     /// Replace the leaves of a room.
     ///
     /// The provided `event_ids` become the new leaves, this allows a room to have multiple
@@ -1202,12 +1272,7 @@ pub fn append_pdu(
         }
 
         // We must keep track of all events that have been referenced.
-        for prev in &pdu.prev_events {
-            let mut key = pdu.room_id().as_bytes().to_vec();
-            key.extend_from_slice(prev.as_bytes());
-            self.referencedevents.insert(&key, &[])?;
-        }
-
+        self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?;
         self.replace_pdu_leaves(&pdu.room_id, leaves)?;
 
         let mutex_insert = Arc::clone(
@@ -1565,35 +1630,22 @@ pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result<u64>
     ///
     /// This adds all current state events (not including the incoming event)
     /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`.
-    #[tracing::instrument(skip(self, state, globals))]
+    #[tracing::instrument(skip(self, state_ids_compressed, globals))]
     pub fn set_event_state(
         &self,
         event_id: &EventId,
         room_id: &RoomId,
-        state: &StateMap<Arc<PduEvent>>,
+        state_ids_compressed: HashSet<CompressedStateEvent>,
         globals: &super::globals::Globals,
     ) -> Result<()> {
         let shorteventid = self.get_or_create_shorteventid(&event_id, globals)?;
 
         let previous_shortstatehash = self.current_shortstatehash(&room_id)?;
 
-        let state_ids_compressed = state
-            .iter()
-            .filter_map(|((event_type, state_key), pdu)| {
-                let shortstatekey = self
-                    .get_or_create_shortstatekey(event_type, state_key, globals)
-                    .ok()?;
-                Some(
-                    self.compress_state_event(shortstatekey, &pdu.event_id, globals)
-                        .ok()?,
-                )
-            })
-            .collect::<HashSet<_>>();
-
         let state_hash = self.calculate_hash(
-            &state
-                .values()
-                .map(|pdu| pdu.event_id.as_bytes())
+            &state_ids_compressed
+                .iter()
+                .map(|s| &s[..])
                 .collect::<Vec<_>>(),
         );
 
@@ -1857,8 +1909,8 @@ pub fn build_and_append_pdu(
             &room_version,
             &Arc::new(pdu.clone()),
             create_prev_event,
-            &auth_events,
             None, // TODO: third_party_invite
+            |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone),
         )
         .map_err(|e| {
             error!("{:?}", e);
diff --git a/src/database/sending.rs b/src/database/sending.rs
index 7d7a44aad6c7d8e5bb4ab8db189d7d5752e95fe5..31a1f67e2e6fe1db86a7822d17a4edcdad52fd6f 100644
--- a/src/database/sending.rs
+++ b/src/database/sending.rs
@@ -1,5 +1,5 @@
 use std::{
-    collections::{BTreeMap, HashMap},
+    collections::{BTreeMap, HashMap, HashSet},
     convert::{TryFrom, TryInto},
     fmt::Debug,
     sync::Arc,
@@ -20,14 +20,17 @@
         appservice,
         federation::{
             self,
-            transactions::edu::{Edu, ReceiptContent, ReceiptData, ReceiptMap},
+            transactions::edu::{
+                DeviceListUpdateContent, Edu, ReceiptContent, ReceiptData, ReceiptMap,
+            },
         },
         OutgoingRequest,
     },
+    device_id,
     events::{push_rules, AnySyncEphemeralRoomEvent, EventType},
     push,
     receipt::ReceiptType,
-    MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId,
+    uint, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId,
 };
 use tokio::{
     select,
@@ -317,8 +320,19 @@ pub fn select_edus(db: &Database, server: &ServerName) -> Result<(Vec<Vec<u8>>,
             })?;
         let mut events = Vec::new();
         let mut max_edu_count = since;
+        let mut device_list_changes = HashSet::new();
+
         'outer: for room_id in db.rooms.server_rooms(server) {
             let room_id = room_id?;
+            // Look for device list updates in this room
+            device_list_changes.extend(
+                db.users
+                    .keys_changed(&room_id.to_string(), since, None)
+                    .filter_map(|r| r.ok())
+                    .filter(|user_id| user_id.server_name() == db.globals.server_name()),
+            );
+
+            // Look for read receipts in this room
             for r in db.rooms.edus.readreceipts_since(&room_id, since) {
                 let (user_id, count, read_receipt) = r?;
 
@@ -378,6 +392,22 @@ pub fn select_edus(db: &Database, server: &ServerName) -> Result<(Vec<Vec<u8>>,
             }
         }
 
+        for user_id in device_list_changes {
+            // Empty prev id forces synapse to resync: https://github.com/matrix-org/synapse/blob/98aec1cc9da2bd6b8e34ffb282c85abf9b8b42ca/synapse/handlers/device.py#L767
+            // Because synapse resyncs, we can just insert dummy data
+            let edu = Edu::DeviceListUpdate(DeviceListUpdateContent {
+                user_id,
+                device_id: device_id!("dummy"),
+                device_display_name: "Dummy".to_owned(),
+                stream_id: uint!(1),
+                prev_id: Vec::new(),
+                deleted: None,
+                keys: None,
+            });
+
+            events.push(serde_json::to_vec(&edu).expect("json can be serialized"));
+        }
+
         Ok((events, max_edu_count))
     }
 
diff --git a/src/database/users.rs b/src/database/users.rs
index f501ec30327adfe468ec53ba9f5948916d9cedb7..88d66bea60813fd45f20ca2f3dd39c931709c16e 100644
--- a/src/database/users.rs
+++ b/src/database/users.rs
@@ -673,7 +673,7 @@ pub fn keys_changed<'a>(
     }
 
     #[tracing::instrument(skip(self, user_id, rooms, globals))]
-    fn mark_device_key_update(
+    pub fn mark_device_key_update(
         &self,
         user_id: &UserId,
         rooms: &super::rooms::Rooms,
diff --git a/src/server_server.rs b/src/server_server.rs
index 6b6ed2812e0118bc7e63cec00587f3fcbbeed1c5..e8ea48621856eb83f20537f230d111f661b630e6 100644
--- a/src/server_server.rs
+++ b/src/server_server.rs
@@ -1,6 +1,6 @@
 use crate::{
     client_server::{self, claim_keys_helper, get_keys_helper},
-    database::DatabaseGuard,
+    database::{rooms::CompressedStateEvent, DatabaseGuard},
     utils, ConduitResult, Database, Error, PduEvent, Result, Ruma,
 };
 use get_profile_information::v1::ProfileField;
@@ -27,7 +27,7 @@
             },
             query::{get_profile_information, get_room_information},
             transactions::{
-                edu::{DirectDeviceContent, Edu},
+                edu::{DeviceListUpdateContent, DirectDeviceContent, Edu},
                 send_transaction_message,
             },
         },
@@ -51,7 +51,7 @@
     ServerSigningKeyId, UserId,
 };
 use std::{
-    collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
+    collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet},
     convert::{TryFrom, TryInto},
     fmt::Debug,
     future::Future,
@@ -747,8 +747,9 @@ pub async fn send_transaction_message_route(
                         .typing_remove(&typing.user_id, &typing.room_id, &db.globals)?;
                 }
             }
-            Edu::DeviceListUpdate(_) => {
-                // TODO: Instead of worrying about stream ids we can just fetch all devices again
+            Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => {
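+                // Record the device list change; clients re-fetch the keys, so we don't track stream ids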
+                db.users
+                    .mark_device_key_update(&user_id, &db.rooms, &db.globals)?;
             }
             Edu::DirectToDevice(DirectDeviceContent {
                 sender,
@@ -1079,7 +1080,7 @@ fn handle_outlier_pdu<'a>(
         // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events
         // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events"
         // EDIT: Step 5 is not applied anymore because it failed too often
-        debug!("Fetching auth events for {}", incoming_pdu.event_id);
+        warn!("Fetching auth events for {}", incoming_pdu.event_id);
         fetch_and_handle_outliers(
             db,
             origin,
@@ -1114,10 +1115,10 @@ fn handle_outlier_pdu<'a>(
                     .clone()
                     .expect("all auth events have state keys"),
             )) {
-                Entry::Vacant(v) => {
+                hash_map::Entry::Vacant(v) => {
                     v.insert(auth_event.clone());
                 }
-                Entry::Occupied(_) => {
+                hash_map::Entry::Occupied(_) => {
                     return Err(
                         "Auth event's type and state_key combination exists multiple times."
                             .to_owned(),
@@ -1153,8 +1154,8 @@ fn handle_outlier_pdu<'a>(
             &room_version,
             &incoming_pdu,
             previous_create.clone(),
-            &auth_events,
             None, // TODO: third party invite
+            |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone),
         )
         .map_err(|_e| "Auth check failed".to_string())?
         {
@@ -1205,38 +1206,21 @@ async fn upgrade_outlier_to_timeline_pdu(
         let state =
             prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash));
 
-        if let Some(Ok(state)) = state {
+        if let Some(Ok(mut state)) = state {
             warn!("Using cached state");
-            let mut state = fetch_and_handle_outliers(
-                db,
-                origin,
-                &state.into_iter().collect::<Vec<_>>(),
-                &create_event,
-                &room_id,
-                pub_key_map,
-            )
-            .await
-            .into_iter()
-            .map(|(pdu, _)| {
-                (
-                    (
-                        pdu.kind.clone(),
-                        pdu.state_key
-                            .clone()
-                            .expect("events from state_full_ids are state events"),
-                    ),
-                    pdu,
-                )
-            })
-            .collect::<HashMap<_, _>>();
-
             let prev_pdu =
                 db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| {
                     "Could not find prev event, but we know the state.".to_owned()
                 })?;
 
             if let Some(state_key) = &prev_pdu.state_key {
-                state.insert((prev_pdu.kind.clone(), state_key.clone()), prev_pdu);
+                let shortstatekey = db
+                    .rooms
+                    .get_or_create_shortstatekey(&prev_pdu.kind, state_key, &db.globals)
+                    .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
+
+                state.insert(shortstatekey, prev_event.clone());
+                // Now it's the state after the pdu
             }
 
             state_at_incoming_event = Some(state);
@@ -1261,7 +1245,7 @@ async fn upgrade_outlier_to_timeline_pdu(
             .await
         {
             Ok(res) => {
-                debug!("Fetching state events at event.");
+                warn!("Fetching state events at event.");
                 let state_vec = fetch_and_handle_outliers(
                     &db,
                     origin,
@@ -1272,18 +1256,23 @@ async fn upgrade_outlier_to_timeline_pdu(
                 )
                 .await;
 
-                let mut state = HashMap::new();
+                let mut state = BTreeMap::new();
                 for (pdu, _) in state_vec {
-                    match state.entry((
-                        pdu.kind.clone(),
-                        pdu.state_key
-                            .clone()
-                            .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?,
-                    )) {
-                        Entry::Vacant(v) => {
-                            v.insert(pdu);
+                    let state_key = pdu
+                        .state_key
+                        .clone()
+                        .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?;
+
+                    let shortstatekey = db
+                        .rooms
+                        .get_or_create_shortstatekey(&pdu.kind, &state_key, &db.globals)
+                        .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
+
+                    match state.entry(shortstatekey) {
+                        btree_map::Entry::Vacant(v) => {
+                            v.insert(pdu.event_id.clone());
                         }
-                        Entry::Occupied(_) => return Err(
+                        btree_map::Entry::Occupied(_) => return Err(
                             "State event's type and state_key combination exists multiple times."
                                 .to_owned(),
                         ),
@@ -1291,28 +1280,20 @@ async fn upgrade_outlier_to_timeline_pdu(
                 }
 
                 // The original create event must still be in the state
-                if state
-                    .get(&(EventType::RoomCreate, "".to_owned()))
-                    .map(|a| a.as_ref())
-                    != Some(&create_event)
-                {
+                let create_shortstatekey = db
+                    .rooms
+                    .get_shortstatekey(&EventType::RoomCreate, "")
+                    .map_err(|_| "Failed to talk to db.")?
+                    .expect("Room exists");
+
+                if state.get(&create_shortstatekey) != Some(&create_event.event_id) {
                     return Err("Incoming event refers to wrong create event.".to_owned());
                 }
 
-                debug!("Fetching auth chain events at event.");
-                fetch_and_handle_outliers(
-                    &db,
-                    origin,
-                    &res.auth_chain_ids,
-                    &create_event,
-                    &room_id,
-                    pub_key_map,
-                )
-                .await;
-
                 state_at_incoming_event = Some(state);
             }
-            Err(_) => {
+            Err(e) => {
+                warn!("Fetching state for event failed: {}", e);
                 return Err("Fetching state for event failed".into());
             }
         };
@@ -1350,8 +1331,15 @@ async fn upgrade_outlier_to_timeline_pdu(
         &room_version,
         &incoming_pdu,
         previous_create.clone(),
-        &state_at_incoming_event,
         None, // TODO: third party invite
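+        // Resolve the (event type, state key) to a shortstatekey and look the event up in state_at_incoming_event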
+        |k, s| {
+            db.rooms
+                .get_shortstatekey(&k, &s)
+                .ok()
+                .flatten()
+                .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey))
+                .and_then(|event_id| db.rooms.get_pdu(&event_id).ok().flatten())
+        },
     )
     .map_err(|_e| "Auth check failed.".to_owned())?
     {
@@ -1388,28 +1376,28 @@ async fn upgrade_outlier_to_timeline_pdu(
     // Only keep those extremities that were not referenced yet
     extremities.retain(|id| !matches!(db.rooms.is_event_referenced(&room_id, id), Ok(true)));
 
-    let current_statehash = db
+    let current_sstatehash = db
         .rooms
         .current_shortstatehash(&room_id)
         .map_err(|_| "Failed to load current state hash.".to_owned())?
         .expect("every room has state");
 
-    let current_state = db
+    let current_state_ids = db
         .rooms
-        .state_full(current_statehash)
+        .state_full_ids(current_sstatehash)
         .map_err(|_| "Failed to load room state.")?;
 
     if incoming_pdu.state_key.is_some() {
-        let mut extremity_statehashes = Vec::new();
+        let mut extremity_sstatehashes = HashMap::new();
 
         for id in &extremities {
             match db
                 .rooms
                 .get_pdu(&id)
                 .map_err(|_| "Failed to ask db for pdu.".to_owned())?
             {
                 Some(leaf_pdu) => {
-                    extremity_statehashes.push((
+                    extremity_sstatehashes.insert(
                         db.rooms
                             .pdu_shortstatehash(&leaf_pdu.event_id)
                             .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())?
@@ -1420,8 +1408,8 @@ async fn upgrade_outlier_to_timeline_pdu(
                                 );
                                 "Found pdu with no statehash in db.".to_owned()
                             })?,
-                        Some(leaf_pdu),
-                    ));
+                        leaf_pdu,
+                    );
                 }
                 _ => {
                     error!("Missing state snapshot for {:?}", id);
@@ -1430,27 +1418,30 @@ async fn upgrade_outlier_to_timeline_pdu(
             }
         }
 
+        let mut fork_states = Vec::new();
+
         // 12. Ensure that the state is derived from the previous current state (i.e. we calculated
         //     by doing state res where one of the inputs was a previously trusted set of state,
         //     don't just trust a set of state we got from a remote).
 
         // We do this by adding the current state to the list of fork states
+        extremity_sstatehashes.remove(&current_sstatehash);
+        fork_states.push(current_state_ids);
 
-        extremity_statehashes.push((current_statehash.clone(), None));
-
-        let mut fork_states = Vec::new();
-        for (statehash, leaf_pdu) in extremity_statehashes {
+        for (sstatehash, leaf_pdu) in extremity_sstatehashes {
             let mut leaf_state = db
                 .rooms
-                .state_full(statehash)
+                .state_full_ids(sstatehash)
                 .map_err(|_| "Failed to ask db for room state.".to_owned())?;
 
-            if let Some(leaf_pdu) = leaf_pdu {
-                if let Some(state_key) = &leaf_pdu.state_key {
-                    // Now it's the state after
-                    let key = (leaf_pdu.kind.clone(), state_key.clone());
-                    leaf_state.insert(key, leaf_pdu);
-                }
+            if let Some(state_key) = &leaf_pdu.state_key {
+                let shortstatekey = db
+                    .rooms
+                    .get_or_create_shortstatekey(&leaf_pdu.kind, state_key, &db.globals)
+                    .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
+                leaf_state.insert(shortstatekey, leaf_pdu.event_id.clone());
+                // Now it's the state after the pdu
             }
 
             fork_states.push(leaf_state);
@@ -1459,10 +1450,12 @@ async fn upgrade_outlier_to_timeline_pdu(
         // We also add state after incoming event to the fork states
         let mut state_after = state_at_incoming_event.clone();
         if let Some(state_key) = &incoming_pdu.state_key {
-            state_after.insert(
-                (incoming_pdu.kind.clone(), state_key.clone()),
-                incoming_pdu.clone(),
-            );
+            let shortstatekey = db
+                .rooms
+                .get_or_create_shortstatekey(&incoming_pdu.kind, state_key, &db.globals)
+                .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
+
+            state_after.insert(shortstatekey, incoming_pdu.event_id.clone());
         }
         fork_states.push(state_after.clone());
 
@@ -1475,8 +1468,12 @@ async fn upgrade_outlier_to_timeline_pdu(
             // always included)
             fork_states[0]
                 .iter()
-                .map(|(k, pdu)| (k.clone(), pdu.event_id.clone()))
-                .collect()
+                .map(|(k, id)| {
+                    db.rooms
+                        .compress_state_event(*k, &id, &db.globals)
+                        .map_err(|_| "Failed to compress_state_event.".to_owned())
+                })
+                .collect::<StdResult<_, String>>()?
         } else {
             // We do need to force an update to this room's state
             update_state = true;
@@ -1485,10 +1482,11 @@ async fn upgrade_outlier_to_timeline_pdu(
                 .into_iter()
                 .map(|map| {
                     map.into_iter()
-                        .map(|(k, v)| (k, v.event_id.clone()))
-                        .collect::<StateMap<_>>()
+                        .map(|(k, id)| (db.rooms.get_statekey_from_short(k).map(|k| (k, id))))
+                        .collect::<Result<StateMap<_>>>()
                 })
-                .collect::<Vec<_>>();
+                .collect::<Result<Vec<_>>>()
+                .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?;
 
             let mut auth_chain_sets = Vec::new();
             for state in fork_states {
@@ -1519,6 +1517,17 @@ async fn upgrade_outlier_to_timeline_pdu(
             };
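+            // Compress the resolved state into shortstatekey + shorteventid form before storing it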
 
             state
+                .into_iter()
+                .map(|((event_type, state_key), event_id)| {
+                    let shortstatekey = db
+                        .rooms
+                        .get_or_create_shortstatekey(&event_type, &state_key, &db.globals)
+                        .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?;
+                    db.rooms
+                        .compress_state_event(shortstatekey, &event_id, &db.globals)
+                        .map_err(|_| "Failed to compress state event".to_owned())
+                })
+                .collect::<StdResult<_, String>>()?
         };
 
         // Set the new room state to the resolved state
@@ -1534,38 +1543,55 @@ async fn upgrade_outlier_to_timeline_pdu(
 
     debug!("starting soft fail auth check");
     // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it
+    let auth_events = db
+        .rooms
+        .get_auth_events(
+            &room_id,
+            &incoming_pdu.kind,
+            &incoming_pdu.sender,
+            incoming_pdu.state_key.as_deref(),
+            &incoming_pdu.content,
+        )
+        .map_err(|_| "Failed to get_auth_events.".to_owned())?;
+
     let soft_fail = !state_res::event_auth::auth_check(
         &room_version,
         &incoming_pdu,
         previous_create,
-        &current_state,
         None,
+        |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone),
     )
     .map_err(|_e| "Auth check failed.".to_owned())?;
 
-    let mut pdu_id = None;
-    if !soft_fail {
-        // Now that the event has passed all auth it is added into the timeline.
-        // We use the `state_at_event` instead of `state_after` so we accurately
-        // represent the state for this event.
-        pdu_id = Some(
-            append_incoming_pdu(
-                &db,
-                &incoming_pdu,
-                val,
-                extremities,
-                &state_at_incoming_event,
-                &state_lock,
-            )
-            .map_err(|_| "Failed to add pdu to db.".to_owned())?,
-        );
-        debug!("Appended incoming pdu.");
-    } else {
-        warn!("Event was soft failed: {:?}", incoming_pdu);
-    }
+    // Now that the event has passed all auth it is added into the timeline.
+    // We use the `state_at_event` instead of `state_after` so we accurately
+    // represent the state for this event.
+
+    let state_ids_compressed = state_at_incoming_event
+        .iter()
+        .map(|(shortstatekey, id)| {
+            db.rooms
+                .compress_state_event(*shortstatekey, &id, &db.globals)
+                .map_err(|_| "Failed to compress_state_event".to_owned())
+        })
+        .collect::<StdResult<_, String>>()?;
+
+    let pdu_id = append_incoming_pdu(
+        &db,
+        &incoming_pdu,
+        val,
+        extremities,
+        state_ids_compressed,
+        soft_fail,
+        &state_lock,
+    )
+    .map_err(|_| "Failed to add pdu to db.".to_owned())?;
+
+    debug!("Appended incoming pdu.");
 
     if soft_fail {
-        // Soft fail, we leave the event as an outlier but don't add it to the timeline
+        // Soft fail, we keep the event as an outlier but don't add it to the timeline
+        warn!("Event was soft failed: {:?}", incoming_pdu);
         return Err("Event has been soft failed".into());
     }
 
@@ -1594,15 +1620,14 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
 ) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>> {
     Box::pin(async move {
         let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) {
-            Entry::Vacant(e) => {
+            hash_map::Entry::Vacant(e) => {
                 e.insert((Instant::now(), 1));
             }
-            Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
+            hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
         };
 
         let mut pdus = vec![];
         for id in events {
-            info!("loading {}", id);
             if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&id) {
                 // Exponential backoff
                 let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries);
@@ -1627,7 +1652,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
                 }
                 Ok(None) => {
                     // c. Ask origin server over federation
-                    info!("Fetching {} over federation.", id);
+                    warn!("Fetching {} over federation.", id);
                     match db
                         .sending
                         .send_federation_request(
@@ -1638,7 +1663,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
                         .await
                     {
                         Ok(res) => {
-                            info!("Got {} over federation", id);
+                            warn!("Got {} over federation", id);
                             let (event_id, value) =
                                 match crate::pdu::gen_event_id_canonical_json(&res.pdu) {
                                     Ok(t) => t,
@@ -1727,10 +1752,10 @@ pub(crate) async fn fetch_signing_keys(
         .unwrap()
         .entry(id)
     {
-        Entry::Vacant(e) => {
+        hash_map::Entry::Vacant(e) => {
             e.insert((Instant::now(), 1));
         }
-        Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
+        hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
     };
 
     if let Some((time, tries)) = db
@@ -1847,19 +1872,34 @@ pub(crate) async fn fetch_signing_keys(
 
 /// Append the incoming event setting the state snapshot to the state from the
 /// server that sent the event.
-#[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state, _mutex_lock))]
+#[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state_ids_compressed, _mutex_lock))]
 fn append_incoming_pdu(
     db: &Database,
     pdu: &PduEvent,
     pdu_json: CanonicalJsonObject,
     new_room_leaves: HashSet<EventId>,
-    state: &StateMap<Arc<PduEvent>>,
+    state_ids_compressed: HashSet<CompressedStateEvent>,
+    soft_fail: bool,
     _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex
-) -> Result<Vec<u8>> {
+) -> Result<Option<Vec<u8>>> {
     // We append to state before appending the pdu, so we don't have a moment in time with the
     // pdu without its state. This is okay because append_pdu can't fail.
-    db.rooms
-        .set_event_state(&pdu.event_id, &pdu.room_id, state, &db.globals)?;
+    db.rooms.set_event_state(
+        &pdu.event_id,
+        &pdu.room_id,
+        state_ids_compressed,
+        &db.globals,
+    )?;
+
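+    // Soft failed events keep their state and extremity bookkeeping, but are never appended to the timeline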
+    if soft_fail {
+        db.rooms
+            .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?;
+        db.rooms.replace_pdu_leaves(
+            &pdu.room_id,
+            &new_room_leaves.into_iter().collect::<Vec<_>>(),
+        )?;
+        return Ok(None);
+    }
 
     let pdu_id = db.rooms.append_pdu(
         pdu,
@@ -1926,7 +1966,7 @@ fn append_incoming_pdu(
         }
     }
 
-    Ok(pdu_id)
+    Ok(Some(pdu_id))
 }
 
 #[tracing::instrument(skip(starting_events, db))]
@@ -2120,7 +2160,7 @@ pub fn get_room_state_route(
         .rooms
         .state_full_ids(shortstatehash)?
         .into_iter()
-        .map(|id| {
+        .map(|(_, id)| {
             PduEvent::convert_to_outgoing_federation_event(
                 db.rooms.get_pdu_json(&id).unwrap().unwrap(),
             )
@@ -2168,6 +2208,7 @@ pub fn get_room_state_ids_route(
         .rooms
         .state_full_ids(shortstatehash)?
         .into_iter()
+        .map(|(_, id)| id)
         .collect();
 
     let auth_chain_ids = get_auth_chain(vec![body.event_id.clone()], &db)?;
@@ -2314,8 +2355,8 @@ pub fn create_join_event_template_route(
         &room_version,
         &Arc::new(pdu.clone()),
         create_prev_event,
-        &auth_events,
         None, // TODO: third_party_invite
+        |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone),
     )
     .map_err(|e| {
         error!("{:?}", e);
@@ -2418,7 +2459,7 @@ async fn create_join_event(
     drop(mutex_lock);
 
     let state_ids = db.rooms.state_full_ids(shortstatehash)?;
-    let auth_chain_ids = get_auth_chain(state_ids.iter().cloned().collect(), &db)?;
+    let auth_chain_ids = get_auth_chain(state_ids.iter().map(|(_, id)| id.clone()).collect(), &db)?;
 
     for server in db
         .rooms
@@ -2438,7 +2479,7 @@ async fn create_join_event(
             .collect(),
         state: state_ids
             .iter()
-            .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten())
+            .filter_map(|(_, id)| db.rooms.get_pdu_json(&id).ok().flatten())
             .map(PduEvent::convert_to_outgoing_federation_event)
             .collect(),
     })
@@ -2455,10 +2496,7 @@ pub async fn create_join_event_v1_route(
 ) -> ConduitResult<create_join_event::v1::Response> {
     let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?;
 
-    Ok(create_join_event::v1::Response {
-        room_state: room_state,
-    }
-    .into())
+    Ok(create_join_event::v1::Response { room_state }.into())
 }
 
 #[cfg_attr(
@@ -2472,10 +2510,7 @@ pub async fn create_join_event_v2_route(
 ) -> ConduitResult<create_join_event::v2::Response> {
     let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?;
 
-    Ok(create_join_event::v2::Response {
-        room_state: room_state,
-    }
-    .into())
+    Ok(create_join_event::v2::Response { room_state }.into())
 }
 
 #[cfg_attr(