diff --git a/Cargo.lock b/Cargo.lock
index 283cdb8477e936110d9332a1990772de3e383bce..071480558bb2e6bb8f4f3576cef17101be8cbb1f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -130,7 +130,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -141,7 +141,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -389,7 +389,7 @@ dependencies = [
  "regex",
  "rustc-hash",
  "shlex",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -510,6 +510,12 @@ version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 
+[[package]]
+name = "cfg_aliases"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
+
 [[package]]
 name = "cfg_aliases"
 version = "0.2.1"
@@ -565,7 +571,7 @@ dependencies = [
  "heck 0.5.0",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -574,6 +580,15 @@ version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70"
 
+[[package]]
+name = "clipboard-win"
+version = "5.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79f4473f5144e20d9aceaf2972478f06ddf687831eafeeb434fbaf0acc4144ad"
+dependencies = [
+ "error-code",
+]
+
 [[package]]
 name = "color_quant"
 version = "1.1.0"
@@ -681,7 +696,7 @@ dependencies = [
  "itertools 0.13.0",
  "libloading",
  "log",
- "nix",
+ "nix 0.29.0",
  "parking_lot",
  "rand",
  "regex",
@@ -781,10 +796,12 @@ dependencies = [
  "reqwest",
  "ruma",
  "ruma-identifiers-validation",
+ "rustyline",
  "serde",
  "serde_json",
  "serde_yaml",
  "sha2",
+ "termimad",
  "tokio",
  "tracing",
  "url",
@@ -840,6 +857,15 @@ version = "0.2.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6051f239ecec86fde3410901ab7860d458d160371533842974fc61f96d15879b"
 
+[[package]]
+name = "coolor"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37e93977247fb916abeee1ff8c6594c9b421fd9c26c9b720a3944acb2a7de27b"
+dependencies = [
+ "crossterm",
+]
+
 [[package]]
 name = "core-foundation"
 version = "0.9.4"
@@ -874,6 +900,45 @@ dependencies = [
  "cfg-if",
 ]
 
+[[package]]
+name = "crokey"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b48209802ec5862bb034cb16719eec24d1c759e62921be7d3c899d0d85f3344b"
+dependencies = [
+ "crokey-proc_macros",
+ "crossterm",
+ "once_cell",
+ "serde",
+ "strict",
+]
+
+[[package]]
+name = "crokey-proc_macros"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "397d3c009d8df93c4b063ddaa44a81ee7098feb056f99b00896c36e2cee9a9f7"
+dependencies = [
+ "crossterm",
+ "proc-macro2",
+ "quote",
+ "strict",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "crossbeam"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8"
+dependencies = [
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-epoch",
+ "crossbeam-queue",
+ "crossbeam-utils",
+]
+
 [[package]]
 name = "crossbeam-channel"
 version = "0.5.13"
@@ -883,12 +948,65 @@ dependencies = [
  "crossbeam-utils",
 ]
 
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
+dependencies = [
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
+dependencies = [
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-queue"
+version = "0.3.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35"
+dependencies = [
+ "crossbeam-utils",
+]
+
 [[package]]
 name = "crossbeam-utils"
 version = "0.8.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80"
 
+[[package]]
+name = "crossterm"
+version = "0.27.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df"
+dependencies = [
+ "bitflags 2.5.0",
+ "crossterm_winapi",
+ "libc",
+ "mio",
+ "parking_lot",
+ "signal-hook",
+ "signal-hook-mio",
+ "winapi",
+]
+
+[[package]]
+name = "crossterm_winapi"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b"
+dependencies = [
+ "winapi",
+]
+
 [[package]]
 name = "crypto-common"
 version = "0.1.6"
@@ -924,7 +1042,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -993,7 +1111,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -1039,7 +1157,7 @@ dependencies = [
  "heck 0.4.1",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -1048,6 +1166,12 @@ version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
 
+[[package]]
+name = "error-code"
+version = "3.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0474425d51df81997e2f90a21591180b38eccf27292d755f3e30750225c175b"
+
 [[package]]
 name = "fallible-iterator"
 version = "0.3.0"
@@ -1187,7 +1311,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -1480,7 +1604,7 @@ dependencies = [
  "markup5ever",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -1766,7 +1890,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -1978,6 +2102,29 @@ dependencies = [
  "typewit",
 ]
 
+[[package]]
+name = "lazy-regex"
+version = "3.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d12be4595afdf58bd19e4a9f4e24187da2a66700786ff660a418e9059937a4c"
+dependencies = [
+ "lazy-regex-proc_macros",
+ "once_cell",
+ "regex",
+]
+
+[[package]]
+name = "lazy-regex-proc_macros"
+version = "3.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "44bcd58e6c97a7fcbaffcdc95728b393b8d98933bfadad49ed4097845b57ef0b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "regex",
+ "syn 2.0.66",
+]
+
 [[package]]
 name = "lazy_static"
 version = "1.4.0"
@@ -2151,6 +2298,15 @@ version = "0.3.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
 
+[[package]]
+name = "minimad"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a9c5d708226d186590a7b6d4a9780e2bdda5f689e0d58cd17012a298efd745d2"
+dependencies = [
+ "once_cell",
+]
+
 [[package]]
 name = "minimal-lexical"
 version = "0.2.1"
@@ -2174,6 +2330,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
 dependencies = [
  "libc",
+ "log",
  "wasi",
  "windows-sys 0.48.0",
 ]
@@ -2184,6 +2341,18 @@ version = "1.0.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086"
 
+[[package]]
+name = "nix"
+version = "0.28.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4"
+dependencies = [
+ "bitflags 2.5.0",
+ "cfg-if",
+ "cfg_aliases 0.1.1",
+ "libc",
+]
+
 [[package]]
 name = "nix"
 version = "0.29.0"
@@ -2192,7 +2361,7 @@ checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46"
 dependencies = [
  "bitflags 2.5.0",
  "cfg-if",
- "cfg_aliases",
+ "cfg_aliases 0.2.1",
  "libc",
 ]
 
@@ -2490,7 +2659,7 @@ dependencies = [
  "proc-macro2",
  "proc-macro2-diagnostics",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -2583,7 +2752,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -2677,7 +2846,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
  "version_check",
  "yansi",
 ]
@@ -2702,7 +2871,7 @@ dependencies = [
  "itertools 0.12.1",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -3053,7 +3222,7 @@ dependencies = [
  "quote",
  "ruma-identifiers-validation",
  "serde",
- "syn",
+ "syn 2.0.66",
  "toml",
 ]
 
@@ -3250,6 +3419,25 @@ version = "1.0.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6"
 
+[[package]]
+name = "rustyline"
+version = "14.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7803e8936da37efd9b6d4478277f4b2b9bb5cdb37a113e8d63222e58da647e63"
+dependencies = [
+ "bitflags 2.5.0",
+ "cfg-if",
+ "clipboard-win",
+ "libc",
+ "log",
+ "memchr",
+ "nix 0.28.0",
+ "unicode-segmentation",
+ "unicode-width",
+ "utf8parse",
+ "windows-sys 0.52.0",
+]
+
 [[package]]
 name = "ryu"
 version = "1.0.18"
@@ -3478,7 +3666,7 @@ checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -3607,6 +3795,27 @@ version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
 
+[[package]]
+name = "signal-hook"
+version = "0.3.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801"
+dependencies = [
+ "libc",
+ "signal-hook-registry",
+]
+
+[[package]]
+name = "signal-hook-mio"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "29ad2e15f37ec9a6cc544097b78a1ec90001e9f71b81338ca39f430adaca99af"
+dependencies = [
+ "libc",
+ "mio",
+ "signal-hook",
+]
+
 [[package]]
 name = "signal-hook-registry"
 version = "1.4.2"
@@ -3696,6 +3905,12 @@ version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
 
+[[package]]
+name = "strict"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f42444fea5b87a39db4218d9422087e66a85d0e7a0963a439b07bcdf91804006"
+
 [[package]]
 name = "string_cache"
 version = "0.8.7"
@@ -3737,6 +3952,17 @@ version = "2.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
 
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
 [[package]]
 name = "syn"
 version = "2.0.66"
@@ -3768,7 +3994,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -3782,6 +4008,22 @@ dependencies = [
  "utf-8",
 ]
 
+[[package]]
+name = "termimad"
+version = "0.29.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aab6c8572830b10362f27e242c7c5e749f062ec310b76a0d0b56670eca81f28e"
+dependencies = [
+ "coolor",
+ "crokey",
+ "crossbeam",
+ "lazy-regex",
+ "minimad",
+ "serde",
+ "thiserror",
+ "unicode-width",
+]
+
 [[package]]
 name = "thiserror"
 version = "1.0.61"
@@ -3799,7 +4041,7 @@ checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -3958,7 +4200,7 @@ checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -4174,7 +4416,7 @@ source = "git+https://github.com/girlbossceo/tracing?branch=tracing-subscriber/e
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -4328,6 +4570,18 @@ dependencies = [
  "tinyvec",
 ]
 
+[[package]]
+name = "unicode-segmentation"
+version = "1.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202"
+
+[[package]]
+name = "unicode-width"
+version = "0.1.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d"
+
 [[package]]
 name = "unsafe-libyaml"
 version = "0.2.11"
@@ -4392,6 +4646,12 @@ version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
 
+[[package]]
+name = "utf8parse"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
+
 [[package]]
 name = "uuid"
 version = "1.8.0"
@@ -4456,7 +4716,7 @@ dependencies = [
  "once_cell",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
  "wasm-bindgen-shared",
 ]
 
@@ -4490,7 +4750,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
  "wasm-bindgen-backend",
  "wasm-bindgen-shared",
 ]
@@ -4837,7 +5097,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
  "synstructure",
 ]
 
@@ -4858,7 +5118,7 @@ checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
@@ -4878,7 +5138,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
  "synstructure",
 ]
 
@@ -4907,7 +5167,7 @@ checksum = "97cf56601ee5052b4417d90c8755c6683473c926039908196cf35d99f893ebe7"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.66",
 ]
 
 [[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 1fd8bdbc40a7c01aee43cde21bbe3298910aa558..801aa5a5f1e6986c26c82b1cc52aeabe52a49735 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -414,6 +414,13 @@ features = [
 	"light",
 ]
 
+[workspace.dependencies.rustyline]
+version = "14.0.0"
+default-features = false
+
+[workspace.dependencies.termimad]
+version = "0.29.2"
+
 #
 # Patches
 #
diff --git a/conduwuit-example.toml b/conduwuit-example.toml
index 04bf26cdfafa8026af6a8583f7766838fd333128..97652eee20669bae104392dbf7aa25f6ca053e36 100644
--- a/conduwuit-example.toml
+++ b/conduwuit-example.toml
@@ -198,6 +198,11 @@ registration_token = "change this token for something specific to your server"
 # defaults to false
 # block_non_admin_invites = false
 
+# Allows admins to enter commands in rooms other than #admins by prefixing the command with
+# \!admin. The reply will be publicly visible to the room, originating from the sender.
+# defaults to true
+#admin_escape_commands = true
+
 # List of forbidden username patterns/strings. Values in this list are matched as *contains*.
 # This is checked upon username availability check, registration, and startup as warnings if any local users in your database
 # have a forbidden username.
diff --git a/src/admin/debug/debug_commands.rs b/src/admin/debug/debug_commands.rs
index 0316abc21421de698cbda9623e0dc364dc5f58ab..0b9ff62cf2b9831a7569f6462d8f6763c34cdf10 100644
--- a/src/admin/debug/debug_commands.rs
+++ b/src/admin/debug/debug_commands.rs
@@ -595,17 +595,7 @@ pub(crate) async fn force_set_room_state_from_server(
 		.state_compressor
 		.save_state(room_id.clone().as_ref(), new_room_state)?;
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(room_id.clone().into())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
-
+	let state_lock = services().globals.roomid_mutex_state.lock(&room_id).await;
 	services()
 		.rooms
 		.state
diff --git a/src/admin/handler.rs b/src/admin/handler.rs
index b09bdb8df293fb6f9a14c64aeb7c3526207ac16d..e283def55772ec01fbf2fb94ebf9fd73e6c03cb2 100644
--- a/src/admin/handler.rs
+++ b/src/admin/handler.rs
@@ -1,24 +1,19 @@
-use std::sync::Arc;
-
 use clap::Parser;
+use conduit::trace;
 use regex::Regex;
 use ruma::{
 	events::{
 		relation::InReplyTo,
 		room::message::{Relation::Reply, RoomMessageEventContent},
-		TimelineEventType,
 	},
-	OwnedRoomId, OwnedUserId, RoomId, ServerName, UserId,
+	ServerName,
 };
-use serde_json::value::to_raw_value;
-use tokio::sync::MutexGuard;
-use tracing::error;
 
 extern crate conduit_service as service;
 
-use conduit::{Error, Result};
-pub(crate) use service::admin::{AdminRoomEvent, Service};
-use service::{admin::HandlerResult, pdu::PduBuilder};
+use conduit::Result;
+use service::admin::HandlerResult;
+pub(crate) use service::admin::{AdminEvent, Service};
 
 use self::{fsck::FsckCommand, tester::TesterCommands};
 use crate::{
@@ -30,7 +25,7 @@
 
 #[cfg_attr(test, derive(Debug))]
 #[derive(Parser)]
-#[command(name = "@conduit:server.name:", version = env!("CARGO_PKG_VERSION"))]
+#[command(name = "admin", version = env!("CARGO_PKG_VERSION"))]
 pub(crate) enum AdminCommand {
 	#[command(subcommand)]
 	/// - Commands for managing appservices
@@ -73,87 +68,30 @@ pub(crate) enum AdminCommand {
 }
 
 #[must_use]
-pub fn handle(event: AdminRoomEvent, room: OwnedRoomId, user: OwnedUserId) -> HandlerResult {
-	Box::pin(handle_event(event, room, user))
-}
+pub fn handle(event: AdminEvent) -> HandlerResult { Box::pin(handle_event(event)) }
 
-async fn handle_event(event: AdminRoomEvent, admin_room: OwnedRoomId, server_user: OwnedUserId) -> Result<()> {
-	let (mut message_content, reply) = match event {
-		AdminRoomEvent::SendMessage(content) => (content, None),
-		AdminRoomEvent::ProcessMessage(room_message, reply_id) => {
-			// This future is ~8 KiB so it's better to start it off the stack.
-			(Box::pin(process_admin_message(room_message)).await, Some(reply_id))
-		},
-	};
+#[tracing::instrument(skip_all, name = "admin")]
+async fn handle_event(event: AdminEvent) -> Result<AdminEvent> { Ok(AdminEvent::Reply(process_event(event).await)) }
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(admin_room.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
-
-	if let Some(reply) = reply {
-		message_content.relates_to = Some(Reply {
-			in_reply_to: InReplyTo {
-				event_id: reply.into(),
-			},
-		});
-	}
-
-	let response_pdu = PduBuilder {
-		event_type: TimelineEventType::RoomMessage,
-		content: to_raw_value(&message_content).expect("event is valid, we just created it"),
-		unsigned: None,
-		state_key: None,
-		redacts: None,
+async fn process_event(event: AdminEvent) -> Option<RoomMessageEventContent> {
+	let (mut message_content, reply_id) = match event {
+		AdminEvent::Command(room_message, reply_id) => (Box::pin(process_admin_message(room_message)).await, reply_id),
+		AdminEvent::Notice(content) => (content, None),
+		AdminEvent::Reply(_) => return None,
 	};
 
-	if let Err(e) = services()
-		.rooms
-		.timeline
-		.build_and_append_pdu(response_pdu, &server_user, &admin_room, &state_lock)
-		.await
-	{
-		handle_response_error(&e, &admin_room, &server_user, &state_lock).await?;
-	}
-
-	Ok(())
-}
-
-async fn handle_response_error(
-	e: &Error, admin_room: &RoomId, server_user: &UserId, state_lock: &MutexGuard<'_, ()>,
-) -> Result<()> {
-	error!("Failed to build and append admin room response PDU: \"{e}\"");
-	let error_room_message = RoomMessageEventContent::text_plain(format!(
-		"Failed to build and append admin room PDU: \"{e}\"\n\nThe original admin command may have finished \
-		 successfully, but we could not return the output."
-	));
-
-	let response_pdu = PduBuilder {
-		event_type: TimelineEventType::RoomMessage,
-		content: to_raw_value(&error_room_message).expect("event is valid, we just created it"),
-		unsigned: None,
-		state_key: None,
-		redacts: None,
-	};
-
-	services()
-		.rooms
-		.timeline
-		.build_and_append_pdu(response_pdu, server_user, admin_room, state_lock)
-		.await?;
+	message_content.relates_to = reply_id.map(|reply_id| Reply {
+		in_reply_to: InReplyTo {
+			event_id: reply_id.into(),
+		},
+	});
 
-	Ok(())
+	Some(message_content)
 }
 
 // Parse and process a message from the admin room
-async fn process_admin_message(room_message: String) -> RoomMessageEventContent {
-	let mut lines = room_message.lines().filter(|l| !l.trim().is_empty());
+async fn process_admin_message(msg: String) -> RoomMessageEventContent {
+	let mut lines = msg.lines().filter(|l| !l.trim().is_empty());
 	let command_line = lines.next().expect("each string has at least one line");
 	let body = lines.collect::<Vec<_>>();
 
@@ -181,9 +119,19 @@ async fn process_admin_message(room_message: String) -> RoomMessageEventContent
 
 // Parse chat messages from the admin room into an AdminCommand object
 fn parse_admin_command(command_line: &str) -> Result<AdminCommand, String> {
-	// Note: argv[0] is `@conduit:servername:`, which is treated as the main command
 	let mut argv = command_line.split_whitespace().collect::<Vec<_>>();
 
+	// Remove any escapes that came with a server-side escape command
+	if !argv.is_empty() && argv[0].ends_with("admin") {
+		argv[0] = argv[0].trim_start_matches('\\');
+	}
+
+	// The first argument must be "admin"; for console convenience we prepend it here if missing
+	let server_user = services().globals.server_user.as_str();
+	if !argv.is_empty() && !argv[0].ends_with("admin") && !argv[0].starts_with(server_user) {
+		argv.insert(0, "admin");
+	}
+
 	// Replace `help command` with `command --help`
 	// Clap has a help subcommand, but it omits the long help description.
 	if argv.len() > 1 && argv[1] == "help" {
@@ -213,9 +161,11 @@ fn parse_admin_command(command_line: &str) -> Result<AdminCommand, String> {
 		argv[3] = &command_with_dashes_argv3;
 	}
 
+	trace!(?command_line, ?argv, "parse");
 	AdminCommand::try_parse_from(argv).map_err(|error| error.to_string())
 }
 
+#[tracing::instrument(skip_all, name = "command")]
 async fn process_admin_command(command: AdminCommand, body: Vec<&str>) -> Result<RoomMessageEventContent> {
 	let reply_message_content = match command {
 		AdminCommand::Appservices(command) => appservice::process(command, body).await?,
diff --git a/src/api/client/account.rs b/src/api/client/account.rs
index d7868e212e6187e41da46ada5d4b80cd9785fc26..42049b56ac66d973d61e8d81c688cde2ee5d0687 100644
--- a/src/api/client/account.rs
+++ b/src/api/client/account.rs
@@ -354,10 +354,7 @@ pub(crate) async fn register_route(
 				.room_joined_count(&admin_room)?
 				== Some(1)
 			{
-				services()
-					.admin
-					.make_user_admin(&user_id, displayname)
-					.await?;
+				service::admin::make_user_admin(&user_id, displayname).await?;
 
 				warn!("Granting {user_id} admin privileges as the first user");
 			}
diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs
index f781808c9f56b18f2d7e457e3517bccc3a9c7b89..111d03c4c75619d44cc3ab37c80f535649155c63 100644
--- a/src/api/client/membership.rs
+++ b/src/api/client/membership.rs
@@ -7,6 +7,7 @@
 };
 
 use axum_client_ip::InsecureClientIp;
+use conduit::utils::mutex_map;
 use ruma::{
 	api::{
 		client::{
@@ -32,7 +33,7 @@
 	OwnedUserId, RoomId, RoomVersionId, ServerName, UserId,
 };
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
-use tokio::sync::{MutexGuard, RwLock};
+use tokio::sync::RwLock;
 use tracing::{debug, error, info, trace, warn};
 
 use super::get_alias_helper;
@@ -373,16 +374,11 @@ pub(crate) async fn kick_user_route(body: Ruma<kick_user::v3::Request>) -> Resul
 	event.membership = MembershipState::Leave;
 	event.reason.clone_from(&body.reason);
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 
 	services()
 		.rooms
@@ -442,16 +438,11 @@ pub(crate) async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<
 			},
 		)?;
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 
 	services()
 		.rooms
@@ -496,16 +487,11 @@ pub(crate) async fn unban_user_route(body: Ruma<unban_user::v3::Request>) -> Res
 	event.reason.clone_from(&body.reason);
 	event.join_authorized_via_users_server = None;
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 
 	services()
 		.rooms
@@ -670,16 +656,7 @@ pub async fn join_room_by_id_helper(
 		});
 	}
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(room_id.to_owned())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
 
 	// Ask a remote server if we are not participating in this room
 	if !services()
@@ -695,7 +672,7 @@ pub async fn join_room_by_id_helper(
 
 async fn join_room_by_id_helper_remote(
 	sender_user: &UserId, room_id: &RoomId, reason: Option<String>, servers: &[OwnedServerName],
-	_third_party_signed: Option<&ThirdPartySigned>, state_lock: MutexGuard<'_, ()>,
+	_third_party_signed: Option<&ThirdPartySigned>, state_lock: mutex_map::Guard<()>,
 ) -> Result<join_room_by_id::v3::Response> {
 	info!("Joining {room_id} over federation.");
 
@@ -1030,7 +1007,7 @@ async fn join_room_by_id_helper_remote(
 
 async fn join_room_by_id_helper_local(
 	sender_user: &UserId, room_id: &RoomId, reason: Option<String>, servers: &[OwnedServerName],
-	_third_party_signed: Option<&ThirdPartySigned>, state_lock: MutexGuard<'_, ()>,
+	_third_party_signed: Option<&ThirdPartySigned>, state_lock: mutex_map::Guard<()>,
 ) -> Result<join_room_by_id::v3::Response> {
 	info!("We can join locally");
 
@@ -1413,17 +1390,7 @@ pub(crate) async fn invite_helper(
 
 	if !user_is_local(user_id) {
 		let (pdu, pdu_json, invite_room_state) = {
-			let mutex_state = Arc::clone(
-				services()
-					.globals
-					.roomid_mutex_state
-					.write()
-					.await
-					.entry(room_id.to_owned())
-					.or_default(),
-			);
-			let state_lock = mutex_state.lock().await;
-
+			let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
 			let content = to_raw_value(&RoomMemberEventContent {
 				avatar_url: services().users.avatar_url(user_id)?,
 				displayname: None,
@@ -1535,16 +1502,7 @@ pub(crate) async fn invite_helper(
 		));
 	}
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(room_id.to_owned())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
 
 	services()
 		.rooms
@@ -1638,16 +1596,7 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
 			true,
 		)?;
 	} else {
-		let mutex_state = Arc::clone(
-			services()
-				.globals
-				.roomid_mutex_state
-				.write()
-				.await
-				.entry(room_id.to_owned())
-				.or_default(),
-		);
-		let state_lock = mutex_state.lock().await;
+		let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
 
 		let member_event =
 			services()
diff --git a/src/api/client/message.rs b/src/api/client/message.rs
index 5bb683f22700b747d90c4db68468a898659747d4..a57defb2b374b68bd4113363c112c14eacede070 100644
--- a/src/api/client/message.rs
+++ b/src/api/client/message.rs
@@ -1,7 +1,4 @@
-use std::{
-	collections::{BTreeMap, HashSet},
-	sync::Arc,
-};
+use std::collections::{BTreeMap, HashSet};
 
 use conduit::PduCount;
 use ruma::{
@@ -32,16 +29,11 @@ pub(crate) async fn send_message_event_route(
 	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 	let sender_device = body.sender_device.as_deref();
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 
 	// Forbid m.room.encrypted if encryption is disabled
 	if MessageLikeEventType::RoomEncrypted == body.event_type && !services().globals.allow_encryption() {
diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs
index 8a53b335b0bc7236e551c9e74ae98a92fe76cc88..b71280f16e1afcd005fdabbb4affcdcee960de14 100644
--- a/src/api/client/profile.rs
+++ b/src/api/client/profile.rs
@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
 use ruma::{
 	api::{
 		client::{
@@ -355,17 +353,7 @@ pub async fn update_avatar_url(
 
 pub async fn update_all_rooms(all_joined_rooms: Vec<(PduBuilder, &OwnedRoomId)>, user_id: OwnedUserId) {
 	for (pdu_builder, room_id) in all_joined_rooms {
-		let mutex_state = Arc::clone(
-			services()
-				.globals
-				.roomid_mutex_state
-				.write()
-				.await
-				.entry(room_id.clone())
-				.or_default(),
-		);
-		let state_lock = mutex_state.lock().await;
-
+		let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
 		if let Err(e) = services()
 			.rooms
 			.timeline
diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs
index d92ef002b95ace634e8cdb0006b61c3ccb97368b..4cb24c33606c4918e2efc0a3b40cfb91b4f29454 100644
--- a/src/api/client/redact.rs
+++ b/src/api/client/redact.rs
@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
 use ruma::{
 	api::client::redact::redact_event,
 	events::{room::redaction::RoomRedactionEventContent, TimelineEventType},
@@ -17,16 +15,11 @@ pub(crate) async fn redact_event_route(body: Ruma<redact_event::v3::Request>) ->
 	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 	let body = body.body;
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 
 	let event_id = services()
 		.rooms
diff --git a/src/api/client/room.rs b/src/api/client/room.rs
index 14071d395d5dd42a531cd243fc0ef9bb8ded5912..7090fdc8933234fac534923da36f24afe7a0ebd9 100644
--- a/src/api/client/room.rs
+++ b/src/api/client/room.rs
@@ -1,4 +1,4 @@
-use std::{cmp::max, collections::BTreeMap, sync::Arc};
+use std::{cmp::max, collections::BTreeMap};
 
 use conduit::{debug_info, debug_warn};
 use ruma::{
@@ -89,18 +89,8 @@ pub(crate) async fn create_room_route(body: Ruma<create_room::v3::Request>) -> R
 		));
 	}
 
-	services().rooms.short.get_or_create_shortroomid(&room_id)?;
-
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let _short_id = services().rooms.short.get_or_create_shortroomid(&room_id)?;
+	let state_lock = services().globals.roomid_mutex_state.lock(&room_id).await;
 
 	let alias: Option<OwnedRoomAliasId> = if let Some(alias) = &body.room_alias_name {
 		Some(room_alias_check(alias, &body.appservice_info).await?)
@@ -577,21 +567,17 @@ pub(crate) async fn upgrade_room_route(body: Ruma<upgrade_room::v3::Request>) ->
 
 	// Create a replacement room
 	let replacement_room = RoomId::new(services().globals.server_name());
-	services()
+
+	let _short_id = services()
 		.rooms
 		.short
 		.get_or_create_shortroomid(&replacement_room)?;
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 
 	// Send a m.room.tombstone event to the old room to indicate that it is not
 	// intended to be used any further Fail if the sender does not have the required
@@ -619,16 +605,11 @@ pub(crate) async fn upgrade_room_route(body: Ruma<upgrade_room::v3::Request>) ->
 
 	// Change lock to replacement room
 	drop(state_lock);
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(replacement_room.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&replacement_room)
+		.await;
 
 	// Get the old room creation event
 	let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
diff --git a/src/api/client/state.rs b/src/api/client/state.rs
index 32e0fb68ff27fa6fa4732c1a1dc8e53074743f81..17ae7be4a06504184d7a107d442072a1f522a742 100644
--- a/src/api/client/state.rs
+++ b/src/api/client/state.rs
@@ -172,18 +172,7 @@ async fn send_state_event_for_key_helper(
 	sender: &UserId, room_id: &RoomId, event_type: &StateEventType, json: &Raw<AnyStateEventContent>, state_key: String,
 ) -> Result<Arc<EventId>> {
 	allowed_to_send_state_event(room_id, event_type, json).await?;
-
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(room_id.to_owned())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
-
+	let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
 	let event_id = services()
 		.rooms
 		.timeline
diff --git a/src/api/client/sync.rs b/src/api/client/sync.rs
index 5d881b4539379a34cfdb90440ab8704150c4bfc2..84cf11b6aaf0fe777b29337a3225201630c67b7e 100644
--- a/src/api/client/sync.rs
+++ b/src/api/client/sync.rs
@@ -1,7 +1,6 @@
 use std::{
 	cmp::Ordering,
 	collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
-	sync::Arc,
 	time::Duration,
 };
 
@@ -197,20 +196,9 @@ pub(crate) async fn sync_events_route(
 	for result in all_invited_rooms {
 		let (room_id, invite_state_events) = result?;
 
-		{
-			// Get and drop the lock to wait for remaining operations to finish
-			let mutex_insert = Arc::clone(
-				services()
-					.globals
-					.roomid_mutex_insert
-					.write()
-					.await
-					.entry(room_id.clone())
-					.or_default(),
-			);
-			let insert_lock = mutex_insert.lock().await;
-			drop(insert_lock);
-		};
+		// Get and drop the lock to wait for remaining operations to finish
+		let insert_lock = services().globals.roomid_mutex_insert.lock(&room_id).await;
+		drop(insert_lock);
 
 		let invite_count = services()
 			.rooms
@@ -332,20 +320,9 @@ async fn handle_left_room(
 	since: u64, room_id: &RoomId, sender_user: &UserId, left_rooms: &mut BTreeMap<ruma::OwnedRoomId, LeftRoom>,
 	next_batch_string: &str, full_state: bool, lazy_load_enabled: bool,
 ) -> Result<()> {
-	{
-		// Get and drop the lock to wait for remaining operations to finish
-		let mutex_insert = Arc::clone(
-			services()
-				.globals
-				.roomid_mutex_insert
-				.write()
-				.await
-				.entry(room_id.to_owned())
-				.or_default(),
-		);
-		let insert_lock = mutex_insert.lock().await;
-		drop(insert_lock);
-	};
+	// Get and drop the lock to wait for remaining operations to finish
+	let insert_lock = services().globals.roomid_mutex_insert.lock(room_id).await;
+	drop(insert_lock);
 
 	let left_count = services()
 		.rooms
@@ -544,21 +521,10 @@ async fn load_joined_room(
 	next_batch: u64, next_batchcount: PduCount, lazy_load_enabled: bool, lazy_load_send_redundant: bool,
 	full_state: bool, device_list_updates: &mut HashSet<OwnedUserId>, left_encrypted_users: &mut HashSet<OwnedUserId>,
 ) -> Result<JoinedRoom> {
-	{
-		// Get and drop the lock to wait for remaining operations to finish
-		// This will make sure the we have all events until next_batch
-		let mutex_insert = Arc::clone(
-			services()
-				.globals
-				.roomid_mutex_insert
-				.write()
-				.await
-				.entry(room_id.to_owned())
-				.or_default(),
-		);
-		let insert_lock = mutex_insert.lock().await;
-		drop(insert_lock);
-	};
+	// Get and drop the lock to wait for remaining operations to finish
+	// This will make sure that we have all events until next_batch
+	let insert_lock = services().globals.roomid_mutex_insert.lock(room_id).await;
+	drop(insert_lock);
 
 	let (timeline_pdus, limited) = load_timeline(sender_user, room_id, sincecount, 10)?;
 
diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs
index 0a6b1992946ff830350af7ebe6b6811a777879c5..d3d934f700a31c50524e07f206a09e7a72c847bf 100644
--- a/src/api/server/make_join.rs
+++ b/src/api/server/make_join.rs
@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
 use ruma::{
 	api::{client::error::ErrorKind, federation::membership::prepare_join_event},
 	events::{
@@ -74,17 +72,11 @@ pub(crate) async fn create_join_event_template_route(
 		}
 	}
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
-
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 	let join_rules_event =
 		services()
 			.rooms
diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs
index 72c931c5be2833ed73e5f105b022294901190607..62c0971712ba1ceb075304b9eeff4f5aa1d3e29c 100644
--- a/src/api/server/make_leave.rs
+++ b/src/api/server/make_leave.rs
@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
 use ruma::{
 	api::{client::error::ErrorKind, federation::membership::prepare_leave_event},
 	events::{
@@ -37,18 +35,11 @@ pub(crate) async fn create_leave_event_template_route(
 		.acl_check(origin, &body.room_id)?;
 
 	let room_version_id = services().rooms.state.get_room_version(&body.room_id)?;
-
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
-
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 	let content = to_raw_value(&RoomMemberEventContent {
 		avatar_url: None,
 		blurhash: None,
diff --git a/src/api/server/send.rs b/src/api/server/send.rs
index e2413a4abb6f949c5a8989a5b68c2a09a05adc10..6faa7c25e3db1eacad69d9cea3a59b89f7cd5a7b 100644
--- a/src/api/server/send.rs
+++ b/src/api/server/send.rs
@@ -1,4 +1,4 @@
-use std::{collections::BTreeMap, sync::Arc, time::Instant};
+use std::{collections::BTreeMap, time::Instant};
 
 use axum_client_ip::InsecureClientIp;
 use conduit::debug_warn;
@@ -107,16 +107,11 @@ pub(crate) async fn send_transaction_message_route(
 	let mut resolved_map = BTreeMap::new();
 	for (event_id, value, room_id) in parsed_pdus {
 		let pdu_start_time = Instant::now();
-		let mutex = Arc::clone(
-			services()
-				.globals
-				.roomid_mutex_federation
-				.write()
-				.await
-				.entry(room_id.clone())
-				.or_default(),
-		);
-		let mutex_lock = mutex.lock().await;
+		let mutex_lock = services()
+			.globals
+			.roomid_mutex_federation
+			.lock(&room_id)
+			.await;
 		resolved_map.insert(
 			event_id.clone(),
 			services()
diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs
index 18d075b4ca5b022d1916ce20061b2e517c38c07b..388280e9764130e342a56315072616d743a6f8ef 100644
--- a/src/api/server/send_join.rs
+++ b/src/api/server/send_join.rs
@@ -1,6 +1,6 @@
 #![allow(deprecated)]
 
-use std::{collections::BTreeMap, sync::Arc};
+use std::collections::BTreeMap;
 
 use ruma::{
 	api::{client::error::ErrorKind, federation::membership::create_join_event},
@@ -148,16 +148,11 @@ async fn create_join_event(
 		.fetch_required_signing_keys([&value], &pub_key_map)
 		.await?;
 
-	let mutex = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_federation
-			.write()
-			.await
-			.entry(room_id.to_owned())
-			.or_default(),
-	);
-	let mutex_lock = mutex.lock().await;
+	let mutex_lock = services()
+		.globals
+		.roomid_mutex_federation
+		.lock(room_id)
+		.await;
 	let pdu_id: Vec<u8> = services()
 		.rooms
 		.event_handler
diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs
index ec84358881d0e0d582231afaebc9f6d4645bee84..2680378f96761e40e20eb58320835cc7f8dde835 100644
--- a/src/api/server/send_leave.rs
+++ b/src/api/server/send_leave.rs
@@ -1,6 +1,6 @@
 #![allow(deprecated)]
 
-use std::{collections::BTreeMap, sync::Arc};
+use std::collections::BTreeMap;
 
 use ruma::{
 	api::{client::error::ErrorKind, federation::membership::create_leave_event},
@@ -154,16 +154,11 @@ async fn create_leave_event(origin: &ServerName, room_id: &RoomId, pdu: &RawJson
 		.fetch_required_signing_keys([&value], &pub_key_map)
 		.await?;
 
-	let mutex = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_federation
-			.write()
-			.await
-			.entry(room_id.to_owned())
-			.or_default(),
-	);
-	let mutex_lock = mutex.lock().await;
+	let mutex_lock = services()
+		.globals
+		.roomid_mutex_federation
+		.lock(room_id)
+		.await;
 	let pdu_id: Vec<u8> = services()
 		.rooms
 		.event_handler
diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index dadb000b8dc0969d25908a8e24d304b7d0c843b8..58696f88b3fe899c140489fb1fd7c4eb8e2b623f 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -338,6 +338,8 @@ pub struct Config {
 
 	#[serde(default)]
 	pub block_non_admin_invites: bool,
+	#[serde(default = "true_fn")]
+	pub admin_escape_commands: bool,
 
 	#[serde(default)]
 	pub sentry: bool,
@@ -610,6 +612,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 				"Block non-admin room invites (local and remote, admins can still send and receive invites)",
 				&self.block_non_admin_invites.to_string(),
 			),
+			("Enable admin escape commands", &self.admin_escape_commands.to_string()),
 			("Allow outgoing federated typing", &self.allow_outgoing_typing.to_string()),
 			("Allow incoming federated typing", &self.allow_incoming_typing.to_string()),
 			(
diff --git a/src/core/error.rs b/src/core/error.rs
index 3aead9d50de4fa3bfe8dcb8ad8492e567afc4c8f..5847fb38ab8e1e9112ab06ed32807627bfa73d6a 100644
--- a/src/core/error.rs
+++ b/src/core/error.rs
@@ -87,6 +87,8 @@ pub enum Error {
 	#[error("{0}")]
 	Fmt(#[from] fmt::Error),
 	#[error("{0}")]
+	Mxid(#[from] ruma::IdParseError),
+	#[error("{0}")]
 	Err(String),
 }
 
diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs
index 556bf2f811dd8ef2451e1734732d4f11761d3c86..97da6b40c20fb34b4f2b2bc5ae30ceab70103009 100644
--- a/src/core/log/mod.rs
+++ b/src/core/log/mod.rs
@@ -3,12 +3,15 @@
 pub mod fmt;
 mod reload;
 mod server;
+mod suppress;
 
 pub use capture::Capture;
 pub use reload::{LogLevelReloadHandles, ReloadHandle};
 pub use server::Server;
+pub use suppress::Suppress;
 pub use tracing::Level;
 pub use tracing_core::{Event, Metadata};
+pub use tracing_subscriber::EnvFilter;
 
 // Wraps for logging macros. Use these macros rather than extern tracing:: or
 // log:: crates in project code. ::log and ::tracing can still be used if
diff --git a/src/core/log/suppress.rs b/src/core/log/suppress.rs
new file mode 100644
index 0000000000000000000000000000000000000000..6e883086dd2638a440627e8521f7396d93cf2da5
--- /dev/null
+++ b/src/core/log/suppress.rs
@@ -0,0 +1,38 @@
+use std::sync::Arc;
+
+use super::EnvFilter;
+use crate::Server;
+
+pub struct Suppress {
+	server: Arc<Server>,
+	restore: EnvFilter,
+}
+
+impl Suppress {
+	pub fn new(server: &Arc<Server>) -> Self {
+		let config = &server.config.log;
+		Self::from_filters(server, EnvFilter::try_new(config).unwrap_or_default(), &EnvFilter::default())
+	}
+
+	fn from_filters(server: &Arc<Server>, restore: EnvFilter, suppress: &EnvFilter) -> Self {
+		server
+			.log
+			.reload
+			.reload(suppress)
+			.expect("log filter reloaded");
+		Self {
+			server: server.clone(),
+			restore,
+		}
+	}
+}
+
+impl Drop for Suppress {
+	fn drop(&mut self) {
+		self.server
+			.log
+			.reload
+			.reload(&self.restore)
+			.expect("log filter reloaded");
+	}
+}
diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs
index 9ffbbbd03dce3a767f3748993e6413e1d86e5062..27d47fa54ae8b5c645df036af5509bf7a30f8f6b 100644
--- a/src/core/utils/mod.rs
+++ b/src/core/utils/mod.rs
@@ -4,6 +4,7 @@
 pub mod hash;
 pub mod html;
 pub mod json;
+pub mod mutex_map;
 pub mod sys;
 
 use std::{
@@ -14,6 +15,7 @@
 pub use debug::slice_truncated as debug_slice_truncated;
 pub use html::Escape as HtmlEscape;
 pub use json::{deserialize_from_str, to_canonical_object};
+pub use mutex_map::MutexMap;
 use rand::prelude::*;
 use ring::digest;
 use ruma::OwnedUserId;
diff --git a/src/core/utils/mutex_map.rs b/src/core/utils/mutex_map.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f102487ccc2e51a8dc557f3495955ebd520d5d1e
--- /dev/null
+++ b/src/core/utils/mutex_map.rs
@@ -0,0 +1,56 @@
+use std::{hash::Hash, sync::Arc};
+
+type Value<Val> = tokio::sync::Mutex<Val>;
+type ArcMutex<Val> = Arc<Value<Val>>;
+type HashMap<Key, Val> = std::collections::HashMap<Key, ArcMutex<Val>>;
+type MapMutex<Key, Val> = std::sync::Mutex<HashMap<Key, Val>>;
+type Map<Key, Val> = MapMutex<Key, Val>;
+
+/// Map of Mutexes
+pub struct MutexMap<Key, Val> {
+	map: Map<Key, Val>,
+}
+
+pub struct Guard<Val> {
+	_guard: tokio::sync::OwnedMutexGuard<Val>,
+}
+
+impl<Key, Val> MutexMap<Key, Val>
+where
+	Key: Send + Hash + Eq + Clone,
+	Val: Send + Default,
+{
+	#[must_use]
+	pub fn new() -> Self {
+		Self {
+			map: Map::<Key, Val>::new(HashMap::<Key, Val>::new()),
+		}
+	}
+
+	pub async fn lock<K>(&self, k: &K) -> Guard<Val>
+	where
+		K: ?Sized + Send + Sync,
+		Key: for<'a> From<&'a K>,
+	{
+		let val = self
+			.map
+			.lock()
+			.expect("map mutex locked")
+			.entry(k.into())
+			.or_default()
+			.clone();
+
+		let guard = val.lock_owned().await;
+		Guard::<Val> {
+			_guard: guard,
+		}
+	}
+}
+
+impl<Key, Val> Default for MutexMap<Key, Val>
+where
+	Key: Send + Hash + Eq + Clone,
+	Val: Send + Default,
+{
+	fn default() -> Self { Self::new() }
+}
diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml
index ed5a12a6a87b6122922a9da4413f7258bc7840a2..bc3e3951ce20815f18da413ff0472af055b1a148 100644
--- a/src/main/Cargo.toml
+++ b/src/main/Cargo.toml
@@ -58,6 +58,9 @@ brotli_compression = [
 	"conduit-router/brotli_compression",
 	"conduit-service/brotli_compression",
 ]
+console = [
+	"conduit-service/console",
+]
 dev_release_log_level = [
 	"conduit-admin/dev_release_log_level",
 	"conduit-api/dev_release_log_level",
diff --git a/src/main/main.rs b/src/main/main.rs
index 7933538d873c295e9ccd66828ceae5f60c0e169a..6e1bfe38f36dcfbadeb8fc8907b80117e6893b20 100644
--- a/src/main/main.rs
+++ b/src/main/main.rs
@@ -4,7 +4,11 @@
 
 extern crate conduit_core as conduit;
 
-use std::{cmp, sync::Arc, time::Duration};
+use std::{
+	cmp,
+	sync::{atomic::Ordering, Arc},
+	time::Duration,
+};
 
 use conduit::{debug_error, debug_info, error, trace, utils::available_parallelism, warn, Error, Result};
 use server::Server;
@@ -100,8 +104,13 @@ async fn async_main(server: &Arc<Server>) -> Result<(), Error> {
 #[tracing::instrument(skip_all)]
 async fn signal(server: Arc<Server>) {
 	use signal::unix;
-	let mut quit = unix::signal(unix::SignalKind::quit()).expect("SIGQUIT handler");
-	let mut term = unix::signal(unix::SignalKind::terminate()).expect("SIGTERM handler");
+	use unix::SignalKind;
+
+	const CONSOLE: bool = cfg!(feature = "console");
+	const RELOADING: bool = cfg!(all(unix, debug_assertions, not(feature = "console")));
+
+	let mut quit = unix::signal(SignalKind::quit()).expect("SIGQUIT handler");
+	let mut term = unix::signal(SignalKind::terminate()).expect("SIGTERM handler");
 	loop {
 		trace!("Installed signal handlers");
 		let sig: &'static str;
@@ -111,6 +120,16 @@ async fn signal(server: Arc<Server>) {
 			_ = term.recv() => { sig = "SIGTERM"; },
 		}
 
+		// Indicate the SIGINT is requesting a hot-reload.
+		if RELOADING && sig == "SIGINT" {
+			server.server.reloading.store(true, Ordering::Release);
+		}
+
+		// Indicate the signal is requesting a shutdown
+		if matches!(sig, "SIGQUIT" | "SIGTERM") || (!CONSOLE && sig == "SIGINT") {
+			server.server.stopping.store(true, Ordering::Release);
+		}
+
 		warn!("Received {sig}");
 		if let Err(e) = server.server.signal.send(sig) {
 			debug_error!("signal channel: {e}");
diff --git a/src/router/run.rs b/src/router/run.rs
index 80a7d1eed2403cc3a0a106559ab49b438a129e22..e9876ef3663ba5f00d39248aedad8a774f037a8f 100644
--- a/src/router/run.rs
+++ b/src/router/run.rs
@@ -83,20 +83,23 @@ pub(crate) async fn stop(_server: Arc<Server>) -> Result<(), Error> {
 
 #[tracing::instrument(skip_all)]
 async fn signal(server: Arc<Server>, tx: Sender<()>, handle: axum_server::Handle) {
-	let sig: &'static str = server
-		.signal
-		.subscribe()
-		.recv()
-		.await
-		.expect("channel error");
-
-	debug!("Received signal {}", sig);
-	if sig == "SIGINT" {
-		let reload = cfg!(unix) && cfg!(debug_assertions);
-		server.reloading.store(reload, Ordering::Release);
+	loop {
+		let sig: &'static str = server
+			.signal
+			.subscribe()
+			.recv()
+			.await
+			.expect("channel error");
+
+		if !server.running() {
+			handle_shutdown(&server, &tx, &handle, sig).await;
+			break;
+		}
 	}
+}
 
-	server.stopping.store(true, Ordering::Release);
+async fn handle_shutdown(server: &Arc<Server>, tx: &Sender<()>, handle: &axum_server::Handle, sig: &str) {
+	debug!("Received signal {}", sig);
 	if let Err(e) = tx.send(()) {
 		error!("failed sending shutdown transaction to channel: {e}");
 	}
diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml
index 63e3e95f0dcecabd4e1377f28e13d7d0c3f1fec3..8a254ef8d7409ec0565b1845b12867969e41e3ff 100644
--- a/src/service/Cargo.toml
+++ b/src/service/Cargo.toml
@@ -17,20 +17,24 @@ crate-type = [
 ]
 
 [features]
-element_hacks = []
+brotli_compression = [
+	"reqwest/brotli",
+]
+console = [
+	"dep:rustyline",
+	"dep:termimad",
+]
 dev_release_log_level = []
+element_hacks = []
+gzip_compression = [
+	"reqwest/gzip",
+]
 release_max_log_level = [
 	"tracing/max_level_trace",
 	"tracing/release_max_level_info",
 	"log/max_level_trace",
 	"log/release_max_level_info",
 ]
-gzip_compression = [
-	"reqwest/gzip",
-]
-brotli_compression = [
-	"reqwest/brotli",
-]
 sha256_media = [
 	"dep:sha2",
 ]
@@ -57,11 +61,15 @@ regex.workspace = true
 reqwest.workspace = true
 ruma-identifiers-validation.workspace = true
 ruma.workspace = true
+rustyline.workspace = true
+rustyline.optional = true
 serde_json.workspace = true
 serde.workspace = true
 serde_yaml.workspace = true
 sha2.optional = true
 sha2.workspace = true
+termimad.workspace = true
+termimad.optional = true
 tokio.workspace = true
 tracing.workspace = true
 url.workspace = true
diff --git a/src/service/admin.rs b/src/service/admin.rs
deleted file mode 100644
index 6c2eb8df5b253fe0512c1c8517d567e8be4cf83b..0000000000000000000000000000000000000000
--- a/src/service/admin.rs
+++ /dev/null
@@ -1,540 +0,0 @@
-use std::{collections::BTreeMap, future::Future, pin::Pin, sync::Arc};
-
-use conduit::{Error, Result};
-use ruma::{
-	api::client::error::ErrorKind,
-	events::{
-		room::{
-			canonical_alias::RoomCanonicalAliasEventContent,
-			create::RoomCreateEventContent,
-			guest_access::{GuestAccess, RoomGuestAccessEventContent},
-			history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
-			join_rules::{JoinRule, RoomJoinRulesEventContent},
-			member::{MembershipState, RoomMemberEventContent},
-			message::RoomMessageEventContent,
-			name::RoomNameEventContent,
-			power_levels::RoomPowerLevelsEventContent,
-			preview_url::RoomPreviewUrlsEventContent,
-			topic::RoomTopicEventContent,
-		},
-		TimelineEventType,
-	},
-	EventId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, UserId,
-};
-use serde_json::value::to_raw_value;
-use tokio::{sync::Mutex, task::JoinHandle};
-use tracing::{error, warn};
-
-use crate::{pdu::PduBuilder, services};
-
-pub type HandlerResult = Pin<Box<dyn Future<Output = Result<(), Error>> + Send>>;
-pub type Handler = fn(AdminRoomEvent, OwnedRoomId, OwnedUserId) -> HandlerResult;
-
-pub struct Service {
-	sender: loole::Sender<AdminRoomEvent>,
-	receiver: Mutex<loole::Receiver<AdminRoomEvent>>,
-	handler_join: Mutex<Option<JoinHandle<()>>>,
-	pub handle: Mutex<Option<Handler>>,
-}
-
-#[derive(Debug)]
-pub enum AdminRoomEvent {
-	ProcessMessage(String, Arc<EventId>),
-	SendMessage(RoomMessageEventContent),
-}
-
-impl Service {
-	#[must_use]
-	pub fn build() -> Arc<Self> {
-		let (sender, receiver) = loole::unbounded();
-		Arc::new(Self {
-			sender,
-			receiver: Mutex::new(receiver),
-			handler_join: Mutex::new(None),
-			handle: Mutex::new(None),
-		})
-	}
-
-	pub async fn start_handler(self: &Arc<Self>) {
-		let self_ = Arc::clone(self);
-		let handle = services().server.runtime().spawn(async move {
-			self_
-				.handler()
-				.await
-				.expect("Failed to initialize admin room handler");
-		});
-
-		_ = self.handler_join.lock().await.insert(handle);
-	}
-
-	async fn handler(self: &Arc<Self>) -> Result<()> {
-		let receiver = self.receiver.lock().await;
-		let Ok(Some(admin_room)) = Self::get_admin_room() else {
-			return Ok(());
-		};
-
-		let server_user = &services().globals.server_user;
-
-		loop {
-			debug_assert!(!receiver.is_closed(), "channel closed");
-			tokio::select! {
-				event = receiver.recv_async() => match event {
-					Ok(event) => self.receive(event, &admin_room, server_user).await?,
-					Err(_e) => return Ok(()),
-				}
-			}
-		}
-	}
-
-	pub async fn close(&self) {
-		self.interrupt();
-		if let Some(handler_join) = self.handler_join.lock().await.take() {
-			if let Err(e) = handler_join.await {
-				error!("Failed to shutdown: {e:?}");
-			}
-		}
-	}
-
-	pub fn interrupt(&self) {
-		if !self.sender.is_closed() {
-			self.sender.close();
-		}
-	}
-
-	pub async fn send_message(&self, message_content: RoomMessageEventContent) {
-		self.send(AdminRoomEvent::SendMessage(message_content))
-			.await;
-	}
-
-	pub async fn process_message(&self, room_message: String, event_id: Arc<EventId>) {
-		self.send(AdminRoomEvent::ProcessMessage(room_message, event_id))
-			.await;
-	}
-
-	async fn receive(&self, event: AdminRoomEvent, room: &RoomId, user: &UserId) -> Result<(), Error> {
-		if let Some(handle) = self.handle.lock().await.as_ref() {
-			handle(event, room.into(), user.into()).await
-		} else {
-			Err(Error::Err("Admin module is not loaded.".into()))
-		}
-	}
-
-	async fn send(&self, message: AdminRoomEvent) {
-		debug_assert!(!self.sender.is_full(), "channel full");
-		debug_assert!(!self.sender.is_closed(), "channel closed");
-		self.sender.send(message).expect("message sent");
-	}
-
-	/// Gets the room ID of the admin room
-	///
-	/// Errors are propagated from the database, and will have None if there is
-	/// no admin room
-	pub fn get_admin_room() -> Result<Option<OwnedRoomId>> {
-		services()
-			.rooms
-			.alias
-			.resolve_local_alias(&services().globals.admin_alias)
-	}
-
-	/// Create the admin room.
-	///
-	/// Users in this room are considered admins by conduit, and the room can be
-	/// used to issue admin commands by talking to the server user inside it.
-	pub async fn create_admin_room(&self) -> Result<()> {
-		let room_id = RoomId::new(services().globals.server_name());
-
-		services().rooms.short.get_or_create_shortroomid(&room_id)?;
-
-		let mutex_state = Arc::clone(
-			services()
-				.globals
-				.roomid_mutex_state
-				.write()
-				.await
-				.entry(room_id.clone())
-				.or_default(),
-		);
-		let state_lock = mutex_state.lock().await;
-
-		// Create a user for the server
-		let server_user = &services().globals.server_user;
-
-		services().users.create(server_user, None)?;
-
-		let room_version = services().globals.default_room_version();
-		let mut content = match room_version {
-			RoomVersionId::V1
-			| RoomVersionId::V2
-			| RoomVersionId::V3
-			| RoomVersionId::V4
-			| RoomVersionId::V5
-			| RoomVersionId::V6
-			| RoomVersionId::V7
-			| RoomVersionId::V8
-			| RoomVersionId::V9
-			| RoomVersionId::V10 => RoomCreateEventContent::new_v1(server_user.clone()),
-			RoomVersionId::V11 => RoomCreateEventContent::new_v11(),
-			_ => {
-				warn!("Unexpected or unsupported room version {}", room_version);
-				return Err(Error::BadRequest(
-					ErrorKind::BadJson,
-					"Unexpected or unsupported room version found",
-				));
-			},
-		};
-
-		content.federate = true;
-		content.predecessor = None;
-		content.room_version = room_version;
-
-		// 1. The room create event
-		services()
-			.rooms
-			.timeline
-			.build_and_append_pdu(
-				PduBuilder {
-					event_type: TimelineEventType::RoomCreate,
-					content: to_raw_value(&content).expect("event is valid, we just created it"),
-					unsigned: None,
-					state_key: Some(String::new()),
-					redacts: None,
-				},
-				server_user,
-				&room_id,
-				&state_lock,
-			)
-			.await?;
-
-		// 2. Make conduit bot join
-		services()
-			.rooms
-			.timeline
-			.build_and_append_pdu(
-				PduBuilder {
-					event_type: TimelineEventType::RoomMember,
-					content: to_raw_value(&RoomMemberEventContent {
-						membership: MembershipState::Join,
-						displayname: None,
-						avatar_url: None,
-						is_direct: None,
-						third_party_invite: None,
-						blurhash: None,
-						reason: None,
-						join_authorized_via_users_server: None,
-					})
-					.expect("event is valid, we just created it"),
-					unsigned: None,
-					state_key: Some(server_user.to_string()),
-					redacts: None,
-				},
-				server_user,
-				&room_id,
-				&state_lock,
-			)
-			.await?;
-
-		// 3. Power levels
-		let mut users = BTreeMap::new();
-		users.insert(server_user.clone(), 100.into());
-
-		services()
-			.rooms
-			.timeline
-			.build_and_append_pdu(
-				PduBuilder {
-					event_type: TimelineEventType::RoomPowerLevels,
-					content: to_raw_value(&RoomPowerLevelsEventContent {
-						users,
-						..Default::default()
-					})
-					.expect("event is valid, we just created it"),
-					unsigned: None,
-					state_key: Some(String::new()),
-					redacts: None,
-				},
-				server_user,
-				&room_id,
-				&state_lock,
-			)
-			.await?;
-
-		// 4.1 Join Rules
-		services()
-			.rooms
-			.timeline
-			.build_and_append_pdu(
-				PduBuilder {
-					event_type: TimelineEventType::RoomJoinRules,
-					content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite))
-						.expect("event is valid, we just created it"),
-					unsigned: None,
-					state_key: Some(String::new()),
-					redacts: None,
-				},
-				server_user,
-				&room_id,
-				&state_lock,
-			)
-			.await?;
-
-		// 4.2 History Visibility
-		services()
-			.rooms
-			.timeline
-			.build_and_append_pdu(
-				PduBuilder {
-					event_type: TimelineEventType::RoomHistoryVisibility,
-					content: to_raw_value(&RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared))
-						.expect("event is valid, we just created it"),
-					unsigned: None,
-					state_key: Some(String::new()),
-					redacts: None,
-				},
-				server_user,
-				&room_id,
-				&state_lock,
-			)
-			.await?;
-
-		// 4.3 Guest Access
-		services()
-			.rooms
-			.timeline
-			.build_and_append_pdu(
-				PduBuilder {
-					event_type: TimelineEventType::RoomGuestAccess,
-					content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden))
-						.expect("event is valid, we just created it"),
-					unsigned: None,
-					state_key: Some(String::new()),
-					redacts: None,
-				},
-				server_user,
-				&room_id,
-				&state_lock,
-			)
-			.await?;
-
-		// 5. Events implied by name and topic
-		let room_name = format!("{} Admin Room", services().globals.server_name());
-		services()
-			.rooms
-			.timeline
-			.build_and_append_pdu(
-				PduBuilder {
-					event_type: TimelineEventType::RoomName,
-					content: to_raw_value(&RoomNameEventContent::new(room_name))
-						.expect("event is valid, we just created it"),
-					unsigned: None,
-					state_key: Some(String::new()),
-					redacts: None,
-				},
-				server_user,
-				&room_id,
-				&state_lock,
-			)
-			.await?;
-
-		services()
-			.rooms
-			.timeline
-			.build_and_append_pdu(
-				PduBuilder {
-					event_type: TimelineEventType::RoomTopic,
-					content: to_raw_value(&RoomTopicEventContent {
-						topic: format!("Manage {}", services().globals.server_name()),
-					})
-					.expect("event is valid, we just created it"),
-					unsigned: None,
-					state_key: Some(String::new()),
-					redacts: None,
-				},
-				server_user,
-				&room_id,
-				&state_lock,
-			)
-			.await?;
-
-		// 6. Room alias
-		let alias = &services().globals.admin_alias;
-
-		services()
-			.rooms
-			.timeline
-			.build_and_append_pdu(
-				PduBuilder {
-					event_type: TimelineEventType::RoomCanonicalAlias,
-					content: to_raw_value(&RoomCanonicalAliasEventContent {
-						alias: Some(alias.clone()),
-						alt_aliases: Vec::new(),
-					})
-					.expect("event is valid, we just created it"),
-					unsigned: None,
-					state_key: Some(String::new()),
-					redacts: None,
-				},
-				server_user,
-				&room_id,
-				&state_lock,
-			)
-			.await?;
-
-		services()
-			.rooms
-			.alias
-			.set_alias(alias, &room_id, server_user)?;
-
-		// 7. (ad-hoc) Disable room previews for everyone by default
-		services()
-			.rooms
-			.timeline
-			.build_and_append_pdu(
-				PduBuilder {
-					event_type: TimelineEventType::RoomPreviewUrls,
-					content: to_raw_value(&RoomPreviewUrlsEventContent {
-						disabled: true,
-					})
-					.expect("event is valid we just created it"),
-					unsigned: None,
-					state_key: Some(String::new()),
-					redacts: None,
-				},
-				server_user,
-				&room_id,
-				&state_lock,
-			)
-			.await?;
-
-		Ok(())
-	}
-
-	/// Invite the user to the conduit admin room.
-	///
-	/// In conduit, this is equivalent to granting admin privileges.
-	pub async fn make_user_admin(&self, user_id: &UserId, displayname: String) -> Result<()> {
-		if let Some(room_id) = Self::get_admin_room()? {
-			let mutex_state = Arc::clone(
-				services()
-					.globals
-					.roomid_mutex_state
-					.write()
-					.await
-					.entry(room_id.clone())
-					.or_default(),
-			);
-			let state_lock = mutex_state.lock().await;
-
-			// Use the server user to grant the new admin's power level
-			let server_user = &services().globals.server_user;
-
-			// Invite and join the real user
-			services()
-				.rooms
-				.timeline
-				.build_and_append_pdu(
-					PduBuilder {
-						event_type: TimelineEventType::RoomMember,
-						content: to_raw_value(&RoomMemberEventContent {
-							membership: MembershipState::Invite,
-							displayname: None,
-							avatar_url: None,
-							is_direct: None,
-							third_party_invite: None,
-							blurhash: None,
-							reason: None,
-							join_authorized_via_users_server: None,
-						})
-						.expect("event is valid, we just created it"),
-						unsigned: None,
-						state_key: Some(user_id.to_string()),
-						redacts: None,
-					},
-					server_user,
-					&room_id,
-					&state_lock,
-				)
-				.await?;
-			services()
-				.rooms
-				.timeline
-				.build_and_append_pdu(
-					PduBuilder {
-						event_type: TimelineEventType::RoomMember,
-						content: to_raw_value(&RoomMemberEventContent {
-							membership: MembershipState::Join,
-							displayname: Some(displayname),
-							avatar_url: None,
-							is_direct: None,
-							third_party_invite: None,
-							blurhash: None,
-							reason: None,
-							join_authorized_via_users_server: None,
-						})
-						.expect("event is valid, we just created it"),
-						unsigned: None,
-						state_key: Some(user_id.to_string()),
-						redacts: None,
-					},
-					user_id,
-					&room_id,
-					&state_lock,
-				)
-				.await?;
-
-			// Set power level
-			let mut users = BTreeMap::new();
-			users.insert(server_user.clone(), 100.into());
-			users.insert(user_id.to_owned(), 100.into());
-
-			services()
-				.rooms
-				.timeline
-				.build_and_append_pdu(
-					PduBuilder {
-						event_type: TimelineEventType::RoomPowerLevels,
-						content: to_raw_value(&RoomPowerLevelsEventContent {
-							users,
-							..Default::default()
-						})
-						.expect("event is valid, we just created it"),
-						unsigned: None,
-						state_key: Some(String::new()),
-						redacts: None,
-					},
-					server_user,
-					&room_id,
-					&state_lock,
-				)
-				.await?;
-
-			// Send welcome message
-			services().rooms.timeline.build_and_append_pdu(
-  			PduBuilder {
-                event_type: TimelineEventType::RoomMessage,
-                content: to_raw_value(&RoomMessageEventContent::text_html(
-                        format!("## Thank you for trying out conduwuit!\n\nconduwuit is a fork of upstream Conduit which is in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Git and Documentation: https://github.com/girlbossceo/conduwuit\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nconduwuit room (Ask questions and get notified on updates):\n`/join #conduwuit:puppygock.gay`", services().globals.server_name()),
-                        format!("<h2>Thank you for trying out conduwuit!</h2>\n<p>conduwuit is a fork of upstream Conduit which is in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Git and Documentation: https://github.com/girlbossceo/conduwuit<br>Report issues: https://github.com/girlbossceo/conduwuit/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>conduwuit room (Ask questions and get notified on updates):<br><code>/join #conduwuit:puppygock.gay</code></p>\n", services().globals.server_name()),
-                ))
-                .expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: None,
-                redacts: None,
-            },
-            server_user,
-            &room_id,
-            &state_lock,
-        ).await?;
-		}
-
-		Ok(())
-	}
-
-	/// Checks whether a given user is an admin of this server
-	pub async fn user_is_admin(&self, user_id: &UserId) -> Result<bool> {
-		let Ok(Some(admin_room)) = Self::get_admin_room() else {
-			return Ok(false);
-		};
-
-		services().rooms.state_cache.is_joined(user_id, &admin_room)
-	}
-}
diff --git a/src/service/admin/console.rs b/src/service/admin/console.rs
new file mode 100644
index 0000000000000000000000000000000000000000..66faf2c1420c7775005617edc55e8f974f29e2cd
--- /dev/null
+++ b/src/service/admin/console.rs
@@ -0,0 +1,121 @@
+#![cfg(feature = "console")]
+use std::sync::Arc;
+
+use conduit::{error, log, trace};
+use ruma::events::room::message::RoomMessageEventContent;
+use rustyline::{error::ReadlineError, history, Editor};
+use termimad::MadSkin;
+use tokio::{sync::Mutex, task::JoinHandle};
+
+use crate::services;
+
+pub struct Console {
+	join: Mutex<Option<JoinHandle<()>>>,
+	input: Mutex<Editor<(), history::MemHistory>>,
+	output: MadSkin,
+}
+
+impl Console {
+	#[must_use]
+	pub fn new() -> Arc<Self> {
+		use rustyline::config::{Behavior, BellStyle};
+		use termimad::{crossterm::style::Color, Alignment, CompoundStyle, LineStyle};
+
+		let config = rustyline::Config::builder()
+			.enable_signals(false)
+			.behavior(Behavior::PreferTerm)
+			.bell_style(BellStyle::Visible)
+			.auto_add_history(true)
+			.max_history_size(100)
+			.expect("valid history size")
+			.indent_size(4)
+			.tab_stop(4)
+			.build();
+
+		let history = history::MemHistory::with_config(config);
+		let input = Editor::with_history(config, history).expect("builder configuration succeeded");
+
+		let mut output = MadSkin::default_dark();
+
+		let code_style = CompoundStyle::with_fgbg(Color::AnsiValue(40), Color::AnsiValue(234));
+		output.inline_code = code_style.clone();
+		output.code_block = LineStyle {
+			left_margin: 0,
+			right_margin: 0,
+			align: Alignment::Left,
+			compound_style: code_style,
+		};
+
+		Arc::new(Self {
+			join: None.into(),
+			input: Mutex::new(input),
+			output,
+		})
+	}
+
+	#[allow(clippy::let_underscore_must_use)]
+	pub async fn start(self: &Arc<Self>) {
+		let mut join = self.join.lock().await;
+		if join.is_none() {
+			let self_ = Arc::clone(self);
+			_ = join.insert(services().server.runtime().spawn(self_.worker()));
+		}
+	}
+
+	pub fn interrupt(self: &Arc<Self>) { Self::handle_interrupt(); }
+
+	#[allow(clippy::let_underscore_must_use)]
+	pub async fn close(self: &Arc<Self>) {
+		if let Some(join) = self.join.lock().await.take() {
+			_ = join.await;
+		}
+	}
+
+	#[tracing::instrument(skip_all, name = "console")]
+	async fn worker(self: Arc<Self>) {
+		loop {
+			let mut input = self.input.lock().await;
+
+			let suppression = log::Suppress::new(&services().server);
+			let line = tokio::task::block_in_place(|| input.readline("uwu> "));
+			drop(suppression);
+
+			match line {
+				Ok(string) => self.handle(string).await,
+				Err(e) => match e {
+					ReadlineError::Eof => break,
+					ReadlineError::Interrupted => Self::handle_interrupt(),
+					ReadlineError::WindowResized => Self::handle_winch(),
+					_ => error!("console: {e:?}"),
+				},
+			}
+		}
+
+		self.join.lock().await.take();
+	}
+
+	async fn handle(&self, line: String) {
+		if line.is_empty() {
+			return;
+		}
+
+		match services().admin.command_in_place(line, None).await {
+			Ok(Some(content)) => self.output(content).await,
+			Err(e) => error!("processing command: {e}"),
+			_ => (),
+		}
+	}
+
+	async fn output(&self, output_content: RoomMessageEventContent) {
+		let output = self.output.term_text(output_content.body());
+		println!("{output}");
+	}
+
+	fn handle_interrupt() {
+		trace!("interrupted");
+	}
+
+	fn handle_winch() {
+		trace!("winch");
+	}
+}
diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ad70fe0c568e0fd54b490dac4c20f302eb84a236
--- /dev/null
+++ b/src/service/admin/create.rs
@@ -0,0 +1,288 @@
+use std::collections::BTreeMap;
+
+use conduit::{Error, Result};
+use ruma::{
+	api::client::error::ErrorKind,
+	events::{
+		room::{
+			canonical_alias::RoomCanonicalAliasEventContent,
+			create::RoomCreateEventContent,
+			guest_access::{GuestAccess, RoomGuestAccessEventContent},
+			history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
+			join_rules::{JoinRule, RoomJoinRulesEventContent},
+			member::{MembershipState, RoomMemberEventContent},
+			name::RoomNameEventContent,
+			power_levels::RoomPowerLevelsEventContent,
+			preview_url::RoomPreviewUrlsEventContent,
+			topic::RoomTopicEventContent,
+		},
+		TimelineEventType,
+	},
+	RoomId, RoomVersionId,
+};
+use serde_json::value::to_raw_value;
+use tracing::warn;
+
+use crate::{pdu::PduBuilder, services};
+
+/// Create the admin room.
+///
+/// Users in this room are considered admins by conduit, and the room can be
+/// used to issue admin commands by talking to the server user inside it.
+pub async fn create_admin_room() -> Result<()> {
+	let room_id = RoomId::new(services().globals.server_name());
+
+	let _short_id = services().rooms.short.get_or_create_shortroomid(&room_id)?;
+
+	let state_lock = services().globals.roomid_mutex_state.lock(&room_id).await;
+
+	// Create a user for the server
+	let server_user = &services().globals.server_user;
+	services().users.create(server_user, None)?;
+
+	let room_version = services().globals.default_room_version();
+	let mut content = match room_version {
+		RoomVersionId::V1
+		| RoomVersionId::V2
+		| RoomVersionId::V3
+		| RoomVersionId::V4
+		| RoomVersionId::V5
+		| RoomVersionId::V6
+		| RoomVersionId::V7
+		| RoomVersionId::V8
+		| RoomVersionId::V9
+		| RoomVersionId::V10 => RoomCreateEventContent::new_v1(server_user.clone()),
+		RoomVersionId::V11 => RoomCreateEventContent::new_v11(),
+		_ => {
+			warn!("Unexpected or unsupported room version {}", room_version);
+			return Err(Error::BadRequest(
+				ErrorKind::BadJson,
+				"Unexpected or unsupported room version found",
+			));
+		},
+	};
+
+	content.federate = true;
+	content.predecessor = None;
+	content.room_version = room_version;
+
+	// 1. The room create event
+	services()
+		.rooms
+		.timeline
+		.build_and_append_pdu(
+			PduBuilder {
+				event_type: TimelineEventType::RoomCreate,
+				content: to_raw_value(&content).expect("event is valid, we just created it"),
+				unsigned: None,
+				state_key: Some(String::new()),
+				redacts: None,
+			},
+			server_user,
+			&room_id,
+			&state_lock,
+		)
+		.await?;
+
+	// 2. Make conduit bot join
+	services()
+		.rooms
+		.timeline
+		.build_and_append_pdu(
+			PduBuilder {
+				event_type: TimelineEventType::RoomMember,
+				content: to_raw_value(&RoomMemberEventContent {
+					membership: MembershipState::Join,
+					displayname: None,
+					avatar_url: None,
+					is_direct: None,
+					third_party_invite: None,
+					blurhash: None,
+					reason: None,
+					join_authorized_via_users_server: None,
+				})
+				.expect("event is valid, we just created it"),
+				unsigned: None,
+				state_key: Some(server_user.to_string()),
+				redacts: None,
+			},
+			server_user,
+			&room_id,
+			&state_lock,
+		)
+		.await?;
+
+	// 3. Power levels
+	let mut users = BTreeMap::new();
+	users.insert(server_user.clone(), 100.into());
+
+	services()
+		.rooms
+		.timeline
+		.build_and_append_pdu(
+			PduBuilder {
+				event_type: TimelineEventType::RoomPowerLevels,
+				content: to_raw_value(&RoomPowerLevelsEventContent {
+					users,
+					..Default::default()
+				})
+				.expect("event is valid, we just created it"),
+				unsigned: None,
+				state_key: Some(String::new()),
+				redacts: None,
+			},
+			server_user,
+			&room_id,
+			&state_lock,
+		)
+		.await?;
+
+	// 4.1 Join Rules
+	services()
+		.rooms
+		.timeline
+		.build_and_append_pdu(
+			PduBuilder {
+				event_type: TimelineEventType::RoomJoinRules,
+				content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite))
+					.expect("event is valid, we just created it"),
+				unsigned: None,
+				state_key: Some(String::new()),
+				redacts: None,
+			},
+			server_user,
+			&room_id,
+			&state_lock,
+		)
+		.await?;
+
+	// 4.2 History Visibility
+	services()
+		.rooms
+		.timeline
+		.build_and_append_pdu(
+			PduBuilder {
+				event_type: TimelineEventType::RoomHistoryVisibility,
+				content: to_raw_value(&RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared))
+					.expect("event is valid, we just created it"),
+				unsigned: None,
+				state_key: Some(String::new()),
+				redacts: None,
+			},
+			server_user,
+			&room_id,
+			&state_lock,
+		)
+		.await?;
+
+	// 4.3 Guest Access
+	services()
+		.rooms
+		.timeline
+		.build_and_append_pdu(
+			PduBuilder {
+				event_type: TimelineEventType::RoomGuestAccess,
+				content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden))
+					.expect("event is valid, we just created it"),
+				unsigned: None,
+				state_key: Some(String::new()),
+				redacts: None,
+			},
+			server_user,
+			&room_id,
+			&state_lock,
+		)
+		.await?;
+
+	// 5. Events implied by name and topic
+	let room_name = format!("{} Admin Room", services().globals.server_name());
+	services()
+		.rooms
+		.timeline
+		.build_and_append_pdu(
+			PduBuilder {
+				event_type: TimelineEventType::RoomName,
+				content: to_raw_value(&RoomNameEventContent::new(room_name))
+					.expect("event is valid, we just created it"),
+				unsigned: None,
+				state_key: Some(String::new()),
+				redacts: None,
+			},
+			server_user,
+			&room_id,
+			&state_lock,
+		)
+		.await?;
+
+	services()
+		.rooms
+		.timeline
+		.build_and_append_pdu(
+			PduBuilder {
+				event_type: TimelineEventType::RoomTopic,
+				content: to_raw_value(&RoomTopicEventContent {
+					topic: format!("Manage {}", services().globals.server_name()),
+				})
+				.expect("event is valid, we just created it"),
+				unsigned: None,
+				state_key: Some(String::new()),
+				redacts: None,
+			},
+			server_user,
+			&room_id,
+			&state_lock,
+		)
+		.await?;
+
+	// 6. Room alias
+	let alias = &services().globals.admin_alias;
+
+	services()
+		.rooms
+		.timeline
+		.build_and_append_pdu(
+			PduBuilder {
+				event_type: TimelineEventType::RoomCanonicalAlias,
+				content: to_raw_value(&RoomCanonicalAliasEventContent {
+					alias: Some(alias.clone()),
+					alt_aliases: Vec::new(),
+				})
+				.expect("event is valid, we just created it"),
+				unsigned: None,
+				state_key: Some(String::new()),
+				redacts: None,
+			},
+			server_user,
+			&room_id,
+			&state_lock,
+		)
+		.await?;
+
+	services()
+		.rooms
+		.alias
+		.set_alias(alias, &room_id, server_user)?;
+
+	// 7. (ad-hoc) Disable room previews for everyone by default
+	services()
+		.rooms
+		.timeline
+		.build_and_append_pdu(
+			PduBuilder {
+				event_type: TimelineEventType::RoomPreviewUrls,
+				content: to_raw_value(&RoomPreviewUrlsEventContent {
+					disabled: true,
+				})
+				.expect("event is valid we just created it"),
+				unsigned: None,
+				state_key: Some(String::new()),
+				redacts: None,
+			},
+			server_user,
+			&room_id,
+			&state_lock,
+		)
+		.await?;
+
+	Ok(())
+}
diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ca48ce0dbeb82a42e4821889d662b860b2eb13b7
--- /dev/null
+++ b/src/service/admin/grant.rs
@@ -0,0 +1,130 @@
+use std::collections::BTreeMap;
+
+use conduit::Result;
+use ruma::{
+	events::{
+		room::{
+			member::{MembershipState, RoomMemberEventContent},
+			message::RoomMessageEventContent,
+			power_levels::RoomPowerLevelsEventContent,
+		},
+		TimelineEventType,
+	},
+	UserId,
+};
+use serde_json::value::to_raw_value;
+
+use super::Service;
+use crate::{pdu::PduBuilder, services};
+
+/// Invite the user to the conduit admin room.
+///
+/// In conduit, this is equivalent to granting admin privileges.
+pub async fn make_user_admin(user_id: &UserId, displayname: String) -> Result<()> {
+	if let Some(room_id) = Service::get_admin_room()? {
+		let state_lock = services().globals.roomid_mutex_state.lock(&room_id).await;
+
+		// Use the server user to grant the new admin's power level
+		let server_user = &services().globals.server_user;
+
+		// Invite and join the real user
+		services()
+			.rooms
+			.timeline
+			.build_and_append_pdu(
+				PduBuilder {
+					event_type: TimelineEventType::RoomMember,
+					content: to_raw_value(&RoomMemberEventContent {
+						membership: MembershipState::Invite,
+						displayname: None,
+						avatar_url: None,
+						is_direct: None,
+						third_party_invite: None,
+						blurhash: None,
+						reason: None,
+						join_authorized_via_users_server: None,
+					})
+					.expect("event is valid, we just created it"),
+					unsigned: None,
+					state_key: Some(user_id.to_string()),
+					redacts: None,
+				},
+				server_user,
+				&room_id,
+				&state_lock,
+			)
+			.await?;
+		services()
+			.rooms
+			.timeline
+			.build_and_append_pdu(
+				PduBuilder {
+					event_type: TimelineEventType::RoomMember,
+					content: to_raw_value(&RoomMemberEventContent {
+						membership: MembershipState::Join,
+						displayname: Some(displayname),
+						avatar_url: None,
+						is_direct: None,
+						third_party_invite: None,
+						blurhash: None,
+						reason: None,
+						join_authorized_via_users_server: None,
+					})
+					.expect("event is valid, we just created it"),
+					unsigned: None,
+					state_key: Some(user_id.to_string()),
+					redacts: None,
+				},
+				user_id,
+				&room_id,
+				&state_lock,
+			)
+			.await?;
+
+		// Set power level
+		let mut users = BTreeMap::new();
+		users.insert(server_user.clone(), 100.into());
+		users.insert(user_id.to_owned(), 100.into());
+
+		services()
+			.rooms
+			.timeline
+			.build_and_append_pdu(
+				PduBuilder {
+					event_type: TimelineEventType::RoomPowerLevels,
+					content: to_raw_value(&RoomPowerLevelsEventContent {
+						users,
+						..Default::default()
+					})
+					.expect("event is valid, we just created it"),
+					unsigned: None,
+					state_key: Some(String::new()),
+					redacts: None,
+				},
+				server_user,
+				&room_id,
+				&state_lock,
+			)
+			.await?;
+
+		// Send welcome message
+		services().rooms.timeline.build_and_append_pdu(
+  			PduBuilder {
+                event_type: TimelineEventType::RoomMessage,
+                content: to_raw_value(&RoomMessageEventContent::text_html(
+                        format!("## Thank you for trying out conduwuit!\n\nconduwuit is a fork of upstream Conduit which is in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Git and Documentation: https://github.com/girlbossceo/conduwuit\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nconduwuit room (Ask questions and get notified on updates):\n`/join #conduwuit:puppygock.gay`", services().globals.server_name()),
+                        format!("<h2>Thank you for trying out conduwuit!</h2>\n<p>conduwuit is a fork of upstream Conduit which is in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Git and Documentation: https://github.com/girlbossceo/conduwuit<br>Report issues: https://github.com/girlbossceo/conduwuit/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>conduwuit room (Ask questions and get notified on updates):<br><code>/join #conduwuit:puppygock.gay</code></p>\n", services().globals.server_name()),
+                ))
+                .expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: None,
+                redacts: None,
+            },
+            server_user,
+            &room_id,
+            &state_lock,
+        ).await?;
+	}
+
+	Ok(())
+}
diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f5f818d3a599a359bc1baa28a954a09ee1d2b161
--- /dev/null
+++ b/src/service/admin/mod.rs
@@ -0,0 +1,318 @@
+pub mod console;
+mod create;
+mod grant;
+
+use std::{future::Future, pin::Pin, sync::Arc};
+
+use conduit::{utils::mutex_map, Error, Result};
+pub use create::create_admin_room;
+pub use grant::make_user_admin;
+use ruma::{
+	events::{
+		room::message::{Relation, RoomMessageEventContent},
+		TimelineEventType,
+	},
+	EventId, OwnedRoomId, RoomId, UserId,
+};
+use serde_json::value::to_raw_value;
+use tokio::{sync::Mutex, task::JoinHandle};
+use tracing::error;
+
+use crate::{pdu::PduBuilder, services, PduEvent};
+
+pub type HandlerResult = Pin<Box<dyn Future<Output = Result<AdminEvent, Error>> + Send>>;
+pub type Handler = fn(AdminEvent) -> HandlerResult;
+
+pub struct Service {
+	sender: loole::Sender<AdminEvent>,
+	receiver: Mutex<loole::Receiver<AdminEvent>>,
+	handler_join: Mutex<Option<JoinHandle<()>>>,
+	pub handle: Mutex<Option<Handler>>,
+	#[cfg(feature = "console")]
+	pub console: Arc<console::Console>,
+}
+
+#[derive(Debug)]
+pub enum AdminEvent {
+	Command(String, Option<Arc<EventId>>),
+	Reply(Option<RoomMessageEventContent>),
+	Notice(RoomMessageEventContent),
+}
+
+impl Service {
+	#[must_use]
+	pub fn build() -> Arc<Self> {
+		let (sender, receiver) = loole::unbounded();
+		Arc::new(Self {
+			sender,
+			receiver: Mutex::new(receiver),
+			handler_join: Mutex::new(None),
+			handle: Mutex::new(None),
+			#[cfg(feature = "console")]
+			console: console::Console::new(),
+		})
+	}
+
+	pub fn interrupt(&self) {
+		#[cfg(feature = "console")]
+		self.console.interrupt();
+
+		if !self.sender.is_closed() {
+			self.sender.close();
+		}
+	}
+
+	pub async fn close(&self) {
+		self.interrupt();
+
+		#[cfg(feature = "console")]
+		self.console.close().await;
+
+		if let Some(handler_join) = self.handler_join.lock().await.take() {
+			if let Err(e) = handler_join.await {
+				error!("Failed to shutdown: {e:?}");
+			}
+		}
+	}
+
+	pub async fn start_handler(self: &Arc<Self>) {
+		let self_ = Arc::clone(self);
+		let handle = services().server.runtime().spawn(async move {
+			self_
+				.handler()
+				.await
+				.expect("Failed to initialize admin room handler");
+		});
+
+		_ = self.handler_join.lock().await.insert(handle);
+	}
+
+	async fn handler(self: &Arc<Self>) -> Result<()> {
+		let receiver = self.receiver.lock().await;
+		let mut signals = services().server.signal.subscribe();
+		loop {
+			debug_assert!(!receiver.is_closed(), "channel closed");
+			tokio::select! {
+				event = receiver.recv_async() => match event {
+					Ok(event) => self.receive(event).await,
+					Err(_) => return Ok(()),
+				},
+				sig = signals.recv() => match sig {
+					Ok(sig) => self.handle_signal(sig).await,
+					Err(_) => continue,
+				},
+			}
+		}
+	}
+
+	pub async fn send_text(&self, body: &str) {
+		self.send_message(RoomMessageEventContent::text_plain(body))
+			.await;
+	}
+
+	pub async fn send_message(&self, message_content: RoomMessageEventContent) {
+		self.send(AdminEvent::Notice(message_content)).await;
+	}
+
+	pub async fn command(&self, command: String, event_id: Option<Arc<EventId>>) {
+		self.send(AdminEvent::Command(command, event_id)).await;
+	}
+
+	pub async fn command_in_place(
+		&self, command: String, event_id: Option<Arc<EventId>>,
+	) -> Result<Option<RoomMessageEventContent>> {
+		match self.handle(AdminEvent::Command(command, event_id)).await? {
+			AdminEvent::Reply(content) => Ok(content),
+			_ => Ok(None),
+		}
+	}
+
+	async fn send(&self, message: AdminEvent) {
+		debug_assert!(!self.sender.is_full(), "channel full");
+		debug_assert!(!self.sender.is_closed(), "channel closed");
+		self.sender.send(message).expect("message sent");
+	}
+
+	async fn receive(&self, event: AdminEvent) {
+		if let Ok(AdminEvent::Reply(content)) = self.handle(event).await {
+			handle_response(content).await;
+		}
+	}
+
+	async fn handle(&self, event: AdminEvent) -> Result<AdminEvent, Error> {
+		if let Some(handle) = self.handle.lock().await.as_ref() {
+			handle(event).await
+		} else {
+			Err(Error::Err("Admin module is not loaded.".into()))
+		}
+	}
+
+	async fn handle_signal(&self, #[allow(unused_variables)] sig: &'static str) {
+		#[cfg(feature = "console")]
+		if sig == "SIGINT" && services().server.running() {
+			self.console.start().await;
+		}
+	}
+
+	/// Checks whether a given user is an admin of this server
+	pub async fn user_is_admin(&self, user_id: &UserId) -> Result<bool> {
+		if let Ok(Some(admin_room)) = Self::get_admin_room() {
+			services().rooms.state_cache.is_joined(user_id, &admin_room)
+		} else {
+			Ok(false)
+		}
+	}
+
+	/// Gets the room ID of the admin room
+	///
+	/// Errors are propagated from the database, and will have None if there is
+	/// no admin room
+	pub fn get_admin_room() -> Result<Option<OwnedRoomId>> {
+		if let Some(room_id) = services()
+			.rooms
+			.alias
+			.resolve_local_alias(&services().globals.admin_alias)?
+		{
+			if services()
+				.rooms
+				.state_cache
+				.is_joined(&services().globals.server_user, &room_id)?
+			{
+				return Ok(Some(room_id));
+			}
+		}
+
+		Ok(None)
+	}
+}
+
+async fn handle_response(content: Option<RoomMessageEventContent>) {
+	if let Some(content) = content.as_ref() {
+		if let Some(Relation::Reply {
+			in_reply_to,
+		}) = content.relates_to.as_ref()
+		{
+			if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(&in_reply_to.event_id) {
+				let response_sender = if is_admin_room(&pdu.room_id) {
+					&services().globals.server_user
+				} else {
+					&pdu.sender
+				};
+
+				respond_to_room(content, &pdu.room_id, response_sender).await;
+			}
+		}
+	}
+}
+
+async fn respond_to_room(content: &RoomMessageEventContent, room_id: &RoomId, user_id: &UserId) {
+	assert!(
+		services()
+			.admin
+			.user_is_admin(user_id)
+			.await
+			.expect("checked user is admin"),
+		"sender is not admin"
+	);
+
+	let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
+	let response_pdu = PduBuilder {
+		event_type: TimelineEventType::RoomMessage,
+		content: to_raw_value(content).expect("event is valid, we just created it"),
+		unsigned: None,
+		state_key: None,
+		redacts: None,
+	};
+
+	if let Err(e) = services()
+		.rooms
+		.timeline
+		.build_and_append_pdu(response_pdu, user_id, room_id, &state_lock)
+		.await
+	{
+		if let Err(e) = handle_response_error(&e, room_id, user_id, &state_lock).await {
+			error!("{e}");
+		}
+	}
+}
+
+async fn handle_response_error(
+	e: &Error, room_id: &RoomId, user_id: &UserId, state_lock: &mutex_map::Guard<()>,
+) -> Result<()> {
+	error!("Failed to build and append admin room response PDU: \"{e}\"");
+	let error_room_message = RoomMessageEventContent::text_plain(format!(
+		"Failed to build and append admin room PDU: \"{e}\"\n\nThe original admin command may have finished \
+		 successfully, but we could not return the output."
+	));
+
+	let response_pdu = PduBuilder {
+		event_type: TimelineEventType::RoomMessage,
+		content: to_raw_value(&error_room_message).expect("event is valid, we just created it"),
+		unsigned: None,
+		state_key: None,
+		redacts: None,
+	};
+
+	services()
+		.rooms
+		.timeline
+		.build_and_append_pdu(response_pdu, user_id, room_id, state_lock)
+		.await?;
+
+	Ok(())
+}
+
+pub async fn is_admin_command(pdu: &PduEvent, body: &str) -> bool {
+	// Server-side command-escape with public echo
+	let is_escape = body.starts_with('\\');
+	let is_public_escape = is_escape && body.trim_start_matches('\\').starts_with("!admin");
+
+	// Admin command with public echo (in admin room)
+	let server_user = &services().globals.server_user;
+	let is_public_prefix = body.starts_with("!admin") || body.starts_with(server_user.as_str());
+
+	// Expected backward branch
+	if !is_public_escape && !is_public_prefix {
+		return false;
+	}
+
+	// Check if server-side command-escape is disabled by configuration
+	if is_public_escape && !services().globals.config.admin_escape_commands {
+		return false;
+	}
+
+	// Prevent unescaped !admin from being used outside of the admin room
+	if is_public_prefix && !is_admin_room(&pdu.room_id) {
+		return false;
+	}
+
+	// Only senders who are admin can proceed
+	if !services()
+		.admin
+		.user_is_admin(&pdu.sender)
+		.await
+		.unwrap_or(false)
+	{
+		return false;
+	}
+
+	// This will evaluate to false if the emergency password is set up so that
+	// the administrator can execute commands as conduit
+	let emergency_password_set = services().globals.emergency_password().is_some();
+	let from_server = pdu.sender == *server_user && !emergency_password_set;
+	if from_server && is_admin_room(&pdu.room_id) {
+		return false;
+	}
+
+	// Authentic admin command
+	true
+}
+
+#[must_use]
+pub fn is_admin_room(room_id: &RoomId) -> bool {
+	if let Ok(Some(admin_room_id)) = Service::get_admin_room() {
+		admin_room_id == room_id
+	} else {
+		false
+	}
+}
diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs
index b49fc4e92d2b103c4cacae24896fe614c0ea37dd..9851bc0ae807bf3bb5d17d53e837f19ebf278821 100644
--- a/src/service/globals/data.rs
+++ b/src/service/globals/data.rs
@@ -160,7 +160,9 @@ async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> {
 		futures.push(self.userid_lastonetimekeyupdate.watch_prefix(&userid_bytes));
 
 		futures.push(Box::pin(async move {
-			let _result = services().server.signal.subscribe().recv().await;
+			while services().server.running() {
+				let _result = services().server.signal.subscribe().recv().await;
+			}
 		}));
 
 		if !services().server.running() {
diff --git a/src/service/globals/migrations.rs b/src/service/globals/migrations.rs
index 83b3be9947a8f12999e732f8bf3428f1a5fad67e..bc9eaaa8b3ef5afc1a7cd8dc8767df824ad1c3a0 100644
--- a/src/service/globals/migrations.rs
+++ b/src/service/globals/migrations.rs
@@ -190,7 +190,7 @@ pub(crate) async fn migrations(db: &KeyValueDatabase, config: &Config) -> Result
 			.insert(b"retroactively_fix_bad_data_from_roomuserid_joined", &[])?;
 
 		// Create the admin room and server user on first run
-		services().admin.create_admin_room().await?;
+		crate::admin::create_admin_room().await?;
 
 		warn!(
 			"Created new {} database with version {}",
diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs
index 317a0d5619d8b37833963dc548c3e8149c9caa0e..626b27793de1e32dc178b95f9936f841f633baf4 100644
--- a/src/service/globals/mod.rs
+++ b/src/service/globals/mod.rs
@@ -14,6 +14,7 @@
 };
 
 use base64::{engine::general_purpose, Engine as _};
+use conduit::utils;
 use data::Data;
 use hickory_resolver::TokioAsyncResolver;
 use ipaddress::IPAddress;
@@ -33,6 +34,7 @@
 };
 use tracing::{error, trace};
 use url::Url;
+use utils::MutexMap;
 
 use crate::{services, Config, Result};
 
@@ -52,9 +54,9 @@ pub struct Service {
 	pub bad_event_ratelimiter: Arc<RwLock<HashMap<OwnedEventId, RateLimitState>>>,
 	pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>,
 	pub bad_query_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, RateLimitState>>>,
-	pub roomid_mutex_insert: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
-	pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
-	pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, // this lock will be held longer
+	pub roomid_mutex_insert: MutexMap<OwnedRoomId, ()>,
+	pub roomid_mutex_state: MutexMap<OwnedRoomId, ()>,
+	pub roomid_mutex_federation: MutexMap<OwnedRoomId, ()>,
 	pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
 	pub updates_handle: Mutex<Option<JoinHandle<()>>>,
 	pub stateres_mutex: Arc<Mutex<()>>,
@@ -114,9 +116,9 @@ pub fn load(db: Arc<dyn Data>, config: &Config) -> Result<Self> {
 			bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
 			bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
 			bad_query_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
-			roomid_mutex_state: RwLock::new(HashMap::new()),
-			roomid_mutex_insert: RwLock::new(HashMap::new()),
-			roomid_mutex_federation: RwLock::new(HashMap::new()),
+			roomid_mutex_state: MutexMap::<OwnedRoomId, ()>::new(),
+			roomid_mutex_insert: MutexMap::<OwnedRoomId, ()>::new(),
+			roomid_mutex_federation: MutexMap::<OwnedRoomId, ()>::new(),
 			roomid_federationhandletime: RwLock::new(HashMap::new()),
 			updates_handle: Mutex::new(None),
 			stateres_mutex: Arc::new(Mutex::new(())),
diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs
index d1413cb520f909f235592bc5b358471a9421b2e3..e2cd5066694b2e99952dcb0f7d39b3f0876f94a3 100644
--- a/src/service/rooms/event_handler/mod.rs
+++ b/src/service/rooms/event_handler/mod.rs
@@ -530,18 +530,8 @@ pub async fn upgrade_outlier_to_timeline_pdu(
 		// 13. Use state resolution to find new room state
 
 		// We start looking at current room state now, so lets lock the room
-		let mutex_state = Arc::clone(
-			services()
-				.globals
-				.roomid_mutex_state
-				.write()
-				.await
-				.entry(room_id.to_owned())
-				.or_default(),
-		);
-
 		trace!("Locking the room");
-		let state_lock = mutex_state.lock().await;
+		let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
 
 		// Now we calculate the set of extremities this room has after the incoming
 		// event has been applied. We start with the previous extremities (aka leaves)
diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs
index f0fef086e410d8e25abb8a1fd68424125284fee9..f8c7f6a6c01aa05140485daeeaeb76bef2bfac74 100644
--- a/src/service/rooms/state/data.rs
+++ b/src/service/rooms/state/data.rs
@@ -1,7 +1,7 @@
 use std::{collections::HashSet, sync::Arc};
 
+use conduit::utils::mutex_map;
 use ruma::{EventId, OwnedEventId, RoomId};
-use tokio::sync::MutexGuard;
 
 use crate::{utils, Error, KeyValueDatabase, Result};
 
@@ -14,7 +14,7 @@ fn set_room_state(
 		&self,
 		room_id: &RoomId,
 		new_shortstatehash: u64,
-		_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		_mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()>;
 
 	/// Associates a state with an event.
@@ -28,7 +28,7 @@ fn set_forward_extremities(
 		&self,
 		room_id: &RoomId,
 		event_ids: Vec<OwnedEventId>,
-		_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		_mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()>;
 }
 
@@ -47,7 +47,7 @@ fn set_room_state(
 		&self,
 		room_id: &RoomId,
 		new_shortstatehash: u64,
-		_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		_mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()> {
 		self.roomid_shortstatehash
 			.insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?;
@@ -80,7 +80,7 @@ fn set_forward_extremities(
 		&self,
 		room_id: &RoomId,
 		event_ids: Vec<OwnedEventId>,
-		_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		_mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()> {
 		let mut prefix = room_id.as_bytes().to_vec();
 		prefix.push(0xFF);
diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs
index 8a32d9caf213d63afae74baf1be4e05529659f5c..199ee87fa0ce6f48292a2b39a2c36b8173a8a181 100644
--- a/src/service/rooms/state/mod.rs
+++ b/src/service/rooms/state/mod.rs
@@ -4,6 +4,7 @@
 	sync::Arc,
 };
 
+use conduit::utils::mutex_map;
 use data::Data;
 use ruma::{
 	api::client::error::ErrorKind,
@@ -15,7 +16,6 @@
 	state_res::{self, StateMap},
 	EventId, OwnedEventId, RoomId, RoomVersionId, UserId,
 };
-use tokio::sync::MutexGuard;
 use tracing::warn;
 
 use super::state_compressor::CompressedStateEvent;
@@ -33,7 +33,7 @@ pub async fn force_state(
 		shortstatehash: u64,
 		statediffnew: Arc<HashSet<CompressedStateEvent>>,
 		_statediffremoved: Arc<HashSet<CompressedStateEvent>>,
-		state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		state_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()> {
 		for event_id in statediffnew.iter().filter_map(|new| {
 			services()
@@ -299,12 +299,12 @@ pub fn calculate_invite_state(&self, invite_event: &PduEvent) -> Result<Vec<Raw<
 	}
 
 	/// Set the state hash to a new version, but does not update state_cache.
-	#[tracing::instrument(skip(self))]
+	#[tracing::instrument(skip(self, mutex_lock))]
 	pub fn set_room_state(
 		&self,
 		room_id: &RoomId,
 		shortstatehash: u64,
-		mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()> {
 		self.db.set_room_state(room_id, shortstatehash, mutex_lock)
 	}
@@ -343,7 +343,7 @@ pub fn set_forward_extremities(
 		&self,
 		room_id: &RoomId,
 		event_ids: Vec<OwnedEventId>,
-		state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		state_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()> {
 		self.db
 			.set_forward_extremities(room_id, event_ids, state_lock)
diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs
index d3dc92eff23f251b884404b9021c02e4d46a0539..ab290cbd7772fbaebb51415b6a300e892f478504 100644
--- a/src/service/rooms/state_accessor/mod.rs
+++ b/src/service/rooms/state_accessor/mod.rs
@@ -4,6 +4,7 @@
 	sync::{Arc, Mutex},
 };
 
+use conduit::utils::mutex_map;
 use data::Data;
 use lru_cache::LruCache;
 use ruma::{
@@ -22,7 +23,6 @@
 	EventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
 };
 use serde_json::value::to_raw_value;
-use tokio::sync::MutexGuard;
 use tracing::{error, warn};
 
 use crate::{service::pdu::PduBuilder, services, Error, PduEvent, Result};
@@ -285,7 +285,7 @@ pub fn get_member(&self, room_id: &RoomId, user_id: &UserId) -> Result<Option<Ro
 	}
 
 	pub async fn user_can_invite(
-		&self, room_id: &RoomId, sender: &UserId, target_user: &UserId, state_lock: &MutexGuard<'_, ()>,
+		&self, room_id: &RoomId, sender: &UserId, target_user: &UserId, state_lock: &mutex_map::Guard<()>,
 	) -> Result<bool> {
 		let content = to_raw_value(&RoomMemberEventContent::new(MembershipState::Invite))
 			.expect("Event content always serializes");
diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs
index 1addd56736d22307121868e856901d0851ab2be4..e8774cdc723911a8a002dd39fc7890f9570049bf 100644
--- a/src/service/rooms/timeline/mod.rs
+++ b/src/service/rooms/timeline/mod.rs
@@ -30,21 +30,21 @@
 };
 use serde::Deserialize;
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
-use tokio::sync::{Mutex, MutexGuard, RwLock};
+use tokio::sync::{Mutex, RwLock};
 use tracing::{debug, error, info, warn};
 
 use super::state_compressor::CompressedStateEvent;
 use crate::{
+	admin,
 	server_is_ours,
 	//api::server_server,
 	service::{
-		self,
 		appservice::NamespaceRegex,
 		pdu::{EventHash, PduBuilder},
 		rooms::event_handler::parse_incoming_pdu,
 	},
 	services,
-	utils::{self},
+	utils::{self, mutex_map},
 	Error,
 	PduCount,
 	PduEvent,
@@ -200,13 +200,13 @@ pub fn replace_pdu(&self, pdu_id: &[u8], pdu_json: &CanonicalJsonObject, pdu: &P
 	/// happens in `append_pdu`.
 	///
 	/// Returns pdu id
-	#[tracing::instrument(skip(self, pdu, pdu_json, leaves))]
+	#[tracing::instrument(skip_all)]
 	pub async fn append_pdu(
 		&self,
 		pdu: &PduEvent,
 		mut pdu_json: CanonicalJsonObject,
 		leaves: Vec<OwnedEventId>,
-		state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		state_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<Vec<u8>> {
 		// Coalesce database writes for the remainder of this scope.
 		let _cork = services().globals.db.cork_and_flush();
@@ -271,16 +271,11 @@ pub async fn append_pdu(
 			.state
 			.set_forward_extremities(&pdu.room_id, leaves, state_lock)?;
 
-		let mutex_insert = Arc::clone(
-			services()
-				.globals
-				.roomid_mutex_insert
-				.write()
-				.await
-				.entry(pdu.room_id.clone())
-				.or_default(),
-		);
-		let insert_lock = mutex_insert.lock().await;
+		let insert_lock = services()
+			.globals
+			.roomid_mutex_insert
+			.lock(&pdu.room_id)
+			.await;
 
 		let count1 = services().globals.next_count()?;
 		// Mark as read first so the sending client doesn't get a notification even if
@@ -477,30 +472,11 @@ pub async fn append_pdu(
 						.search
 						.index_pdu(shortroomid, &pdu_id, &body)?;
 
-					let server_user = &services().globals.server_user;
-
-					let to_conduit = body.starts_with(&format!("{server_user}: "))
-						|| body.starts_with(&format!("{server_user} "))
-						|| body.starts_with("!admin")
-						|| body == format!("{server_user}:")
-						|| body == *server_user;
-
-					// This will evaluate to false if the emergency password is set up so that
-					// the administrator can execute commands as conduit
-					let from_conduit = pdu.sender == *server_user && services().globals.emergency_password().is_none();
-					if let Some(admin_room) = service::admin::Service::get_admin_room()? {
-						if to_conduit
-							&& !from_conduit && admin_room == pdu.room_id
-							&& services()
-								.rooms
-								.state_cache
-								.is_joined(server_user, &admin_room)?
-						{
-							services()
-								.admin
-								.process_message(body, pdu.event_id.clone())
-								.await;
-						}
+					if admin::is_admin_command(pdu, &body).await {
+						services()
+							.admin
+							.command(body, Some(pdu.event_id.clone()))
+							.await;
 					}
 				}
 			},
@@ -605,7 +581,7 @@ pub fn create_hash_and_sign_event(
 		pdu_builder: PduBuilder,
 		sender: &UserId,
 		room_id: &RoomId,
-		_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		_mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<(PduEvent, CanonicalJsonObject)> {
 		let PduBuilder {
 			event_type,
@@ -792,10 +768,10 @@ pub async fn build_and_append_pdu(
 		pdu_builder: PduBuilder,
 		sender: &UserId,
 		room_id: &RoomId,
-		state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		state_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<Arc<EventId>> {
 		let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?;
-		if let Some(admin_room) = service::admin::Service::get_admin_room()? {
+		if let Some(admin_room) = admin::Service::get_admin_room()? {
 			if admin_room == room_id {
 				match pdu.event_type() {
 					TimelineEventType::RoomEncryption => {
@@ -933,7 +909,7 @@ pub async fn append_incoming_pdu(
 		new_room_leaves: Vec<OwnedEventId>,
 		state_ids_compressed: Arc<HashSet<CompressedStateEvent>>,
 		soft_fail: bool,
-		state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		state_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<Option<Vec<u8>>> {
 		// We append to state before appending the pdu, so we don't have a moment in
 		// time with the pdu without it's state. This is okay because append_pdu can't
@@ -1146,16 +1122,11 @@ pub async fn backfill_pdu(
 		let (event_id, value, room_id) = parse_incoming_pdu(&pdu)?;
 
 		// Lock so we cannot backfill the same pdu twice at the same time
-		let mutex = Arc::clone(
-			services()
-				.globals
-				.roomid_mutex_federation
-				.write()
-				.await
-				.entry(room_id.clone())
-				.or_default(),
-		);
-		let mutex_lock = mutex.lock().await;
+		let mutex_lock = services()
+			.globals
+			.roomid_mutex_federation
+			.lock(&room_id)
+			.await;
 
 		// Skip the PDU if we already have it as a timeline event
 		if let Some(pdu_id) = self.get_pdu_id(&event_id)? {
@@ -1184,16 +1155,7 @@ pub async fn backfill_pdu(
 			.get_shortroomid(&room_id)?
 			.expect("room exists");
 
-		let mutex_insert = Arc::clone(
-			services()
-				.globals
-				.roomid_mutex_insert
-				.write()
-				.await
-				.entry(room_id.clone())
-				.or_default(),
-		);
-		let insert_lock = mutex_insert.lock().await;
+		let insert_lock = services().globals.roomid_mutex_insert.lock(&room_id).await;
 
 		let count = services().globals.next_count()?;
 		let mut pdu_id = shortroomid.to_be_bytes().to_vec();
@@ -1222,6 +1184,7 @@ pub async fn backfill_pdu(
 		Ok(())
 	}
 }
+
 #[cfg(test)]
 mod tests {
 	use super::*;