diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 5b56b66a4d34c3630d391473450264fbd3093d12..6959ec90b225f1cfeb69788e0e6bd2557384f027 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -53,6 +53,8 @@ env:
     show-trace = true
     extra-substituters = https://attic.kennel.juneis.dog/conduit https://attic.kennel.juneis.dog/conduwuit https://cache.lix.systems https://conduwuit.cachix.org
     extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
+  # complement uses libolm
+  NIXPKGS_ALLOW_INSECURE: 1

 permissions:
   packages: write
@@ -130,9 +132,9 @@ jobs:
       - name: Prepare build environment
         run: |
           echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
-          nix profile install --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
+          nix profile install --impure --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
           direnv allow
-          nix develop .#all-features --command true
+          nix develop .#all-features --command true --impure

       - name: Cache CI dependencies
         run: |
@@ -283,9 +285,9 @@
       - name: Prepare build environment
         run: |
           echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
-          nix profile install --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
+          nix profile install --impure --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv
           direnv allow
-          nix develop .#all-features --command true
+          nix develop .#all-features --command true --impure

       # use sccache for Rust
       - name: Run sccache-cache
diff --git a/Cargo.lock b/Cargo.lock
index ac188fe393128dabba066c411aea8c87c03d4c7d..7d9ddc53a3b1ffc9caeec5772126fc86c74fb52f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -17,6 +17,12 @@ version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"

+[[package]]
+name = "adler2"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
+
 [[package]]
 name = "aho-corasick"
 version = "1.1.3"
@@ -73,9 +79,9 @@ dependencies = [

 [[package]]
 name = "arrayvec"
-version = "0.7.4"
+version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
+checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"

 [[package]]
 name = "as_variant"
@@ -124,7 +130,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.72",
+ "syn 2.0.75",
 ]

 [[package]]
@@ -135,7 +141,7 @@ checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.72",
+ "syn 2.0.75",
 ]

 [[package]]
@@ -159,6 +165,33 @@ version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"

+[[package]]
+name = "aws-lc-rs"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ae74d9bd0a7530e8afd1770739ad34b36838829d6ad61818f9230f683f5ad77"
+dependencies = [
+ "aws-lc-sys",
+ "mirai-annotations",
+ "paste",
+ "zeroize",
+]
+
+[[package]]
+name = "aws-lc-sys"
+version = "0.20.1"
+source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f0e249228c6ad2d240c2dc94b714d711629d52bad946075d8e9b2f5391f0703" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", + "libc", + "paste", +] + [[package]] name = "axum" version = "0.7.5" @@ -187,7 +220,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -242,7 +275,7 @@ dependencies = [ "mime", "pin-project-lite", "serde", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -250,8 +283,9 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.6.0" -source = "git+https://github.com/girlbossceo/axum-server?rev=8e3368d899079818934e61cc9c839abcbbcada8a#8e3368d899079818934e61cc9c839abcbbcada8a" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56bac90848f6a9393ac03c63c640925c4b7c8ca21654de40d53f55964667c7d8" dependencies = [ "arc-swap", "bytes", @@ -262,27 +296,29 @@ dependencies = [ "hyper", "hyper-util", "pin-project-lite", - "rustls 0.21.12", + "rustls 0.23.12", "rustls-pemfile", + "rustls-pki-types", "tokio", - "tokio-rustls 0.24.1", - "tower", + "tokio-rustls", + "tower 0.4.13", "tower-service", ] [[package]] name = "axum-server-dual-protocol" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ea4cd08ae2a5f075d28fa31190163c8106a1d2d3189442494bae22b39040a0d" +checksum = "2164551db024e87f20316d164eab9f5ad342d8188b08051ceb15ca92a60ea7b7" dependencies = [ "axum-server", "bytes", "http", "http-body-util", "pin-project", + "rustls 0.23.12", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "tokio-util", "tower-layer", "tower-service", @@ -298,7 +334,7 @@ dependencies = [ "cc", "cfg-if", "libc", - "miniz_oxide", + "miniz_oxide 0.7.4", "object", "rustc-demangle", ] @@ -333,12 +369,15 @@ dependencies = [ "itertools 0.12.1", "lazy_static", "lazycell", + "log", + "prettyplease", "proc-macro2", "quote", "regex", - "rustc-hash", + "rustc-hash 1.1.0", "shlex", - "syn 2.0.72", + "syn 2.0.75", + "which", ] [[package]] @@ -400,9 +439,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytemuck" -version = "1.16.3" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "102087e286b4677862ea56cf8fc58bb2cdfa8725c40ffb80fe3a008eb7f2fc83" +checksum = "6fd4c6dcc3b0aea2f5c0b4b82c2b15fe39ddbc76041a310848f4706edf76bb31" [[package]] name = "byteorder" @@ -418,9 +457,9 @@ checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" [[package]] name = "bytes" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fca2be1d5c43812bae364ee3f30b3afcb7877cf59f4aeb94c66f313a41d2fac9" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "bzip2-sys" @@ -445,12 +484,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.7" +version = "1.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26a5c3fd7bfa1ce3897a3a3501d362b2d87b7f2583ebcb4a949ec25911025cbc" +checksum = "50d2eb3cd3d1bf4529e31c215ee6f93ec5a3d536d9f578f93d9d33ee19562932" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -505,9 +545,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.13" +version = "4.5.16" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fbb260a053428790f3de475e304ff84cdbc4face759ea7a3e64c1edd938a7fc" +checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" dependencies = [ "clap_builder", "clap_derive", @@ -515,9 +555,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.13" +version = "4.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64b17d7ea74e9f833c7dbf2cbe4fb12ff26783eda4782a8975b72f895c9b4d99" +checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" dependencies = [ "anstyle", "clap_lex", @@ -532,7 +572,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -541,6 +581,15 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +[[package]] +name = "cmake" +version = "0.1.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" +dependencies = [ + "cc", +] + [[package]] name = "color_quant" version = "1.1.0" @@ -643,6 +692,7 @@ dependencies = [ "conduit_macros", "const-str", "ctor", + "cyborgtime", "either", "figment", "hardened_malloc-rs", @@ -659,6 +709,7 @@ dependencies = [ "reqwest", "ring", "ruma", + "rustls 0.23.12", "sanitize-filename", "serde", "serde_json", @@ -695,7 +746,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -718,13 +769,14 @@ dependencies = [ "hyper-util", "log", "ruma", + "rustls 0.23.12", "sd-notify", "sentry", "sentry-tower", "sentry-tracing", "serde_json", "tokio", - "tower", + "tower 0.5.0", "tower-http", "tracing", ] @@ -739,7 +791,6 @@ dependencies = [ "conduit_core", "conduit_database", "const-str", - "cyborgtime", "futures-util", "hickory-resolver", "http", @@ -825,9 +876,9 @@ checksum = "6051f239ecec86fde3410901ab7860d458d160371533842974fc61f96d15879b" [[package]] name = "coolor" -version = "0.9.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e93977247fb916abeee1ff8c6594c9b421fd9c26c9b720a3944acb2a7de27b" +checksum = "691defa50318376447a73ced869862baecfab35f6aabaa91a4cd726b315bfe1a" dependencies = [ "crossterm", ] @@ -844,15 +895,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" dependencies = [ "libc", ] @@ -868,9 +919,9 @@ dependencies = [ [[package]] name = "crokey" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b015414137feca6f4137550d60d233b580a09307e2de329412b8e8c661b6dfab" +checksum = "520e83558f4c008ac06fa6a86e5c1d4357be6f994cce7434463ebcdaadf47bb1" dependencies = [ "crokey-proc_macros", "crossterm", @@ -881,9 +932,9 @@ dependencies = [ [[package]] name = "crokey-proc_macros" -version = "1.0.1" +version = "1.1.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6085c6385034f0847d2b3120547a88da1e5e35727970a70f32b555eb3c47051c" +checksum = "370956e708a1ce65fe4ac5bb7185791e0ece7485087f17736d54a23a0895049f" dependencies = [ "crossterm", "proc-macro2", @@ -950,16 +1001,16 @@ checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crossterm" -version = "0.27.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" +checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ "bitflags 2.6.0", "crossterm_winapi", "futures-core", - "libc", - "mio 0.8.11", + "mio", "parking_lot", + "rustix", "signal-hook", "signal-hook-mio", "winapi", @@ -991,7 +1042,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" dependencies = [ "quote", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -1018,7 +1069,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -1079,6 +1130,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "ed25519" version = "2.2.3" @@ -1122,7 +1179,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -1131,6 +1188,16 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +[[package]] +name = "errno" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "fdeflate" version = "0.3.4" @@ -1174,12 +1241,12 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.30" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +checksum = "9c0596c1eac1f9e04ed902702e9878208b336edc9d6fddc8a48387349bab3666" dependencies = [ "crc32fast", - "miniz_oxide", + "miniz_oxide 0.8.0", ] [[package]] @@ -1207,6 +1274,12 @@ dependencies = [ "thiserror", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "futf" version = "0.1.5" @@ -1258,7 +1331,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -1337,9 +1410,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", "bytes", @@ -1347,7 +1420,7 @@ dependencies = [ "futures-core", 
"futures-sink", "http", - "indexmap 2.3.0", + "indexmap 2.4.0", "slab", "tokio", "tokio-util", @@ -1487,6 +1560,15 @@ dependencies = [ "digest", ] +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys 0.52.0", +] + [[package]] name = "hostname" version = "0.3.1" @@ -1520,7 +1602,7 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -1619,7 +1701,7 @@ dependencies = [ "rustls-native-certs", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.0", + "tokio-rustls", "tower-service", "webpki-roots", ] @@ -1639,9 +1721,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956" +checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" dependencies = [ "bytes", "futures-channel", @@ -1652,7 +1734,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower", + "tower 0.4.13", "tower-service", "tracing", ] @@ -1716,9 +1798,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0" +checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" dependencies = [ "equivalent", "hashbrown 0.14.5", @@ -1760,7 +1842,7 @@ dependencies = [ "socket2", "widestring", "windows-sys 0.48.0", - "winreg 0.50.0", + "winreg", ] [[package]] @@ -1804,9 +1886,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] @@ -1866,9 +1948,9 @@ dependencies = [ [[package]] name = "lazy-regex" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "576c8060ecfdf2e56995cf3274b4f2d71fa5e4fa3607c1c0b63c10180ee58741" +checksum = "8d8e41c97e6bc7ecb552016274b99fbb5d035e8de288c582d9b933af6677bfda" dependencies = [ "lazy-regex-proc_macros", "once_cell", @@ -1877,14 +1959,14 @@ dependencies = [ [[package]] name = "lazy-regex-proc_macros" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9efb9e65d4503df81c615dc33ff07042a9408ac7f26b45abee25566f7fbfd12c" +checksum = "76e1d8b05d672c53cb9c7b920bbba8783845ae4f0b076e02a3db1d02c81b4163" dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -1901,9 +1983,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.155" +version = "0.2.158" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" [[package]] name = "libloading" @@ -1917,9 +1999,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.18" +version = "1.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" +checksum = "fdc53a7799a7496ebc9fd29f31f7df80e83c9bda5299768af5f9e59eeea74647" dependencies = [ "cc", "pkg-config", @@ -1932,6 +2014,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linux-raw-sys" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + [[package]] name = "lock_api" version = "0.4.12" @@ -2070,29 +2158,33 @@ dependencies = [ ] [[package]] -name = "mio" -version = "0.8.11" +name = "miniz_oxide" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ - "libc", - "log", - "wasi", - "windows-sys 0.48.0", + "adler2", ] [[package]] name = "mio" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ "hermit-abi", "libc", + "log", "wasi", "windows-sys 0.52.0", ] +[[package]] +name = "mirai-annotations" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" + [[package]] name = "new_debug_unreachable" version = "1.0.6" @@ -2228,9 +2320,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.2" +version = "0.36.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f203fa8daa7bb185f760ae12bd8e097f63d17041dcdcaf675ac54cdf863170e" +checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" dependencies = [ "memchr", ] @@ -2255,7 +2347,7 @@ checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a" dependencies = [ "futures-core", "futures-sink", - "indexmap 2.3.0", + "indexmap 2.4.0", "js-sys", "once_cell", "pin-project-lite", @@ -2405,7 +2497,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -2498,7 +2590,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -2539,7 +2631,7 @@ dependencies = [ "crc32fast", "fdeflate", "flate2", - "miniz_oxide", + "miniz_oxide 0.7.4", ] [[package]] @@ -2550,9 +2642,9 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.18" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee4364d9f3b902ef14fab8a1ddffb783a1cb6b4bba3bfc1fa3922732c7de97f" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ "zerocopy", ] @@ -2563,6 +2655,16 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" +[[package]] +name = "prettyplease" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" +dependencies = [ + "proc-macro2", + "syn 2.0.75", +] + [[package]] name = "proc-macro-crate" version = "3.1.0" @@ -2589,7 +2691,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", "version_check", "yansi", ] @@ -2614,7 +2716,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -2628,9 +2730,9 @@ dependencies = [ [[package]] name = "pulldown-cmark" -version = "0.11.0" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8746739f11d39ce5ad5c2520a9b75285310dbfe78c541ccf832d38615765aec0" +checksum = "cb4e75767fbc9d92b90e4d0c011f61358cde9513b31ef07ea3631b15ffc3b4fd" dependencies = [ "bitflags 2.6.0", "memchr", @@ -2658,16 +2760,17 @@ checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" [[package]] name = "quinn" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ceeeeabace7857413798eb1ffa1e9c905a9946a57d81fb69b4b71c4d8eb3ad" +checksum = "b22d8e7369034b9a7132bc2008cac12f2013c8132b45e0554e6e20e2617f2156" dependencies = [ "bytes", "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash", + "rustc-hash 2.0.0", "rustls 0.23.12", + "socket2", "thiserror", "tokio", "tracing", @@ -2675,14 +2778,14 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.3" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddf517c03a109db8100448a4be38d498df8a210a99fe0e1b9eaf39e78c640efe" +checksum = "ba92fb39ec7ad06ca2582c0ca834dfeadcaf06ddfc8e635c80aa7e1c05315fdd" dependencies = [ "bytes", "rand", "ring", - "rustc-hash", + "rustc-hash 2.0.0", "rustls 0.23.12", "slab", "thiserror", @@ -2699,14 +2802,15 @@ dependencies = [ "libc", "once_cell", "socket2", + "tracing", "windows-sys 0.52.0", ] [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -2752,9 +2856,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.5" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", @@ -2796,9 +2900,9 @@ checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = "reqwest" -version = "0.12.5" +version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" dependencies = [ "async-compression", "base64 0.22.1", @@ -2831,7 +2935,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tokio-rustls 0.26.0", + "tokio-rustls", "tokio-socks", "tokio-util", "tower-service", @@ -2840,7 +2944,7 @@ dependencies = [ "wasm-bindgen-futures", "web-sys", "webpki-roots", - "winreg 0.52.0", + "windows-registry", ] [[package]] @@ -2871,7 +2975,7 @@ dependencies = [ [[package]] name = 
"ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c76e2873c1593a3308d4ba3e0e4a1db65acf8536#c76e2873c1593a3308d4ba3e0e4a1db65acf8536" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1de0f493e8aab7e65ea78e3a079a3de10167c777#1de0f493e8aab7e65ea78e3a079a3de10167c777" dependencies = [ "assign", "js_int", @@ -2893,7 +2997,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c76e2873c1593a3308d4ba3e0e4a1db65acf8536#c76e2873c1593a3308d4ba3e0e4a1db65acf8536" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1de0f493e8aab7e65ea78e3a079a3de10167c777#1de0f493e8aab7e65ea78e3a079a3de10167c777" dependencies = [ "js_int", "ruma-common", @@ -2905,7 +3009,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c76e2873c1593a3308d4ba3e0e4a1db65acf8536#c76e2873c1593a3308d4ba3e0e4a1db65acf8536" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1de0f493e8aab7e65ea78e3a079a3de10167c777#1de0f493e8aab7e65ea78e3a079a3de10167c777" dependencies = [ "as_variant", "assign", @@ -2928,14 +3032,14 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c76e2873c1593a3308d4ba3e0e4a1db65acf8536#c76e2873c1593a3308d4ba3e0e4a1db65acf8536" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1de0f493e8aab7e65ea78e3a079a3de10167c777#1de0f493e8aab7e65ea78e3a079a3de10167c777" dependencies = [ "as_variant", "base64 0.22.1", "bytes", "form_urlencoded", "http", - "indexmap 2.3.0", + "indexmap 2.4.0", "js_int", "konst", "percent-encoding", @@ -2958,10 +3062,10 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c76e2873c1593a3308d4ba3e0e4a1db65acf8536#c76e2873c1593a3308d4ba3e0e4a1db65acf8536" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1de0f493e8aab7e65ea78e3a079a3de10167c777#1de0f493e8aab7e65ea78e3a079a3de10167c777" dependencies = [ "as_variant", - "indexmap 2.3.0", + "indexmap 2.4.0", "js_int", "js_option", "percent-encoding", @@ -2975,15 +3079,22 @@ dependencies = [ "thiserror", "tracing", "url", + "web-time 1.1.0", "wildmatch", ] [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c76e2873c1593a3308d4ba3e0e4a1db65acf8536#c76e2873c1593a3308d4ba3e0e4a1db65acf8536" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1de0f493e8aab7e65ea78e3a079a3de10167c777#1de0f493e8aab7e65ea78e3a079a3de10167c777" dependencies = [ + "bytes", + "http", + "httparse", "js_int", + "memchr", + "mime", + "rand", "ruma-common", "ruma-events", "serde", @@ -2993,7 +3104,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c76e2873c1593a3308d4ba3e0e4a1db65acf8536#c76e2873c1593a3308d4ba3e0e4a1db65acf8536" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1de0f493e8aab7e65ea78e3a079a3de10167c777#1de0f493e8aab7e65ea78e3a079a3de10167c777" dependencies = [ "js_int", "thiserror", @@ -3002,7 +3113,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c76e2873c1593a3308d4ba3e0e4a1db65acf8536#c76e2873c1593a3308d4ba3e0e4a1db65acf8536" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=1de0f493e8aab7e65ea78e3a079a3de10167c777#1de0f493e8aab7e65ea78e3a079a3de10167c777" dependencies = [ "js_int", "ruma-common", @@ -3012,7 +3123,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c76e2873c1593a3308d4ba3e0e4a1db65acf8536#c76e2873c1593a3308d4ba3e0e4a1db65acf8536" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1de0f493e8aab7e65ea78e3a079a3de10167c777#1de0f493e8aab7e65ea78e3a079a3de10167c777" dependencies = [ "once_cell", "proc-macro-crate", @@ -3020,14 +3131,14 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.72", + "syn 2.0.75", "toml", ] [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c76e2873c1593a3308d4ba3e0e4a1db65acf8536#c76e2873c1593a3308d4ba3e0e4a1db65acf8536" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1de0f493e8aab7e65ea78e3a079a3de10167c777#1de0f493e8aab7e65ea78e3a079a3de10167c777" dependencies = [ "js_int", "ruma-common", @@ -3039,7 +3150,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c76e2873c1593a3308d4ba3e0e4a1db65acf8536#c76e2873c1593a3308d4ba3e0e4a1db65acf8536" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1de0f493e8aab7e65ea78e3a079a3de10167c777#1de0f493e8aab7e65ea78e3a079a3de10167c777" dependencies = [ "headers", "http", @@ -3052,7 +3163,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c76e2873c1593a3308d4ba3e0e4a1db65acf8536#c76e2873c1593a3308d4ba3e0e4a1db65acf8536" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1de0f493e8aab7e65ea78e3a079a3de10167c777#1de0f493e8aab7e65ea78e3a079a3de10167c777" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3068,7 +3179,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=c76e2873c1593a3308d4ba3e0e4a1db65acf8536#c76e2873c1593a3308d4ba3e0e4a1db65acf8536" +source = "git+https://github.com/girlbossceo/ruwuma?rev=1de0f493e8aab7e65ea78e3a079a3de10167c777#1de0f493e8aab7e65ea78e3a079a3de10167c777" dependencies = [ "itertools 0.12.1", "js_int", @@ -3082,8 +3193,8 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" -version = "0.24.0+9.4.0" -source = "git+https://github.com/zaidoon1/rust-rocksdb?rev=4056a3b0f823013fec49f6d0b3e5698856e6476a#4056a3b0f823013fec49f6d0b3e5698856e6476a" +version = "0.25.0+9.5.2" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=5383ca8173299066b516406e3a2cf945ead891cb#5383ca8173299066b516406e3a2cf945ead891cb" dependencies = [ "bindgen", "bzip2-sys", @@ -3099,8 +3210,8 @@ dependencies = [ [[package]] name = "rust-rocksdb" -version = "0.28.0" -source = "git+https://github.com/zaidoon1/rust-rocksdb?rev=4056a3b0f823013fec49f6d0b3e5698856e6476a#4056a3b0f823013fec49f6d0b3e5698856e6476a" +version = "0.29.0" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=5383ca8173299066b516406e3a2cf945ead891cb#5383ca8173299066b516406e3a2cf945ead891cb" dependencies = [ "libc", "rust-librocksdb-sys", @@ -3126,6 +3237,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hash" +version = "2.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" + [[package]] name = "rustc_version" version = "0.4.0" @@ -3136,15 +3253,16 @@ dependencies = [ ] [[package]] -name = "rustls" -version = "0.21.12" +name = "rustix" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "log", - "ring", - "rustls-webpki 0.101.7", - "sct", + "bitflags 2.6.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", ] [[package]] @@ -3156,7 +3274,7 @@ dependencies = [ "log", "ring", "rustls-pki-types", - "rustls-webpki 0.102.6", + "rustls-webpki", "subtle", "zeroize", ] @@ -3167,20 +3285,21 @@ version = "0.23.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" dependencies = [ + "aws-lc-rs", "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.6", + "rustls-webpki", "subtle", "zeroize", ] [[package]] name = "rustls-native-certs" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a88d6d420651b496bdd98684116959239430022a115c1240e6c3993be0b15fba" +checksum = "04182dffc9091a404e0fc069ea5cd60e5b866c3adf881eff99a32d048242dffa" dependencies = [ "openssl-probe", "rustls-pemfile", @@ -3191,9 +3310,9 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.2" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" dependencies = [ "base64 0.22.1", "rustls-pki-types", @@ -3201,19 +3320,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" - -[[package]] -name = "rustls-webpki" -version = "0.101.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] name = "rustls-webpki" @@ -3221,6 +3330,7 @@ version = "0.102.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -3234,8 +3344,8 @@ checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "rustyline-async" -version = "0.4.2" -source = "git+https://github.com/girlbossceo/rustyline-async?rev=de26100b0db03e419a3d8e1dd26895d170d1fe50#de26100b0db03e419a3d8e1dd26895d170d1fe50" +version = "0.4.3" +source = "git+https://github.com/girlbossceo/rustyline-async?rev=9654cc84e19241f6e19021eb8e677892656f5071#9654cc84e19241f6e19021eb8e677892656f5071" dependencies = [ "crossterm", "futures-channel", @@ -3278,16 +3388,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" 
-version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "sd-notify" version = "0.4.2" @@ -3460,22 +3560,22 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.204" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -3485,7 +3585,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de514ef58196f1fc96dcaef80fe6170a1ce6215df9687a93fe8300e773fefc5" dependencies = [ "form_urlencoded", - "indexmap 2.3.0", + "indexmap 2.4.0", "itoa", "ryu", "serde", @@ -3493,9 +3593,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.121" +version = "1.0.127" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ab380d7d9f22ef3f21ad3e6c1ebe8e4fc7a2000ccba2e4d71fc96f15b2cb609" +checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" dependencies = [ "itoa", "memchr", @@ -3550,7 +3650,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.4.0", "itoa", "ryu", "serde", @@ -3622,7 +3722,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" dependencies = [ "libc", - "mio 0.8.11", + "mio", "signal-hook", ] @@ -3769,9 +3869,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.72" +version = "2.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" +checksum = "f6af063034fc1935ede7be0122941bafa9bacb949334d090b77ca98b5817c7d9" dependencies = [ "proc-macro2", "quote", @@ -3789,6 +3889,9 @@ name = "sync_wrapper" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + "futures-core", +] [[package]] name = "tendril" @@ -3803,9 +3906,9 @@ dependencies = [ [[package]] name = "termimad" -version = "0.29.4" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eff854b510d076163c3b8628ebfe6bfbe6f01c3254b8b8e77cac7ad2d8c72b06" +checksum = "920e7c4671e79f3d9df269da9c8edf0dbc580044fd727d3594f7bfba5eb6107a" dependencies = [ "coolor", "crokey", @@ -3844,7 +3947,7 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -3881,9 +3984,8 @@ dependencies = [ [[package]] name = "tikv-jemalloc-ctl" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"619bfed27d807b54f7f776b9430d4f8060e66ee138a28632ca898584d462c31c" +version = "0.6.0" +source = "git+https://github.com/girlbossceo/jemallocator?rev=c32af15f3b440ae5e46c3404f78b19093bbd5294#c32af15f3b440ae5e46c3404f78b19093bbd5294" dependencies = [ "libc", "paste", @@ -3892,9 +3994,8 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.5.4+5.3.0-patched" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9402443cb8fd499b6f327e40565234ff34dbda27460c5b47db0db77443dd85d1" +version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" +source = "git+https://github.com/girlbossceo/jemallocator?rev=c32af15f3b440ae5e46c3404f78b19093bbd5294#c32af15f3b440ae5e46c3404f78b19093bbd5294" dependencies = [ "cc", "libc", @@ -3902,9 +4003,8 @@ dependencies = [ [[package]] name = "tikv-jemallocator" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "965fe0c26be5c56c94e38ba547249074803efd52adfb66de62107d95aab3eaca" +version = "0.6.0" +source = "git+https://github.com/girlbossceo/jemallocator?rev=c32af15f3b440ae5e46c3404f78b19093bbd5294#c32af15f3b440ae5e46c3404f78b19093bbd5294" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -3958,14 +4058,14 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.2" +version = "1.39.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" +checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" dependencies = [ "backtrace", "bytes", "libc", - "mio 1.0.1", + "mio", "pin-project-lite", "signal-hook-registry", "socket2", @@ -3982,7 +4082,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -3997,16 +4097,6 @@ dependencies = [ "tokio-stream", ] -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.12", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.26.0" @@ -4081,7 +4171,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.4.0", "toml_datetime", "winnow 0.5.40", ] @@ -4092,7 +4182,7 @@ version = "0.22.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.4.0", "serde", "serde_spanned", "toml_datetime", @@ -4123,7 +4213,7 @@ dependencies = [ "socket2", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -4149,6 +4239,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36b837f86b25d7c0d7988f00a54e74739be6477f2aac6201b8f429a7569991b7" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-http" version = "0.5.2" @@ -4166,7 +4270,7 @@ dependencies = [ "pin-project-lite", "tokio", "tokio-util", - "tower", + "tower 0.4.13", 
"tower-layer", "tower-service", "tracing", @@ -4174,15 +4278,15 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -4202,7 +4306,7 @@ source = "git+https://github.com/girlbossceo/tracing?rev=4d78a14a5e03f539b8c6b47 dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -4371,9 +4475,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.10.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72139d247e5f97a3eff96229a7ae85ead5328a39efe76f8bf5a06313d505b6ea" +checksum = "b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a" dependencies = [ "base64 0.22.1", "log", @@ -4453,34 +4557,35 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" dependencies = [ "cfg-if", "js-sys", @@ -4490,9 +4595,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4500,28 +4605,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" dependencies = [ "js-sys", "wasm-bindgen", @@ -4574,6 +4679,18 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53a85b86a771b1c87058196170769dd264f66c0782acf1ae6cc51bfd64b39082" +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix", +] + [[package]] name = "widestring" version = "1.1.0" @@ -4627,6 +4744,36 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -4794,16 +4941,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "winreg" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - [[package]] name = "xml5ever" version = "0.18.1" @@ -4823,9 +4960,9 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "zerocopy" -version = "0.6.6" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854e949ac82d619ee9a14c66a1b674ac730422372ccb759ce0c39cabcf2bf8e6" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", "zerocopy-derive", @@ -4833,13 +4970,13 @@ dependencies = [ [[package]] name = "zerocopy-derive" -version = "0.6.6" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "125139de3f6b9d625c39e2efdd73d41bdac468ccd556556440e322be0e1bbd91" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.72", + "syn 2.0.75", ] [[package]] @@ -4847,6 +4984,20 @@ name = "zeroize" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.75", +] [[package]] name = "zstd" @@ -4859,18 +5010,18 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.2.0" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa556e971e7b568dc775c136fc9de8c779b1c2fc3a63defaafadffdbd3181afa" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.12+zstd.1.5.6" +version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a4e40c320c3cb459d9a9ff6de98cff88f4751ee9275d140e2be94a2b74e4c13" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 4f808d28230742f34dc9ba109d3d00280077d9e9..e780b314a18356117148782064e4c72521856432 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ license = "Apache-2.0" # See also `rust-toolchain.toml` readme = "README.md" repository = "https://github.com/girlbossceo/conduwuit" -rust-version = "1.80.0" +rust-version = "1.80.1" version = "0.4.6" [workspace.metadata.crane] @@ -69,7 +69,7 @@ version = "0.8.5" # Used for the http request / response body type for Ruma endpoints used with reqwest [workspace.dependencies.bytes] -version = "1.6.1" +version = "1.7.1" [workspace.dependencies.http-body-util] version = "0.1.1" @@ -78,7 +78,7 @@ version = "0.1.1" version = "1.1.0" [workspace.dependencies.regex] -version = "1.10.4" +version = "1.10.6" [workspace.dependencies.axum] version = "0.7.5" @@ -99,15 +99,19 @@ default-features = false features = ["typed-header", "tracing"] [workspace.dependencies.axum-server] -version = "0.6.0" +version = "0.7.1" default-features = false features = ["tls-rustls"] +# to listen on both HTTP and HTTPS if listening on TLS dierctly from conduwuit for complement or sytest +[workspace.dependencies.axum-server-dual-protocol] +version = "0.7" + [workspace.dependencies.axum-client-ip] version = "0.6.0" [workspace.dependencies.tower] -version = "0.4.13" +version = "0.5.0" default-features = false features = ["util"] @@ -124,8 +128,11 @@ features = [ "catch-panic", ] +[workspace.dependencies.rustls] +version = "0.23.12" + [workspace.dependencies.reqwest] -version = "0.12.4" +version = "0.12.7" default-features = false features = [ "rustls-tls-native-roots", @@ -140,7 +147,7 @@ default-features = false features = ["rc"] [workspace.dependencies.serde_json] -version = "1.0.121" +version = "1.0.124" default-features = false features = ["raw_value"] @@ -193,7 +200,7 @@ default-features = false # used for conduit's CLI and admin room command parsing [workspace.dependencies.clap] -version = "4.5.9" +version = "4.5.15" default-features = false features = [ "std", @@ -307,7 +314,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "c76e2873c1593a3308d4ba3e0e4a1db65acf8536" +rev = "1de0f493e8aab7e65ea78e3a079a3de10167c777" features = [ "compat", "rand", @@ -347,10 +354,6 @@ features = [ "bzip2", ] -# to listen on both HTTP and HTTPS if listening on TLS dierctly from conduwuit for complement or sytest -[workspace.dependencies.axum-server-dual-protocol] -version = "0.6" - # optional SHA256 media keys feature [workspace.dependencies.sha2] 
 version = "0.10.8"
@@ -396,17 +399,19 @@ version = "0.34.0"
 version = "0.34.0"

 # jemalloc usage
-# locked to 0.5.4 due to static binary linking breakage
 [workspace.dependencies.tikv-jemalloc-sys]
-version = "=0.5.4"
+git = "https://github.com/girlbossceo/jemallocator"
+rev = "c32af15f3b440ae5e46c3404f78b19093bbd5294"
 default-features = false
-features = ["stats", "unprefixed_malloc_on_supported_platforms"]
+features = ["unprefixed_malloc_on_supported_platforms"]
 [workspace.dependencies.tikv-jemallocator]
-version = "=0.5.4"
+git = "https://github.com/girlbossceo/jemallocator"
+rev = "c32af15f3b440ae5e46c3404f78b19093bbd5294"
 default-features = false
-features = ["stats", "unprefixed_malloc_on_supported_platforms"]
+features = ["unprefixed_malloc_on_supported_platforms"]
 [workspace.dependencies.tikv-jemalloc-ctl]
-version = "=0.5.4"
+git = "https://github.com/girlbossceo/jemallocator"
+rev = "c32af15f3b440ae5e46c3404f78b19093bbd5294"
 default-features = false
 features = ["use_std"]

@@ -431,11 +436,11 @@ features = [
 ]

 [workspace.dependencies.rustyline-async]
-version = "0.4.2"
+version = "0.4.3"
 default-features = false

 [workspace.dependencies.termimad]
-version = "0.29.4"
+version = "0.30.0"
 default-features = false

 [workspace.dependencies.checked_ops]
@@ -473,17 +478,11 @@ rev = "4d78a14a5e03f539b8c6b475aefa08bb14e4de91"
 git = "https://github.com/girlbossceo/tracing"
 rev = "4d78a14a5e03f539b8c6b475aefa08bb14e4de91"

-# fixes hyper graceful shutdowns [https://github.com/programatik29/axum-server/issues/114]
-# https://github.com/girlbossceo/axum-server/commit/8e3368d899079818934e61cc9c839abcbbcada8a
-[patch.crates-io.axum-server]
-git = "https://github.com/girlbossceo/axum-server"
-rev = "8e3368d899079818934e61cc9c839abcbbcada8a"
-
 # adds a tab completion callback: https://github.com/girlbossceo/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50
 # adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b
 [patch.crates-io.rustyline-async]
 git = "https://github.com/girlbossceo/rustyline-async"
-rev = "de26100b0db03e419a3d8e1dd26895d170d1fe50"
+rev = "9654cc84e19241f6e19021eb8e677892656f5071"

 #
 # Our crates
diff --git a/conduwuit-example.toml b/conduwuit-example.toml
index fdf537832eb60b5e08ddd4cc8234f27d31c89872..a99aa05cfcf60b223d22b127a6acdea9d708ca40 100644
--- a/conduwuit-example.toml
+++ b/conduwuit-example.toml
@@ -224,9 +224,11 @@ registration_token = "change this token for something specific to your server"
 # No default.
 # forbidden_alias_names = []

-# List of forbidden server names that we will block all client room joins, incoming federated room directory requests, incoming federated invites for, and incoming federated joins. This check is applied on the room ID, room alias, sender server name, and sender user's server name.
-# Basically "global" ACLs. For our user (client) checks, admin users are allowed.
-# No default.
+# List of forbidden server names that we will block incoming AND outgoing federation with, and block client room joins / remote user invites.
+#
+# This check is applied on the room ID, room alias, sender server name, sender user's server name, inbound federation X-Matrix origin, and outbound federation handler.
+#
+# Basically "global" ACLs. No default.
 # forbidden_remote_server_names = []

 # List of forbidden server names that we will block all outgoing federated room directory requests for. Useful for preventing our users from wandering into bad servers or spaces.
@@ -388,10 +390,21 @@ allow_profile_lookup_federation_requests = true

 # Enable backward-compatibility with Conduit's media directory by creating symlinks of media. This
 # option is only necessary if you plan on using Conduit again. Otherwise setting this to false
-# reduces filesystem clutter and overhead for managing these symlinks in the directory.
+# reduces filesystem clutter and overhead for managing these symlinks in the directory. This is now
+# disabled by default. You may still return to upstream Conduit but you have to run Conduwuit at
+# least once with this set to true and allow the media_startup_check to take place before shutting
+# down to return to Conduit.
 #
-# Enabled by default.
-#media_compat_file_link = true
+# Disabled by default.
+#media_compat_file_link = false
+
+# Prunes missing media from the database as part of the media startup checks. This means if you
+# delete files from the media directory the corresponding entries will be removed from the
+# database. This is disabled by default because if the media directory is accidentally moved or
+# inaccessible the metadata entries in the database will be lost with sadness.
+#
+# Disabled by default.
+#prune_missing_media = false

 # Checks consistency of the media directory at startup:
 # 1. When `media_compat_file_link` is enbled, this check will upgrade media when switching back
diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml
index cf49e1adbe5bc1f75719167b331bb9082e3d0c74..cc87e99da0748b249e3d2b72fac06fd68e628323 100644
--- a/deps/rust-rocksdb/Cargo.toml
+++ b/deps/rust-rocksdb/Cargo.toml
@@ -26,8 +26,8 @@ serde1 = ["rust-rocksdb/serde1"]
 malloc-usable-size = ["rust-rocksdb/malloc-usable-size"]

 [dependencies.rust-rocksdb]
-git = "https://github.com/zaidoon1/rust-rocksdb"
-rev = "4056a3b0f823013fec49f6d0b3e5698856e6476a"
+git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1"
+rev = "5383ca8173299066b516406e3a2cf945ead891cb"
 #branch = "master"
 default-features = false

diff --git a/docs/development.md b/docs/development.md
index a23fe859dd99d2f7d98da9a9716993fd59649574..62a03e2fd771067ec1185bfe0e6c178ae5f3fae7 100644
--- a/docs/development.md
+++ b/docs/development.md
@@ -4,9 +4,19 @@ # Development
 it, you can safely ignore this section. If you plan on contributing, see the
 [contributor's guide](contributing.md).

+## List of forked dependencies
+During conduwuit development, we have had to fork some dependencies to support our use cases in some areas. This ranges from things that said upstream project won't accept for any reason, to faster-paced development (unresponsive or slow upstream), conduwuit-specific use cases, or a lack of time to upstream some things.
+
+- [ruma/ruma][1]: <https://github.com/girlbossceo/ruwuma> - various performance improvements, more features, faster-paced development, client/server interop hacks upstream won't accept, etc.
+- [facebook/rocksdb][2]: <https://github.com/girlbossceo/rocksdb> - liburing build fixes, GCC build fix, and logging callback C API for Rust tracing integration
+- [tikv/jemallocator][3]: <https://github.com/girlbossceo/jemallocator> - musl builds seem to be broken on upstream
+- [zyansheep/rustyline-async][4]: <https://github.com/girlbossceo/rustyline-async> - tab completion callback and `CTRL+\` signal quit event for CLI
+- [rust-rocksdb/rust-rocksdb][5]: <https://github.com/girlbossceo/rust-rocksdb-zaidoon1> - [`@zaidoon1`'s][8] fork has quicker updates, more up to date dependencies.
Our changes fix musl build issues, Rust part of the logging callback C API, removes unnecessary `gtest` include, and uses our RocksDB and jemallocator +- [tokio-rs/tracing][6]: <https://github.com/girlbossceo/tracing> - Implements `Clone` for `EnvFilter` to support dynamically changing tracing envfilter's alongside other logging/metrics things + ## Debugging with `tokio-console` -[`tokio-console`][1] can be a useful tool for debugging and profiling. To make +[`tokio-console`][7] can be a useful tool for debugging and profiling. To make a `tokio-console`-enabled build of conduwuit, enable the `tokio_console` feature, disable the default `release_max_log_level` feature, and set the `--cfg tokio_unstable` flag to enable experimental tokio APIs. A build might @@ -19,4 +29,11 @@ ## Debugging with `tokio-console` --features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console ``` -[1]: https://docs.rs/tokio-console/latest/tokio_console/ +[1]: https://github.com/ruma/ruma/ +[2]: https://github.com/facebook/rocksdb/ +[3]: https://github.com/tikv/jemallocator/ +[4]: https://github.com/zyansheep/rustyline-async/ +[5]: https://github.com/rust-rocksdb/rust-rocksdb/ +[6]: https://github.com/tokio-rs/tracing/ +[7]: https://docs.rs/tokio-console/latest/tokio_console/ +[8]: https://github.com/zaidoon1/ diff --git a/flake.lock b/flake.lock index 0688e3783ae25045670f84c5aa54bed1d79eab3e..c492e3bea9df435fe4df782457d005702cd1c990 100644 --- a/flake.lock +++ b/flake.lock @@ -9,11 +9,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1720542474, - "narHash": "sha256-aKjJ/4l2I9+wNGTaOGRsuS3M1+IoTibqgEMPDikXm04=", + "lastModified": 1724226964, + "narHash": "sha256-cltFh4su2vcFidxKp7LuEgX3ZGLfPy0DCdrQZ/QTe68=", "owner": "zhaofengli", "repo": "attic", - "rev": "6139576a3ce6bb992e0f6c3022528ec233e45f00", + "rev": "6d9aeaef0a067d664cb11bb7704f7ec373d47fb2", "type": "github" }, "original": { @@ -27,15 +27,15 @@ "inputs": { "devenv": "devenv", "flake-compat": "flake-compat_3", - "nixpkgs": "nixpkgs_3", - "pre-commit-hooks": "pre-commit-hooks" + "git-hooks": "git-hooks", + "nixpkgs": "nixpkgs_3" }, "locked": { - "lastModified": 1719923519, - "narHash": "sha256-7Rhljj2fsklFRsu+eq7N683Z9qukmreMEj5C1GqCrSA=", + "lastModified": 1724232775, + "narHash": "sha256-6u2DycIEgrgNYlLxyGqdFVmBNiKIitnQKJ1pbRP5oko=", "owner": "cachix", "repo": "cachix", - "rev": "4e9e71f78b9500fa6210cf1eaa4d75bdbab777c3", + "rev": "03b6cb3f953097bff378fb8b9ea094bd091a4ec7", "type": "github" }, "original": { @@ -103,11 +103,11 @@ ] }, "locked": { - "lastModified": 1717025063, - "narHash": "sha256-dIubLa56W9sNNz0e8jGxrX3CAkPXsq7snuFA/Ie6dn8=", + "lastModified": 1722960479, + "narHash": "sha256-NhCkJJQhD5GUib8zN9JrmYGMwt4lCRp6ZVNzIiYCl0Y=", "owner": "ipetkov", "repo": "crane", - "rev": "480dff0be03dac0e51a8dfc26e882b0d123a450e", + "rev": "4c6c77920b8d44cd6660c1621dea6b3fc4b4c4f4", "type": "github" }, "original": { @@ -123,11 +123,11 @@ ] }, "locked": { - "lastModified": 1720546058, - "narHash": "sha256-iU2yVaPIZm5vMGdlT0+57vdB/aPq/V5oZFBRwYw+HBM=", + "lastModified": 1724006180, + "narHash": "sha256-PVxPj0Ga2fMYMtcT9ARCthF+4U71YkOT7ZjgD/vf1Aw=", "owner": "ipetkov", "repo": "crane", - "rev": "2d83156f23c43598cf44e152c33a59d3892f8b29", + "rev": "7ce92819802bc583b7e82ebc08013a530f22209f", "type": "github" }, "original": { @@ -151,15 +151,15 @@ ], "pre-commit-hooks": [ "cachix", - "pre-commit-hooks" + "git-hooks" ] }, "locked": { - "lastModified": 1719759336, - "narHash": 
"sha256-3a34VL/QnHprl5gMy9xlx6d8J+iNp+W88Ex8smkgH9M=", + "lastModified": 1723156315, + "narHash": "sha256-0JrfahRMJ37Rf1i0iOOn+8Z4CLvbcGNwa2ChOAVrp/8=", "owner": "cachix", "repo": "devenv", - "rev": "bb32aa986f2f695385e54428d0eaf7d05b31466e", + "rev": "ff5eb4f2accbcda963af67f1a1159e3f6c7f5f91", "type": "github" }, "original": { @@ -209,11 +209,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1720852044, - "narHash": "sha256-3NBYz8VuXuKU+8ONd9NFafCNjPEGHIZQ2Mdoam1a4mY=", + "lastModified": 1724221791, + "narHash": "sha256-mKX67QPnUybOopVph/LhOV1G/H4EvPxDIfSmbufrVdA=", "owner": "nix-community", "repo": "fenix", - "rev": "5087b12a595ee73131a944d922f24d81dae05725", + "rev": "e88b38a5a3834e039d413a88f8150a75ef6453ef", "type": "github" }, "original": { @@ -226,11 +226,11 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", "owner": "edolstra", "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", "type": "github" }, "original": { @@ -272,22 +272,6 @@ } }, "flake-compat_4": { - "flake": false, - "locked": { - "lastModified": 1696426674, - "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-compat_5": { "flake": false, "locked": { "lastModified": 1696426674, @@ -305,12 +289,15 @@ } }, "flake-utils": { + "inputs": { + "systems": "systems" + }, "locked": { - "lastModified": 1667395993, - "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", "owner": "numtide", "repo": "flake-utils", - "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", "type": "github" }, "original": { @@ -321,7 +308,7 @@ }, "flake-utils_2": { "inputs": { - "systems": "systems" + "systems": "systems_2" }, "locked": { "lastModified": 1689068808, @@ -339,7 +326,7 @@ }, "flake-utils_3": { "inputs": { - "systems": "systems_2" + "systems": "systems_3" }, "locked": { "lastModified": 1710146030, @@ -356,11 +343,38 @@ "type": "github" } }, + "git-hooks": { + "inputs": { + "flake-compat": [ + "cachix", + "flake-compat" + ], + "gitignore": "gitignore", + "nixpkgs": [ + "cachix", + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable_2" + }, + "locked": { + "lastModified": 1723202784, + "narHash": "sha256-qbhjc/NEGaDbyy0ucycubq4N3//gDFFH3DOmp1D3u1Q=", + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "c7012d0c18567c889b948781bc74a501e92275d1", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "git-hooks.nix", + "type": "github" + } + }, "gitignore": { "inputs": { "nixpkgs": [ "cachix", - "pre-commit-hooks", + "git-hooks", "nixpkgs" ] }, @@ -381,11 +395,11 @@ "liburing": { "flake": false, "locked": { - "lastModified": 1720798442, - "narHash": "sha256-gtPppAoksMLW4GuruQ36nf4EAqIA1Bs6V9Xcx8dBxrQ=", + "lastModified": 1724199144, + "narHash": "sha256-MVjnwO6EbKzzSrU51dSseLarZ1fRp+6SagAf/nE/XZU=", "owner": "axboe", "repo": "liburing", - "rev": "1d674f83b7d0f07553ac44d99a401b05853d9dbe", + "rev": 
"2d4e799017d64cd2f8304503eef9064931bb3fbd", "type": "github" }, "original": { @@ -494,11 +508,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1711401922, - "narHash": "sha256-QoQqXoj8ClGo0sqD/qWKFWezgEwUL0SUh37/vY2jNhc=", + "lastModified": 1723827930, + "narHash": "sha256-EU+W5F6y2CVNxGrGIMpY7nSVYq72WRChYxF4zpjx0y4=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "07262b18b97000d16a4bdb003418bd2fb067a932", + "rev": "d4a7a4d0e066278bfb0d77bd2a7adde1c0ec9e3d", "type": "github" }, "original": { @@ -542,11 +556,11 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1711460390, - "narHash": "sha256-akSgjDZL6pVHEfSE6sz1DNSXuYX6hq+P/1Z5IoYWs7E=", + "lastModified": 1720535198, + "narHash": "sha256-zwVvxrdIzralnSbcpghA92tWu2DV2lwv89xZc8MTrbg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "44733514b72e732bd49f5511bd0203dea9b9a434", + "rev": "205fd4226592cc83fd4c0885a3e4c9c400efabb5", "type": "github" }, "original": { @@ -558,16 +572,16 @@ }, "nixpkgs-stable_2": { "locked": { - "lastModified": 1718811006, - "narHash": "sha256-0Y8IrGhRmBmT7HHXlxxepg2t8j1X90++qRN3lukGaIk=", + "lastModified": 1720386169, + "narHash": "sha256-NGKVY4PjzwAa4upkGtAMz1npHGoRzWotlSnVlqI40mo=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "03d771e513ce90147b65fe922d87d3a0356fc125", + "rev": "194846768975b7ad2c4988bdb82572c00222c0d7", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-23.11", + "ref": "nixos-24.05", "repo": "nixpkgs", "type": "github" } @@ -590,11 +604,11 @@ }, "nixpkgs_3": { "locked": { - "lastModified": 1719848872, - "narHash": "sha256-H3+EC5cYuq+gQW8y0lSrrDZfH71LB4DAf+TDFyvwCNA=", + "lastModified": 1722813957, + "narHash": "sha256-IAoYyYnED7P8zrBFMnmp7ydaJfwTnwcnqxUElC1I26Y=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "00d80d13810dbfea8ab4ed1009b09100cca86ba8", + "rev": "cb9a96f23c491c081b38eab96d22fa958043c9fa", "type": "github" }, "original": { @@ -606,11 +620,11 @@ }, "nixpkgs_4": { "locked": { - "lastModified": 1720768451, - "narHash": "sha256-EYekUHJE2gxeo2pM/zM9Wlqw1Uw2XTJXOSAO79ksc4Y=", + "lastModified": 1724271409, + "narHash": "sha256-z4nw9HxkaXEn+5OT8ljLVL2oataHvAzUQ1LEi8Fp+SY=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "7e7c39ea35c5cdd002cd4588b03a3fb9ece6fad9", + "rev": "36a9aeaaa17a2d4348498275f9fe530cd4f9e519", "type": "github" }, "original": { @@ -646,43 +660,19 @@ "type": "github" } }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": "flake-compat_4", - "gitignore": "gitignore", - "nixpkgs": [ - "cachix", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable_2" - }, - "locked": { - "lastModified": 1719259945, - "narHash": "sha256-F1h+XIsGKT9TkGO3omxDLEb/9jOOsI6NnzsXFsZhry4=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "0ff4381bbb8f7a52ca4a851660fc7a437a4c6e07", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "type": "github" - } - }, "rocksdb": { "flake": false, "locked": { - "lastModified": 1720900786, - "narHash": "sha256-Vta9Um/RRuWwZ46BjXftV06iWLm/j/9MX39emXUvSAY=", + "lastModified": 1724285323, + "narHash": "sha256-k60kreKQ0v+bQ16yBd2SfLYpuNjMw2qoRmZL/S3k6CU=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "911f4243e69c2e320a7a209bf1f5f3ff5f825495", + "rev": "5a67ad7ce46328578ee5587fb0c23faa03d14e67", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.4.0", + "ref": "v9.5.2", "repo": "rocksdb", "type": "github" } @@ -694,7 +684,7 @@ "complement": "complement", "crane": "crane_2", "fenix": "fenix", - "flake-compat": "flake-compat_5", + "flake-compat": 
"flake-compat_4", "flake-utils": "flake-utils_3", "liburing": "liburing", "nix-filter": "nix-filter", @@ -705,11 +695,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1720717809, - "narHash": "sha256-6I+fm+nTLF/iaj7ffiFGlSY7POmubwUaPA/Wq0Bm53M=", + "lastModified": 1724153119, + "narHash": "sha256-WxpvDJDttkINkXmUA/W5o11lwLPYhATAgu0QUAacZ2g=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "ffbc5ad993d5cd2f3b8bcf9a511165470944ab91", + "rev": "3723e5910c14f0ffbd13de474b8a8fcc74db04ce", "type": "github" }, "original": { @@ -748,6 +738,21 @@ "repo": "default", "type": "github" } + }, + "systems_3": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } } }, "root": "root", diff --git a/flake.nix b/flake.nix index 84ec86a2f8b7d2ffedddaafa965322ee78b9a12b..a507ed81e0f01782da2c4c2cd3947330bd309620 100644 --- a/flake.nix +++ b/flake.nix @@ -9,14 +9,17 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.4.0"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.5.2"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; outputs = inputs: inputs.flake-utils.lib.eachDefaultSystem (system: let - pkgsHost = inputs.nixpkgs.legacyPackages.${system}; + pkgsHost = import inputs.nixpkgs{ + inherit system; + config.permittedInsecurePackages = [ "olm-3.2.16" ]; + }; pkgsHostStatic = pkgsHost.pkgsStatic; # The Rust toolchain to use @@ -24,7 +27,7 @@ file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` - sha256 = "sha256-6eN/GKzjVSjEhGO9FhWObkRFaE1Jf+uqMSdQnb8lcB4="; + sha256 = "sha256-3jVIIf5XPnUU1CRaTyAiO0XHVbJl12MSx3eucTXCjtE="; }; mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index 66d60b7765f0e68c83fa5f07b88da87f54032198..47b984324eeeeea30daf12281ac38384d247a21b 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -57,7 +57,9 @@ rust-jemalloc-sys' = (rust-jemalloc-sys.override { # we dont need cxx/C++ integration [ "--disable-cxx" ] ++ # tikv-jemalloc-sys/profiling feature - lib.optional (featureEnabled "jemalloc_prof") "--enable-prof"; + lib.optional (featureEnabled "jemalloc_prof") "--enable-prof" ++ + # tikv-jemalloc-sys/stats feature + (if (featureEnabled "jemalloc_stats") then [ "--enable-stats" ] else [ "--disable-stats" ]); }); buildDepsOnlyEnv = diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 098a99e64be7c278bd4b2150dd4ce993abf41058..a21340c22f363f54bd44383ba4bac2ac7945713f 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -11,7 +11,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. 
[toolchain] -channel = "1.80.0" +channel = "1.80.1" components = [ # For rust-analyzer "rust-src", diff --git a/src/admin/command.rs b/src/admin/command.rs index a26564fcd495045d685df9a61b27c45f8049f8a7..c594736d25b4a2f64776563273fc0f4c87c38bb7 100644 --- a/src/admin/command.rs +++ b/src/admin/command.rs @@ -1,9 +1,11 @@ use std::time::SystemTime; use conduit_service::Services; +use ruma::EventId; pub(crate) struct Command<'a> { pub(crate) services: &'a Services, pub(crate) body: &'a [&'a str], pub(crate) timer: SystemTime, + pub(crate) reply_id: Option<&'a EventId>, } diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index 1f51a35e4f08a7dbf6178952c2094f067d1a60ff..20ddbf2f6bfccccfc6f0aa2bec6b687efa8abced 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -195,5 +195,6 @@ pub(super) enum DebugCommand { /// - Developer test stubs #[command(subcommand)] #[allow(non_snake_case)] + #[clap(hide(true))] Tester(TesterCommand), } diff --git a/src/admin/debug/tester.rs b/src/admin/debug/tester.rs index af4ea2dca99cfd82e4f8123b8549491b4e076b03..c11f893e25e8969da91e46f782f97c3c78282e8c 100644 --- a/src/admin/debug/tester.rs +++ b/src/admin/debug/tester.rs @@ -1,3 +1,4 @@ +use conduit::Err; use ruma::events::room::message::RoomMessageEventContent; use crate::{admin_command, admin_command_dispatch, Result}; @@ -5,13 +6,28 @@ #[admin_command_dispatch] #[derive(Debug, clap::Subcommand)] pub(crate) enum TesterCommand { + Panic, + Failure, Tester, Timer, } +#[rustfmt::skip] +#[admin_command] +async fn panic(&self) -> Result<RoomMessageEventContent> { + + panic!("panicked") +} + +#[rustfmt::skip] +#[admin_command] +async fn failure(&self) -> Result<RoomMessageEventContent> { + + Err!("failed") +} + #[inline(never)] #[rustfmt::skip] -#[allow(unused_variables)] #[admin_command] async fn tester(&self) -> Result<RoomMessageEventContent> { diff --git a/src/admin/media/commands.rs b/src/admin/media/commands.rs index 7906d951b7e57ba452df44e4a755799eedfbfdf7..535ad31aa3e869c4ce0c7cbf0fca64e952d7f322 100644 --- a/src/admin/media/commands.rs +++ b/src/admin/media/commands.rs @@ -1,7 +1,12 @@ -use conduit::{debug, info, Result}; -use ruma::{events::room::message::RoomMessageEventContent, EventId, MxcUri}; +use std::time::Duration; -use crate::admin_command; +use conduit::{debug, info, trace, utils::time::parse_timepoint_ago, warn, Result}; +use conduit_service::media::Dim; +use ruma::{ + events::room::message::RoomMessageEventContent, EventId, Mxc, MxcUri, OwnedMxcUri, OwnedServerName, ServerName, +}; + +use crate::{admin_command, utils::parse_local_user_id}; #[admin_command] pub(super) async fn delete( @@ -15,7 +20,10 @@ pub(super) async fn delete( if let Some(mxc) = mxc { debug!("Got MXC URL: {mxc}"); - self.services.media.delete(mxc.as_ref()).await?; + self.services + .media + .delete(&mxc.as_str().try_into()?) + .await?; return Ok(RoomMessageEventContent::text_plain( "Deleted the MXC from our database and on our filesystem.", @@ -123,7 +131,10 @@ pub(super) async fn delete( } for mxc_url in mxc_urls { - self.services.media.delete(&mxc_url).await?; + self.services + .media + .delete(&mxc_url.as_str().try_into()?) 
+ .await?; mxc_deletion_count = mxc_deletion_count.saturating_add(1); } @@ -157,7 +168,7 @@ pub(super) async fn delete_list(&self) -> Result<RoomMessageEventContent> { for mxc in mxc_list { debug!("Deleting MXC {mxc} in bulk"); - self.services.media.delete(mxc).await?; + self.services.media.delete(&mxc.try_into()?).await?; mxc_deletion_count = mxc_deletion_count .checked_add(1) .expect("mxc_deletion_count should not get this high"); @@ -170,6 +181,7 @@ pub(super) async fn delete_list(&self) -> Result<RoomMessageEventContent> { #[admin_command] pub(super) async fn delete_past_remote_media(&self, duration: String, force: bool) -> Result<RoomMessageEventContent> { + let duration = parse_timepoint_ago(&duration)?; let deleted_count = self .services .media @@ -180,3 +192,123 @@ pub(super) async fn delete_past_remote_media(&self, duration: String, force: boo "Deleted {deleted_count} total files.", ))) } + +#[admin_command] +pub(super) async fn delete_all_from_user(&self, username: String, force: bool) -> Result<RoomMessageEventContent> { + let user_id = parse_local_user_id(self.services, &username)?; + + let deleted_count = self + .services + .media + .delete_from_user(&user_id, force) + .await?; + + Ok(RoomMessageEventContent::text_plain(format!( + "Deleted {deleted_count} total files.", + ))) +} + +#[admin_command] +pub(super) async fn delete_all_from_server( + &self, server_name: Box<ServerName>, force: bool, +) -> Result<RoomMessageEventContent> { + if server_name == self.services.globals.server_name() { + return Ok(RoomMessageEventContent::text_plain("This command only works for remote media.")); + } + + let Ok(all_mxcs) = self.services.media.get_all_mxcs().await else { + return Ok(RoomMessageEventContent::text_plain("Failed to get MXC URIs from our database")); + }; + + let mut deleted_count: usize = 0; + + for mxc in all_mxcs { + let mxc_server_name = match mxc.server_name() { + Ok(server_name) => server_name, + Err(e) => { + if force { + warn!("Failed to parse MXC {mxc} server name from database, ignoring error and skipping: {e}"); + continue; + } + + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to parse MXC {mxc} server name from database: {e}", + ))); + }, + }; + + if mxc_server_name != server_name || self.services.globals.server_is_ours(mxc_server_name) { + trace!("skipping MXC URI {mxc}"); + continue; + } + + let mxc: Mxc<'_> = mxc.as_str().try_into()?; + + match self.services.media.delete(&mxc).await { + Ok(()) => { + deleted_count = deleted_count.saturating_add(1); + }, + Err(e) => { + if force { + warn!("Failed to delete {mxc}, ignoring error and skipping: {e}"); + continue; + } + + return Ok(RoomMessageEventContent::text_plain(format!("Failed to delete MXC {mxc}: {e}"))); + }, + } + } + + Ok(RoomMessageEventContent::text_plain(format!( + "Deleted {deleted_count} total files.", + ))) +} + +#[admin_command] +pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result<RoomMessageEventContent> { + let mxc: Mxc<'_> = mxc.as_str().try_into()?; + let metadata = self.services.media.get_metadata(&mxc); + + Ok(RoomMessageEventContent::notice_markdown(format!("```\n{metadata:#?}\n```"))) +} + +#[admin_command] +pub(super) async fn get_remote_file( + &self, mxc: OwnedMxcUri, server: Option<OwnedServerName>, timeout: u32, +) -> Result<RoomMessageEventContent> { + let mxc: Mxc<'_> = mxc.as_str().try_into()?; + let timeout = Duration::from_millis(timeout.into()); + let mut result = self + .services + .media + .fetch_remote_content(&mxc, None, server.as_deref(), 
timeout) + .await?; + + // Grab the length of the content before clearing it to not flood the output + let len = result.content.as_ref().expect("content").len(); + result.content.as_mut().expect("content").clear(); + + let out = format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"); + Ok(RoomMessageEventContent::notice_markdown(out)) +} + +#[admin_command] +pub(super) async fn get_remote_thumbnail( + &self, mxc: OwnedMxcUri, server: Option<OwnedServerName>, timeout: u32, width: u32, height: u32, +) -> Result<RoomMessageEventContent> { + let mxc: Mxc<'_> = mxc.as_str().try_into()?; + let timeout = Duration::from_millis(timeout.into()); + let dim = Dim::new(width, height, None); + let mut result = self + .services + .media + .fetch_remote_thumbnail(&mxc, None, server.as_deref(), timeout, &dim) + .await?; + + // Grab the length of the content before clearing it to not flood the output + let len = result.content.as_ref().expect("content").len(); + result.content.as_mut().expect("content").clear(); + + let out = format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"); + Ok(RoomMessageEventContent::notice_markdown(out)) +} diff --git a/src/admin/media/mod.rs b/src/admin/media/mod.rs index 31cbf810e2aeef8a32106386425872ff9e7080c4..5977c0fae35c30ef018dd438a4be6d0f81084e84 100644 --- a/src/admin/media/mod.rs +++ b/src/admin/media/mod.rs @@ -2,7 +2,7 @@ use clap::Subcommand; use conduit::Result; -use ruma::{EventId, MxcUri}; +use ruma::{EventId, MxcUri, OwnedMxcUri, OwnedServerName, ServerName}; use crate::admin_command_dispatch; @@ -32,8 +32,60 @@ pub(super) enum MediaCommand { /// - The duration (at or after), e.g. "5m" to delete all media in the /// past 5 minutes duration: String, + /// Continues deleting remote media if an undeletable object is found #[arg(short, long)] force: bool, }, + + /// - Deletes all the local media from a local user on our server + DeleteAllFromUser { + username: String, + + /// Continues deleting media if an undeletable object is found + #[arg(short, long)] + force: bool, + }, + + /// - Deletes all remote media from the specified remote server + DeleteAllFromServer { + server_name: Box<ServerName>, + + /// Continues deleting media if an undeletable object is found + #[arg(short, long)] + force: bool, + }, + + GetFileInfo { + /// The MXC URL to lookup info for. + mxc: OwnedMxcUri, + }, + + GetRemoteFile { + /// The MXC URL to fetch + mxc: OwnedMxcUri, + + #[arg(short, long)] + server: Option<OwnedServerName>, + + #[arg(short, long, default_value("10000"))] + timeout: u32, + }, + + GetRemoteThumbnail { + /// The MXC URL to fetch + mxc: OwnedMxcUri, + + #[arg(short, long)] + server: Option<OwnedServerName>, + + #[arg(short, long, default_value("10000"))] + timeout: u32, + + #[arg(short, long)] + width: u32, + + #[arg(short, long)] + height: u32, + }, } diff --git a/src/admin/mod.rs b/src/admin/mod.rs index fb1c02be70c5fd65db30cf6170051fe46ce00ea9..83db18fa779b093b0374eaccec11df0e6779fab3 100644 --- a/src/admin/mod.rs +++ b/src/admin/mod.rs @@ -4,7 +4,7 @@ pub(crate) mod admin; pub(crate) mod command; -pub(crate) mod handler; +pub(crate) mod processor; mod tests; pub(crate) mod utils; @@ -36,14 +36,18 @@ conduit::mod_dtor! {} conduit::rustc_flags_capture! 
{} -/// Install the admin command handler +/// Install the admin command processor pub async fn init(admin_service: &service::admin::Service) { _ = admin_service .complete .write() .expect("locked for writing") - .insert(handler::complete); - _ = admin_service.handle.write().await.insert(handler::handle); + .insert(processor::complete); + _ = admin_service + .handle + .write() + .await + .insert(processor::dispatch); } /// Uninstall the admin command handler diff --git a/src/admin/handler.rs b/src/admin/processor.rs similarity index 73% rename from src/admin/handler.rs rename to src/admin/processor.rs index c25c380fe2ebbf01f0a1c5e8d9f9fd7776977588..67548f49ecb844774ad45b707a2e753e11133b21 100644 --- a/src/admin/handler.rs +++ b/src/admin/processor.rs @@ -1,4 +1,5 @@ use std::{ + fmt::Write, panic::AssertUnwindSafe, sync::{Arc, Mutex}, time::SystemTime, @@ -22,13 +23,14 @@ relation::InReplyTo, room::message::{Relation::Reply, RoomMessageEventContent}, }, - OwnedEventId, + EventId, }; use service::{ - admin::{CommandInput, CommandOutput, HandlerFuture, HandlerResult}, + admin::{CommandInput, CommandOutput, ProcessorFuture, ProcessorResult}, Services, }; use tracing::Level; +use tracing_subscriber::{filter::LevelFilter, EnvFilter}; use crate::{admin, admin::AdminCommand, Command}; @@ -36,22 +38,22 @@ pub(super) fn complete(line: &str) -> String { complete_command(AdminCommand::command(), line) } #[must_use] -pub(super) fn handle(services: Arc<Services>, command: CommandInput) -> HandlerFuture { +pub(super) fn dispatch(services: Arc<Services>, command: CommandInput) -> ProcessorFuture { Box::pin(handle_command(services, command)) } #[tracing::instrument(skip_all, name = "admin")] -async fn handle_command(services: Arc<Services>, command: CommandInput) -> HandlerResult { +async fn handle_command(services: Arc<Services>, command: CommandInput) -> ProcessorResult { AssertUnwindSafe(Box::pin(process_command(services, &command))) .catch_unwind() .await .map_err(Error::from_panic) - .or_else(|error| handle_panic(&error, command)) + .unwrap_or_else(|error| handle_panic(&error, &command)) } -async fn process_command(services: Arc<Services>, input: &CommandInput) -> CommandOutput { +async fn process_command(services: Arc<Services>, input: &CommandInput) -> ProcessorResult { let (command, args, body) = match parse(&services, input) { - Err(error) => return error, + Err(error) => return Err(error), Ok(parsed) => parsed, }; @@ -59,44 +61,23 @@ async fn process_command(services: Arc<Services>, input: &CommandInput) -> Comma services: &services, body: &body, timer: SystemTime::now(), + reply_id: input.reply_id.as_deref(), }; - process(&context, command, &args) - .await - .and_then(|content| reply(content, input.reply_id.clone())) + process(&context, command, &args).await } -fn handle_panic(error: &Error, command: CommandInput) -> HandlerResult { +fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { let link = "Please submit a [bug report](https://github.com/girlbossceo/conduwuit/issues/new). 
🥺"; let msg = format!("Panic occurred while processing command:\n```\n{error:#?}\n```\n{link}"); let content = RoomMessageEventContent::notice_markdown(msg); error!("Panic while processing command: {error:?}"); - Ok(reply(content, command.reply_id)) -} - -fn reply(mut content: RoomMessageEventContent, reply_id: Option<OwnedEventId>) -> Option<RoomMessageEventContent> { - content.relates_to = reply_id.map(|event_id| Reply { - in_reply_to: InReplyTo { - event_id, - }, - }); - - Some(content) + Err(reply(content, command.reply_id.as_deref())) } // Parse and process a message from the admin room -async fn process(context: &Command<'_>, command: AdminCommand, args: &[String]) -> CommandOutput { - let filter: &capture::Filter = - &|data| data.level() <= Level::DEBUG && data.our_modules() && data.scope.contains(&"admin"); - let logs = Arc::new(Mutex::new( - collect_stream(|s| markdown_table_head(s)).expect("markdown table header"), - )); - - let capture = Capture::new( - &context.services.server.log.capture, - Some(filter), - capture::fmt(markdown_table, logs.clone()), - ); +async fn process(context: &Command<'_>, command: AdminCommand, args: &[String]) -> ProcessorResult { + let (capture, logs) = capture_create(context); let capture_scope = capture.start(); let result = Box::pin(admin::process(command, context)).await; @@ -109,13 +90,50 @@ async fn process(context: &Command<'_>, command: AdminCommand, args: &[String]) "command processed" ); + let mut output = String::new(); + + // Prepend the logs only if any were captured let logs = logs.lock().expect("locked"); - let output = match result { - Err(error) => format!("{logs}\nEncountered an error while handling the command:\n```\n{error:#?}\n```"), - Ok(reply) => format!("{logs}\n{}", reply.body()), //TODO: content is recreated to add logs - }; + if logs.lines().count() > 2 { + writeln!(&mut output, "{logs}").expect("failed to format logs to command output"); + } + drop(logs); - Some(RoomMessageEventContent::notice_markdown(output)) + match result { + Ok(content) => { + write!(&mut output, "{0}", content.body()).expect("failed to format command result to output buffer"); + Ok(Some(reply(RoomMessageEventContent::notice_markdown(output), context.reply_id))) + }, + Err(error) => { + write!(&mut output, "Command failed with error:\n```\n{error:#?}\n```") + .expect("failed to format command result to output"); + Err(reply(RoomMessageEventContent::notice_markdown(output), context.reply_id)) + }, + } +} + +fn capture_create(context: &Command<'_>) -> (Arc<Capture>, Arc<Mutex<String>>) { + let env_config = &context.services.server.config.admin_log_capture; + let env_filter = EnvFilter::try_new(env_config).unwrap_or_else(|_| "debug".into()); + let log_level = env_filter + .max_level_hint() + .and_then(LevelFilter::into_level) + .unwrap_or(Level::DEBUG); + + let filter = + move |data: capture::Data<'_>| data.level() <= log_level && data.our_modules() && data.scope.contains(&"admin"); + + let logs = Arc::new(Mutex::new( + collect_stream(|s| markdown_table_head(s)).expect("markdown table header"), + )); + + let capture = Capture::new( + &context.services.server.log.capture, + Some(filter), + capture::fmt(markdown_table, logs.clone()), + ); + + (capture, logs) } // Parse chat messages from the admin room into an AdminCommand object @@ -131,7 +149,10 @@ fn parse<'a>( let message = error .to_string() .replace("server.name", services.globals.server_name().as_str()); - Err(Some(RoomMessageEventContent::notice_markdown(message))) + Err(reply( + 
RoomMessageEventContent::notice_markdown(message), + input.reply_id.as_deref(), + )) }, } } @@ -228,3 +249,13 @@ fn parse_line(command_line: &str) -> Vec<String> { trace!(?command_line, ?argv, "parse"); argv } + +fn reply(mut content: RoomMessageEventContent, reply_id: Option<&EventId>) -> RoomMessageEventContent { + content.relates_to = reply_id.map(|event_id| Reply { + in_reply_to: InReplyTo { + event_id: event_id.to_owned(), + }, + }); + + content +} diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index ddde9146992f82f37e57a3757d56278d2cca5bd6..70d8486b4b5fd013d8d527199b75362708ab6539 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -60,7 +60,12 @@ pub(crate) enum RoomModerationCommand { }, /// - List of all rooms we have banned - ListBannedRooms, + ListBannedRooms { + #[arg(long)] + /// Whether to only output room IDs without supplementary room + /// information + no_details: bool, + }, } #[admin_command] @@ -208,6 +213,25 @@ async fn ban_room( } } + // remove any local aliases, ignore errors + for ref local_alias in self + .services + .rooms + .alias + .local_aliases_for_room(&room_id) + .filter_map(Result::ok) + { + _ = self + .services + .rooms + .alias + .remove_alias(local_alias, &self.services.globals.server_user) + .await; + } + + // unpublish from room directory, ignore errors + _ = self.services.rooms.directory.set_not_public(&room_id); + if disable_federation { self.services.rooms.metadata.disable_room(&room_id, true)?; return Ok(RoomMessageEventContent::text_plain( @@ -428,6 +452,25 @@ async fn ban_list_of_rooms(&self, force: bool, disable_federation: bool) -> Resu } } + // remove any local aliases, ignore errors + for ref local_alias in self + .services + .rooms + .alias + .local_aliases_for_room(&room_id) + .filter_map(Result::ok) + { + _ = self + .services + .rooms + .alias + .remove_alias(local_alias, &self.services.globals.server_user) + .await; + } + + // unpublish from room directory, ignore errors + _ = self.services.rooms.directory.set_not_public(&room_id); + if disable_federation { self.services.rooms.metadata.disable_room(&room_id, true)?; } @@ -525,7 +568,7 @@ async fn unban_room(&self, enable_federation: bool, room: Box<RoomOrAliasId>) -> } #[admin_command] -async fn list_banned_rooms(&self) -> Result<RoomMessageEventContent> { +async fn list_banned_rooms(&self, no_details: bool) -> Result<RoomMessageEventContent> { let rooms = self .services .rooms @@ -551,7 +594,11 @@ async fn list_banned_rooms(&self) -> Result<RoomMessageEventContent> { rooms.len(), rooms .iter() - .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}")) + .map(|(id, members, name)| if no_details { + format!("{id}") + } else { + format!("{id}\tMembers: {members}\tName: {name}") + }) .collect::<Vec<_>>() .join("\n") ); @@ -559,7 +606,7 @@ async fn list_banned_rooms(&self) -> Result<RoomMessageEventContent> { Ok(RoomMessageEventContent::notice_markdown(output_plain)) }, Err(e) => { - error!("Failed to list banned rooms: {}", e); + error!("Failed to list banned rooms: {e}"); Ok(RoomMessageEventContent::text_plain(format!("Unable to list banned rooms: {e}"))) }, } diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index bdd35d59e65f72509da8a17e419a3ca69a4ee57d..c36a3c1d05852757a518b4b1e709601a475724ef 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -1,15 +1,16 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{join_room_by_id_helper, leave_all_rooms, 
update_avatar_url, update_displayname}; -use conduit::{error, info, utils, warn, Result}; +use conduit::{error, info, utils, warn, PduBuilder, Result}; use ruma::{ events::{ - room::message::RoomMessageEventContent, + room::{message::RoomMessageEventContent, redaction::RoomRedactionEventContent}, tag::{TagEvent, TagEventContent, TagInfo}, - RoomAccountDataEventType, + RoomAccountDataEventType, TimelineEventType, }, - OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, + EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, }; +use serde_json::value::to_raw_value; use crate::{ admin_command, escape_html, get_room_info, @@ -485,3 +486,62 @@ pub(super) async fn get_room_tags(&self, user_id: String, room_id: Box<RoomId>) tags_event.content.tags ))) } + +#[admin_command] +pub(super) async fn redact_event(&self, event_id: Box<EventId>) -> Result<RoomMessageEventContent> { + let Some(event) = self + .services + .rooms + .timeline + .get_non_outlier_pdu(&event_id)? + else { + return Ok(RoomMessageEventContent::text_plain("Event does not exist in our database.")); + }; + + if event.is_redacted() { + return Ok(RoomMessageEventContent::text_plain("Event is already redacted.")); + } + + let room_id = event.room_id; + let sender_user = event.sender; + + if !self.services.globals.user_is_local(&sender_user) { + return Ok(RoomMessageEventContent::text_plain("This command only works on local users.")); + } + + let reason = format!( + "The administrator(s) of {} has redacted this user's message.", + self.services.globals.server_name() + ); + + let state_lock = self.services.rooms.state.mutex.lock(&room_id).await; + + let redaction_event_id = self + .services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomRedaction, + content: to_raw_value(&RoomRedactionEventContent { + redacts: Some(event.event_id.clone().into()), + reason: Some(reason), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: Some(event.event_id), + timestamp: None, + }, + &sender_user, + &room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + Ok(RoomMessageEventContent::text_plain(format!( + "Successfully redacted event. 
Redaction event ID: {redaction_event_id}" + ))) +} diff --git a/src/admin/user/mod.rs b/src/admin/user/mod.rs index b0c0bd1ecd1a7c851d7fec2a5f9df42246af04e6..f6ecd5e8b86209d6b42bf83619db95f9ef9c6032 100644 --- a/src/admin/user/mod.rs +++ b/src/admin/user/mod.rs @@ -2,7 +2,7 @@ use clap::Subcommand; use conduit::Result; -use ruma::{OwnedRoomOrAliasId, RoomId}; +use ruma::{EventId, OwnedRoomOrAliasId, RoomId}; use crate::admin_command_dispatch; @@ -103,4 +103,12 @@ pub(super) enum UserCommand { user_id: String, room_id: Box<RoomId>, }, + + /// - Attempts to forcefully redact the specified event ID from the sender + /// user + /// + /// This is only valid for local users + RedactEvent { + event_id: Box<EventId>, + }, } diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index f537fd5ff8c48ca1f3d3ffdc37ab910d95d69155..2b89c3e82ffc32c1462f152677822aa2e6091f75 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -25,6 +25,9 @@ release_max_log_level = [ "log/max_level_trace", "log/release_max_level_info", ] +zstd_compression = [ + "reqwest/zstd", +] gzip_compression = [ "reqwest/gzip", ] diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 10e63d71541cf3ebb89c42abbb20a9b2be5a3270..bf71bc0d3c63b9e5215a5c37a4d042290ec9dc2b 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -240,7 +240,7 @@ pub(crate) async fn register_route( // If `new_user_displayname_suffix` is set, registration will push whatever // content is set to the user's display name with a space before it - if !services.globals.new_user_displayname_suffix().is_empty() { + if !services.globals.new_user_displayname_suffix().is_empty() && body.appservice_info.is_none() { write!(displayname, " {}", services.globals.config.new_user_displayname_suffix) .expect("should be able to write to string buffer"); } diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index 18d1c5b0e3c58048920509dd655bd6e496b382e1..12d6352c946e53503a0cd1803001e0e6b7ac153d 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -69,15 +69,6 @@ pub(crate) async fn delete_alias_route( .appservice_checks(&body.room_alias, &body.appservice_info) .await?; - if services - .rooms - .alias - .resolve_local_alias(&body.room_alias)? 
- .is_none() - { - return Err(Error::BadRequest(ErrorKind::NotFound, "Alias does not exist.")); - } - services .rooms .alias diff --git a/src/api/client/media.rs b/src/api/client/media.rs index bce9b2b5bb136b19f124150ae080baa48b62f618..12012711642ba1c3800164125cabe98a9509bcae 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -1,125 +1,37 @@ -#![allow(deprecated)] - use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduit::{ - debug_warn, err, error, - utils::{ - self, - content_disposition::{content_disposition_type, make_content_disposition, sanitise_filename}, - math::ruma_from_usize, - }, - warn, Err, Error, Result, + err, + utils::{self, content_disposition::make_content_disposition, math::ruma_from_usize}, + Err, Result, }; -use ruma::api::client::{ - error::{ErrorKind, RetryAfter}, - media::{ - create_content, get_content, get_content_as_filename, get_content_thumbnail, get_media_config, - get_media_preview, - }, -}; -use service::{ - media::{FileMeta, MXC_LENGTH}, +use conduit_service::{ + media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, MXC_LENGTH}, Services, }; +use ruma::{ + api::client::{ + authenticated_media::{ + get_content, get_content_as_filename, get_content_thumbnail, get_media_config, get_media_preview, + }, + media::create_content, + }, + Mxc, UserId, +}; -use crate::{Ruma, RumaResponse}; - -/// Cache control for immutable objects -const CACHE_CONTROL_IMMUTABLE: &str = "public,max-age=31536000,immutable"; - -const CORP_CROSS_ORIGIN: &str = "cross-origin"; +use crate::Ruma; -/// # `GET /_matrix/media/v3/config` -/// -/// Returns max upload size. +/// # `GET /_matrix/client/v1/media/config` pub(crate) async fn get_media_config_route( - State(services): State<crate::State>, _body: Ruma<get_media_config::v3::Request>, -) -> Result<get_media_config::v3::Response> { - Ok(get_media_config::v3::Response { + State(services): State<crate::State>, _body: Ruma<get_media_config::v1::Request>, +) -> Result<get_media_config::v1::Response> { + Ok(get_media_config::v1::Response { upload_size: ruma_from_usize(services.globals.config.max_request_size), }) } -/// # `GET /_matrix/media/v1/config` -/// -/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or -/// clients may call. conduwuit adds these for compatibility purposes. -/// See <https://spec.matrix.org/legacy/legacy/#id27> -/// -/// Returns max upload size. -pub(crate) async fn get_media_config_v1_route( - State(services): State<crate::State>, body: Ruma<get_media_config::v3::Request>, -) -> Result<RumaResponse<get_media_config::v3::Response>> { - get_media_config_route(State(services), body) - .await - .map(RumaResponse) -} - -/// # `GET /_matrix/media/v3/preview_url` -/// -/// Returns URL preview. 
-#[tracing::instrument(skip_all, fields(%client), name = "url_preview")] -pub(crate) async fn get_media_preview_route( - State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, - body: Ruma<get_media_preview::v3::Request>, -) -> Result<get_media_preview::v3::Response> { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let url = &body.url; - if !services.media.url_preview_allowed(url) { - return Err!(Request(Forbidden( - warn!(%sender_user, %url, "URL is not allowed to be previewed") - ))); - } - - match services.media.get_url_preview(url).await { - Ok(preview) => { - let res = serde_json::value::to_raw_value(&preview).map_err(|e| { - error!(%sender_user, "Failed to convert UrlPreviewData into a serde json value: {e}"); - Error::BadRequest( - ErrorKind::LimitExceeded { - retry_after: Some(RetryAfter::Delay(Duration::from_secs(5))), - }, - "Failed to generate a URL preview, try again later.", - ) - })?; - - Ok(get_media_preview::v3::Response::from_raw_value(res)) - }, - Err(e) => { - warn!(%sender_user, "Failed to generate a URL preview: {e}"); - // there doesn't seem to be an agreed-upon error code in the spec. - // the only response codes in the preview_url spec page are 200 and 429. - Err(Error::BadRequest( - ErrorKind::LimitExceeded { - retry_after: Some(RetryAfter::Delay(Duration::from_secs(5))), - }, - "Failed to generate a URL preview, try again later.", - )) - }, - } -} - -/// # `GET /_matrix/media/v1/preview_url` -/// -/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or -/// clients may call. conduwuit adds these for compatibility purposes. -/// See <https://spec.matrix.org/legacy/legacy/#id27> -/// -/// Returns URL preview. -#[tracing::instrument(skip_all, fields(%client), name = "url_preview")] -pub(crate) async fn get_media_preview_v1_route( - State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, - body: Ruma<get_media_preview::v3::Request>, -) -> Result<RumaResponse<get_media_preview::v3::Response>> { - get_media_preview_route(State(services), InsecureClientIp(client), body) - .await - .map(RumaResponse) -} - /// # `POST /_matrix/media/v3/upload` /// /// Permanently save media in the server. 
@@ -131,409 +43,221 @@ pub(crate) async fn create_content_route( State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, body: Ruma<create_content::v3::Request>, ) -> Result<create_content::v3::Response> { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let user = body.sender_user.as_ref().expect("user is authenticated"); - let mxc = format!("mxc://{}/{}", services.globals.server_name(), utils::random_string(MXC_LENGTH)); + let filename = body.filename.as_deref(); + let content_type = body.content_type.as_deref(); + let content_disposition = make_content_disposition(None, content_type, filename); + let mxc = Mxc { + server_name: services.globals.server_name(), + media_id: &utils::random_string(MXC_LENGTH), + }; services .media - .create( - Some(sender_user.clone()), - &mxc, - body.filename - .as_ref() - .map(|filename| { - format!( - "{}; filename={}", - content_disposition_type(&body.content_type), - sanitise_filename(filename.to_owned()) - ) - }) - .as_deref(), - body.content_type.as_deref(), - &body.file, - ) - .await?; - - Ok(create_content::v3::Response { - content_uri: mxc.into(), - blurhash: None, - }) + .create(&mxc, Some(user), Some(&content_disposition), content_type, &body.file) + .await + .map(|()| create_content::v3::Response { + content_uri: mxc.to_string().into(), + blurhash: None, + }) } -/// # `POST /_matrix/media/v1/upload` -/// -/// Permanently save media in the server. +/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}` /// -/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or -/// clients may call. conduwuit adds these for compatibility purposes. -/// See <https://spec.matrix.org/legacy/legacy/#id27> -/// -/// - Some metadata will be saved in the database -/// - Media will be saved in the media/ directory -#[tracing::instrument(skip_all, fields(%client), name = "media_upload")] -pub(crate) async fn create_content_v1_route( +/// Load media thumbnail from our server or over federation. +#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get")] +pub(crate) async fn get_content_thumbnail_route( State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, - body: Ruma<create_content::v3::Request>, -) -> Result<RumaResponse<create_content::v3::Response>> { - create_content_route(State(services), InsecureClientIp(client), body) - .await - .map(RumaResponse) + body: Ruma<get_content_thumbnail::v1::Request>, +) -> Result<get_content_thumbnail::v1::Response> { + let user = body.sender_user.as_ref().expect("user is authenticated"); + + let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?; + let mxc = Mxc { + server_name: &body.server_name, + media_id: &body.media_id, + }; + + let FileMeta { + content, + content_type, + content_disposition, + } = fetch_thumbnail(&services, &mxc, user, body.timeout_ms, &dim).await?; + + Ok(get_content_thumbnail::v1::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + content_disposition, + }) } -/// # `GET /_matrix/media/v3/download/{serverName}/{mediaId}` +/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}` /// /// Load media from our server or over federation. 
-/// -/// - Only allows federation if `allow_remote` is true -/// - Only redirects if `allow_redirect` is true -/// - Uses client-provided `timeout_ms` if available, else defaults to 20 -/// seconds #[tracing::instrument(skip_all, fields(%client), name = "media_get")] pub(crate) async fn get_content_route( State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, - body: Ruma<get_content::v3::Request>, -) -> Result<get_content::v3::Response> { - let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); + body: Ruma<get_content::v1::Request>, +) -> Result<get_content::v1::Response> { + let user = body.sender_user.as_ref().expect("user is authenticated"); + + let mxc = Mxc { + server_name: &body.server_name, + media_id: &body.media_id, + }; - if let Some(FileMeta { + let FileMeta { content, content_type, content_disposition, - }) = services.media.get(&mxc).await? - { - let content_disposition = Some(make_content_disposition(&content_type, content_disposition, None)); - let file = content.expect("content"); - - Ok(get_content::v3::Response { - file, - content_type, - content_disposition, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.to_owned()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = get_remote_content( - &services, - &mxc, - &body.server_name, - body.media_id.clone(), - body.allow_redirect, - body.timeout_ms, - ) - .await - .map_err(|e| err!(Request(NotFound(debug_warn!("Fetching media `{mxc}` failed: {e:?}")))))?; - - let content_disposition = Some(make_content_disposition( - &response.content_type, - response.content_disposition, - None, - )); - - Ok(get_content::v3::Response { - file: response.file, - content_type: response.content_type, - content_disposition, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.to_owned()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.to_owned()), - }) - } else { - Err!(Request(NotFound("Media not found."))) - } -} + } = fetch_file(&services, &mxc, user, body.timeout_ms, None).await?; -/// # `GET /_matrix/media/v1/download/{serverName}/{mediaId}` -/// -/// Load media from our server or over federation. -/// -/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or -/// clients may call. conduwuit adds these for compatibility purposes. -/// See <https://spec.matrix.org/legacy/legacy/#id27> -/// -/// - Only allows federation if `allow_remote` is true -/// - Only redirects if `allow_redirect` is true -/// - Uses client-provided `timeout_ms` if available, else defaults to 20 -/// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_get")] -pub(crate) async fn get_content_v1_route( - State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, - body: Ruma<get_content::v3::Request>, -) -> Result<RumaResponse<get_content::v3::Response>> { - get_content_route(State(services), InsecureClientIp(client), body) - .await - .map(RumaResponse) + Ok(get_content::v1::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + content_disposition, + }) } -/// # `GET /_matrix/media/v3/download/{serverName}/{mediaId}/{fileName}` -/// -/// Load media from our server or over federation, permitting desired filename. 
+/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}` /// -/// - Only allows federation if `allow_remote` is true -/// - Only redirects if `allow_redirect` is true -/// - Uses client-provided `timeout_ms` if available, else defaults to 20 -/// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_get")] +/// Load media from our server or over federation as fileName. +#[tracing::instrument(skip_all, fields(%client), name = "media_get_af")] pub(crate) async fn get_content_as_filename_route( State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, - body: Ruma<get_content_as_filename::v3::Request>, -) -> Result<get_content_as_filename::v3::Response> { - let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); + body: Ruma<get_content_as_filename::v1::Request>, +) -> Result<get_content_as_filename::v1::Response> { + let user = body.sender_user.as_ref().expect("user is authenticated"); + + let mxc = Mxc { + server_name: &body.server_name, + media_id: &body.media_id, + }; - if let Some(FileMeta { + let FileMeta { content, content_type, content_disposition, - }) = services.media.get(&mxc).await? - { - let content_disposition = Some(make_content_disposition( - &content_type, - content_disposition, - Some(body.filename.clone()), - )); - - let file = content.expect("content"); - Ok(get_content_as_filename::v3::Response { - file, - content_type, - content_disposition, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.to_owned()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - match get_remote_content( - &services, - &mxc, - &body.server_name, - body.media_id.clone(), - body.allow_redirect, - body.timeout_ms, - ) - .await - { - Ok(remote_content_response) => { - let content_disposition = Some(make_content_disposition( - &remote_content_response.content_type, - remote_content_response.content_disposition, - None, - )); - - Ok(get_content_as_filename::v3::Response { - content_disposition, - content_type: remote_content_response.content_type, - file: remote_content_response.file, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.to_owned()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - }, - Err(e) => Err!(Request(NotFound(debug_warn!("Fetching media `{mxc}` failed: {e:?}")))), - } - } else { - Err!(Request(NotFound("Media not found."))) - } + } = fetch_file(&services, &mxc, user, body.timeout_ms, Some(&body.filename)).await?; + + Ok(get_content_as_filename::v1::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + content_disposition, + }) } -/// # `GET /_matrix/media/v1/download/{serverName}/{mediaId}/{fileName}` -/// -/// Load media from our server or over federation, permitting desired filename. -/// -/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or -/// clients may call. conduwuit adds these for compatibility purposes. 
-/// See <https://spec.matrix.org/legacy/legacy/#id27> +/// # `GET /_matrix/client/v1/media/preview_url` /// -/// - Only allows federation if `allow_remote` is true -/// - Only redirects if `allow_redirect` is true -/// - Uses client-provided `timeout_ms` if available, else defaults to 20 -/// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_get")] -pub(crate) async fn get_content_as_filename_v1_route( +/// Returns URL preview. +#[tracing::instrument(skip_all, fields(%client), name = "url_preview")] +pub(crate) async fn get_media_preview_route( State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, - body: Ruma<get_content_as_filename::v3::Request>, -) -> Result<RumaResponse<get_content_as_filename::v3::Response>> { - get_content_as_filename_route(State(services), InsecureClientIp(client), body) - .await - .map(RumaResponse) + body: Ruma<get_media_preview::v1::Request>, +) -> Result<get_media_preview::v1::Response> { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let url = &body.url; + if !services.media.url_preview_allowed(url) { + return Err!(Request(Forbidden( + debug_warn!(%sender_user, %url, "URL is not allowed to be previewed") + ))); + } + + let preview = services.media.get_url_preview(url).await.map_err(|error| { + err!(Request(Unknown( + debug_error!(%sender_user, %url, ?error, "Failed to fetch URL preview.") + ))) + })?; + + serde_json::value::to_raw_value(&preview) + .map(get_media_preview::v1::Response::from_raw_value) + .map_err(|error| { + err!(Request(Unknown( + debug_error!(%sender_user, %url, ?error, "Failed to parse URL preview.") + ))) + }) } -/// # `GET /_matrix/media/v3/thumbnail/{serverName}/{mediaId}` -/// -/// Load media thumbnail from our server or over federation. -/// -/// - Only allows federation if `allow_remote` is true -/// - Only redirects if `allow_redirect` is true -/// - Uses client-provided `timeout_ms` if available, else defaults to 20 -/// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get")] -pub(crate) async fn get_content_thumbnail_route( - State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, - body: Ruma<get_content_thumbnail::v3::Request>, -) -> Result<get_content_thumbnail::v3::Response> { - let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); +async fn fetch_thumbnail( + services: &Services, mxc: &Mxc<'_>, user: &UserId, timeout_ms: Duration, dim: &Dim, +) -> Result<FileMeta> { + let FileMeta { + content, + content_type, + content_disposition, + } = fetch_thumbnail_meta(services, mxc, user, timeout_ms, dim).await?; - if let Some(FileMeta { + let content_disposition = Some(make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + None, + )); + + Ok(FileMeta { content, content_type, content_disposition, - }) = services - .media - .get_thumbnail( - &mxc, - body.width - .try_into() - .map_err(|e| err!(Request(InvalidParam("Width is invalid: {e:?}"))))?, - body.height - .try_into() - .map_err(|e| err!(Request(InvalidParam("Height is invalid: {e:?}"))))?, - ) - .await? 
- { - let content_disposition = Some(make_content_disposition(&content_type, content_disposition, None)); - let file = content.expect("content"); - - Ok(get_content_thumbnail::v3::Response { - file, - content_type, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.to_owned()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - content_disposition, - }) - } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - if services - .globals - .prevent_media_downloads_from() - .contains(&body.server_name) - { - // we'll lie to the client and say the blocked server's media was not found and - // log. the client has no way of telling anyways so this is a security bonus. - debug_warn!("Received request for media `{}` on blocklisted server", mxc); - return Err!(Request(NotFound("Media not found."))); - } - - match services - .sending - .send_federation_request( - &body.server_name, - get_content_thumbnail::v3::Request { - allow_remote: body.allow_remote, - height: body.height, - width: body.width, - method: body.method.clone(), - server_name: body.server_name.clone(), - media_id: body.media_id.clone(), - timeout_ms: body.timeout_ms, - allow_redirect: body.allow_redirect, - animated: body.animated, - }, - ) - .await - { - Ok(get_thumbnail_response) => { - services - .media - .upload_thumbnail( - None, - &mxc, - None, - get_thumbnail_response.content_type.as_deref(), - body.width.try_into().expect("all UInts are valid u32s"), - body.height.try_into().expect("all UInts are valid u32s"), - &get_thumbnail_response.file, - ) - .await?; - - let content_disposition = Some(make_content_disposition( - &get_thumbnail_response.content_type, - get_thumbnail_response.content_disposition, - None, - )); - - Ok(get_content_thumbnail::v3::Response { - file: get_thumbnail_response.file, - content_type: get_thumbnail_response.content_type, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.to_owned()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.to_owned()), - content_disposition, - }) - }, - Err(e) => Err!(Request(NotFound(debug_warn!("Fetching media `{mxc}` failed: {e:?}")))), - } - } else { - Err!(Request(NotFound("Media not found."))) - } + }) } -/// # `GET /_matrix/media/v1/thumbnail/{serverName}/{mediaId}` -/// -/// Load media thumbnail from our server or over federation. -/// -/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or -/// clients may call. conduwuit adds these for compatibility purposes. 
-/// See <https://spec.matrix.org/legacy/legacy/#id27> -/// -/// - Only allows federation if `allow_remote` is true -/// - Only redirects if `allow_redirect` is true -/// - Uses client-provided `timeout_ms` if available, else defaults to 20 -/// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get")] -pub(crate) async fn get_content_thumbnail_v1_route( - State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, - body: Ruma<get_content_thumbnail::v3::Request>, -) -> Result<RumaResponse<get_content_thumbnail::v3::Response>> { - get_content_thumbnail_route(State(services), InsecureClientIp(client), body) - .await - .map(RumaResponse) +async fn fetch_file( + services: &Services, mxc: &Mxc<'_>, user: &UserId, timeout_ms: Duration, filename: Option<&str>, +) -> Result<FileMeta> { + let FileMeta { + content, + content_type, + content_disposition, + } = fetch_file_meta(services, mxc, user, timeout_ms).await?; + + let content_disposition = Some(make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + filename, + )); + + Ok(FileMeta { + content, + content_type, + content_disposition, + }) } -async fn get_remote_content( - services: &Services, mxc: &str, server_name: &ruma::ServerName, media_id: String, allow_redirect: bool, - timeout_ms: Duration, -) -> Result<get_content::v3::Response, Error> { - if services - .globals - .prevent_media_downloads_from() - .contains(&server_name.to_owned()) - { - // we'll lie to the client and say the blocked server's media was not found and - // log. the client has no way of telling anyways so this is a security bonus. - debug_warn!("Received request for media `{mxc}` on blocklisted server"); - return Err!(Request(NotFound("Media not found."))); +async fn fetch_thumbnail_meta( + services: &Services, mxc: &Mxc<'_>, user: &UserId, timeout_ms: Duration, dim: &Dim, +) -> Result<FileMeta> { + if let Some(filemeta) = services.media.get_thumbnail(mxc, dim).await? { + return Ok(filemeta); } - let content_response = services - .sending - .send_federation_request( - server_name, - get_content::v3::Request { - allow_remote: true, - server_name: server_name.to_owned(), - media_id, - timeout_ms, - allow_redirect, - }, - ) - .await?; + if services.globals.server_is_ours(mxc.server_name) { + return Err!(Request(NotFound("Local thumbnail not found."))); + } - let content_disposition = Some(make_content_disposition( - &content_response.content_type, - content_response.content_disposition, - None, - )); + services + .media + .fetch_remote_thumbnail(mxc, Some(user), None, timeout_ms, dim) + .await +} + +async fn fetch_file_meta(services: &Services, mxc: &Mxc<'_>, user: &UserId, timeout_ms: Duration) -> Result<FileMeta> { + if let Some(filemeta) = services.media.get(mxc).await? 
{ + return Ok(filemeta); + } + + if services.globals.server_is_ours(mxc.server_name) { + return Err!(Request(NotFound("Local media not found."))); + } services .media - .create( - None, - mxc, - content_disposition.as_deref(), - content_response.content_type.as_deref(), - &content_response.file, - ) - .await?; - - Ok(get_content::v3::Response { - file: content_response.file, - content_type: content_response.content_type, - content_disposition, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.to_owned()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.to_owned()), - }) + .fetch_remote_content(mxc, Some(user), None, timeout_ms) + .await } diff --git a/src/api/client/media_legacy.rs b/src/api/client/media_legacy.rs new file mode 100644 index 0000000000000000000000000000000000000000..e87b9a2b2c9629d943b6fae6b9de6aac8c4739ab --- /dev/null +++ b/src/api/client/media_legacy.rs @@ -0,0 +1,343 @@ +#![allow(deprecated)] + +use axum::extract::State; +use axum_client_ip::InsecureClientIp; +use conduit::{ + err, + utils::{content_disposition::make_content_disposition, math::ruma_from_usize}, + Err, Result, +}; +use conduit_service::media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN}; +use ruma::{ + api::client::media::{ + create_content, get_content, get_content_as_filename, get_content_thumbnail, get_media_config, + get_media_preview, + }, + Mxc, +}; + +use crate::{client::create_content_route, Ruma, RumaResponse}; + +/// # `GET /_matrix/media/v3/config` +/// +/// Returns max upload size. +pub(crate) async fn get_media_config_legacy_route( + State(services): State<crate::State>, _body: Ruma<get_media_config::v3::Request>, +) -> Result<get_media_config::v3::Response> { + Ok(get_media_config::v3::Response { + upload_size: ruma_from_usize(services.globals.config.max_request_size), + }) +} + +/// # `GET /_matrix/media/v1/config` +/// +/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or +/// clients may call. conduwuit adds these for compatibility purposes. +/// See <https://spec.matrix.org/legacy/legacy/#id27> +/// +/// Returns max upload size. +pub(crate) async fn get_media_config_legacy_legacy_route( + State(services): State<crate::State>, body: Ruma<get_media_config::v3::Request>, +) -> Result<RumaResponse<get_media_config::v3::Response>> { + get_media_config_legacy_route(State(services), body) + .await + .map(RumaResponse) +} + +/// # `GET /_matrix/media/v3/preview_url` +/// +/// Returns URL preview. 
+#[tracing::instrument(skip_all, fields(%client), name = "url_preview_legacy")]
+pub(crate) async fn get_media_preview_legacy_route(
+	State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp,
+	body: Ruma<get_media_preview::v3::Request>,
+) -> Result<get_media_preview::v3::Response> {
+	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+	let url = &body.url;
+	if !services.media.url_preview_allowed(url) {
+		return Err!(Request(Forbidden(
+			debug_warn!(%sender_user, %url, "URL is not allowed to be previewed")
+		)));
+	}
+
+	let preview = services.media.get_url_preview(url).await.map_err(|e| {
+		err!(Request(Unknown(
+			debug_error!(%sender_user, %url, "Failed to fetch a URL preview: {e}")
+		)))
+	})?;
+
+	let res = serde_json::value::to_raw_value(&preview).map_err(|e| {
+		err!(Request(Unknown(
+			debug_error!(%sender_user, %url, "Failed to parse a URL preview: {e}")
+		)))
+	})?;
+
+	Ok(get_media_preview::v3::Response::from_raw_value(res))
+}
+
+/// # `GET /_matrix/media/v1/preview_url`
+///
+/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or
+/// clients may call. conduwuit adds these for compatibility purposes.
+/// See <https://spec.matrix.org/legacy/legacy/#id27>
+///
+/// Returns URL preview.
+pub(crate) async fn get_media_preview_legacy_legacy_route(
+	State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp,
+	body: Ruma<get_media_preview::v3::Request>,
+) -> Result<RumaResponse<get_media_preview::v3::Response>> {
+	get_media_preview_legacy_route(State(services), InsecureClientIp(client), body)
+		.await
+		.map(RumaResponse)
+}
+
+/// # `POST /_matrix/media/v1/upload`
+///
+/// Permanently save media in the server.
+///
+/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or
+/// clients may call. conduwuit adds these for compatibility purposes.
+/// See <https://spec.matrix.org/legacy/legacy/#id27>
+///
+/// - Some metadata will be saved in the database
+/// - Media will be saved in the media/ directory
+pub(crate) async fn create_content_legacy_route(
+	State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp,
+	body: Ruma<create_content::v3::Request>,
+) -> Result<RumaResponse<create_content::v3::Response>> {
+	create_content_route(State(services), InsecureClientIp(client), body)
+		.await
+		.map(RumaResponse)
+}
+
+/// # `GET /_matrix/media/v3/download/{serverName}/{mediaId}`
+///
+/// Load media from our server or over federation.
+///
+/// - Only allows federation if `allow_remote` is true
+/// - Only redirects if `allow_redirect` is true
+/// - Uses client-provided `timeout_ms` if available, else defaults to 20
+///   seconds
+#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy")]
+pub(crate) async fn get_content_legacy_route(
+	State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp,
+	body: Ruma<get_content::v3::Request>,
+) -> Result<get_content::v3::Response> {
+	let mxc = Mxc {
+		server_name: &body.server_name,
+		media_id: &body.media_id,
+	};
+
+	if let Some(FileMeta {
+		content,
+		content_type,
+		content_disposition,
+	}) = services.media.get(&mxc).await?
+	{
+		let content_disposition = make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None);
+
+		Ok(get_content::v3::Response {
+			file: content.expect("entire file contents"),
+			content_type: content_type.map(Into::into),
+			content_disposition: Some(content_disposition),
+			cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
+			cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
+		})
+	} else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
+		let response = services
+			.media
+			.fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms)
+			.await
+			.map_err(|e| err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))))?;
+
+		let content_disposition =
+			make_content_disposition(response.content_disposition.as_ref(), response.content_type.as_deref(), None);
+
+		Ok(get_content::v3::Response {
+			file: response.file,
+			content_type: response.content_type,
+			content_disposition: Some(content_disposition),
+			cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
+			cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
+		})
+	} else {
+		Err!(Request(NotFound("Media not found.")))
+	}
+}
+
+/// # `GET /_matrix/media/v1/download/{serverName}/{mediaId}`
+///
+/// Load media from our server or over federation.
+///
+/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or
+/// clients may call. conduwuit adds these for compatibility purposes.
+/// See <https://spec.matrix.org/legacy/legacy/#id27>
+///
+/// - Only allows federation if `allow_remote` is true
+/// - Only redirects if `allow_redirect` is true
+/// - Uses client-provided `timeout_ms` if available, else defaults to 20
+///   seconds
+#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy")]
+pub(crate) async fn get_content_legacy_legacy_route(
+	State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp,
+	body: Ruma<get_content::v3::Request>,
+) -> Result<RumaResponse<get_content::v3::Response>> {
+	get_content_legacy_route(State(services), InsecureClientIp(client), body)
+		.await
+		.map(RumaResponse)
+}
+
+/// # `GET /_matrix/media/v3/download/{serverName}/{mediaId}/{fileName}`
+///
+/// Load media from our server or over federation, permitting desired filename.
+///
+/// - Only allows federation if `allow_remote` is true
+/// - Only redirects if `allow_redirect` is true
+/// - Uses client-provided `timeout_ms` if available, else defaults to 20
+///   seconds
+#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy")]
+pub(crate) async fn get_content_as_filename_legacy_route(
+	State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp,
+	body: Ruma<get_content_as_filename::v3::Request>,
+) -> Result<get_content_as_filename::v3::Response> {
+	let mxc = Mxc {
+		server_name: &body.server_name,
+		media_id: &body.media_id,
+	};
+
+	if let Some(FileMeta {
+		content,
+		content_type,
+		content_disposition,
+	}) = services.media.get(&mxc).await?
+	{
+		let content_disposition =
+			make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), Some(&body.filename));
+
+		Ok(get_content_as_filename::v3::Response {
+			file: content.expect("entire file contents"),
+			content_type: content_type.map(Into::into),
+			content_disposition: Some(content_disposition),
+			cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
+			cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
+		})
+	} else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote {
+		let response = services
+			.media
+			.fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms)
+			.await
+			.map_err(|e| err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))))?;
+
+		let content_disposition =
+			make_content_disposition(response.content_disposition.as_ref(), response.content_type.as_deref(), None);
+
+		Ok(get_content_as_filename::v3::Response {
+			content_disposition: Some(content_disposition),
+			content_type: response.content_type,
+			file: response.file,
+			cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()),
+			cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()),
+		})
+	} else {
+		Err!(Request(NotFound("Media not found.")))
+	}
+}
+
+/// # `GET /_matrix/media/v1/download/{serverName}/{mediaId}/{fileName}`
+///
+/// Load media from our server or over federation, permitting desired filename.
+///
+/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or
+/// clients may call. conduwuit adds these for compatibility purposes.
+/// See <https://spec.matrix.org/legacy/legacy/#id27>
+///
+/// - Only allows federation if `allow_remote` is true
+/// - Only redirects if `allow_redirect` is true
+/// - Uses client-provided `timeout_ms` if available, else defaults to 20
+///   seconds
+pub(crate) async fn get_content_as_filename_legacy_legacy_route(
+	State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp,
+	body: Ruma<get_content_as_filename::v3::Request>,
+) -> Result<RumaResponse<get_content_as_filename::v3::Response>> {
+	get_content_as_filename_legacy_route(State(services), InsecureClientIp(client), body)
+		.await
+		.map(RumaResponse)
+}
+
+/// # `GET /_matrix/media/v3/thumbnail/{serverName}/{mediaId}`
+///
+/// Load media thumbnail from our server or over federation.
+///
+/// - Only allows federation if `allow_remote` is true
+/// - Only redirects if `allow_redirect` is true
+/// - Uses client-provided `timeout_ms` if available, else defaults to 20
+///   seconds
+#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get_legacy")]
+pub(crate) async fn get_content_thumbnail_legacy_route(
+	State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp,
+	body: Ruma<get_content_thumbnail::v3::Request>,
+) -> Result<get_content_thumbnail::v3::Response> {
+	let mxc = Mxc {
+		server_name: &body.server_name,
+		media_id: &body.media_id,
+	};
+
+	let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?;
+	if let Some(FileMeta {
+		content,
+		content_type,
+		content_disposition,
+	}) = services.media.get_thumbnail(&mxc, &dim).await?
+ { + let content_disposition = make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); + + Ok(get_content_thumbnail::v3::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + content_disposition: Some(content_disposition), + }) + } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { + let response = services + .media + .fetch_remote_thumbnail_legacy(&body) + .await + .map_err(|e| err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))))?; + + let content_disposition = + make_content_disposition(response.content_disposition.as_ref(), response.content_type.as_deref(), None); + + Ok(get_content_thumbnail::v3::Response { + file: response.file, + content_type: response.content_type, + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + content_disposition: Some(content_disposition), + }) + } else { + Err!(Request(NotFound("Media not found."))) + } +} + +/// # `GET /_matrix/media/v1/thumbnail/{serverName}/{mediaId}` +/// +/// Load media thumbnail from our server or over federation. +/// +/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or +/// clients may call. conduwuit adds these for compatibility purposes. +/// See <https://spec.matrix.org/legacy/legacy/#id27> +/// +/// - Only allows federation if `allow_remote` is true +/// - Only redirects if `allow_redirect` is true +/// - Uses client-provided `timeout_ms` if available, else defaults to 20 +/// seconds +pub(crate) async fn get_content_thumbnail_legacy_legacy_route( + State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, + body: Ruma<get_content_thumbnail::v3::Request>, +) -> Result<RumaResponse<get_content_thumbnail::v3::Response>> { + get_content_thumbnail_legacy_route(State(services), InsecureClientIp(client), body) + .await + .map(RumaResponse) +} diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 4b27d8e345189fe2d885dbdff1b121874ae5aefb..d3d80387067344a30b3008316bd6476d266f46c1 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -403,6 +403,7 @@ pub(crate) async fn kick_user_route( unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, + timestamp: None, }, sender_user, &body.room_id, @@ -465,6 +466,7 @@ pub(crate) async fn ban_user_route( unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, + timestamp: None, }, sender_user, &body.room_id, @@ -512,6 +514,7 @@ pub(crate) async fn unban_user_route( unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, + timestamp: None, }, sender_user, &body.room_id, @@ -662,6 +665,12 @@ pub async fn join_room_by_id_helper( ) -> Result<join_room_by_id::v3::Response> { let state_lock = services.rooms.state.mutex.lock(room_id).await; + let user_is_guest = services.users.is_deactivated(sender_user).unwrap_or(false); + + if matches!(services.rooms.state_accessor.guest_can_join(room_id), Ok(false)) && user_is_guest { + return Err!(Request(Forbidden("Guests are not allowed to join this room"))); + } + if matches!(services.rooms.state_cache.is_joined(sender_user, room_id), Ok(true)) { debug_warn!("{sender_user} is already joined in {room_id}"); return Ok(join_room_by_id::v3::Response { @@ -810,7 +819,7 @@ async fn 
join_room_by_id_helper_remote( }, // only room versions 8 and above using `join_authorized_via_users_server` (restricted joins) need to // validate and send signatures - V8 | V9 | V10 | V11 => { + _ => { if let Some(signed_raw) = &send_join_response.room_state.event { info!( "There is a signed event. This room is probably using restricted joins. Adding signature to \ @@ -859,16 +868,6 @@ async fn join_room_by_id_helper_remote( } } }, - _ => { - warn!( - "Unexpected or unsupported room version {} for room {}", - &room_version_id, room_id - ); - return Err(Error::BadRequest( - ErrorKind::BadJson, - "Unexpected or unsupported room version found", - )); - }, } } @@ -1094,6 +1093,7 @@ async fn join_room_by_id_helper_local( unsigned: None, state_key: Some(sender_user.to_string()), redacts: None, + timestamp: None, }, sender_user, room_id, @@ -1406,6 +1406,7 @@ pub(crate) async fn invite_helper( unsigned: None, state_key: Some(user_id.to_string()), redacts: None, + timestamp: None, }, sender_user, room_id, @@ -1517,6 +1518,7 @@ pub(crate) async fn invite_helper( unsigned: None, state_key: Some(user_id.to_string()), redacts: None, + timestamp: None, }, sender_user, room_id, @@ -1634,6 +1636,7 @@ pub async fn leave_room(services: &Services, user_id: &UserId, room_id: &RoomId, unsigned: None, state_key: Some(user_id.to_string()), redacts: None, + timestamp: None, }, user_id, room_id, diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 9aae4aaf602565105e87750fd7adec49fb38504e..51aee8c12f04391652b6c22c3ff4d5e3c541e907 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -84,6 +84,11 @@ pub(crate) async fn send_message_event_route( unsigned: Some(unsigned), state_key: None, redacts: None, + timestamp: if body.appservice_info.is_some() { + body.timestamp + } else { + None + }, }, sender_user, &body.room_id, diff --git a/src/api/client/mod.rs b/src/api/client/mod.rs index cf13cf7ee696f7479794508e99c7a632546929e7..03c87e5da1d75be5ea488a97f1a5e687234ae3df 100644 --- a/src/api/client/mod.rs +++ b/src/api/client/mod.rs @@ -9,6 +9,7 @@ pub(super) mod filter; pub(super) mod keys; pub(super) mod media; +pub(super) mod media_legacy; pub(super) mod membership; pub(super) mod message; pub(super) mod openid; @@ -46,6 +47,7 @@ pub(super) use filter::*; pub(super) use keys::*; pub(super) use media::*; +pub(super) use media_legacy::*; pub(super) use membership::*; pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room, validate_and_add_event_id}; pub(super) use message::*; diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 8f6d90568bb3e05ebdfcea9b0b0c2ce74d5cc7c2..71d49cd8be62167a144f7cad10499517d0f60f1d 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -257,6 +257,12 @@ pub(crate) async fn get_profile_route( pub async fn update_displayname( services: &Services, user_id: OwnedUserId, displayname: Option<String>, all_joined_rooms: Vec<OwnedRoomId>, ) -> Result<()> { + let current_display_name = services.users.displayname(&user_id).unwrap_or_default(); + + if displayname == current_display_name { + return Ok(()); + } + services .users .set_displayname(&user_id, displayname.clone()) @@ -289,6 +295,7 @@ pub async fn update_displayname( unsigned: None, state_key: Some(user_id.to_string()), redacts: None, + timestamp: None, }, room_id, )) @@ -305,6 +312,13 @@ pub async fn update_avatar_url( services: &Services, user_id: OwnedUserId, avatar_url: Option<OwnedMxcUri>, blurhash: Option<String>, all_joined_rooms: 
Vec<OwnedRoomId>, ) -> Result<()> { + let current_avatar_url = services.users.avatar_url(&user_id).unwrap_or_default(); + let current_blurhash = services.users.blurhash(&user_id).unwrap_or_default(); + + if current_avatar_url == avatar_url && current_blurhash == blurhash { + return Ok(()); + } + services .users .set_avatar_url(&user_id, avatar_url.clone()) @@ -342,6 +356,7 @@ pub async fn update_avatar_url( unsigned: None, state_key: Some(user_id.to_string()), redacts: None, + timestamp: None, }, room_id, )) diff --git a/src/api/client/push.rs b/src/api/client/push.rs index 26462e79011169a55b454f6008b6b03ae6747498..8723e676bcdf5d85d7fcef98f0de6871e603dbfb 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -1,4 +1,5 @@ use axum::extract::State; +use conduit::err; use ruma::{ api::client::{ error::ErrorKind, @@ -7,9 +8,14 @@ set_pusher, set_pushrule, set_pushrule_actions, set_pushrule_enabled, RuleScope, }, }, - events::{push_rules::PushRulesEvent, GlobalAccountDataEventType}, + events::{ + push_rules::{PushRulesEvent, PushRulesEventContent}, + GlobalAccountDataEventType, + }, push::{InsertPushRuleError, RemovePushRuleError, Ruleset}, + CanonicalJsonObject, }; +use service::Services; use crate::{Error, Result, Ruma}; @@ -21,36 +27,46 @@ pub(crate) async fn get_pushrules_all_route( ) -> Result<get_pushrules_all::v3::Response> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = + let global_ruleset: Ruleset; + + let Ok(event) = services .account_data - .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())?; + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into()) + else { + // push rules event doesn't exist, create it and return default + return recreate_push_rules_and_return(&services, sender_user); + }; if let Some(event) = event { - let account_data = serde_json::from_str::<PushRulesEvent>(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))? - .content; + let value = serde_json::from_str::<CanonicalJsonObject>(event.get()) + .map_err(|e| err!(Database(warn!("Invalid push rules account data event in database: {e}"))))?; - Ok(get_pushrules_all::v3::Response { - global: account_data.global, - }) + let Some(content_value) = value.get("content") else { + // user somehow has a push rule event with no content key, recreate it and + // return server default silently + return recreate_push_rules_and_return(&services, sender_user); + }; + + if content_value.to_string().is_empty() { + // user somehow has a push rule event with empty content, recreate it and return + // server default silently + return recreate_push_rules_and_return(&services, sender_user); + } + + let account_data_content = serde_json::from_value::<PushRulesEventContent>(content_value.clone().into()) + .map_err(|e| err!(Database(warn!("Invalid push rules account data event in database: {e}"))))?; + + global_ruleset = account_data_content.global; } else { - services.account_data.update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(PushRulesEvent { - content: ruma::events::push_rules::PushRulesEventContent { - global: Ruleset::server_default(sender_user), - }, - }) - .expect("to json always works"), - )?; - - Ok(get_pushrules_all::v3::Response { - global: Ruleset::server_default(sender_user), - }) + // user somehow has non-existent push rule event. 
recreate it and return server + // default silently + return recreate_push_rules_and_return(&services, sender_user); } + + Ok(get_pushrules_all::v3::Response { + global: global_ruleset, + }) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` @@ -378,3 +394,25 @@ pub(crate) async fn set_pushers_route( Ok(set_pusher::v3::Response::default()) } + +/// user somehow has bad push rules, these must always exist per spec. +/// so recreate it and return server default silently +fn recreate_push_rules_and_return( + services: &Services, sender_user: &ruma::UserId, +) -> Result<get_pushrules_all::v3::Response> { + services.account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(PushRulesEvent { + content: PushRulesEventContent { + global: Ruleset::server_default(sender_user), + }, + }) + .expect("to json always works"), + )?; + + Ok(get_pushrules_all::v3::Response { + global: Ruleset::server_default(sender_user), + }) +} diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs index 89446754cd2c2726297f1a2d6beb51a7a71540ec..2102f6cd58a33533aaab28f7d3001dc389eadbb1 100644 --- a/src/api/client/redact.rs +++ b/src/api/client/redact.rs @@ -34,6 +34,7 @@ pub(crate) async fn redact_event_route( unsigned: None, state_key: None, redacts: Some(body.event_id.into()), + timestamp: None, }, sender_user, &body.room_id, diff --git a/src/api/client/room.rs b/src/api/client/room.rs index c78ba6edbcc90ef6d4748e45f3392c21ca0b7139..96c4375880e2544d833454bc147026de7e39043c 100644 --- a/src/api/client/room.rs +++ b/src/api/client/room.rs @@ -1,7 +1,7 @@ use std::{cmp::max, collections::BTreeMap}; use axum::extract::State; -use conduit::{debug_info, debug_warn}; +use conduit::{debug_info, debug_warn, err}; use ruma::{ api::client::{ error::ErrorKind, @@ -139,13 +139,8 @@ pub(crate) async fn create_room_route( })?, ); }, - V11 => {}, // V11 removed the "creator" key _ => { - warn!("Unexpected or unsupported room version {room_version}"); - return Err(Error::BadRequest( - ErrorKind::BadJson, - "Unexpected or unsupported room version found", - )); + // V11+ removed the "creator" key }, } content.insert( @@ -161,14 +156,7 @@ pub(crate) async fn create_room_route( let content = match room_version { V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => RoomCreateEventContent::new_v1(sender_user.clone()), - V11 => RoomCreateEventContent::new_v11(), - _ => { - warn!("Unexpected or unsupported room version {room_version}"); - return Err(Error::BadRequest( - ErrorKind::BadJson, - "Unexpected or unsupported room version found", - )); - }, + _ => RoomCreateEventContent::new_v11(), }; let mut content = serde_json::from_str::<CanonicalJsonObject>( to_raw_value(&content) @@ -197,6 +185,7 @@ pub(crate) async fn create_room_route( unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, sender_user, &room_id, @@ -225,6 +214,7 @@ pub(crate) async fn create_room_route( unsigned: None, state_key: Some(sender_user.to_string()), redacts: None, + timestamp: None, }, sender_user, &room_id, @@ -262,6 +252,7 @@ pub(crate) async fn create_room_route( unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, sender_user, &room_id, @@ -285,6 +276,7 @@ pub(crate) async fn create_room_route( unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, sender_user, &room_id, @@ -311,6 +303,7 @@ pub(crate) async fn create_room_route( unsigned: None, state_key: Some(String::new()), 
redacts: None, + timestamp: None, }, sender_user, &room_id, @@ -330,6 +323,7 @@ pub(crate) async fn create_room_route( unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, sender_user, &room_id, @@ -352,6 +346,7 @@ pub(crate) async fn create_room_route( unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, sender_user, &room_id, @@ -405,6 +400,7 @@ pub(crate) async fn create_room_route( unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, sender_user, &room_id, @@ -427,6 +423,7 @@ pub(crate) async fn create_room_route( unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, sender_user, &room_id, @@ -475,10 +472,7 @@ pub(crate) async fn get_room_event_route( .rooms .timeline .get_pdu(&body.event_id)? - .ok_or_else(|| { - warn!("Event not found, event ID: {:?}", &body.event_id); - Error::BadRequest(ErrorKind::NotFound, "Event not found.") - })?; + .ok_or_else(|| err!(Request(NotFound("Event {} not found.", &body.event_id))))?; if !services .rooms @@ -584,6 +578,7 @@ pub(crate) async fn upgrade_room_route( unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, sender_user, &body.room_id, @@ -627,16 +622,9 @@ pub(crate) async fn upgrade_room_route( })?, ); }, - V11 => { - // "creator" key no longer exists in V11 rooms - create_event_content.remove("creator"); - }, _ => { - warn!("Unexpected or unsupported room version {}", body.new_version); - return Err(Error::BadRequest( - ErrorKind::BadJson, - "Unexpected or unsupported room version found", - )); + // "creator" key no longer exists in V11+ rooms + create_event_content.remove("creator"); }, } } @@ -675,6 +663,7 @@ pub(crate) async fn upgrade_room_route( unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, sender_user, &replacement_room, @@ -703,6 +692,7 @@ pub(crate) async fn upgrade_room_route( unsigned: None, state_key: Some(sender_user.to_string()), redacts: None, + timestamp: None, }, sender_user, &replacement_room, @@ -731,6 +721,7 @@ pub(crate) async fn upgrade_room_route( unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, sender_user, &replacement_room, @@ -746,6 +737,11 @@ pub(crate) async fn upgrade_room_route( .local_aliases_for_room(&body.room_id) .filter_map(Result::ok) { + services + .rooms + .alias + .remove_alias(&alias, sender_user) + .await?; services .rooms .alias @@ -789,6 +785,7 @@ pub(crate) async fn upgrade_room_route( unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, sender_user, &body.room_id, diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 7c2a9718650ad51a7d6f4ec5c0a0a80f6e98dcab..4702b0ec142e8242fab50a625f8654f39df036e6 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -34,7 +34,7 @@ struct Claims { /// /// Get the supported login types of this server. One of these should be used as /// the `type` field when logging in. -#[tracing::instrument(skip_all, fields(%client), name = "register")] +#[tracing::instrument(skip_all, fields(%client), name = "login")] pub(crate) async fn get_login_types_route( InsecureClientIp(client): InsecureClientIp, _body: Ruma<get_login_types::v3::Request>, ) -> Result<get_login_types::v3::Response> { @@ -58,7 +58,7 @@ pub(crate) async fn get_login_types_route( /// Note: You can use [`GET /// /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see /// supported login types. 
-#[tracing::instrument(skip_all, fields(%client), name = "register")] +#[tracing::instrument(skip_all, fields(%client), name = "login")] pub(crate) async fn login_route( State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, body: Ruma<login::v3::Request>, ) -> Result<login::v3::Response> { @@ -221,7 +221,7 @@ pub(crate) async fn login_route( /// last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[tracing::instrument(skip_all, fields(%client), name = "register")] +#[tracing::instrument(skip_all, fields(%client), name = "logout")] pub(crate) async fn logout_route( State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, body: Ruma<logout::v3::Request>, ) -> Result<logout::v3::Response> { @@ -249,7 +249,7 @@ pub(crate) async fn logout_route( /// Note: This is equivalent to calling [`GET /// /_matrix/client/r0/logout`](fn.logout_route.html) from each device of this /// user. -#[tracing::instrument(skip_all, fields(%client), name = "register")] +#[tracing::instrument(skip_all, fields(%client), name = "logout")] pub(crate) async fn logout_all_route( State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, body: Ruma<logout_all::v3::Request>, diff --git a/src/api/client/state.rs b/src/api/client/state.rs index d0fb83d170c90f17393abb6ed06760ce43141209..fd0496639fd97336268e5ac723d120992dc6aa56 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -43,6 +43,11 @@ pub(crate) async fn send_state_event_for_key_route( &body.event_type, &body.body.body, body.state_key.clone(), + if body.appservice_info.is_some() { + body.timestamp + } else { + None + }, ) .await? .into(), @@ -172,7 +177,7 @@ pub(crate) async fn get_state_events_for_empty_key_route( async fn send_state_event_for_key_helper( services: &Services, sender: &UserId, room_id: &RoomId, event_type: &StateEventType, - json: &Raw<AnyStateEventContent>, state_key: String, + json: &Raw<AnyStateEventContent>, state_key: String, timestamp: Option<ruma::MilliSecondsSinceUnixEpoch>, ) -> Result<Arc<EventId>> { allowed_to_send_state_event(services, room_id, event_type, json).await?; let state_lock = services.rooms.state.mutex.lock(room_id).await; @@ -186,6 +191,7 @@ async fn send_state_event_for_key_helper( unsigned: None, state_key: Some(state_key), redacts: None, + timestamp, }, sender, room_id, diff --git a/src/api/client/sync.rs b/src/api/client/sync.rs index b25348d40c65fe1e3723a90c600f18a65ba07b79..b069a77e94a5e482b05e6819bb330fa4d530c80f 100644 --- a/src/api/client/sync.rs +++ b/src/api/client/sync.rs @@ -20,7 +20,7 @@ Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State as RoomState, Timeline, ToDevice, }, - v4::SlidingOp, + v4::{SlidingOp, SlidingSyncRoomHero}, DeviceLists, UnreadNotificationsCount, }, uiaa::UiaaResponse, @@ -1466,13 +1466,10 @@ pub(crate) async fn sync_events_v4_route( .rooms .state_accessor .get_member(room_id, &member)? - .map(|memberevent| { - ( - memberevent - .displayname - .unwrap_or_else(|| member.to_string()), - memberevent.avatar_url, - ) + .map(|memberevent| SlidingSyncRoomHero { + user_id: member, + name: memberevent.displayname, + avatar: memberevent.avatar_url, }), ) }) @@ -1484,18 +1481,26 @@ pub(crate) async fn sync_events_v4_route( Ordering::Greater => { let firsts = heroes[1..] 
.iter() - .map(|h| h.0.clone()) + .map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string())) .collect::<Vec<_>>() .join(", "); - let last = heroes[0].0.clone(); + let last = heroes[0] + .name + .clone() + .unwrap_or_else(|| heroes[0].user_id.to_string()); Some(format!("{firsts} and {last}")) }, - Ordering::Equal => Some(heroes[0].0.clone()), + Ordering::Equal => Some( + heroes[0] + .name + .clone() + .unwrap_or_else(|| heroes[0].user_id.to_string()), + ), Ordering::Less => None, }; let heroes_avatar = if heroes.len() == 1 { - heroes[0].1.clone() + heroes[0].avatar.clone() } else { None }; @@ -1558,7 +1563,7 @@ pub(crate) async fn sync_events_v4_route( ), num_live: None, // Count events in timeline greater than global sync counter timestamp: None, - heroes: None, + heroes: Some(heroes), }, ); } diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index 9a8f3220bf440d7dafb733492a522cdcd03fa15a..765749496fbca4fe5ef7d83059feb5063c4f4df0 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -42,6 +42,7 @@ pub(crate) async fn get_supported_versions_route( "v1.3".to_owned(), "v1.4".to_owned(), "v1.5".to_owned(), + "v1.11".to_owned(), ], unstable_features: BTreeMap::from_iter([ ("org.matrix.e2e_cross_signing".to_owned(), true), @@ -52,6 +53,8 @@ pub(crate) async fn get_supported_versions_route( ("org.matrix.msc3026.busy_presence".to_owned(), true), /* busy presence status (https://github.com/matrix-org/matrix-spec-proposals/pull/3026) */ ("org.matrix.msc3827".to_owned(), true), /* filtering of /publicRooms by room type (https://github.com/matrix-org/matrix-spec-proposals/pull/3827) */ ("org.matrix.msc3575".to_owned(), true), /* sliding sync (https://github.com/matrix-org/matrix-spec-proposals/pull/3575/files#r1588877046) */ + ("org.matrix.msc3916.stable".to_owned(), true), /* authenticated media (https://github.com/matrix-org/matrix-spec-proposals/pull/3916) */ + ("org.matrix.msc4180".to_owned(), true), /* stable flag for 3916 (https://github.com/matrix-org/matrix-spec-proposals/pull/4180) */ ]), }; diff --git a/src/api/client/voip.rs b/src/api/client/voip.rs index ed6971ee914f3cf20b310a2153664d479596138b..a51e2657062c0f1eb559e97b44700113bb8bee38 100644 --- a/src/api/client/voip.rs +++ b/src/api/client/voip.rs @@ -2,7 +2,7 @@ use axum::extract::State; use base64::{engine::general_purpose, Engine as _}; -use conduit::utils; +use conduit::{utils, Err}; use hmac::{Hmac, Mac}; use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch, UserId}; use sha1::Sha1; @@ -19,6 +19,11 @@ pub(crate) async fn turn_server_route( State(services): State<crate::State>, body: Ruma<get_turn_server_info::v3::Request>, ) -> Result<get_turn_server_info::v3::Response> { + // MSC4166: return M_NOT_FOUND 404 if no TURN URIs are specified in any way + if services.server.config.turn_uris.is_empty() { + return Err!(Request(NotFound("Not Found"))); + } + let turn_secret = services.globals.turn_secret().clone(); let (username, password) = if !turn_secret.is_empty() { diff --git a/src/api/router.rs b/src/api/router.rs index 7d6df16c61a91f7d7932b52aa39e5294d486d5a8..94ec5571ecb5a22ea8d39490e1a1751bcf45f640 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -5,13 +5,15 @@ mod response; pub mod state; +use std::str::FromStr; + use axum::{ - response::IntoResponse, + response::{IntoResponse, Redirect}, routing::{any, get, post}, Router, }; use conduit::{err, Server}; -use http::Uri; +use http::{uri, Uri}; use self::handler::RouterExt; pub(super) use 
self::{args::Args as Ruma, response::RumaResponse, state::State}; @@ -19,7 +21,7 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> { let config = &server.config; - let router = router + let mut router = router .ruma_route(client::get_supported_versions_route) .ruma_route(client::get_register_available_route) .ruma_route(client::register_route) @@ -138,37 +140,12 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> { .ruma_route(client::search_events_route) .ruma_route(client::turn_server_route) .ruma_route(client::send_event_to_device_route) - .ruma_route(client::get_media_config_route) - .ruma_route(client::get_media_preview_route) .ruma_route(client::create_content_route) - // legacy v1 media routes - .route( - "/_matrix/media/v1/preview_url", - get(client::get_media_preview_v1_route) - ) - .route( - "/_matrix/media/v1/config", - get(client::get_media_config_v1_route) - ) - .route( - "/_matrix/media/v1/upload", - post(client::create_content_v1_route) - ) - .route( - "/_matrix/media/v1/download/:server_name/:media_id", - get(client::get_content_v1_route) - ) - .route( - "/_matrix/media/v1/download/:server_name/:media_id/:file_name", - get(client::get_content_as_filename_v1_route) - ) - .route( - "/_matrix/media/v1/thumbnail/:server_name/:media_id", - get(client::get_content_thumbnail_v1_route) - ) + .ruma_route(client::get_content_thumbnail_route) .ruma_route(client::get_content_route) .ruma_route(client::get_content_as_filename_route) - .ruma_route(client::get_content_thumbnail_route) + .ruma_route(client::get_media_preview_route) + .ruma_route(client::get_media_config_route) .ruma_route(client::get_devices_route) .ruma_route(client::get_device_route) .ruma_route(client::update_device_route) @@ -202,7 +179,7 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> { .route("/client/server.json", get(client::syncv3_client_server_json)); if config.allow_federation { - router + router = router .ruma_route(server::get_server_version_route) .route("/_matrix/key/v2/server", get(server::get_server_keys_route)) .route("/_matrix/key/v2/server/:key_id", get(server::get_server_keys_deprecated_route)) @@ -230,18 +207,75 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> { .ruma_route(server::get_openid_userinfo_route) .ruma_route(server::get_hierarchy_route) .ruma_route(server::well_known_server) - .route("/_conduwuit/local_user_count", get(client::conduwuit_local_user_count)) + .ruma_route(server::get_content_route) + .ruma_route(server::get_content_thumbnail_route) + .route("/_conduwuit/local_user_count", get(client::conduwuit_local_user_count)); } else { - router + router = router .route("/_matrix/federation/*path", any(federation_disabled)) .route("/.well-known/matrix/server", any(federation_disabled)) .route("/_matrix/key/*path", any(federation_disabled)) - .route("/_conduwuit/local_user_count", any(federation_disabled)) + .route("/_conduwuit/local_user_count", any(federation_disabled)); + } + + if config.allow_legacy_media { + router = router + .ruma_route(client::get_media_config_legacy_route) + .ruma_route(client::get_media_preview_legacy_route) + .ruma_route(client::get_content_legacy_route) + .ruma_route(client::get_content_as_filename_legacy_route) + .ruma_route(client::get_content_thumbnail_legacy_route) + .route("/_matrix/media/v1/config", get(client::get_media_config_legacy_legacy_route)) + .route("/_matrix/media/v1/upload", post(client::create_content_legacy_route)) + .route( + 
"/_matrix/media/v1/preview_url", + get(client::get_media_preview_legacy_legacy_route), + ) + .route( + "/_matrix/media/v1/download/:server_name/:media_id", + get(client::get_content_legacy_legacy_route), + ) + .route( + "/_matrix/media/v1/download/:server_name/:media_id/:file_name", + get(client::get_content_as_filename_legacy_legacy_route), + ) + .route( + "/_matrix/media/v1/thumbnail/:server_name/:media_id", + get(client::get_content_thumbnail_legacy_legacy_route), + ); + } else { + router = router + .route("/_matrix/media/v1/*path", any(legacy_media_disabled)) + .route("/_matrix/media/v3/config", any(legacy_media_disabled)) + .route("/_matrix/media/v3/download/*path", any(legacy_media_disabled)) + .route("/_matrix/media/v3/thumbnail/*path", any(legacy_media_disabled)) + .route("/_matrix/media/v3/preview_url", any(redirect_legacy_preview)); } + + router +} + +async fn redirect_legacy_preview(uri: Uri) -> impl IntoResponse { + let path = "/_matrix/client/v1/media/preview_url"; + let query = uri.query().unwrap_or_default(); + + let path_and_query = format!("{path}?{query}"); + let path_and_query = uri::PathAndQuery::from_str(&path_and_query) + .expect("Failed to build PathAndQuery for media preview redirect URI"); + + let uri = uri::Builder::new() + .path_and_query(path_and_query) + .build() + .expect("Failed to build URI for redirect") + .to_string(); + + Redirect::temporary(&uri) } async fn initial_sync(_uri: Uri) -> impl IntoResponse { err!(Request(GuestAccessForbidden("Guest access not implemented"))) } -async fn federation_disabled() -> impl IntoResponse { err!(Config("allow_federation", "Federation is disabled.")) } +async fn legacy_media_disabled() -> impl IntoResponse { err!(Request(Forbidden("Unauthenticated media is disabled."))) } + +async fn federation_disabled() -> impl IntoResponse { err!(Request(Forbidden("Federation is disabled."))) } diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 838c5e79fa2df0f52287b46dfa52c77f853badc2..fe98e458a1d6515793e2a54313b5c9294b085789 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -6,7 +6,7 @@ typed_header::TypedHeaderRejectionReason, TypedHeader, }; -use conduit::{warn, Err, Error, Result}; +use conduit::{debug_info, warn, Err, Error, Result}; use http::uri::PathAndQuery; use ruma::{ api::{client::error::ErrorKind, AuthScheme, Metadata}, @@ -185,7 +185,7 @@ fn auth_appservice(services: &Services, request: &Request, info: Box<Registratio async fn auth_server( services: &Services, request: &mut Request, json_body: &Option<CanonicalJsonValue>, ) -> Result<Auth> { - if !services.globals.allow_federation() { + if !services.server.config.allow_federation { return Err!(Config("allow_federation", "Federation is disabled.")); } @@ -206,6 +206,17 @@ async fn auth_server( })?; let origin = &x_matrix.origin; + + if services + .server + .config + .forbidden_remote_server_names + .contains(origin) + { + debug_info!("Refusing to accept inbound federation request to {origin}"); + return Err!(Request(Forbidden("Federation with this homeserver is not allowed."))); + } + let signatures = BTreeMap::from_iter([(x_matrix.key.clone(), CanonicalJsonValue::String(x_matrix.sig.to_string()))]); let signatures = BTreeMap::from_iter([( diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index e9b1a6c780d17b698dc470df9b8449f18e0f8768..021016be22bfe38683cb34f350f00b224ec29141 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -139,6 +139,7 @@ pub(crate) async fn 
create_join_event_template_route( unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, + timestamp: None, }, &body.user_id, &body.room_id, @@ -221,15 +222,8 @@ pub(crate) fn maybe_strip_event_id(pdu_json: &mut CanonicalJsonObject, room_vers match room_version_id { V1 | V2 => {}, - V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 | V11 => { - pdu_json.remove("event_id"); - }, _ => { - warn!("Unexpected or unsupported room version {room_version_id}"); - return Err(Error::BadRequest( - ErrorKind::BadJson, - "Unexpected or unsupported room version found", - )); + pdu_json.remove("event_id"); }, }; diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs index ae7237ad3cf045e128ce7106499227bc6f78f9d7..3eb0d77ab7ce59a204e84824bfa78bca5e82da79 100644 --- a/src/api/server/make_leave.rs +++ b/src/api/server/make_leave.rs @@ -57,6 +57,7 @@ pub(crate) async fn create_leave_event_template_route( unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, + timestamp: None, }, &body.user_id, &body.room_id, diff --git a/src/api/server/media.rs b/src/api/server/media.rs new file mode 100644 index 0000000000000000000000000000000000000000..757cc9cc908f922be2afe96150016c32abc89041 --- /dev/null +++ b/src/api/server/media.rs @@ -0,0 +1,83 @@ +use axum::extract::State; +use axum_client_ip::InsecureClientIp; +use conduit::{utils::content_disposition::make_content_disposition, Err, Result}; +use conduit_service::media::{Dim, FileMeta}; +use ruma::{ + api::federation::authenticated_media::{ + get_content, get_content_thumbnail, Content, ContentMetadata, FileOrLocation, + }, + Mxc, +}; + +use crate::Ruma; + +/// # `GET /_matrix/federation/v1/media/download/{mediaId}` +/// +/// Load media from our server. +#[tracing::instrument(skip_all, fields(%client), name = "media_get")] +pub(crate) async fn get_content_route( + State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, + body: Ruma<get_content::v1::Request>, +) -> Result<get_content::v1::Response> { + let mxc = Mxc { + server_name: services.globals.server_name(), + media_id: &body.media_id, + }; + + let Some(FileMeta { + content, + content_type, + content_disposition, + }) = services.media.get(&mxc).await? + else { + return Err!(Request(NotFound("Media not found."))); + }; + + let content_disposition = make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); + let content = Content { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + content_disposition: Some(content_disposition), + }; + + Ok(get_content::v1::Response { + content: FileOrLocation::File(content), + metadata: ContentMetadata::new(), + }) +} + +/// # `GET /_matrix/federation/v1/media/thumbnail/{mediaId}` +/// +/// Load media thumbnail from our server. +#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get")] +pub(crate) async fn get_content_thumbnail_route( + State(services): State<crate::State>, InsecureClientIp(client): InsecureClientIp, + body: Ruma<get_content_thumbnail::v1::Request>, +) -> Result<get_content_thumbnail::v1::Response> { + let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?; + let mxc = Mxc { + server_name: services.globals.server_name(), + media_id: &body.media_id, + }; + + let Some(FileMeta { + content, + content_type, + content_disposition, + }) = services.media.get_thumbnail(&mxc, &dim).await? 
+ else { + return Err!(Request(NotFound("Media not found."))); + }; + + let content_disposition = make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); + let content = Content { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + content_disposition: Some(content_disposition), + }; + + Ok(get_content_thumbnail::v1::Response { + content: FileOrLocation::File(content), + metadata: ContentMetadata::new(), + }) +} diff --git a/src/api/server/mod.rs b/src/api/server/mod.rs index 5d46a86673e734d3f61b2566ebdb92954335efef..9a184f237b2b3b6c3401d5e825df6b7eb036dcf0 100644 --- a/src/api/server/mod.rs +++ b/src/api/server/mod.rs @@ -7,6 +7,7 @@ pub(super) mod key; pub(super) mod make_join; pub(super) mod make_leave; +pub(super) mod media; pub(super) mod openid; pub(super) mod publicrooms; pub(super) mod query; @@ -28,6 +29,7 @@ pub(super) use key::*; pub(super) use make_join::*; pub(super) use make_leave::*; +pub(super) use media::*; pub(super) use openid::*; pub(super) use publicrooms::*; pub(super) use query::*; diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index a5c379e9c6bd189bb2289c698955aa11c5f12486..cc5865c836a55a0e36c256247f36849f1fb3f463 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -32,6 +32,11 @@ jemalloc = [ jemalloc_prof = [ "tikv-jemalloc-sys/profiling", ] +jemalloc_stats = [ + "tikv-jemalloc-sys/stats", + "tikv-jemalloc-ctl/stats", + "tikv-jemallocator/stats", +] hardened_malloc = [ "dep:hardened_malloc-rs" ] @@ -41,7 +46,9 @@ gzip_compression = [ brotli_compression = [ "reqwest/brotli", ] -zstd_compression =[] +zstd_compression = [ + "reqwest/zstd", +] perf_measurements = [] sentry_telemetry = [] @@ -61,6 +68,7 @@ clap.workspace = true conduit-macros.workspace = true const-str.workspace = true ctor.workspace = true +cyborgtime.workspace = true either.workspace = true figment.workspace = true http-body-util.workspace = true @@ -75,6 +83,7 @@ regex.workspace = true reqwest.workspace = true ring.workspace = true ruma.workspace = true +rustls.workspace = true sanitize-filename.workspace = true serde_json.workspace = true serde_regex.workspace = true diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 08bfc49ada7f29320fc17d5b562ad056ce994453..7561eb9595d51c90ef1831255e6d53393c2229f8 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -2,7 +2,6 @@ use std::ffi::{c_char, c_void}; -use tikv_jemalloc_ctl as mallctl; use tikv_jemalloc_sys as ffi; use tikv_jemallocator as jemalloc; @@ -10,8 +9,10 @@ static JEMALLOC: jemalloc::Jemalloc = jemalloc::Jemalloc; #[must_use] +#[cfg(feature = "jemalloc_stats")] pub fn memory_usage() -> Option<String> { use mallctl::stats; + use tikv_jemalloc_ctl as mallctl; let mibs = |input: Result<usize, mallctl::Error>| { let input = input.unwrap_or_default(); @@ -33,6 +34,10 @@ pub fn memory_usage() -> Option<String> { )) } +#[must_use] +#[cfg(not(feature = "jemalloc_stats"))] +pub fn memory_usage() -> Option<String> { None } + #[must_use] pub fn memory_stats() -> Option<String> { const MAX_LENGTH: usize = 65536 - 4096; diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index bec7c4ea3f49c8c68c9d648f694a9d5ee188716f..71ffd7f369297f496b69ae5c6c15931a9d063280 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1,5 +1,5 @@ use std::{ - collections::BTreeMap, + collections::{BTreeMap, BTreeSet}, fmt, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, path::PathBuf, @@ -144,6 +144,8 @@ pub struct Config { #[serde(default = 
"true_fn")] pub allow_federation: bool, #[serde(default)] + pub federation_loopback: bool, + #[serde(default)] pub allow_public_room_directory_over_federation: bool, #[serde(default)] pub allow_public_room_directory_without_auth: bool, @@ -287,10 +289,16 @@ pub struct Config { #[serde(default)] pub allow_guests_auto_join_rooms: bool, + #[serde(default = "true_fn")] + pub allow_legacy_media: bool, + #[serde(default = "true_fn")] + pub freeze_legacy_media: bool, #[serde(default = "true_fn")] pub media_startup_check: bool, #[serde(default)] pub media_compat_file_link: bool, + #[serde(default)] + pub prune_missing_media: bool, #[serde(default = "Vec::new")] pub prevent_media_downloads_from: Vec<OwnedServerName>, @@ -334,6 +342,14 @@ pub struct Config { pub admin_escape_commands: bool, #[serde(default)] pub admin_console_automatic: bool, + #[serde(default)] + pub admin_execute: Vec<String>, + #[serde(default)] + pub admin_execute_errors_ignore: bool, + #[serde(default = "default_admin_log_capture")] + pub admin_log_capture: String, + #[serde(default = "default_admin_room_tag")] + pub admin_room_tag: String, #[serde(default)] pub sentry: bool, @@ -355,6 +371,9 @@ pub struct Config { #[serde(default)] pub tokio_console: bool, + #[serde(default)] + pub test: BTreeSet<String>, + #[serde(flatten)] #[allow(clippy::zero_sized_map_values)] // this is a catchall, the map shouldn't be zero at runtime catchall: BTreeMap<String, IgnoredAny>, @@ -366,8 +385,6 @@ pub struct TlsConfig { pub key: String, #[serde(default)] /// Whether to listen and allow for HTTP and HTTPS connections (insecure!) - /// Only works / does something if the `axum_dual_protocol` feature flag was - /// built pub dual_protocol: bool, } @@ -563,6 +580,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { line("New user display name suffix", &self.new_user_displayname_suffix); line("Allow encryption", &self.allow_encryption.to_string()); line("Allow federation", &self.allow_federation.to_string()); + line("Federation loopback", &self.federation_loopback.to_string()); line( "Allow incoming federated presence requests (updates)", &self.allow_incoming_presence.to_string(), @@ -592,6 +610,13 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "Activate admin console after startup", &self.admin_console_automatic.to_string(), ); + line("Execute admin commands after startup", &self.admin_execute.join(", ")); + line( + "Continue startup even if some commands fail", + &self.admin_execute_errors_ignore.to_string(), + ); + line("Filter for admin command log capture", &self.admin_log_capture); + line("Admin room tag", &self.admin_room_tag); line("Allow outgoing federated typing", &self.allow_outgoing_typing.to_string()); line("Allow incoming federated typing", &self.allow_incoming_typing.to_string()); line( @@ -723,6 +748,9 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { line("RocksDB Statistics level", &self.rocksdb_stats_level.to_string()); line("Media integrity checks on startup", &self.media_startup_check.to_string()); line("Media compatibility filesystem links", &self.media_compat_file_link.to_string()); + line("Prune missing media from database", &self.prune_missing_media.to_string()); + line("Allow legacy (unauthenticated) media", &self.allow_legacy_media.to_string()); + line("Freeze legacy (unauthenticated) media", &self.freeze_legacy_media.to_string()); line("Prevent Media Downloads From", { let mut lst = vec![]; for domain in &self.prevent_media_downloads_from { @@ -1050,3 +1078,7 @@ fn 
default_sentry_traces_sample_rate() -> f32 { 0.15 } fn default_sentry_filter() -> String { "info".to_owned() } fn default_startup_netburst_keep() -> i64 { 50 } + +fn default_admin_log_capture() -> String { "debug".to_owned() } + +fn default_admin_room_tag() -> String { "m.server_notice".to_owned() } diff --git a/src/core/debug.rs b/src/core/debug.rs index 3ab9ed0d2247f7510a63f72cc559eb2c55486262..f6c992b92f69c60077465a79488c3717f48c289f 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -84,10 +84,13 @@ pub fn trap() { #[must_use] pub fn panic_str(p: &Box<dyn Any + Send>) -> &'static str { p.downcast_ref::<&str>().copied().unwrap_or_default() } -#[cfg(debug_assertions)] #[inline(always)] #[must_use] -pub fn type_name<T>(_: &T) -> &'static str { std::any::type_name::<T>() } +pub fn rttype_name<T>(_: &T) -> &'static str { type_name::<T>() } + +#[inline(always)] +#[must_use] +pub fn type_name<T>() -> &'static str { std::any::type_name::<T>() } #[must_use] #[inline] diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index b5721deea13e7f880caa8467b2f2aa50e8a80db4..92dbdfe3bb9f35f062eebb58c2c1852283b243fc 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -2,6 +2,7 @@ mod log; mod panic; mod response; +mod serde; use std::{any::Any, borrow::Cow, convert::Infallible, fmt}; @@ -16,86 +17,98 @@ pub enum Error { Panic(&'static str, Box<dyn Any + Send + 'static>), // std - #[error("{0}")] + #[error(transparent)] Fmt(#[from] fmt::Error), + #[error(transparent)] + FromUtf8(#[from] std::string::FromUtf8Error), #[error("I/O error: {0}")] Io(#[from] std::io::Error), - #[error("{0}")] - Utf8Error(#[from] std::str::Utf8Error), - #[error("{0}")] - FromUtf8Error(#[from] std::string::FromUtf8Error), - #[error("{0}")] - TryFromSliceError(#[from] std::array::TryFromSliceError), - #[error("{0}")] - TryFromIntError(#[from] std::num::TryFromIntError), - #[error("{0}")] - ParseIntError(#[from] std::num::ParseIntError), - #[error("{0}")] - ParseFloatError(#[from] std::num::ParseFloatError), + #[error(transparent)] + ParseFloat(#[from] std::num::ParseFloatError), + #[error(transparent)] + ParseInt(#[from] std::num::ParseIntError), + #[error(transparent)] + TryFromInt(#[from] std::num::TryFromIntError), + #[error(transparent)] + TryFromSlice(#[from] std::array::TryFromSliceError), + #[error(transparent)] + Utf8(#[from] std::str::Utf8Error), // third-party + #[error(transparent)] + CargoToml(#[from] cargo_toml::Error), + #[error(transparent)] + Clap(#[from] clap::error::Error), + #[error(transparent)] + Extension(#[from] axum::extract::rejection::ExtensionRejection), + #[error(transparent)] + Figment(#[from] figment::error::Error), + #[error(transparent)] + Http(#[from] http::Error), + #[error(transparent)] + HttpHeader(#[from] http::header::InvalidHeaderValue), + #[error("Image error: {0}")] + Image(#[from] image::error::ImageError), #[error("Join error: {0}")] JoinError(#[from] tokio::task::JoinError), + #[error(transparent)] + Json(#[from] serde_json::Error), + #[error(transparent)] + JsParseInt(#[from] ruma::JsParseIntError), // js_int re-export + #[error(transparent)] + JsTryFromInt(#[from] ruma::JsTryFromIntError), // js_int re-export + #[error(transparent)] + Path(#[from] axum::extract::rejection::PathRejection), #[error("Regex error: {0}")] Regex(#[from] regex::Error), - #[error("Tracing filter error: {0}")] - TracingFilter(#[from] tracing_subscriber::filter::ParseError), - #[error("Tracing reload error: {0}")] - TracingReload(#[from] tracing_subscriber::reload::Error), - 
#[error("Image error: {0}")] - Image(#[from] image::error::ImageError), #[error("Request error: {0}")] Reqwest(#[from] reqwest::Error), #[error("{0}")] - Extension(#[from] axum::extract::rejection::ExtensionRejection), - #[error("{0}")] - Path(#[from] axum::extract::rejection::PathRejection), - #[error("{0}")] - Http(#[from] http::Error), - #[error("{0}")] - HttpHeader(#[from] http::header::InvalidHeaderValue), - #[error("{0}")] - CargoToml(#[from] cargo_toml::Error), - #[error("{0}")] - FigmentError(#[from] figment::error::Error), - #[error("{0}")] - TomlSerError(#[from] toml::ser::Error), - #[error("{0}")] - TomlDeError(#[from] toml::de::Error), - #[error("{0}")] - Clap(#[from] clap::error::Error), - - // ruma - #[error("{0}")] - IntoHttpError(#[from] ruma::api::error::IntoHttpError), + SerdeDe(Cow<'static, str>), #[error("{0}")] - RumaError(#[from] ruma::api::client::error::Error), - #[error("uiaa")] - Uiaa(ruma::api::client::uiaa::UiaaInfo), - #[error("{0}")] - Mxid(#[from] ruma::IdParseError), - #[error("{0}: {1}")] - BadRequest(ruma::api::client::error::ErrorKind, &'static str), //TODO: remove - #[error("{0}: {1}")] - Request(ruma::api::client::error::ErrorKind, Cow<'static, str>, http::StatusCode), - #[error("from {0}: {1}")] - Redaction(ruma::OwnedServerName, ruma::canonical_json::RedactionError), - #[error("Remote server {0} responded with: {1}")] - Federation(ruma::OwnedServerName, ruma::api::client::error::Error), - #[error("{0} in {1}")] - InconsistentRoomState(&'static str, ruma::OwnedRoomId), + SerdeSer(Cow<'static, str>), + #[error(transparent)] + TomlDe(#[from] toml::de::Error), + #[error(transparent)] + TomlSer(#[from] toml::ser::Error), + #[error("Tracing filter error: {0}")] + TracingFilter(#[from] tracing_subscriber::filter::ParseError), + #[error("Tracing reload error: {0}")] + TracingReload(#[from] tracing_subscriber::reload::Error), - // conduwuit + // ruma/conduwuit #[error("Arithmetic operation failed: {0}")] Arithmetic(Cow<'static, str>), + #[error("{0}: {1}")] + BadRequest(ruma::api::client::error::ErrorKind, &'static str), //TODO: remove #[error("{0}")] BadServerResponse(Cow<'static, str>), #[error("There was a problem with the '{0}' directive in your configuration: {1}")] Config(&'static str, Cow<'static, str>), #[error("{0}")] - Database(Cow<'static, str>), - #[error("{0}")] Conflict(&'static str), // This is only needed for when a room alias already exists + #[error(transparent)] + ContentDisposition(#[from] ruma::http_headers::ContentDispositionParseError), + #[error("{0}")] + Database(Cow<'static, str>), + #[error("Remote server {0} responded with: {1}")] + Federation(ruma::OwnedServerName, ruma::api::client::error::Error), + #[error("{0} in {1}")] + InconsistentRoomState(&'static str, ruma::OwnedRoomId), + #[error(transparent)] + IntoHttp(#[from] ruma::api::error::IntoHttpError), + #[error(transparent)] + Mxc(#[from] ruma::MxcUriError), + #[error(transparent)] + Mxid(#[from] ruma::IdParseError), + #[error("from {0}: {1}")] + Redaction(ruma::OwnedServerName, ruma::canonical_json::RedactionError), + #[error("{0}: {1}")] + Request(ruma::api::client::error::ErrorKind, Cow<'static, str>, http::StatusCode), + #[error(transparent)] + Ruma(#[from] ruma::api::client::error::Error), + #[error("uiaa")] + Uiaa(ruma::api::client::uiaa::UiaaInfo), // unique / untyped #[error("{0}")] @@ -117,7 +130,7 @@ pub fn sanitized_string(&self) -> String { pub fn message(&self) -> String { match self { Self::Federation(ref origin, ref error) => format!("Answer from {origin}: 
{error}"), - Self::RumaError(ref error) => response::ruma_error_message(error), + Self::Ruma(ref error) => response::ruma_error_message(error), _ => format!("{self}"), } } @@ -136,7 +149,7 @@ pub fn kind(&self) -> ruma::api::client::error::ErrorKind { pub fn status_code(&self) -> http::StatusCode { match self { - Self::Federation(_, ref error) | Self::RumaError(ref error) => error.status_code, + Self::Federation(_, ref error) | Self::Ruma(ref error) => error.status_code, Self::Request(ref kind, _, code) => response::status_code(kind, *code), Self::BadRequest(ref kind, ..) => response::bad_request_code(kind), Self::Conflict(_) => http::StatusCode::CONFLICT, diff --git a/src/core/error/serde.rs b/src/core/error/serde.rs new file mode 100644 index 0000000000000000000000000000000000000000..0c5a153b4e8d5ff51d4494b95783d6229010787e --- /dev/null +++ b/src/core/error/serde.rs @@ -0,0 +1,13 @@ +use std::fmt::Display; + +use serde::{de, ser}; + +use crate::Error; + +impl de::Error for Error { + fn custom<T: Display + ToString>(msg: T) -> Self { Self::SerdeDe(msg.to_string().into()) } +} + +impl ser::Error for Error { + fn custom<T: Display + ToString>(msg: T) -> Self { Self::SerdeSer(msg.to_string().into()) } +} diff --git a/src/core/pdu/builder.rs b/src/core/pdu/builder.rs index a8bad67706bfda4ad48ab08c8ac46158d58a406c..ba4c19e57229ca331c0b301eebd2358a5bc6bb9d 100644 --- a/src/core/pdu/builder.rs +++ b/src/core/pdu/builder.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, sync::Arc}; -use ruma::{events::TimelineEventType, EventId}; +use ruma::{events::TimelineEventType, EventId, MilliSecondsSinceUnixEpoch}; use serde::Deserialize; use serde_json::value::RawValue as RawJsonValue; @@ -13,4 +13,8 @@ pub struct PduBuilder { pub unsigned: Option<BTreeMap<String, serde_json::Value>>, pub state_key: Option<String>, pub redacts: Option<Arc<EventId>>, + /// For timestamped messaging, should only be used for appservices + /// + /// Will be set to current time if None + pub timestamp: Option<MilliSecondsSinceUnixEpoch>, } diff --git a/src/core/utils/content_disposition.rs b/src/core/utils/content_disposition.rs index be17a731cea8c9b85c672e8a441151d017a87fbb..a2fe923c405a94d7b88219cdf0ce3e2b18a23f86 100644 --- a/src/core/utils/content_disposition.rs +++ b/src/core/utils/content_disposition.rs @@ -1,7 +1,8 @@ -use crate::debug_info; +use std::borrow::Cow; + +use ruma::http_headers::{ContentDisposition, ContentDispositionType}; -const ATTACHMENT: &str = "attachment"; -const INLINE: &str = "inline"; +use crate::debug_info; /// as defined by MSC2702 const ALLOWED_INLINE_CONTENT_TYPES: [&str; 26] = [ @@ -38,42 +39,44 @@ /// Content-Type against MSC2702 list of safe inline Content-Types /// (`ALLOWED_INLINE_CONTENT_TYPES`) #[must_use] -pub fn content_disposition_type(content_type: &Option<String>) -> &'static str { +pub fn content_disposition_type(content_type: Option<&str>) -> ContentDispositionType { let Some(content_type) = content_type else { debug_info!("No Content-Type was given, assuming attachment for Content-Disposition"); - return ATTACHMENT; + return ContentDispositionType::Attachment; }; // is_sorted is unstable /* debug_assert!(ALLOWED_INLINE_CONTENT_TYPES.is_sorted(), * "ALLOWED_INLINE_CONTENT_TYPES is not sorted"); */ - let content_type = content_type + let content_type: Cow<'_, str> = content_type .split(';') .next() .unwrap_or(content_type) - .to_ascii_lowercase(); + .to_ascii_lowercase() + .into(); if ALLOWED_INLINE_CONTENT_TYPES - .binary_search(&content_type.as_str()) + 
.binary_search(&content_type.as_ref()) .is_ok() { - INLINE + ContentDispositionType::Inline } else { - ATTACHMENT + ContentDispositionType::Attachment } } /// sanitises the file name for the Content-Disposition using /// `sanitize_filename` crate #[tracing::instrument(level = "debug")] -pub fn sanitise_filename(filename: String) -> String { - let options = sanitize_filename::Options { - truncate: false, - ..Default::default() - }; - - sanitize_filename::sanitize_with_options(filename, options) +pub fn sanitise_filename(filename: &str) -> String { + sanitize_filename::sanitize_with_options( + filename, + sanitize_filename::Options { + truncate: false, + ..Default::default() + }, + ) } /// creates the final Content-Disposition based on whether the filename exists @@ -85,33 +88,13 @@ pub fn sanitise_filename(filename: String) -> String { /// /// else: `Content-Disposition: attachment/inline` pub fn make_content_disposition( - content_type: &Option<String>, content_disposition: Option<String>, req_filename: Option<String>, -) -> String { - let filename: String; - - if let Some(req_filename) = req_filename { - filename = sanitise_filename(req_filename); - } else { - filename = content_disposition.map_or_else(String::new, |content_disposition| { - let (_, filename) = content_disposition - .split_once("filename=") - .unwrap_or(("", "")); - - if filename.is_empty() { - String::new() - } else { - sanitise_filename(filename.to_owned()) - } - }); - }; - - if !filename.is_empty() { - // Content-Disposition: attachment/inline; filename=filename.ext - format!("{}; filename={}", content_disposition_type(content_type), filename) - } else { - // Content-Disposition: attachment/inline - String::from(content_disposition_type(content_type)) - } + content_disposition: Option<&ContentDisposition>, content_type: Option<&str>, filename: Option<&str>, +) -> ContentDisposition { + ContentDisposition::new(content_disposition_type(content_type)).with_filename( + filename + .or_else(|| content_disposition.and_then(|content_disposition| content_disposition.filename.as_deref())) + .map(sanitise_filename), + ) } #[cfg(test)] @@ -136,4 +119,20 @@ fn string_sanitisation() { assert_eq!(SANITISED, sanitize_filename::sanitize_with_options(SAMPLE, options.clone())); } + + #[test] + fn empty_sanitisation() { + use crate::utils::string::EMPTY; + + let result = sanitize_filename::sanitize_with_options( + EMPTY, + sanitize_filename::Options { + windows: true, + truncate: true, + replacement: "", + }, + ); + + assert_eq!(EMPTY, result); + } } diff --git a/src/core/utils/defer.rs b/src/core/utils/defer.rs index 9d42e6795bf68b2a263318b8475bfbbc59c88498..08477b6f5e850738a12077d475cbba58e2a5ba30 100644 --- a/src/core/utils/defer.rs +++ b/src/core/utils/defer.rs @@ -13,4 +13,10 @@ impl<F: FnMut()> Drop for _Defer_<F> { closure: || $body, }; }; + + ($body:expr) => { + $crate::defer! {{ + $body + }} + }; } diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index 96ac6dc2996efa3c33c7e3bcbd4c0d48eeabe654..f9d0de3022fcc600d8ed7296f2697aa4c4a0d975 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -1,8 +1,8 @@ -use std::{cmp, time::Duration}; +use std::{cmp, convert::TryFrom, time::Duration}; pub use checked_ops::checked_ops; -use crate::{Err, Error, Result}; +use crate::{debug::type_name, err, Err, Error, Result}; /// Checked arithmetic expression. 
Returns a Result<R, Error::Arithmetic> #[macro_export] @@ -86,3 +86,17 @@ pub fn ruma_from_usize(val: usize) -> ruma::UInt { #[must_use] #[allow(clippy::as_conversions, clippy::cast_possible_truncation)] pub fn usize_from_u64_truncated(val: u64) -> usize { val as usize } + +#[inline] +pub fn try_into<Dst: TryFrom<Src>, Src>(src: Src) -> Result<Dst> { + Dst::try_from(src).map_err(try_into_err::<Dst, Src>) +} + +fn try_into_err<Dst: TryFrom<Src>, Src>(e: <Dst as TryFrom<Src>>::Error) -> Error { + drop(e); + err!(Arithmetic( + "failed to convert from {} to {}", + type_name::<Src>(), + type_name::<Dst>() + )) +} diff --git a/src/core/utils/time.rs b/src/core/utils/time.rs index 9a31632e252e621ea4ed0882daac44fb1d7be372..04f47ac38098fef90db6b48ee9c0c85fa3bd4d44 100644 --- a/src/core/utils/time.rs +++ b/src/core/utils/time.rs @@ -1,5 +1,7 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use crate::{err, Result}; + #[inline] #[must_use] #[allow(clippy::as_conversions, clippy::cast_possible_truncation)] @@ -10,6 +12,22 @@ pub fn now_millis() -> u64 { .as_millis() as u64 } +#[inline] +pub fn parse_timepoint_ago(ago: &str) -> Result<SystemTime> { timepoint_ago(parse_duration(ago)?) } + +#[inline] +pub fn timepoint_ago(duration: Duration) -> Result<SystemTime> { + SystemTime::now() + .checked_sub(duration) + .ok_or_else(|| err!(Arithmetic("Duration {duration:?} is too large"))) +} + +#[inline] +pub fn parse_duration(duration: &str) -> Result<Duration> { + cyborgtime::parse_duration(duration) + .map_err(|error| err!("'{duration:?}' is not a valid duration string: {error:?}")) +} + #[must_use] pub fn rfc2822_from_seconds(epoch: i64) -> String { use chrono::{DateTime, Utc}; diff --git a/src/database/engine.rs b/src/database/engine.rs index 3975d3d9a7dac7966b72c0623231d0fefa97e01a..3850c1d3f2bbd0aae529a06df1af80458ad37203 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -49,7 +49,12 @@ pub(crate) fn open(server: &Arc<Server>) -> Result<Arc<Self>> { let mut db_env = Env::new().or_else(or_else)?; let row_cache = Cache::new_lru_cache(row_cache_capacity_bytes); - let db_opts = db_options(config, &mut db_env, &row_cache, col_cache.get("primary").expect("cache")); + let db_opts = db_options( + config, + &mut db_env, + &row_cache, + col_cache.get("primary").expect("primary cache exists"), + )?; let load_time = std::time::Instant::now(); if config.rocksdb_repair { @@ -63,9 +68,15 @@ pub(crate) fn open(server: &Arc<Server>) -> Result<Arc<Self>> { .collect::<BTreeSet<_>>(); debug!("Opening {} column family descriptors in database", cfs.len()); + let cfopts = cfs + .iter() + .map(|name| cf_options(config, name, db_opts.clone(), &mut col_cache)) + .collect::<Result<Vec<_>>>()?; + let cfds = cfs .iter() - .map(|name| ColumnFamilyDescriptor::new(name, cf_options(config, name, db_opts.clone(), &mut col_cache))) + .zip(cfopts.into_iter()) + .map(|(name, opts)| ColumnFamilyDescriptor::new(name, opts)) .collect::<Vec<_>>(); debug!("Opening database..."); @@ -102,7 +113,7 @@ pub(crate) fn open_cf(&self, name: &str) -> Result<Arc<BoundColumnFamily<'_>>> { debug!("Creating new column family in database: {name}"); let mut col_cache = self.col_cache.write().expect("locked"); - let opts = cf_options(&self.server.config, name, self.opts.clone(), &mut col_cache); + let opts = cf_options(&self.server.config, name, self.opts.clone(), &mut col_cache)?; if let Err(e) = self.db.create_cf(name, &opts) { error!(?name, "Failed to create new column family: {e}"); return or_else(e); @@ -172,7 +183,7 @@ pub fn 
backup(&self) -> Result<(), Box<dyn std::error::Error>> { return Ok(()); } - let options = BackupEngineOptions::new(path.unwrap())?; + let options = BackupEngineOptions::new(path.expect("valid database backup path"))?; let mut engine = BackupEngine::open(&options, &self.env)?; if config.database_backups_to_keep > 0 { if let Err(e) = engine.create_new_backup_flush(&self.db, true) { @@ -180,7 +191,7 @@ pub fn backup(&self) -> Result<(), Box<dyn std::error::Error>> { } let engine_info = engine.get_backup_info(); - let info = &engine_info.last().unwrap(); + let info = &engine_info.last().expect("backup engine info is not empty"); info!( "Created database backup #{} using {} bytes in {} files", info.backup_id, info.size, info.num_files, @@ -190,7 +201,7 @@ pub fn backup(&self) -> Result<(), Box<dyn std::error::Error>> { if config.database_backups_to_keep >= 0 { let keep = u32::try_from(config.database_backups_to_keep)?; if let Err(e) = engine.purge_old_backups(keep.try_into()?) { - error!("Failed to purge old backup: {:?}", e.to_string()); + error!("Failed to purge old backup: {e:?}"); } } @@ -207,7 +218,7 @@ pub fn backup_list(&self) -> Result<String> { } let mut res = String::new(); - let options = BackupEngineOptions::new(path.expect("valid path")).or_else(or_else)?; + let options = BackupEngineOptions::new(path.expect("valid database backup path")).or_else(or_else)?; let engine = BackupEngine::open(&options, &self.env).or_else(or_else)?; for info in engine.get_backup_info() { writeln!( diff --git a/src/database/opts.rs b/src/database/opts.rs index 8791e77834d994845075919dcd60cacb3c82889b..d2ad4b95c7ea63e9334b796714d917edcf2b008b 100644 --- a/src/database/opts.rs +++ b/src/database/opts.rs @@ -1,6 +1,6 @@ -use std::{cmp, collections::HashMap}; +use std::{cmp, collections::HashMap, convert::TryFrom}; -use conduit::{utils, Config}; +use conduit::{err, utils, Config, Result}; use rocksdb::{ statistics::StatsLevel, BlockBasedOptions, Cache, DBCompactionStyle, DBCompressionType, DBRecoveryMode, Env, LogLevel, Options, UniversalCompactOptions, UniversalCompactionStopStyle, @@ -11,8 +11,7 @@ /// resulting value. Note that we require special per-column options on some /// columns, therefor columns should only be opened after passing this result /// through cf_options(). 
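A minimal sketch of the call order this comment describes, under the Result-returning signatures introduced in this hunk (the column name, bindings, and `?` propagation are illustrative, mirroring the engine.rs change above):

	let db_opts = db_options(config, &mut db_env, &row_cache, &primary_cache)?;
	let cf_opts = cf_options(config, "pduid_pdu", db_opts.clone(), &mut col_cache)?;
	let cfd = rocksdb::ColumnFamilyDescriptor::new("pduid_pdu", cf_opts);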
-pub(crate) fn db_options(config: &Config, env: &mut Env, row_cache: &Cache, col_cache: &Cache) -> Options { - const MIN_PARALLELISM: usize = 2; +pub(crate) fn db_options(config: &Config, env: &mut Env, row_cache: &Cache, col_cache: &Cache) -> Result<Options> { const DEFAULT_STATS_LEVEL: StatsLevel = if cfg!(debug_assertions) { StatsLevel::ExceptDetailedTimers } else { @@ -25,14 +24,9 @@ pub(crate) fn db_options(config: &Config, env: &mut Env, row_cache: &Cache, col_ set_logging_defaults(&mut opts, config); // Processing - let threads = if config.rocksdb_parallelism_threads == 0 { - cmp::max(MIN_PARALLELISM, utils::available_parallelism()) - } else { - cmp::max(MIN_PARALLELISM, config.rocksdb_parallelism_threads) - }; - - opts.set_max_background_jobs(threads.try_into().unwrap()); - opts.set_max_subcompactions(threads.try_into().unwrap()); + opts.set_enable_pipelined_write(true); + opts.set_max_background_jobs(num_threads::<i32>(config)?); + opts.set_max_subcompactions(num_threads::<u32>(config)?); opts.set_max_file_opening_threads(0); if config.rocksdb_compaction_prio_idle { env.lower_thread_pool_cpu_priority(); @@ -100,13 +94,15 @@ pub(crate) fn db_options(config: &Config, env: &mut Env, row_cache: &Cache, col_ }); opts.set_env(env); - opts + Ok(opts) } /// Adjust options for the specific column by name. Provide the result of /// db_options() as the argument to this function and use the return value in /// the arguments to open the specific column. -pub(crate) fn cf_options(cfg: &Config, name: &str, mut opts: Options, cache: &mut HashMap<String, Cache>) -> Options { +pub(crate) fn cf_options( + cfg: &Config, name: &str, mut opts: Options, cache: &mut HashMap<String, Cache>, +) -> Result<Options> { // Columns with non-default compaction options match name { "backupid_algorithm" @@ -129,7 +125,7 @@ pub(crate) fn cf_options(cfg: &Config, name: &str, mut opts: Options, cache: &mu cfg, cache, name, - cache_size(cfg, cfg.shorteventid_cache_capacity, 64), + cache_size(cfg, cfg.shorteventid_cache_capacity, 64)?, ), "eventid_shorteventid" => set_table_with_new_cache( @@ -137,11 +133,17 @@ pub(crate) fn cf_options(cfg: &Config, name: &str, mut opts: Options, cache: &mu cfg, cache, name, - cache_size(cfg, cfg.eventidshort_cache_capacity, 64), + cache_size(cfg, cfg.eventidshort_cache_capacity, 64)?, ), "shorteventid_authchain" => { - set_table_with_new_cache(&mut opts, cfg, cache, name, cache_size(cfg, cfg.auth_chain_cache_capacity, 192)); + set_table_with_new_cache( + &mut opts, + cfg, + cache, + name, + cache_size(cfg, cfg.auth_chain_cache_capacity, 192)?, + ); }, "shortstatekey_statekey" => set_table_with_new_cache( @@ -149,7 +151,7 @@ pub(crate) fn cf_options(cfg: &Config, name: &str, mut opts: Options, cache: &mu cfg, cache, name, - cache_size(cfg, cfg.shortstatekey_cache_capacity, 1024), + cache_size(cfg, cfg.shortstatekey_cache_capacity, 1024)?, ), "statekey_shortstatekey" => set_table_with_new_cache( @@ -157,11 +159,11 @@ pub(crate) fn cf_options(cfg: &Config, name: &str, mut opts: Options, cache: &mu cfg, cache, name, - cache_size(cfg, cfg.statekeyshort_cache_capacity, 1024), + cache_size(cfg, cfg.statekeyshort_cache_capacity, 1024)?, ), "eventid_outlierpdu" => { - set_table_with_new_cache(&mut opts, cfg, cache, name, cache_size(cfg, cfg.pdu_cache_capacity, 1536)); + set_table_with_new_cache(&mut opts, cfg, cache, name, cache_size(cfg, cfg.pdu_cache_capacity, 1536)?); }, "pduid_pdu" => set_table_with_shared_cache(&mut opts, cfg, cache, name, "eventid_outlierpdu"), @@ -169,7 +171,7 @@ 
pub(crate) fn cf_options(cfg: &Config, name: &str, mut opts: Options, cache: &mu &_ => {}, } - opts + Ok(opts) } fn set_logging_defaults(opts: &mut Options, config: &Config) { @@ -325,13 +327,13 @@ fn set_table_with_shared_cache( opts.set_block_based_table_factory(&table); } -fn cache_size(config: &Config, base_size: u32, entity_size: usize) -> usize { +fn cache_size(config: &Config, base_size: u32, entity_size: usize) -> Result<usize> { let ents = f64::from(base_size) * config.cache_capacity_modifier; #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] (ents as usize) .checked_mul(entity_size) - .expect("cache capacity size is too large") + .ok_or_else(|| err!(Config("cache_capacity_modifier", "Cache size is too large."))) } fn table_options(_config: &Config) -> BlockBasedOptions { @@ -346,3 +348,15 @@ fn table_options(_config: &Config) -> BlockBasedOptions { opts } + +fn num_threads<T: TryFrom<usize>>(config: &Config) -> Result<T> { + const MIN_PARALLELISM: usize = 2; + + let requested = if config.rocksdb_parallelism_threads != 0 { + config.rocksdb_parallelism_threads + } else { + utils::available_parallelism() + }; + + utils::math::try_into::<T, usize>(cmp::max(MIN_PARALLELISM, requested)) +} diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index 8dc2a34dc314d066c109c5d0521cf938bb010e22..fae39605f089f0620b8dc9342154be2bb6bc5ab0 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -48,9 +48,6 @@ default = [ "zstd_compression", ] -axum_dual_protocol = [ - "conduit-router/axum_dual_protocol", -] brotli_compression = [ "conduit-api/brotli_compression", "conduit-core/brotli_compression", @@ -90,6 +87,9 @@ jemalloc = [ jemalloc_prof = [ "conduit-core/jemalloc_prof", ] +jemalloc_stats = [ + "conduit-core/jemalloc_stats", +] perf_measurements = [ "dep:opentelemetry", "dep:tracing-flame", @@ -129,6 +129,7 @@ tokio_console = [ "tokio/tracing", ] zstd_compression = [ + "conduit-api/zstd_compression", "conduit-core/zstd_compression", "conduit-database/zstd_compression", "conduit-router/zstd_compression", diff --git a/src/main/clap.rs b/src/main/clap.rs index 92bd73c162a84ec81b69095b7692505a1f754c5e..3af0be0213c4ace417fc09b88a54932cc2a2c88b 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -24,6 +24,14 @@ pub(crate) struct Args { /// Activate admin command console automatically after startup. #[arg(long, num_args(0))] pub(crate) console: bool, + + /// Execute console command automatically after startup. + #[arg(long)] + pub(crate) execute: Vec<String>, + + /// Set functional testing modes if available. Ex '--test=smoke' + #[arg(long, hide(true))] + pub(crate) test: Vec<String>, } /// Parse commandline arguments into structured data @@ -39,6 +47,12 @@ pub(crate) fn update(mut config: Figment, args: &Args) -> Result<Figment> { config = config.join(("admin_console_automatic", true)); } + // Execute commands after any commands listed in configuration file + config = config.adjoin(("admin_execute", &args.execute)); + + // Update config with names of any functional-tests + config = config.adjoin(("test", &args.test)); + // All other individual overrides can go last in case we have options which // set multiple conf items at once and the user still needs granular overrides. 
for option in &args.option { diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml index 38e6adc7c333a552b0b10c49c341e44633880db6..2f85ffb7713ee4caaa60f0a63a485f47e46a949c 100644 --- a/src/router/Cargo.toml +++ b/src/router/Cargo.toml @@ -41,13 +41,9 @@ brotli_compression = [ systemd = [ "dep:sd-notify", ] -axum_dual_protocol = [ - "dep:axum-server-dual-protocol" -] [dependencies] axum-client-ip.workspace = true -axum-server-dual-protocol.optional = true axum-server-dual-protocol.workspace = true axum-server.workspace = true axum.workspace = true @@ -66,6 +62,7 @@ http.workspace = true hyper.workspace = true hyper-util.workspace = true ruma.workspace = true +rustls.workspace = true sentry.optional = true sentry-tower.optional = true sentry-tower.workspace = true diff --git a/src/router/layers.rs b/src/router/layers.rs index 2567fe081c197781093c06a92fe03b68971805bd..a1a70bb86500748db48f8a52b71d318cb128d7dd 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -16,9 +16,9 @@ use tower_http::{ catch_panic::CatchPanicLayer, cors::{self, CorsLayer}, + sensitive_headers::SetSensitiveHeadersLayer, set_header::SetResponseHeaderLayer, trace::{DefaultOnFailure, DefaultOnRequest, DefaultOnResponse, TraceLayer}, - ServiceBuilderExt as _, }; use tracing::Level; @@ -27,8 +27,6 @@ const CONDUWUIT_CSP: &[&str] = &[ "sandbox", "default-src 'none'", - "font-src 'none'", - "script-src 'none'", "frame-ancestors 'none'", "form-action 'none'", "base-uri 'none'", @@ -47,7 +45,7 @@ pub(crate) fn build(services: &Arc<Services>) -> Result<(Router, Guard)> { let layers = layers.layer(compression_layer(server)); let layers = layers - .sensitive_headers([header::AUTHORIZATION]) + .layer(SetSensitiveHeadersLayer::new([header::AUTHORIZATION])) .layer(axum::middleware::from_fn_with_state(Arc::clone(services), request::spawn)) .layer( TraceLayer::new_for_http() diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs index 6f58ce82d673488cada8bd839a0fe4df443a8877..174a511f4c8ef3486d7c609ea57d86665e65b5ab 100644 --- a/src/router/serve/tls.rs +++ b/src/router/serve/tls.rs @@ -1,9 +1,11 @@ use std::{net::SocketAddr, sync::Arc}; use axum::Router; -use axum_server::{bind_rustls, tls_rustls::RustlsConfig, Handle as ServerHandle}; -#[cfg(feature = "axum_dual_protocol")] -use axum_server_dual_protocol::ServerExt; +use axum_server::Handle as ServerHandle; +use axum_server_dual_protocol::{ + axum_server::{bind_rustls, tls_rustls::RustlsConfig}, + ServerExt, +}; use conduit::{Result, Server}; use tokio::task::JoinSet; use tracing::{debug, info, warn}; @@ -13,27 +15,22 @@ pub(super) async fn serve( ) -> Result<()> { let config = &server.config; let tls = config.tls.as_ref().expect("TLS configuration"); + let certs = &tls.certs; + let key = &tls.key; - debug!( - "Using direct TLS. Certificate path {} and certificate private key path {}", - &tls.certs, &tls.key - ); + // we use ring for ruma and hashing state, but aws-lc-rs is the new default. + // without this, TLS mode will panic. + _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); + + debug!("Using direct TLS. Certificate path {certs} and certificate private key path {key}",); info!( "Note: It is strongly recommended that you use a reverse proxy instead of running conduwuit directly with TLS." ); - let conf = RustlsConfig::from_pem_file(&tls.certs, &tls.key).await?; - - if cfg!(feature = "axum_dual_protocol") { - info!( - "conduwuit was built with axum_dual_protocol feature to listen on both HTTP and HTTPS. 
This will only \ - take effect if `dual_protocol` is enabled in `[global.tls]`" - ); - } + let conf = RustlsConfig::from_pem_file(certs, key).await?; let mut join_set = JoinSet::new(); let app = app.into_make_service_with_connect_info::<SocketAddr>(); - if cfg!(feature = "axum_dual_protocol") && tls.dual_protocol { - #[cfg(feature = "axum_dual_protocol")] + if tls.dual_protocol { for addr in &addrs { join_set.spawn_on( axum_server_dual_protocol::bind_dual_protocol(*addr, conf.clone()) @@ -54,13 +51,13 @@ pub(super) async fn serve( } } - if cfg!(feature = "axum_dual_protocol") && tls.dual_protocol { + if tls.dual_protocol { warn!( - "Listening on {:?} with TLS certificate {} and supporting plain text (HTTP) connections too (insecure!)", - addrs, &tls.certs + "Listening on {addrs:?} with TLS certificate {certs} and supporting plain text (HTTP) connections too \ + (insecure!)", ); } else { - info!("Listening on {:?} with TLS certificate {}", addrs, &tls.certs); + info!("Listening on {addrs:?} with TLS certificate {certs}"); } while join_set.join_next().await.is_some() {} diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index c1d9889e1a4e31b75489a043fcc156ee2f9110b9..cfed5a0e3ee30310b37dc24f0214d89c9c70ec4f 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -29,6 +29,9 @@ element_hacks = [] gzip_compression = [ "reqwest/gzip", ] +zstd_compression = [ + "reqwest/zstd", +] release_max_log_level = [ "tracing/max_level_trace", "tracing/release_max_level_info", @@ -43,7 +46,6 @@ bytes.workspace = true conduit-core.workspace = true conduit-database.workspace = true const-str.workspace = true -cyborgtime.workspace = true futures-util.workspace = true hickory-resolver.workspace = true http.workspace = true diff --git a/src/service/admin/console.rs b/src/service/admin/console.rs index c9a288d94d9eaeab1b44085a0f920963306620da..55bae36582608f19de61c11c0aa46cc82b6205b7 100644 --- a/src/service/admin/console.rs +++ b/src/service/admin/console.rs @@ -158,13 +158,18 @@ async fn handle(self: Arc<Self>, line: String) { async fn process(self: Arc<Self>, line: String) { match self.admin.command_in_place(line, None).await { - Ok(Some(content)) => self.output(content).await, - Err(e) => error!("processing command: {e}"), - _ => (), + Ok(Some(ref content)) => self.output(content), + Err(ref content) => self.output_err(content), + _ => unreachable!(), } } - async fn output(self: Arc<Self>, output_content: RoomMessageEventContent) { + fn output_err(self: Arc<Self>, output_content: &RoomMessageEventContent) { + let output = configure_output_err(self.output.clone()); + output.print_text(output_content.body()); + } + + fn output(self: Arc<Self>, output_content: &RoomMessageEventContent) { self.output.print_text(output_content.body()); } @@ -194,6 +199,32 @@ fn tab_complete(&self, line: &str) -> String { } } +/// Standalone/static markdown printer for errors. +pub fn print_err(markdown: &str) { + let output = configure_output_err(MadSkin::default_dark()); + output.print_text(markdown); +} +/// Standalone/static markdown printer. 
+pub fn print(markdown: &str) { + let output = configure_output(MadSkin::default_dark()); + output.print_text(markdown); +} + +fn configure_output_err(mut output: MadSkin) -> MadSkin { + use termimad::{crossterm::style::Color, Alignment, CompoundStyle, LineStyle}; + + let code_style = CompoundStyle::with_fgbg(Color::AnsiValue(196), Color::AnsiValue(234)); + output.inline_code = code_style.clone(); + output.code_block = LineStyle { + left_margin: 0, + right_margin: 0, + align: Alignment::Left, + compound_style: code_style, + }; + + output +} + fn configure_output(mut output: MadSkin) -> MadSkin { use termimad::{crossterm::style::Color, Alignment, CompoundStyle, LineStyle}; diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 18cbe0399ca2d543372938d6a220e53e50dd63b7..ef84d8033f760432916fe14e555494c741fcf2e2 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -1,8 +1,7 @@ use std::collections::BTreeMap; -use conduit::{pdu::PduBuilder, warn, Error, Result}; +use conduit::{pdu::PduBuilder, Result}; use ruma::{ - api::client::error::ErrorKind, events::{ room::{ canonical_alias::RoomCanonicalAliasEventContent, @@ -45,14 +44,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { use RoomVersionId::*; match room_version { V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => RoomCreateEventContent::new_v1(server_user.clone()), - V11 => RoomCreateEventContent::new_v11(), - _ => { - warn!("Unexpected or unsupported room version {}", room_version); - return Err(Error::BadRequest( - ErrorKind::BadJson, - "Unexpected or unsupported room version found", - )); - }, + _ => RoomCreateEventContent::new_v11(), } }; @@ -71,6 +63,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, server_user, &room_id, @@ -99,6 +92,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { unsigned: None, state_key: Some(server_user.to_string()), redacts: None, + timestamp: None, }, server_user, &room_id, @@ -124,6 +118,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, server_user, &room_id, @@ -143,6 +138,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, server_user, &room_id, @@ -162,6 +158,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, server_user, &room_id, @@ -181,6 +178,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, server_user, &room_id, @@ -201,6 +199,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, server_user, &room_id, @@ -221,6 +220,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, server_user, &room_id, @@ -245,6 +245,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, server_user, &room_id, @@ -271,6 +272,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { 
unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, server_user, &room_id, diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index c35f8c42117e2e67bebe96a704abd7ba52e2cfcd..8cbee604dbd999834a70148a7799da6c2262a59f 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -1,6 +1,6 @@ use std::collections::BTreeMap; -use conduit::Result; +use conduit::{error, implement, Result}; use ruma::{ events::{ room::{ @@ -8,9 +8,10 @@ message::RoomMessageEventContent, power_levels::RoomPowerLevelsEventContent, }, - TimelineEventType, + tag::{TagEvent, TagEventContent, TagInfo}, + RoomAccountDataEventType, TimelineEventType, }, - UserId, + RoomId, UserId, }; use serde_json::value::to_raw_value; @@ -50,6 +51,7 @@ pub async fn make_user_admin(&self, user_id: &UserId, displayname: String) -> Re unsigned: None, state_key: Some(user_id.to_string()), redacts: None, + timestamp: None, }, server_user, &room_id, @@ -75,6 +77,7 @@ pub async fn make_user_admin(&self, user_id: &UserId, displayname: String) -> Re unsigned: None, state_key: Some(user_id.to_string()), redacts: None, + timestamp: None, }, user_id, &room_id, @@ -100,6 +103,7 @@ pub async fn make_user_admin(&self, user_id: &UserId, displayname: String) -> Re unsigned: None, state_key: Some(String::new()), redacts: None, + timestamp: None, }, server_user, &room_id, @@ -107,6 +111,14 @@ pub async fn make_user_admin(&self, user_id: &UserId, displayname: String) -> Re ) .await?; + // Set room tag + let room_tag = &self.services.server.config.admin_room_tag; + if !room_tag.is_empty() { + if let Err(e) = self.set_room_tag(&room_id, user_id, room_tag) { + error!(?room_id, ?user_id, ?room_tag, ?e, "Failed to set tag for admin grant"); + } + } + // Send welcome message self.services.timeline.build_and_append_pdu( PduBuilder { @@ -119,6 +131,7 @@ pub async fn make_user_admin(&self, user_id: &UserId, displayname: String) -> Re unsigned: None, state_key: None, redacts: None, + timestamp: None, }, server_user, &room_id, @@ -128,3 +141,32 @@ pub async fn make_user_admin(&self, user_id: &UserId, displayname: String) -> Re Ok(()) } } + +#[implement(super::Service)] +fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> Result<()> { + let mut event = self + .services + .account_data + .get(Some(room_id), user_id, RoomAccountDataEventType::Tag)? 
+ .map(|event| serde_json::from_str(event.get())) + .and_then(Result::ok) + .unwrap_or_else(|| TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }); + + event + .content + .tags + .insert(tag.to_owned().into(), TagInfo::new()); + + self.services.account_data.update( + Some(room_id), + user_id, + RoomAccountDataEventType::Tag, + &serde_json::to_value(event)?, + )?; + + Ok(()) +} diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 0e8a65f1ff54c5d6dbec94d8e11ba831a600979f..3274249e6060f7e09c2d2628768a42059a4114e7 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -1,6 +1,7 @@ pub mod console; mod create; mod grant; +mod startup; use std::{ future::Future, @@ -9,7 +10,7 @@ }; use async_trait::async_trait; -use conduit::{debug, error, error::default_log, pdu::PduBuilder, Err, Error, PduEvent, Result, Server}; +use conduit::{debug, err, error, error::default_log, pdu::PduBuilder, Error, PduEvent, Result, Server}; pub use create::create_admin_room; use loole::{Receiver, Sender}; use ruma::{ @@ -22,13 +23,13 @@ use serde_json::value::to_raw_value; use tokio::sync::{Mutex, RwLock}; -use crate::{globals, rooms, rooms::state::RoomMutexGuard, Dep}; +use crate::{account_data, globals, rooms, rooms::state::RoomMutexGuard, Dep}; pub struct Service { services: Services, sender: Sender<CommandInput>, receiver: Mutex<Receiver<CommandInput>>, - pub handle: RwLock<Option<Handler>>, + pub handle: RwLock<Option<Processor>>, pub complete: StdRwLock<Option<Completer>>, #[cfg(feature = "console")] pub console: Arc<console::Console>, @@ -41,21 +42,38 @@ struct Services { timeline: Dep<rooms::timeline::Service>, state: Dep<rooms::state::Service>, state_cache: Dep<rooms::state_cache::Service>, + account_data: Dep<account_data::Service>, services: StdRwLock<Option<Weak<crate::Services>>>, } +/// Inputs to a command are a multi-line string and optional reply_id. #[derive(Debug)] pub struct CommandInput { pub command: String, pub reply_id: Option<OwnedEventId>, } +/// Prototype of the tab-completer. The input is buffered text when tab +/// asserted; the output will fully replace the input buffer. pub type Completer = fn(&str) -> String; -pub type Handler = fn(Arc<crate::Services>, CommandInput) -> HandlerFuture; -pub type HandlerFuture = Pin<Box<dyn Future<Output = HandlerResult> + Send>>; -pub type HandlerResult = Result<CommandOutput>; -pub type CommandOutput = Option<RoomMessageEventContent>; +/// Prototype of the command processor. This is a callback supplied by the +/// reloadable admin module. +pub type Processor = fn(Arc<crate::Services>, CommandInput) -> ProcessorFuture; + +/// Return type of the processor +pub type ProcessorFuture = Pin<Box<dyn Future<Output = ProcessorResult> + Send>>; + +/// Result wrapping of a command's handling. Both variants are complete message +/// events which have digested any prior errors. The wrapping preserves whether +/// the command failed without interpreting the text. Ok(None) outputs are +/// dropped to produce no response. +pub type ProcessorResult = Result<Option<CommandOutput>, CommandOutput>; + +/// Alias for the output structure. +pub type CommandOutput = RoomMessageEventContent; + +/// Maximum number of commands which can be queued for dispatch. 
 const COMMAND_QUEUE_LIMIT: usize = 512;
 
 #[async_trait]
@@ -70,6 +88,7 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
 				timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
 				state: args.depend::<rooms::state::Service>("rooms::state"),
 				state_cache: args.depend::<rooms::state_cache::Service>("rooms::state_cache"),
+				account_data: args.depend::<account_data::Service>("account_data"),
 				services: None.into(),
 			},
 			sender,
@@ -84,6 +103,8 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
 	async fn worker(self: Arc<Self>) -> Result<()> {
 		let receiver = self.receiver.lock().await;
 		let mut signals = self.services.server.signal.subscribe();
+
+		self.startup_execute().await?;
 		self.console_auto_start().await;
 
 		loop {
@@ -117,11 +138,15 @@ fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
 }
 
 impl Service {
+	/// Sends markdown message (not an m.notice for notification reasons) to the
+	/// admin room as the admin user.
 	pub async fn send_text(&self, body: &str) {
 		self.send_message(RoomMessageEventContent::text_markdown(body))
 			.await;
 	}
 
+	/// Sends a message to the admin room as the admin user (see send_text() for
+	/// convenience).
 	pub async fn send_message(&self, message_content: RoomMessageEventContent) {
 		if let Ok(Some(room_id)) = self.get_admin_room() {
 			let user_id = &self.services.globals.server_user;
@@ -130,17 +155,21 @@ pub async fn send_message(&self, message_content: RoomMessageEventContent) {
 		}
 	}
 
-	pub async fn command(&self, command: String, reply_id: Option<OwnedEventId>) {
-		self.send(CommandInput {
-			command,
-			reply_id,
-		})
-		.await;
+	/// Posts a command to the command processor queue and returns. Processing
+	/// will take place on the service worker's task asynchronously. Errors if
+	/// the queue is full.
+	pub fn command(&self, command: String, reply_id: Option<OwnedEventId>) -> Result<()> {
+		self.sender
+			.send(CommandInput {
+				command,
+				reply_id,
+			})
+			.map_err(|e| err!("Failed to enqueue admin command: {e:?}"))
 	}
 
-	pub async fn command_in_place(
-		&self, command: String, reply_id: Option<OwnedEventId>,
-	) -> Result<Option<RoomMessageEventContent>> {
+	/// Dispatches a command to the processor on the current task and waits for
+	/// completion.
+	pub async fn command_in_place(&self, command: String, reply_id: Option<OwnedEventId>) -> ProcessorResult {
 		self.process_command(CommandInput {
 			command,
 			reply_id,
@@ -148,6 +177,8 @@ pub async fn command_in_place(
 		.await
 	}
 
+	/// Invokes the tab-completer to complete the command. When unavailable,
+	/// None is returned.
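A minimal usage sketch of the two dispatch paths above, assuming the usual `services.admin` handle and a placeholder command string:

	// Fire-and-forget: enqueue for the admin worker task; errors if the
	// COMMAND_QUEUE_LIMIT-bounded queue is full.
	services.admin.command("<admin command>".to_owned(), None)?;

	// In-place: run on the current task and inspect the ProcessorResult.
	match services.admin.command_in_place("<admin command>".to_owned(), None).await {
		Ok(Some(output)) => println!("{}", output.body()),
		Ok(None) => {}, // succeeded with no output
		Err(output) => eprintln!("{}", output.body()),
	}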
pub fn complete_command(&self, command: &str) -> Option<String> { self.complete .read() @@ -155,11 +186,6 @@ pub fn complete_command(&self, command: &str) -> Option<String> { .map(|complete| complete(command)) } - async fn send(&self, message: CommandInput) { - debug_assert!(!self.sender.is_closed(), "channel closed"); - self.sender.send_async(message).await.expect("message sent"); - } - async fn handle_signal(&self, #[allow(unused_variables)] sig: &'static str) { #[cfg(feature = "console")] self.console.handle_signal(sig).await; @@ -167,29 +193,28 @@ async fn handle_signal(&self, #[allow(unused_variables)] sig: &'static str) { async fn handle_command(&self, command: CommandInput) { match self.process_command(command).await { - Ok(Some(output)) => self.handle_response(output).await, + Ok(Some(output)) | Err(output) => self.handle_response(output).await, Ok(None) => debug!("Command successful with no response"), - Err(e) => error!("Command processing error: {e}"), } } - async fn process_command(&self, command: CommandInput) -> HandlerResult { - let Some(services) = self + async fn process_command(&self, command: CommandInput) -> ProcessorResult { + let handle = &self + .handle + .read() + .await + .expect("Admin module is not loaded"); + + let services = self .services .services .read() .expect("locked") .as_ref() .and_then(Weak::upgrade) - else { - return Err!("Services self-reference not initialized."); - }; + .expect("Services self-reference not initialized."); - if let Some(handle) = self.handle.read().await.as_ref() { - handle(services, command).await - } else { - Err!("Admin module is not loaded.") - } + handle(services, command).await } /// Checks whether a given user is an admin of this server @@ -232,6 +257,10 @@ async fn handle_response(&self, content: RoomMessageEventContent) { }; let Ok(Some(pdu)) = self.services.timeline.get_pdu(&in_reply_to.event_id) else { + error!( + event_id = ?in_reply_to.event_id, + "Missing admin command in_reply_to event" + ); return; }; @@ -260,6 +289,7 @@ async fn respond_to_room(&self, content: RoomMessageEventContent, room_id: &Room unsigned: None, state_key: None, redacts: None, + timestamp: None, }; if let Err(e) = self @@ -289,6 +319,7 @@ async fn handle_response_error( unsigned: None, state_key: None, redacts: None, + timestamp: None, }; self.services @@ -354,22 +385,6 @@ pub fn is_admin_room(&self, room_id: &RoomId) -> bool { } } - /// Possibly spawn the terminal console at startup if configured. - async fn console_auto_start(&self) { - #[cfg(feature = "console")] - if self.services.server.config.admin_console_automatic { - // Allow more of the startup sequence to execute before spawning - tokio::task::yield_now().await; - self.console.start().await; - } - } - - /// Shutdown the console when the admin worker terminates. - async fn console_auto_stop(&self) { - #[cfg(feature = "console")] - self.console.close().await; - } - /// Sets the self-reference to crate::Services which will provide context to /// the admin commands. 
 	pub(super) fn set_services(&self, services: &Option<Arc<crate::Services>>) {
diff --git a/src/service/admin/startup.rs b/src/service/admin/startup.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b7bc8d4442d8b30cb406324e45915f34d93628f7
--- /dev/null
+++ b/src/service/admin/startup.rs
@@ -0,0 +1,103 @@
+use conduit::{debug, debug_info, error, implement, info, Err, Result};
+use ruma::events::room::message::RoomMessageEventContent;
+use tokio::time::{sleep, Duration};
+
+/// Possibly spawn the terminal console at startup if configured.
+#[implement(super::Service)]
+pub(super) async fn console_auto_start(&self) {
+	#[cfg(feature = "console")]
+	if self.services.server.config.admin_console_automatic {
+		// Allow more of the startup sequence to execute before spawning
+		tokio::task::yield_now().await;
+		self.console.start().await;
+	}
+}
+
+/// Shutdown the console when the admin worker terminates.
+#[implement(super::Service)]
+pub(super) async fn console_auto_stop(&self) {
+	#[cfg(feature = "console")]
+	self.console.close().await;
+}
+
+/// Execute admin commands after startup
+#[implement(super::Service)]
+pub(super) async fn startup_execute(&self) -> Result<()> {
+	// List of commands to execute
+	let commands = &self.services.server.config.admin_execute;
+
+	// Determine if we're running in smoketest-mode which will change some behaviors
+	let smoketest = self.services.server.config.test.contains("smoke");
+
+	// When true, errors are ignored and startup continues.
+	let errors = !smoketest && self.services.server.config.admin_execute_errors_ignore;
+
+	//TODO: remove this after run-states are broadcast
+	sleep(Duration::from_millis(500)).await;
+
+	for (i, command) in commands.iter().enumerate() {
+		if let Err(e) = self.startup_execute_command(i, command.clone()).await {
+			if !errors {
+				return Err(e);
+			}
+		}
+
+		tokio::task::yield_now().await;
+	}
+
+	// The smoketest functionality is placed here for now and simply initiates
+	// shutdown after all commands have executed.
+	if smoketest {
+		debug_info!("Smoketest mode. All commands complete. 
Shutting down now..."); + self.services + .server + .shutdown() + .inspect_err(error::inspect_log) + .expect("Error shutting down from smoketest"); + } + + Ok(()) +} + +/// Execute one admin command after startup +#[implement(super::Service)] +async fn startup_execute_command(&self, i: usize, command: String) -> Result<()> { + debug!("Startup command #{i}: executing {command:?}"); + + match self.command_in_place(command, None).await { + Ok(Some(output)) => Self::startup_command_output(i, &output), + Err(output) => Self::startup_command_error(i, &output), + Ok(None) => { + info!("Startup command #{i} completed (no output)."); + Ok(()) + }, + } +} + +#[cfg(feature = "console")] +#[implement(super::Service)] +fn startup_command_output(i: usize, content: &RoomMessageEventContent) -> Result<()> { + debug_info!("Startup command #{i} completed:"); + super::console::print(content.body()); + Ok(()) +} + +#[cfg(feature = "console")] +#[implement(super::Service)] +fn startup_command_error(i: usize, content: &RoomMessageEventContent) -> Result<()> { + super::console::print_err(content.body()); + Err!(debug_error!("Startup command #{i} failed.")) +} + +#[cfg(not(feature = "console"))] +#[implement(super::Service)] +fn startup_command_output(i: usize, content: &RoomMessageEventContent) -> Result<()> { + info!("Startup command #{i} completed:\n{:#?}", content.body()); + Ok(()) +} + +#[cfg(not(feature = "console"))] +#[implement(super::Service)] +fn startup_command_error(i: usize, content: &RoomMessageEventContent) -> Result<()> { + Err!(error!("Startup command #{i} failed:\n{:#?}", content.body())) +} diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index 386bd33ca093347beaeb82a2f0343aea1a58a443..e0f43107b7056c5a2af54b4a7d73d06d050a39b4 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -8,6 +8,7 @@ pub struct Service { pub default: reqwest::Client, pub url_preview: reqwest::Client, + pub extern_media: reqwest::Client, pub well_known: reqwest::Client, pub federation: reqwest::Client, pub sender: reqwest::Client, @@ -21,54 +22,48 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> { let resolver = args.require::<resolver::Service>("resolver"); Ok(Arc::new(Self { - default: base(config) - .unwrap() + default: base(config)? .dns_resolver(resolver.resolver.clone()) - .build() - .unwrap(), + .build()?, - url_preview: base(config) - .unwrap() + url_preview: base(config)? .dns_resolver(resolver.resolver.clone()) .redirect(redirect::Policy::limited(3)) - .build() - .unwrap(), + .build()?, - well_known: base(config) - .unwrap() + extern_media: base(config)? + .dns_resolver(resolver.resolver.clone()) + .redirect(redirect::Policy::limited(3)) + .build()?, + + well_known: base(config)? .dns_resolver(resolver.resolver.hooked.clone()) .connect_timeout(Duration::from_secs(config.well_known_conn_timeout)) .read_timeout(Duration::from_secs(config.well_known_timeout)) .timeout(Duration::from_secs(config.well_known_timeout)) .pool_max_idle_per_host(0) .redirect(redirect::Policy::limited(4)) - .build() - .unwrap(), + .build()?, - federation: base(config) - .unwrap() + federation: base(config)? 
 			.dns_resolver(resolver.resolver.hooked.clone())
 			.read_timeout(Duration::from_secs(config.federation_timeout))
 			.timeout(Duration::from_secs(config.federation_timeout))
 			.pool_max_idle_per_host(config.federation_idle_per_host.into())
 			.pool_idle_timeout(Duration::from_secs(config.federation_idle_timeout))
 			.redirect(redirect::Policy::limited(3))
-			.build()
-			.unwrap(),
+			.build()?,
 
-		sender: base(config)
-			.unwrap()
+		sender: base(config)?
 			.dns_resolver(resolver.resolver.hooked.clone())
 			.read_timeout(Duration::from_secs(config.sender_timeout))
 			.timeout(Duration::from_secs(config.sender_timeout))
 			.pool_max_idle_per_host(1)
 			.pool_idle_timeout(Duration::from_secs(config.sender_idle_timeout))
 			.redirect(redirect::Policy::limited(2))
-			.build()
-			.unwrap(),
+			.build()?,
 
-		appservice: base(config)
-			.unwrap()
+		appservice: base(config)?
 			.dns_resolver(resolver.resolver.clone())
 			.connect_timeout(Duration::from_secs(5))
 			.read_timeout(Duration::from_secs(config.appservice_timeout))
@@ -76,17 +71,14 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
 			.pool_max_idle_per_host(1)
 			.pool_idle_timeout(Duration::from_secs(config.appservice_idle_timeout))
 			.redirect(redirect::Policy::limited(2))
-			.build()
-			.unwrap(),
+			.build()?,
 
-		pusher: base(config)
-			.unwrap()
+		pusher: base(config)?
 			.dns_resolver(resolver.resolver.clone())
 			.pool_max_idle_per_host(1)
 			.pool_idle_timeout(Duration::from_secs(config.pusher_idle_timeout))
 			.redirect(redirect::Policy::limited(2))
-			.build()
-			.unwrap(),
+			.build()?,
 	}))
 }
 
@@ -123,6 +115,15 @@ fn base(config: &Config) -> Result<reqwest::ClientBuilder> {
 		};
 	};
 
+	#[cfg(feature = "zstd_compression")]
+	{
+		builder = if config.zstd_compression {
+			builder.zstd(true)
+		} else {
+			builder.zstd(false).no_zstd()
+		};
+	};
+
 	#[cfg(not(feature = "gzip_compression"))]
 	{
 		builder = builder.no_gzip();
@@ -133,6 +134,11 @@ fn base(config: &Config) -> Result<reqwest::ClientBuilder> {
 		builder = builder.no_brotli();
 	};
 
+	#[cfg(not(feature = "zstd_compression"))]
+	{
+		builder = builder.no_zstd();
+	};
+
 	if let Some(proxy) = config.proxy.to_proxy()? 
{ Ok(builder.proxy(proxy)) } else { diff --git a/src/service/globals/client.rs b/src/service/globals/client.rs deleted file mode 100644 index d8b84dedebb02d46ac23e0d41accea27c8dd1487..0000000000000000000000000000000000000000 --- a/src/service/globals/client.rs +++ /dev/null @@ -1,135 +0,0 @@ -use std::{sync::Arc, time::Duration}; - -use reqwest::redirect; - -use crate::{resolver, Config, Result}; - -pub struct Client { - pub default: reqwest::Client, - pub url_preview: reqwest::Client, - pub well_known: reqwest::Client, - pub federation: reqwest::Client, - pub sender: reqwest::Client, - pub appservice: reqwest::Client, - pub pusher: reqwest::Client, -} - -impl Client { - pub fn new(config: &Config, resolver: &Arc<resolver::Service>) -> Self { - Self { - default: Self::base(config) - .unwrap() - .dns_resolver(resolver.clone()) - .build() - .unwrap(), - - url_preview: Self::base(config) - .unwrap() - .dns_resolver(resolver.clone()) - .redirect(redirect::Policy::limited(3)) - .build() - .unwrap(), - - well_known: Self::base(config) - .unwrap() - .dns_resolver(resolver.hooked.clone()) - .connect_timeout(Duration::from_secs(config.well_known_conn_timeout)) - .read_timeout(Duration::from_secs(config.well_known_timeout)) - .timeout(Duration::from_secs(config.well_known_timeout)) - .pool_max_idle_per_host(0) - .redirect(redirect::Policy::limited(4)) - .build() - .unwrap(), - - federation: Self::base(config) - .unwrap() - .dns_resolver(resolver.hooked.clone()) - .read_timeout(Duration::from_secs(config.federation_timeout)) - .timeout(Duration::from_secs(config.federation_timeout)) - .pool_max_idle_per_host(config.federation_idle_per_host.into()) - .pool_idle_timeout(Duration::from_secs(config.federation_idle_timeout)) - .redirect(redirect::Policy::limited(3)) - .build() - .unwrap(), - - sender: Self::base(config) - .unwrap() - .dns_resolver(resolver.hooked.clone()) - .read_timeout(Duration::from_secs(config.sender_timeout)) - .timeout(Duration::from_secs(config.sender_timeout)) - .pool_max_idle_per_host(1) - .pool_idle_timeout(Duration::from_secs(config.sender_idle_timeout)) - .redirect(redirect::Policy::limited(2)) - .build() - .unwrap(), - - appservice: Self::base(config) - .unwrap() - .dns_resolver(resolver.clone()) - .connect_timeout(Duration::from_secs(5)) - .read_timeout(Duration::from_secs(config.appservice_timeout)) - .timeout(Duration::from_secs(config.appservice_timeout)) - .pool_max_idle_per_host(1) - .pool_idle_timeout(Duration::from_secs(config.appservice_idle_timeout)) - .redirect(redirect::Policy::limited(2)) - .build() - .unwrap(), - - pusher: Self::base(config) - .unwrap() - .dns_resolver(resolver.clone()) - .pool_max_idle_per_host(1) - .pool_idle_timeout(Duration::from_secs(config.pusher_idle_timeout)) - .redirect(redirect::Policy::limited(2)) - .build() - .unwrap(), - } - } - - fn base(config: &Config) -> Result<reqwest::ClientBuilder> { - let mut builder = reqwest::Client::builder() - .hickory_dns(true) - .connect_timeout(Duration::from_secs(config.request_conn_timeout)) - .read_timeout(Duration::from_secs(config.request_timeout)) - .timeout(Duration::from_secs(config.request_total_timeout)) - .pool_idle_timeout(Duration::from_secs(config.request_idle_timeout)) - .pool_max_idle_per_host(config.request_idle_per_host.into()) - .user_agent(conduit::version::user_agent()) - .redirect(redirect::Policy::limited(6)) - .connection_verbose(true); - - #[cfg(feature = "gzip_compression")] - { - builder = if config.gzip_compression { - builder.gzip(true) - } else { - 
builder.gzip(false).no_gzip() - }; - }; - - #[cfg(feature = "brotli_compression")] - { - builder = if config.brotli_compression { - builder.brotli(true) - } else { - builder.brotli(false).no_brotli() - }; - }; - - #[cfg(not(feature = "gzip_compression"))] - { - builder = builder.no_gzip(); - }; - - #[cfg(not(feature = "brotli_compression"))] - { - builder = builder.no_brotli(); - }; - - if let Some(proxy) = config.proxy.to_proxy()? { - Ok(builder.proxy(proxy)) - } else { - Ok(builder) - } - } -} diff --git a/src/service/globals/migrations.rs b/src/service/globals/migrations.rs index 5e078595b2e89487cfa5714b9a01690a6e7e16c3..39d829b4133bcb2c8e7c381b3c7eda717b85a489 100644 --- a/src/service/globals/migrations.rs +++ b/src/service/globals/migrations.rs @@ -1,15 +1,12 @@ use std::{ collections::{HashMap, HashSet}, - ffi::{OsStr, OsString}, fs::{self}, io::Write, mem::size_of, - path::PathBuf, sync::Arc, - time::Instant, }; -use conduit::{debug, debug_info, debug_warn, error, info, utils, warn, Config, Error, Result}; +use conduit::{debug, debug_info, debug_warn, error, info, utils, warn, Error, Result}; use itertools::Itertools; use ruma::{ events::{push_rules::PushRulesEvent, room::member::MembershipState, GlobalAccountDataEventType}, @@ -17,7 +14,7 @@ EventId, OwnedRoomId, RoomId, UserId, }; -use crate::Services; +use crate::{media, Services}; /// The current schema version. /// - If database is opened at greater version we reject with error. The @@ -25,7 +22,7 @@ /// - If database is opened at lesser version we apply migrations up to this. /// Note that named-feature migrations may also be performed when opening at /// equal or lesser version. These are expected to be backward-compatible. -const DATABASE_VERSION: u64 = 13; +pub(crate) const DATABASE_VERSION: u64 = 13; pub(crate) async fn migrations(services: &Services) -> Result<()> { // Matrix resource ownership is based on the server name; changing it @@ -131,9 +128,9 @@ async fn migrate(services: &Services) -> Result<()> { } if db["global"].get(b"feat_sha256_media")?.is_none() { - migrate_sha256_media(services).await?; + media::migrations::migrate_sha256_media(services).await?; } else if config.media_startup_check { - checkup_sha256_media(services).await?; + media::migrations::checkup_sha256_media(services).await?; } if db["global"] @@ -738,152 +735,6 @@ async fn db_lt_13(services: &Services) -> Result<()> { Ok(()) } -/// Migrates a media directory from legacy base64 file names to sha2 file names. -/// All errors are fatal. Upon success the database is keyed to not perform this -/// again. -async fn migrate_sha256_media(services: &Services) -> Result<()> { - let db = &services.db; - let config = &services.server.config; - - warn!("Migrating legacy base64 file names to sha256 file names"); - let mediaid_file = &db["mediaid_file"]; - - // Move old media files to new names - let mut changes = Vec::<(PathBuf, PathBuf)>::new(); - for (key, _) in mediaid_file.iter() { - let old = services.media.get_media_file_b64(&key); - let new = services.media.get_media_file_sha256(&key); - debug!(?key, ?old, ?new, num = changes.len(), "change"); - changes.push((old, new)); - } - // move the file to the new location - for (old_path, path) in changes { - if old_path.exists() { - tokio::fs::rename(&old_path, &path).await?; - if config.media_compat_file_link { - tokio::fs::symlink(&path, &old_path).await?; - } - } - } - - // Apply fix from when sha256_media was backward-incompat and bumped the schema - // version from 13 to 14. 
For users satisfying these conditions we can go back. - if services.globals.db.database_version()? == 14 && DATABASE_VERSION == 13 { - services.globals.db.bump_database_version(13)?; - } - - db["global"].insert(b"feat_sha256_media", &[])?; - info!("Finished applying sha256_media"); - Ok(()) -} - -/// Check is run on startup for prior-migrated media directories. This handles: -/// - Going back and forth to non-sha256 legacy binaries (e.g. upstream). -/// - Deletion of artifacts in the media directory which will then fall out of -/// sync with the database. -async fn checkup_sha256_media(services: &Services) -> Result<()> { - use crate::media::encode_key; - - debug!("Checking integrity of media directory"); - let db = &services.db; - let media = &services.media; - let config = &services.server.config; - let mediaid_file = &db["mediaid_file"]; - let mediaid_user = &db["mediaid_user"]; - let dbs = (mediaid_file, mediaid_user); - let timer = Instant::now(); - - let dir = media.get_media_dir(); - let files: HashSet<OsString> = fs::read_dir(dir)? - .filter_map(|ent| ent.map_or(None, |ent| Some(ent.path().into_os_string()))) - .collect(); - - for key in media.db.get_all_media_keys() { - let new_path = media.get_media_file_sha256(&key).into_os_string(); - let old_path = media.get_media_file_b64(&key).into_os_string(); - if let Err(e) = handle_media_check(&dbs, config, &files, &key, &new_path, &old_path).await { - error!( - media_id = ?encode_key(&key), ?new_path, ?old_path, - "Failed to resolve media check failure: {e}" - ); - } - } - - debug_info!( - elapsed = ?timer.elapsed(), - "Finished checking media directory" - ); - - Ok(()) -} - -async fn handle_media_check( - dbs: &(&Arc<database::Map>, &Arc<database::Map>), config: &Config, files: &HashSet<OsString>, key: &[u8], - new_path: &OsStr, old_path: &OsStr, -) -> Result<()> { - use crate::media::encode_key; - - let (mediaid_file, mediaid_user) = dbs; - - let new_exists = files.contains(new_path); - let old_exists = files.contains(old_path); - let old_is_symlink = || async { - tokio::fs::symlink_metadata(old_path) - .await - .map_or(false, |md| md.is_symlink()) - }; - - if !old_exists && !new_exists { - error!( - media_id = ?encode_key(key), ?new_path, ?old_path, - "Media is missing at all paths. Removing from database..." - ); - - mediaid_file.remove(key)?; - mediaid_user.remove(key)?; - } - - if config.media_compat_file_link && !old_exists && new_exists { - debug_warn!( - media_id = ?encode_key(key), ?new_path, ?old_path, - "Media found but missing legacy link. Fixing..." - ); - - tokio::fs::symlink(&new_path, &old_path).await?; - } - - if config.media_compat_file_link && !new_exists && old_exists { - debug_warn!( - media_id = ?encode_key(key), ?new_path, ?old_path, - "Legacy media found without sha256 migration. Fixing..." - ); - - debug_assert!( - old_is_symlink().await, - "Legacy media not expected to be a symlink without an existing sha256 migration." - ); - - tokio::fs::rename(&old_path, &new_path).await?; - tokio::fs::symlink(&new_path, &old_path).await?; - } - - if !config.media_compat_file_link && old_exists && old_is_symlink().await { - debug_warn!( - media_id = ?encode_key(key), ?new_path, ?old_path, - "Legacy link found but compat disabled. Cleansing symlink..." - ); - - debug_assert!( - new_exists, - "sha256 migration into new file expected prior to cleaning legacy symlink here." 
- ); - - tokio::fs::remove_file(&old_path).await?; - } - - Ok(()) -} - async fn fix_bad_double_separator_in_state_cache(services: &Services) -> Result<()> { warn!("Fixing bad double separator in state_cache roomuserid_joined"); diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 2c588dce09a31216b7deb73a2a88690f47f409fd..05fe1a77c2923358e6a3fe19cb6b4d01f1feb582 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -248,8 +248,6 @@ pub fn allow_incoming_read_receipts(&self) -> bool { self.config.allow_incoming_ pub fn allow_outgoing_read_receipts(&self) -> bool { self.config.allow_outgoing_read_receipts } - pub fn prevent_media_downloads_from(&self) -> &[OwnedServerName] { &self.config.prevent_media_downloads_from } - pub fn forbidden_remote_room_directory_server_names(&self) -> &[OwnedServerName] { &self.config.forbidden_remote_room_directory_server_names } diff --git a/src/service/media/data.rs b/src/service/media/data.rs index 70e010c2a2580a98c97ed590ad7348e58f9ea342..e5d6d20b19f0d004c39d381a8f560fdd7d650ea0 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -1,10 +1,14 @@ use std::sync::Arc; -use conduit::{debug, debug_info, utils::string_from_bytes, Error, Result}; +use conduit::{ + debug, debug_info, trace, + utils::{str_from_bytes, string_from_bytes}, + Err, Error, Result, +}; use database::{Database, Map}; -use ruma::api::client::error::ErrorKind; +use ruma::{api::client::error::ErrorKind, http_headers::ContentDisposition, Mxc, OwnedMxcUri, UserId}; -use super::preview::UrlPreviewData; +use super::{preview::UrlPreviewData, thumbnail::Dim}; pub(crate) struct Data { mediaid_file: Arc<Map>, @@ -14,7 +18,7 @@ pub(crate) struct Data { #[derive(Debug)] pub(super) struct Metadata { - pub(super) content_disposition: Option<String>, + pub(super) content_disposition: Option<ContentDisposition>, pub(super) content_type: Option<String>, pub(super) key: Vec<u8>, } @@ -29,19 +33,23 @@ pub(super) fn new(db: &Arc<Database>) -> Self { } pub(super) fn create_file_metadata( - &self, sender_user: Option<&str>, mxc: &str, width: u32, height: u32, content_disposition: Option<&str>, + &self, mxc: &Mxc<'_>, user: Option<&UserId>, dim: &Dim, content_disposition: Option<&ContentDisposition>, content_type: Option<&str>, ) -> Result<Vec<u8>> { - let mut key = mxc.as_bytes().to_vec(); + let mut key: Vec<u8> = Vec::new(); + key.extend_from_slice(b"mxc://"); + key.extend_from_slice(mxc.server_name.as_bytes()); + key.extend_from_slice(b"/"); + key.extend_from_slice(mxc.media_id.as_bytes()); key.push(0xFF); - key.extend_from_slice(&width.to_be_bytes()); - key.extend_from_slice(&height.to_be_bytes()); + key.extend_from_slice(&dim.width.to_be_bytes()); + key.extend_from_slice(&dim.height.to_be_bytes()); key.push(0xFF); key.extend_from_slice( content_disposition - .as_ref() - .map(|f| f.as_bytes()) - .unwrap_or_default(), + .map(ToString::to_string) + .unwrap_or_default() + .as_bytes(), ); key.push(0xFF); key.extend_from_slice( @@ -53,8 +61,12 @@ pub(super) fn create_file_metadata( self.mediaid_file.insert(&key, &[])?; - if let Some(user) = sender_user { - let key = mxc.as_bytes().to_vec(); + if let Some(user) = user { + let mut key: Vec<u8> = Vec::new(); + key.extend_from_slice(b"mxc://"); + key.extend_from_slice(mxc.server_name.as_bytes()); + key.extend_from_slice(b"/"); + key.extend_from_slice(mxc.media_id.as_bytes()); let user = user.as_bytes().to_vec(); self.mediaid_user.insert(&key, &user)?; } @@ -62,22 +74,25 @@ pub(super) fn 
create_file_metadata( Ok(key) } - pub(super) fn delete_file_mxc(&self, mxc: &str) -> Result<()> { - debug!("MXC URI: {:?}", mxc); + pub(super) fn delete_file_mxc(&self, mxc: &Mxc<'_>) -> Result<()> { + debug!("MXC URI: {mxc}"); - let mut prefix = mxc.as_bytes().to_vec(); + let mut prefix: Vec<u8> = Vec::new(); + prefix.extend_from_slice(b"mxc://"); + prefix.extend_from_slice(mxc.server_name.as_bytes()); + prefix.extend_from_slice(b"/"); + prefix.extend_from_slice(mxc.media_id.as_bytes()); prefix.push(0xFF); - debug!("MXC db prefix: {prefix:?}"); - - for (key, _) in self.mediaid_file.scan_prefix(prefix) { + trace!("MXC db prefix: {prefix:?}"); + for (key, _) in self.mediaid_file.scan_prefix(prefix.clone()) { debug!("Deleting key: {:?}", key); self.mediaid_file.remove(&key)?; } - for (key, value) in self.mediaid_user.scan_prefix(mxc.as_bytes().to_vec()) { - if key == mxc.as_bytes().to_vec() { - let user = string_from_bytes(&value).unwrap_or_default(); + for (key, value) in self.mediaid_user.scan_prefix(prefix.clone()) { + if key.starts_with(&prefix) { + let user = str_from_bytes(&value).unwrap_or_default(); debug_info!("Deleting key \"{key:?}\" which was uploaded by user {user}"); self.mediaid_user.remove(&key)?; @@ -88,10 +103,14 @@ pub(super) fn delete_file_mxc(&self, mxc: &str) -> Result<()> { } /// Searches for all files with the given MXC - pub(super) fn search_mxc_metadata_prefix(&self, mxc: &str) -> Result<Vec<Vec<u8>>> { - debug!("MXC URI: {:?}", mxc); - - let mut prefix = mxc.as_bytes().to_vec(); + pub(super) fn search_mxc_metadata_prefix(&self, mxc: &Mxc<'_>) -> Result<Vec<Vec<u8>>> { + debug!("MXC URI: {mxc}"); + + let mut prefix: Vec<u8> = Vec::new(); + prefix.extend_from_slice(b"mxc://"); + prefix.extend_from_slice(mxc.server_name.as_bytes()); + prefix.extend_from_slice(b"/"); + prefix.extend_from_slice(mxc.media_id.as_bytes()); prefix.push(0xFF); let keys: Vec<Vec<u8>> = self @@ -101,21 +120,23 @@ pub(super) fn search_mxc_metadata_prefix(&self, mxc: &str) -> Result<Vec<Vec<u8> .collect(); if keys.is_empty() { - return Err(Error::bad_database( - "Failed to find any keys in database with the provided MXC.", - )); + return Err!(Database("Failed to find any keys in database for `{mxc}`",)); } - debug!("Got the following keys: {:?}", keys); + debug!("Got the following keys: {keys:?}"); Ok(keys) } - pub(super) fn search_file_metadata(&self, mxc: &str, width: u32, height: u32) -> Result<Metadata> { - let mut prefix = mxc.as_bytes().to_vec(); + pub(super) fn search_file_metadata(&self, mxc: &Mxc<'_>, dim: &Dim) -> Result<Metadata> { + let mut prefix: Vec<u8> = Vec::new(); + prefix.extend_from_slice(b"mxc://"); + prefix.extend_from_slice(mxc.server_name.as_bytes()); + prefix.extend_from_slice(b"/"); + prefix.extend_from_slice(mxc.media_id.as_bytes()); prefix.push(0xFF); - prefix.extend_from_slice(&width.to_be_bytes()); - prefix.extend_from_slice(&height.to_be_bytes()); + prefix.extend_from_slice(&dim.width.to_be_bytes()); + prefix.extend_from_slice(&dim.height.to_be_bytes()); prefix.push(0xFF); let (key, _) = self @@ -143,7 +164,8 @@ pub(super) fn search_file_metadata(&self, mxc: &str, width: u32, height: u32) -> } else { Some( string_from_bytes(content_disposition_bytes) - .map_err(|_| Error::bad_database("Content Disposition in mediaid_file is invalid unicode."))?, + .map_err(|_| Error::bad_database("Content Disposition in mediaid_file is invalid unicode."))? 
+ .parse()?, ) }; @@ -154,6 +176,23 @@ pub(super) fn search_file_metadata(&self, mxc: &str, width: u32, height: u32) -> }) } + /// Gets all the MXCs associated with a user + pub(super) fn get_all_user_mxcs(&self, user_id: &UserId) -> Vec<OwnedMxcUri> { + let user_id = user_id.as_bytes().to_vec(); + + self.mediaid_user + .iter() + .filter_map(|(key, user)| { + if *user == user_id { + let mxc_s = string_from_bytes(&key).ok()?; + Some(OwnedMxcUri::from(mxc_s)) + } else { + None + } + }) + .collect() + } + /// Gets all the media keys in our database (this includes all the metadata /// associated with it such as width, height, content-type, etc) pub(crate) fn get_all_media_keys(&self) -> Vec<Vec<u8>> { self.mediaid_file.iter().map(|(key, _)| key).collect() } diff --git a/src/service/media/migrations.rs b/src/service/media/migrations.rs new file mode 100644 index 0000000000000000000000000000000000000000..9968d25b78be2d9bb863478486b84d1767a2353a --- /dev/null +++ b/src/service/media/migrations.rs @@ -0,0 +1,158 @@ +use std::{ + collections::HashSet, + ffi::{OsStr, OsString}, + fs::{self}, + path::PathBuf, + sync::Arc, + time::Instant, +}; + +use conduit::{debug, debug_info, debug_warn, error, info, warn, Config, Result}; + +use crate::{globals, Services}; + +/// Migrates a media directory from legacy base64 file names to sha2 file names. +/// All errors are fatal. Upon success the database is keyed to not perform this +/// again. +pub(crate) async fn migrate_sha256_media(services: &Services) -> Result<()> { + let db = &services.db; + let config = &services.server.config; + + warn!("Migrating legacy base64 file names to sha256 file names"); + let mediaid_file = &db["mediaid_file"]; + + // Move old media files to new names + let mut changes = Vec::<(PathBuf, PathBuf)>::new(); + for (key, _) in mediaid_file.iter() { + let old = services.media.get_media_file_b64(&key); + let new = services.media.get_media_file_sha256(&key); + debug!(?key, ?old, ?new, num = changes.len(), "change"); + changes.push((old, new)); + } + // move the file to the new location + for (old_path, path) in changes { + if old_path.exists() { + tokio::fs::rename(&old_path, &path).await?; + if config.media_compat_file_link { + tokio::fs::symlink(&path, &old_path).await?; + } + } + } + + // Apply fix from when sha256_media was backward-incompat and bumped the schema + // version from 13 to 14. For users satisfying these conditions we can go back. + if services.globals.db.database_version()? == 14 && globals::migrations::DATABASE_VERSION == 13 { + services.globals.db.bump_database_version(13)?; + } + + db["global"].insert(b"feat_sha256_media", &[])?; + info!("Finished applying sha256_media"); + Ok(()) +} + +/// Check is run on startup for prior-migrated media directories. This handles: +/// - Going back and forth to non-sha256 legacy binaries (e.g. upstream). +/// - Deletion of artifacts in the media directory which will then fall out of +/// sync with the database. +pub(crate) async fn checkup_sha256_media(services: &Services) -> Result<()> { + use crate::media::encode_key; + + debug!("Checking integrity of media directory"); + let db = &services.db; + let media = &services.media; + let config = &services.server.config; + let mediaid_file = &db["mediaid_file"]; + let mediaid_user = &db["mediaid_user"]; + let dbs = (mediaid_file, mediaid_user); + let timer = Instant::now(); + + let dir = media.get_media_dir(); + let files: HashSet<OsString> = fs::read_dir(dir)? 
+ .filter_map(|ent| ent.map_or(None, |ent| Some(ent.path().into_os_string()))) + .collect(); + + for key in media.db.get_all_media_keys() { + let new_path = media.get_media_file_sha256(&key).into_os_string(); + let old_path = media.get_media_file_b64(&key).into_os_string(); + if let Err(e) = handle_media_check(&dbs, config, &files, &key, &new_path, &old_path).await { + error!( + media_id = ?encode_key(&key), ?new_path, ?old_path, + "Failed to resolve media check failure: {e}" + ); + } + } + + debug_info!( + elapsed = ?timer.elapsed(), + "Finished checking media directory" + ); + + Ok(()) +} + +async fn handle_media_check( + dbs: &(&Arc<database::Map>, &Arc<database::Map>), config: &Config, files: &HashSet<OsString>, key: &[u8], + new_path: &OsStr, old_path: &OsStr, +) -> Result<()> { + use crate::media::encode_key; + + let (mediaid_file, mediaid_user) = dbs; + + let new_exists = files.contains(new_path); + let old_exists = files.contains(old_path); + let old_is_symlink = || async { + tokio::fs::symlink_metadata(old_path) + .await + .map_or(false, |md| md.is_symlink()) + }; + + if config.prune_missing_media && !old_exists && !new_exists { + error!( + media_id = ?encode_key(key), ?new_path, ?old_path, + "Media is missing at all paths. Removing from database..." + ); + + mediaid_file.remove(key)?; + mediaid_user.remove(key)?; + } + + if config.media_compat_file_link && !old_exists && new_exists { + debug_warn!( + media_id = ?encode_key(key), ?new_path, ?old_path, + "Media found but missing legacy link. Fixing..." + ); + + tokio::fs::symlink(&new_path, &old_path).await?; + } + + if config.media_compat_file_link && !new_exists && old_exists { + debug_warn!( + media_id = ?encode_key(key), ?new_path, ?old_path, + "Legacy media found without sha256 migration. Fixing..." + ); + + debug_assert!( + old_is_symlink().await, + "Legacy media not expected to be a symlink without an existing sha256 migration." + ); + + tokio::fs::rename(&old_path, &new_path).await?; + tokio::fs::symlink(&new_path, &old_path).await?; + } + + if !config.media_compat_file_link && old_exists && old_is_symlink().await { + debug_warn!( + media_id = ?encode_key(key), ?new_path, ?old_path, + "Legacy link found but compat disabled. Cleansing symlink..." + ); + + debug_assert!( + new_exists, + "sha256 migration into new file expected prior to cleaning legacy symlink here." 
+ ); + + tokio::fs::remove_file(&old_path).await?; + } + + Ok(()) +} diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index ff3f3dc47e038e578309ffbd0dcec2f7362537e8..2faa13d8921516a5f4023e98433e9d5a05f654fc 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,5 +1,7 @@ mod data; +pub(super) mod migrations; mod preview; +mod remote; mod tests; mod thumbnail; @@ -7,26 +9,31 @@ use async_trait::async_trait; use base64::{engine::general_purpose, Engine as _}; -use conduit::{debug, debug_error, err, error, trace, utils, utils::MutexMap, Err, Result, Server}; -use data::{Data, Metadata}; -use ruma::{OwnedMxcUri, OwnedUserId}; +use conduit::{ + debug, debug_error, debug_info, err, error, trace, + utils::{self, MutexMap}, + warn, Err, Result, Server, +}; +use ruma::{http_headers::ContentDisposition, Mxc, OwnedMxcUri, UserId}; use tokio::{ fs, io::{AsyncReadExt, AsyncWriteExt, BufReader}, }; -use crate::{client, globals, Dep}; +use self::data::{Data, Metadata}; +pub use self::thumbnail::Dim; +use crate::{client, globals, sending, Dep}; #[derive(Debug)] pub struct FileMeta { pub content: Option<Vec<u8>>, pub content_type: Option<String>, - pub content_disposition: Option<String>, + pub content_disposition: Option<ContentDisposition>, } pub struct Service { url_preview_mutex: MutexMap<String, ()>, - pub(crate) db: Data, + pub(super) db: Data, services: Services, } @@ -34,11 +41,18 @@ struct Services { server: Arc<Server>, client: Dep<client::Service>, globals: Dep<globals::Service>, + sending: Dep<sending::Service>, } /// generated MXC ID (`media-id`) length pub const MXC_LENGTH: usize = 32; +/// Cache control for immutable objects. +pub const CACHE_CONTROL_IMMUTABLE: &str = "public,max-age=31536000,immutable"; + +/// Default cross-origin resource policy. +pub const CORP_CROSS_ORIGIN: &str = "cross-origin"; + #[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result<Arc<Self>> { @@ -49,6 +63,7 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> { server: args.server.clone(), client: args.depend::<client::Service>("client"), globals: args.depend::<globals::Service>("globals"), + sending: args.depend::<sending::Service>("sending"), }, })) } @@ -65,17 +80,13 @@ fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } impl Service { /// Uploads a file. pub async fn create( - &self, sender_user: Option<OwnedUserId>, mxc: &str, content_disposition: Option<&str>, + &self, mxc: &Mxc<'_>, user: Option<&UserId>, content_disposition: Option<&ContentDisposition>, content_type: Option<&str>, file: &[u8], ) -> Result<()> { // Width, Height = 0 if it's not a thumbnail - let key = if let Some(user) = sender_user { - self.db - .create_file_metadata(Some(user.as_str()), mxc, 0, 0, content_disposition, content_type)? - } else { - self.db - .create_file_metadata(None, mxc, 0, 0, content_disposition, content_type)? 
- }; + let key = self + .db + .create_file_metadata(mxc, user, &Dim::default(), content_disposition, content_type)?; //TODO: Dangling metadata in database if creation fails let mut f = self.create_media_file(&key).await?; @@ -85,7 +96,7 @@ pub async fn create( } /// Deletes a file in the database and from the media directory via an MXC - pub async fn delete(&self, mxc: &str) -> Result<()> { + pub async fn delete(&self, mxc: &Mxc<'_>) -> Result<()> { if let Ok(keys) = self.db.search_mxc_metadata_prefix(mxc) { for key in keys { trace!(?mxc, ?key, "Deleting from filesystem"); @@ -107,13 +118,38 @@ pub async fn delete(&self, mxc: &str) -> Result<()> { } } + /// Deletes all media by the specified user + /// + /// currently, this is only practical for local users + pub async fn delete_from_user(&self, user: &UserId, force: bool) -> Result<usize> { + let mxcs = self.db.get_all_user_mxcs(user); + let mut deletion_count: usize = 0; + + for mxc in mxcs { + let mxc: Mxc<'_> = mxc.as_str().try_into()?; + debug_info!("Deleting MXC {mxc} by user {user} from database and filesystem"); + if force { + _ = self + .delete(&mxc) + .await + .inspect_err(|e| warn!("Failed to delete {mxc} from user {user}, ignoring error: {e}")); + } else { + self.delete(&mxc).await?; + } + + deletion_count = deletion_count.saturating_add(1); + } + + Ok(deletion_count) + } + /// Downloads a file. - pub async fn get(&self, mxc: &str) -> Result<Option<FileMeta>> { + pub async fn get(&self, mxc: &Mxc<'_>) -> Result<Option<FileMeta>> { if let Ok(Metadata { content_disposition, content_type, key, - }) = self.db.search_file_metadata(mxc, 0, 0) + }) = self.db.search_file_metadata(mxc, &Dim::default()) { let mut content = Vec::new(); let path = self.get_media_file(&key); @@ -131,22 +167,49 @@ pub async fn get(&self, mxc: &str) -> Result<Option<FileMeta>> { } } + /// Gets all the MXC URIs in our media database + pub async fn get_all_mxcs(&self) -> Result<Vec<OwnedMxcUri>> { + let all_keys = self.db.get_all_media_keys(); + + let mut mxcs = Vec::with_capacity(all_keys.len()); + + for key in all_keys { + trace!("Full MXC key from database: {key:?}"); + + // we need to get the MXC URL from the first part of the key (the first 0xff / + // 255 push). this is all necessary because of conduit using magic keys for + // media + let mut parts = key.split(|&b| b == 0xFF); + let mxc = parts + .next() + .map(|bytes| { + utils::string_from_bytes(bytes) + .map_err(|e| err!(Database(error!("Failed to parse MXC unicode bytes from our database: {e}")))) + }) + .transpose()?; + + let Some(mxc_s) = mxc else { + return Err!(Database("Parsed MXC URL unicode bytes from database but still is None")); + }; + + trace!("Parsed MXC key to URL: {mxc_s}"); + let mxc = OwnedMxcUri::from(mxc_s); + + mxcs.push(mxc); + } + + Ok(mxcs) + } + /// Deletes all remote only media files in the given at or after /// time/duration. Returns a u32 with the amount of media files deleted. 
- pub async fn delete_all_remote_media_at_after_time(&self, time: String, force: bool) -> Result<usize> { + pub async fn delete_all_remote_media_at_after_time(&self, time: SystemTime, force: bool) -> Result<usize> { let all_keys = self.db.get_all_media_keys(); - let user_duration: SystemTime = match cyborgtime::parse_duration(&time) { - Err(e) => return Err!(Database(error!("Failed to parse specified time duration: {e}"))), - Ok(duration) => SystemTime::now() - .checked_sub(duration) - .ok_or(err!(Arithmetic("Duration {duration:?} is too large")))?, - }; - - let mut remote_mxcs: Vec<String> = vec![]; + let mut remote_mxcs = Vec::with_capacity(all_keys.len()); for key in all_keys { - debug!("Full MXC key from database: {key:?}"); + trace!("Full MXC key from database: {key:?}"); // we need to get the MXC URL from the first part of the key (the first 0xff / // 255 push). this is all necessary because of conduit using magic keys for @@ -164,7 +227,7 @@ pub async fn delete_all_remote_media_at_after_time(&self, time: String, force: b return Err!(Database("Parsed MXC URL unicode bytes from database but still is None")); }; - debug!("Parsed MXC key to URL: {mxc_s}"); + trace!("Parsed MXC key to URL: {mxc_s}"); let mxc = OwnedMxcUri::from(mxc_s); if mxc.server_name() == Ok(self.services.globals.server_name()) { debug!("Ignoring local media MXC: {mxc}"); @@ -175,7 +238,20 @@ pub async fn delete_all_remote_media_at_after_time(&self, time: String, force: b let path = self.get_media_file(&key); debug!("MXC path: {path:?}"); - let file_metadata = fs::metadata(path.clone()).await?; + let file_metadata = match fs::metadata(path.clone()).await { + Ok(file_metadata) => file_metadata, + Err(e) => { + if force { + error!("Failed to obtain file metadata for MXC {mxc} at file path \"{path:?}\", skipping: {e}"); + continue; + } + + return Err!(Database( + "Failed to obtain file metadata for MXC {mxc} at file path \"{path:?}\": {e}" + )); + }, + }; + debug!("File metadata: {file_metadata:?}"); let file_created_at = match file_metadata.created() { @@ -186,15 +262,16 @@ pub async fn delete_all_remote_media_at_after_time(&self, time: String, force: b }, Err(err) => { if force { - error!("Could not delete MXC path {path:?}: {err:?}. Skipping..."); + error!("Could not delete MXC {mxc} at path {path:?}: {err:?}. 
Skipping..."); continue; } + return Err(err.into()); }, }; debug!("File created at: {file_created_at:?}"); - if file_created_at <= user_duration { + if file_created_at <= time { debug!("File is within user duration, pushing to list of file paths and keys to delete."); remote_mxcs.push(mxc.to_string()); } @@ -207,12 +284,25 @@ pub async fn delete_all_remote_media_at_after_time(&self, time: String, force: b return Err!(Database("Did not found any eligible MXCs to delete.")); } - debug!("Deleting media now in the past {user_duration:?}."); + debug_info!("Deleting media now in the past {time:?}."); let mut deletion_count: usize = 0; for mxc in remote_mxcs { - debug!("Deleting MXC {mxc} from database and filesystem"); - self.delete(&mxc).await?; - deletion_count = deletion_count.saturating_add(1); + let mxc: Mxc<'_> = mxc.as_str().try_into()?; + debug_info!("Deleting MXC {mxc} from database and filesystem"); + + match self.delete(&mxc).await { + Ok(()) => { + deletion_count = deletion_count.saturating_add(1); + }, + Err(e) => { + if force { + warn!("Failed to delete {mxc}, ignoring error and skipping: {e}"); + continue; + } + + return Err!(Database(warn!("Failed to delete MXC {mxc}: {e}"))); + }, + } } Ok(deletion_count) @@ -258,6 +348,18 @@ async fn create_media_file(&self, key: &[u8]) -> Result<fs::File> { Ok(file) } + #[inline] + pub fn get_metadata(&self, mxc: &Mxc<'_>) -> Option<FileMeta> { + self.db + .search_file_metadata(mxc, &Dim::default()) + .map(|metadata| FileMeta { + content_disposition: metadata.content_disposition, + content_type: metadata.content_type, + content: None, + }) + .ok() + } + #[inline] #[must_use] pub fn get_media_file(&self, key: &[u8]) -> PathBuf { self.get_media_file_sha256(key) } diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index ac24d81af55e395f8b8654d5b1aa91116d5c3cfd..5704075e5b8afd3ec430c4d08dbdabf4c2ed828d 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -4,6 +4,7 @@ use conduit_core::implement; use image::ImageReader as ImgReader; use ipaddress::IPAddress; +use ruma::Mxc; use serde::Serialize; use url::Url; use webpage::HTML; @@ -44,13 +45,12 @@ pub async fn set_url_preview(&self, url: &str, data: &UrlPreviewData) -> Result< pub async fn download_image(&self, url: &str) -> Result<UrlPreviewData> { let client = &self.services.client.url_preview; let image = client.get(url).send().await?.bytes().await?; - let mxc = format!( - "mxc://{}/{}", - self.services.globals.server_name(), - utils::random_string(MXC_LENGTH) - ); + let mxc = Mxc { + server_name: self.services.globals.server_name(), + media_id: &utils::random_string(MXC_LENGTH), + }; - self.create(None, &mxc, None, None, &image).await?; + self.create(&mxc, None, None, None, &image).await?; let (width, height) = match ImgReader::new(Cursor::new(&image)).with_guessed_format() { Err(_) => (None, None), @@ -61,7 +61,7 @@ pub async fn download_image(&self, url: &str) -> Result<UrlPreviewData> { }; Ok(UrlPreviewData { - image: Some(mxc), + image: Some(mxc.to_string()), image_size: Some(image.len()), image_width: width, image_height: height, diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs new file mode 100644 index 0000000000000000000000000000000000000000..59846b8ee1f6d7a123d2c0f6e86fb09ffd47689e --- /dev/null +++ b/src/service/media/remote.rs @@ -0,0 +1,405 @@ +use std::{fmt::Debug, time::Duration}; + +use conduit::{debug_warn, err, implement, utils::content_disposition::make_content_disposition, Err, Error, Result}; +use 
http::header::{HeaderValue, CONTENT_DISPOSITION, CONTENT_TYPE}; +use ruma::{ + api::{ + client::{ + error::ErrorKind::{NotFound, Unrecognized}, + media, + }, + federation, + federation::authenticated_media::{Content, FileOrLocation}, + OutgoingRequest, + }, + Mxc, ServerName, UserId, +}; + +use super::{Dim, FileMeta}; + +#[implement(super::Service)] +pub async fn fetch_remote_thumbnail( + &self, mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, timeout_ms: Duration, dim: &Dim, +) -> Result<FileMeta> { + self.check_fetch_authorized(mxc)?; + + let result = self + .fetch_thumbnail_unauthenticated(mxc, user, server, timeout_ms, dim) + .await; + + if let Err(Error::Request(NotFound, ..)) = &result { + return self + .fetch_thumbnail_authenticated(mxc, user, server, timeout_ms, dim) + .await; + } + + result +} + +#[implement(super::Service)] +pub async fn fetch_remote_content( + &self, mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, timeout_ms: Duration, +) -> Result<FileMeta> { + self.check_fetch_authorized(mxc)?; + + let result = self + .fetch_content_unauthenticated(mxc, user, server, timeout_ms) + .await; + + if let Err(Error::Request(NotFound, ..)) = &result { + return self + .fetch_content_authenticated(mxc, user, server, timeout_ms) + .await; + } + + result +} + +#[implement(super::Service)] +async fn fetch_thumbnail_authenticated( + &self, mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, timeout_ms: Duration, dim: &Dim, +) -> Result<FileMeta> { + use federation::authenticated_media::get_content_thumbnail::v1::{Request, Response}; + + let request = Request { + media_id: mxc.media_id.into(), + method: dim.method.clone().into(), + width: dim.width.into(), + height: dim.height.into(), + animated: true.into(), + timeout_ms, + }; + + let Response { + content, + .. + } = self.federation_request(mxc, user, server, request).await?; + + match content { + FileOrLocation::File(content) => self.handle_thumbnail_file(mxc, user, dim, content).await, + FileOrLocation::Location(location) => self.handle_location(mxc, user, &location).await, + } +} + +#[implement(super::Service)] +async fn fetch_content_authenticated( + &self, mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, timeout_ms: Duration, +) -> Result<FileMeta> { + use federation::authenticated_media::get_content::v1::{Request, Response}; + + let request = Request { + media_id: mxc.media_id.into(), + timeout_ms, + }; + + let Response { + content, + .. + } = self.federation_request(mxc, user, server, request).await?; + + match content { + FileOrLocation::File(content) => self.handle_content_file(mxc, user, content).await, + FileOrLocation::Location(location) => self.handle_location(mxc, user, &location).await, + } +} + +#[allow(deprecated)] +#[implement(super::Service)] +async fn fetch_thumbnail_unauthenticated( + &self, mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, timeout_ms: Duration, dim: &Dim, +) -> Result<FileMeta> { + use media::get_content_thumbnail::v3::{Request, Response}; + + let request = Request { + allow_remote: true, + allow_redirect: true, + animated: true.into(), + method: dim.method.clone().into(), + width: dim.width.into(), + height: dim.height.into(), + server_name: mxc.server_name.into(), + media_id: mxc.media_id.into(), + timeout_ms, + }; + + let Response { + file, + content_type, + content_disposition, + .. 
+ } = self.federation_request(mxc, user, server, request).await?; + + let content = Content { + file, + content_type, + content_disposition, + }; + + self.handle_thumbnail_file(mxc, user, dim, content).await +} + +#[allow(deprecated)] +#[implement(super::Service)] +async fn fetch_content_unauthenticated( + &self, mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, timeout_ms: Duration, +) -> Result<FileMeta> { + use media::get_content::v3::{Request, Response}; + + let request = Request { + allow_remote: true, + allow_redirect: true, + server_name: mxc.server_name.into(), + media_id: mxc.media_id.into(), + timeout_ms, + }; + + let Response { + file, + content_type, + content_disposition, + .. + } = self.federation_request(mxc, user, server, request).await?; + + let content = Content { + file, + content_type, + content_disposition, + }; + + self.handle_content_file(mxc, user, content).await +} + +#[implement(super::Service)] +async fn handle_thumbnail_file( + &self, mxc: &Mxc<'_>, user: Option<&UserId>, dim: &Dim, content: Content, +) -> Result<FileMeta> { + let content_disposition = + make_content_disposition(content.content_disposition.as_ref(), content.content_type.as_deref(), None); + + self.upload_thumbnail( + mxc, + user, + Some(&content_disposition), + content.content_type.as_deref(), + dim, + &content.file, + ) + .await + .map(|()| FileMeta { + content: Some(content.file), + content_type: content.content_type.map(Into::into), + content_disposition: Some(content_disposition), + }) +} + +#[implement(super::Service)] +async fn handle_content_file(&self, mxc: &Mxc<'_>, user: Option<&UserId>, content: Content) -> Result<FileMeta> { + let content_disposition = + make_content_disposition(content.content_disposition.as_ref(), content.content_type.as_deref(), None); + + self.create( + mxc, + user, + Some(&content_disposition), + content.content_type.as_deref(), + &content.file, + ) + .await + .map(|()| FileMeta { + content: Some(content.file), + content_type: content.content_type.map(Into::into), + content_disposition: Some(content_disposition), + }) +} + +#[implement(super::Service)] +async fn handle_location(&self, mxc: &Mxc<'_>, user: Option<&UserId>, location: &str) -> Result<FileMeta> { + self.location_request(location).await.map_err(|error| { + err!(Request(NotFound( + debug_warn!(%mxc, ?user, ?location, ?error, "Fetching media from location failed") + ))) + }) +} + +#[implement(super::Service)] +async fn location_request(&self, location: &str) -> Result<FileMeta> { + let response = self + .services + .client + .extern_media + .get(location) + .send() + .await?; + + let content_type = response + .headers() + .get(CONTENT_TYPE) + .map(HeaderValue::to_str) + .and_then(Result::ok) + .map(str::to_owned); + + let content_disposition = response + .headers() + .get(CONTENT_DISPOSITION) + .map(HeaderValue::as_bytes) + .map(TryFrom::try_from) + .and_then(Result::ok); + + response + .bytes() + .await + .map(Vec::from) + .map_err(Into::into) + .map(|content| FileMeta { + content: Some(content), + content_type: content_type.clone().map(Into::into), + content_disposition: Some(make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + None, + )), + }) +} + +#[implement(super::Service)] +async fn federation_request<Request>( + &self, mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, request: Request, +) -> Result<Request::IncomingResponse> +where + Request: OutgoingRequest + Send + Debug, +{ + self.services + .sending + 
.send_federation_request(server.unwrap_or(mxc.server_name), request)
+ .await
+ .map_err(|error| handle_federation_error(mxc, user, server, error))
+}
+
+// Handles and adjusts the error for the caller to determine if they should
+// request the fallback endpoint or give up.
+fn handle_federation_error(mxc: &Mxc<'_>, user: Option<&UserId>, server: Option<&ServerName>, error: Error) -> Error {
+ let fallback = || {
+ err!(Request(NotFound(
+ debug_error!(%mxc, ?user, ?server, ?error, "Remote media not found")
+ )))
+ };
+
+ // Matrix error responses for which the fallback is always taken.
+ if error.kind() == NotFound || error.kind() == Unrecognized {
+ return fallback();
+ }
+
+ // If we get these from any middleware we'll try the other endpoint rather than
+ // giving up too early.
+ if error.status_code().is_client_error() || error.status_code().is_redirection() {
+ return fallback();
+ }
+
+ // Reached for 5xx errors. This is where we don't fallback given the likelihood
+ // the other endpoint will also be a 5xx and we're wasting time.
+ error
+}
+
+#[implement(super::Service)]
+#[allow(deprecated)]
+pub async fn fetch_remote_thumbnail_legacy(
+ &self, body: &media::get_content_thumbnail::v3::Request,
+) -> Result<media::get_content_thumbnail::v3::Response> {
+ let mxc = Mxc {
+ server_name: &body.server_name,
+ media_id: &body.media_id,
+ };
+
+ self.check_legacy_freeze()?;
+ self.check_fetch_authorized(&mxc)?;
+ let response = self
+ .services
+ .sending
+ .send_federation_request(
+ mxc.server_name,
+ media::get_content_thumbnail::v3::Request {
+ allow_remote: body.allow_remote,
+ height: body.height,
+ width: body.width,
+ method: body.method.clone(),
+ server_name: body.server_name.clone(),
+ media_id: body.media_id.clone(),
+ timeout_ms: body.timeout_ms,
+ allow_redirect: body.allow_redirect,
+ animated: body.animated,
+ },
+ )
+ .await?;
+
+ let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?;
+ self.upload_thumbnail(&mxc, None, None, response.content_type.as_deref(), &dim, &response.file)
+ .await?;
+
+ Ok(response)
+}
+
+#[implement(super::Service)]
+#[allow(deprecated)]
+pub async fn fetch_remote_content_legacy(
+ &self, mxc: &Mxc<'_>, allow_redirect: bool, timeout_ms: Duration,
+) -> Result<media::get_content::v3::Response, Error> {
+ self.check_legacy_freeze()?;
+ self.check_fetch_authorized(mxc)?;
+ let response = self
+ .services
+ .sending
+ .send_federation_request(
+ mxc.server_name,
+ media::get_content::v3::Request {
+ allow_remote: true,
+ server_name: mxc.server_name.into(),
+ media_id: mxc.media_id.into(),
+ timeout_ms,
+ allow_redirect,
+ },
+ )
+ .await?;
+
+ let content_disposition =
+ make_content_disposition(response.content_disposition.as_ref(), response.content_type.as_deref(), None);
+
+ self.create(
+ mxc,
+ None,
+ Some(&content_disposition),
+ response.content_type.as_deref(),
+ &response.file,
+ )
+ .await?;
+
+ Ok(response)
+}
+
+#[implement(super::Service)]
+fn check_fetch_authorized(&self, mxc: &Mxc<'_>) -> Result<()> {
+ if self
+ .services
+ .server
+ .config
+ .prevent_media_downloads_from
+ .iter()
+ .any(|entry| entry == mxc.server_name)
+ {
+ // we'll lie to the client and say the blocked server's media was not found and
+ // log it. the client has no way of telling anyway so this is a security bonus.
+ debug_warn!(%mxc, "Received request for media on blocklisted server"); + return Err!(Request(NotFound("Media not found."))); + } + + Ok(()) +} + +#[implement(super::Service)] +fn check_legacy_freeze(&self) -> Result<()> { + self.services + .server + .config + .freeze_legacy_media + .then_some(()) + .ok_or(err!(Request(NotFound("Remote media is frozen.")))) +} diff --git a/src/service/media/thumbnail.rs b/src/service/media/thumbnail.rs index 01bf73f6970b4122e16eb7720b205a62f4da6a9e..630f7b3b1b9bd017e5a0381a9d66bf13edf821df 100644 --- a/src/service/media/thumbnail.rs +++ b/src/service/media/thumbnail.rs @@ -1,8 +1,8 @@ use std::{cmp, io::Cursor, num::Saturating as Sat}; -use conduit::{checked, Result}; +use conduit::{checked, err, Result}; use image::{imageops::FilterType, DynamicImage}; -use ruma::OwnedUserId; +use ruma::{http_headers::ContentDisposition, media::Method, Mxc, UInt, UserId}; use tokio::{ fs, io::{AsyncReadExt, AsyncWriteExt}, @@ -10,20 +10,24 @@ use super::{data::Metadata, FileMeta}; +/// Dimension specification for a thumbnail. +#[derive(Debug)] +pub struct Dim { + pub width: u32, + pub height: u32, + pub method: Method, +} + impl super::Service { /// Uploads or replaces a file thumbnail. #[allow(clippy::too_many_arguments)] pub async fn upload_thumbnail( - &self, sender_user: Option<OwnedUserId>, mxc: &str, content_disposition: Option<&str>, - content_type: Option<&str>, width: u32, height: u32, file: &[u8], + &self, mxc: &Mxc<'_>, user: Option<&UserId>, content_disposition: Option<&ContentDisposition>, + content_type: Option<&str>, dim: &Dim, file: &[u8], ) -> Result<()> { - let key = if let Some(user) = sender_user { - self.db - .create_file_metadata(Some(user.as_str()), mxc, width, height, content_disposition, content_type)? - } else { - self.db - .create_file_metadata(None, mxc, width, height, content_disposition, content_type)? - }; + let key = self + .db + .create_file_metadata(mxc, user, dim, content_disposition, content_type)?; //TODO: Dangling metadata in database if creation fails let mut f = self.create_media_file(&key).await?; @@ -46,15 +50,14 @@ pub async fn upload_thumbnail( /// For width,height <= 96 the server uses another thumbnailing algorithm /// which crops the image afterwards. 
#[tracing::instrument(skip(self), name = "thumbnail", level = "debug")] - pub async fn get_thumbnail(&self, mxc: &str, width: u32, height: u32) -> Result<Option<FileMeta>> { + pub async fn get_thumbnail(&self, mxc: &Mxc<'_>, dim: &Dim) -> Result<Option<FileMeta>> { // 0, 0 because that's the original file - let (width, height, crop) = thumbnail_properties(width, height).unwrap_or((0, 0, false)); + let dim = dim.normalized(); - if let Ok(metadata) = self.db.search_file_metadata(mxc, width, height) { + if let Ok(metadata) = self.db.search_file_metadata(mxc, &dim) { self.get_thumbnail_saved(metadata).await - } else if let Ok(metadata) = self.db.search_file_metadata(mxc, 0, 0) { - self.get_thumbnail_generate(mxc, width, height, crop, metadata) - .await + } else if let Ok(metadata) = self.db.search_file_metadata(mxc, &Dim::default()) { + self.get_thumbnail_generate(mxc, &dim, metadata).await } else { Ok(None) } @@ -75,9 +78,7 @@ async fn get_thumbnail_saved(&self, data: Metadata) -> Result<Option<FileMeta>> /// Generate a thumbnail #[tracing::instrument(skip(self), name = "generate", level = "debug")] - async fn get_thumbnail_generate( - &self, mxc: &str, width: u32, height: u32, crop: bool, data: Metadata, - ) -> Result<Option<FileMeta>> { + async fn get_thumbnail_generate(&self, mxc: &Mxc<'_>, dim: &Dim, data: Metadata) -> Result<Option<FileMeta>> { let mut content = Vec::new(); let path = self.get_media_file(&data.key); fs::File::open(path) @@ -90,21 +91,20 @@ async fn get_thumbnail_generate( return Ok(Some(into_filemeta(data, content))); }; - if width > image.width() || height > image.height() { + if dim.width > image.width() || dim.height > image.height() { return Ok(Some(into_filemeta(data, content))); } let mut thumbnail_bytes = Vec::new(); - let thumbnail = thumbnail_generate(&image, width, height, crop)?; + let thumbnail = thumbnail_generate(&image, dim)?; thumbnail.write_to(&mut Cursor::new(&mut thumbnail_bytes), image::ImageFormat::Png)?; // Save thumbnail in database so we don't have to generate it again next time let thumbnail_key = self.db.create_file_metadata( - None, mxc, - width, - height, - data.content_disposition.as_deref(), + None, + dim, + data.content_disposition.as_ref(), data.content_type.as_deref(), )?; @@ -115,60 +115,115 @@ async fn get_thumbnail_generate( } } -fn thumbnail_generate(image: &DynamicImage, width: u32, height: u32, crop: bool) -> Result<DynamicImage> { - let thumbnail = if crop { - image.resize_to_fill(width, height, FilterType::CatmullRom) +fn thumbnail_generate(image: &DynamicImage, requested: &Dim) -> Result<DynamicImage> { + let thumbnail = if !requested.crop() { + let Dim { + width, + height, + .. 
+ } = requested.scaled(&Dim { + width: image.width(), + height: image.height(), + ..Dim::default() + })?; + image.thumbnail_exact(width, height) } else { - let (exact_width, exact_height) = thumbnail_dimension(image, width, height)?; - image.thumbnail_exact(exact_width, exact_height) + image.resize_to_fill(requested.width, requested.height, FilterType::CatmullRom) }; Ok(thumbnail) } -fn thumbnail_dimension(image: &DynamicImage, width: u32, height: u32) -> Result<(u32, u32)> { - let image_width = image.width(); - let image_height = image.height(); +fn into_filemeta(data: Metadata, content: Vec<u8>) -> FileMeta { + FileMeta { + content: Some(content), + content_type: data.content_type, + content_disposition: data.content_disposition, + } +} - let width = cmp::min(width, image_width); - let height = cmp::min(height, image_height); +impl Dim { + /// Instantiate a Dim from Ruma integers with optional method. + pub fn from_ruma(width: UInt, height: UInt, method: Option<Method>) -> Result<Self> { + let width = width + .try_into() + .map_err(|e| err!(Request(InvalidParam("Width is invalid: {e:?}"))))?; + let height = height + .try_into() + .map_err(|e| err!(Request(InvalidParam("Height is invalid: {e:?}"))))?; + + Ok(Self::new(width, height, method)) + } - let use_width = Sat(width) * Sat(image_height) < Sat(height) * Sat(image_width); + /// Instantiate a Dim with optional method + #[inline] + #[must_use] + pub fn new(width: u32, height: u32, method: Option<Method>) -> Self { + Self { + width, + height, + method: method.unwrap_or(Method::Scale), + } + } - let x = if use_width { - let dividend = (Sat(height) * Sat(image_width)).0; - checked!(dividend / image_height)? - } else { - width - }; + pub fn scaled(&self, image: &Self) -> Result<Self> { + let image_width = image.width; + let image_height = image.height; - let y = if !use_width { - let dividend = (Sat(width) * Sat(image_height)).0; - checked!(dividend / image_width)? - } else { - height - }; + let width = cmp::min(self.width, image_width); + let height = cmp::min(self.height, image_height); - Ok((x, y)) -} + let use_width = Sat(width) * Sat(image_height) < Sat(height) * Sat(image_width); + + let x = if use_width { + let dividend = (Sat(height) * Sat(image_width)).0; + checked!(dividend / image_height)? + } else { + width + }; + + let y = if !use_width { + let dividend = (Sat(width) * Sat(image_height)).0; + checked!(dividend / image_width)? + } else { + height + }; -/// Returns width, height of the thumbnail and whether it should be cropped. -/// Returns None when the server should send the original file. -fn thumbnail_properties(width: u32, height: u32) -> Option<(u32, u32, bool)> { - match (width, height) { - (0..=32, 0..=32) => Some((32, 32, true)), - (0..=96, 0..=96) => Some((96, 96, true)), - (0..=320, 0..=240) => Some((320, 240, false)), - (0..=640, 0..=480) => Some((640, 480, false)), - (0..=800, 0..=600) => Some((800, 600, false)), - _ => None, + Ok(Self { + width: x, + height: y, + method: Method::Scale, + }) } + + /// Returns width, height of the thumbnail and whether it should be cropped. + /// Returns None when the server should send the original file. + /// Ignores the input Method. 
+ #[must_use] + pub fn normalized(&self) -> Self { + match (self.width, self.height) { + (0..=32, 0..=32) => Self::new(32, 32, Some(Method::Crop)), + (0..=96, 0..=96) => Self::new(96, 96, Some(Method::Crop)), + (0..=320, 0..=240) => Self::new(320, 240, Some(Method::Scale)), + (0..=640, 0..=480) => Self::new(640, 480, Some(Method::Scale)), + (0..=800, 0..=600) => Self::new(800, 600, Some(Method::Scale)), + _ => Self::default(), + } + } + + /// Returns true if the method is Crop. + #[inline] + #[must_use] + pub fn crop(&self) -> bool { self.method == Method::Crop } } -fn into_filemeta(data: Metadata, content: Vec<u8>) -> FileMeta { - FileMeta { - content: Some(content), - content_type: data.content_type, - content_disposition: data.content_disposition, +impl Default for Dim { + #[inline] + fn default() -> Self { + Self { + width: 0, + height: 0, + method: Method::Scale, + } } } diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 623e3a964f3264060340145d916f6ec29d6cac65..07d9a0fae4971e674e2bcf4c9a95775b556def59 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -312,7 +312,8 @@ fn handle_resolve_error(e: &ResolveError) -> Result<()> { } fn validate_dest(&self, dest: &ServerName) -> Result<()> { - if dest == self.services.server.config.server_name { + let config = &self.services.server.config; + if dest == config.server_name && !config.federation_loopback { return Err!("Won't send federation request to ourselves"); } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 2526f1bddc43bb034829fc90820b7dddfd435767..58fa31b3d427470ab4119606a5140b3f44d428d2 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -350,6 +350,7 @@ pub fn user_can_invite( unsigned: None, state_key: Some(target_user.into()), redacts: None, + timestamp: None, }; Ok(self diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 5917e96b774ab4b41774ddba5786315502053396..2f0c8f25878c8676b53cbefb8261a3fd4452be7c 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -113,7 +113,7 @@ pub(super) fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<database::H self.eventid_pduid.get(event_id.as_bytes()) } - /// Returns the pdu. + /// Returns the pdu directly from `eventid_pduid` only. pub(super) fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>> { self.eventid_pduid .get(event_id.as_bytes())? 
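Editor's note (not part of the patch): the thumbnail hunks above fold the old thumbnail_properties()/thumbnail_dimension() helpers into methods on the new Dim type. Below is a self-contained sketch of the bucketing that Dim::normalized() performs; it re-declares a minimal Dim and Method locally rather than using the ruma types, purely for illustration of the behaviour.

// Illustrative re-implementation of the Dim::normalized bucketing from
// src/service/media/thumbnail.rs; Method here is a stand-in for ruma's enum.
#[derive(Debug, PartialEq)]
enum Method {
    Crop,
    Scale,
}

#[derive(Debug, PartialEq)]
struct Dim {
    width: u32,
    height: u32,
    method: Method,
}

impl Dim {
    fn new(width: u32, height: u32, method: Method) -> Self {
        Self { width, height, method }
    }

    /// Snap a requested size onto the fixed thumbnail buckets; anything larger
    /// falls through to (0, 0), which the service treats as "serve the original".
    fn normalized(&self) -> Self {
        match (self.width, self.height) {
            (0..=32, 0..=32) => Self::new(32, 32, Method::Crop),
            (0..=96, 0..=96) => Self::new(96, 96, Method::Crop),
            (0..=320, 0..=240) => Self::new(320, 240, Method::Scale),
            (0..=640, 0..=480) => Self::new(640, 480, Method::Scale),
            (0..=800, 0..=600) => Self::new(800, 600, Method::Scale),
            _ => Self::new(0, 0, Method::Scale),
        }
    }
}

fn main() {
    // A 40x40 request is cropped to the 96x96 bucket.
    assert_eq!(Dim::new(40, 40, Method::Scale).normalized(), Dim::new(96, 96, Method::Crop));
    // A 500x300 request scales into the 640x480 bucket.
    assert_eq!(Dim::new(500, 300, Method::Crop).normalized(), Dim::new(640, 480, Method::Scale));
    // Oversized requests fall back to the original file (0, 0).
    assert_eq!(Dim::new(4096, 2160, Method::Scale).normalized(), Dim::new(0, 0, Method::Scale));
}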
diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index ac25b2a84012c6f436b2386c462195c4d00475b3..4f2352f81ceb7d9916716acc7981c912abfbc17a 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -449,7 +449,7 @@ pub async fn append_pdu( } } }, - V11 => { + _ => { let content = serde_json::from_str::<RoomRedactionEventContent>(pdu.content.get()).map_err(|e| { warn!("Invalid content in redaction pdu: {e}"); @@ -467,13 +467,6 @@ pub async fn append_pdu( } } }, - _ => { - warn!("Unexpected or unsupported room version {room_version_id}"); - return Err(Error::BadRequest( - ErrorKind::BadJson, - "Unexpected or unsupported room version found", - )); - }, }; }, TimelineEventType::SpaceChild => { @@ -530,8 +523,7 @@ pub async fn append_pdu( if self.services.admin.is_admin_command(pdu, &body).await { self.services .admin - .command(body, Some((*pdu.event_id).into())) - .await; + .command(body, Some((*pdu.event_id).into()))?; } } }, @@ -638,6 +630,7 @@ pub fn create_hash_and_sign_event( unsigned, state_key, redacts, + timestamp, } = pdu_builder; let prev_events: Vec<_> = self @@ -705,9 +698,14 @@ pub fn create_hash_and_sign_event( room_id: room_id.to_owned(), sender: sender.to_owned(), origin: None, - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), + origin_server_ts: timestamp.map_or_else( + || { + utils::millis_since_unix_epoch() + .try_into() + .expect("u64 fits into UInt") + }, + |ts| ts.get(), + ), kind: event_type, content, state_key, diff --git a/src/service/sending/dest.rs b/src/service/sending/dest.rs new file mode 100644 index 0000000000000000000000000000000000000000..9968acd766e1240615bb1410b27b7278b752cb51 --- /dev/null +++ b/src/service/sending/dest.rs @@ -0,0 +1,56 @@ +use std::fmt::Debug; + +use conduit::implement; +use ruma::{OwnedServerName, OwnedUserId}; + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum Destination { + Appservice(String), + Push(OwnedUserId, String), // user and pushkey + Normal(OwnedServerName), +} + +#[implement(Destination)] +#[must_use] +pub fn get_prefix(&self) -> Vec<u8> { + match self { + Self::Normal(server) => { + let len = server.as_bytes().len().saturating_add(1); + + let mut p = Vec::with_capacity(len); + p.extend_from_slice(server.as_bytes()); + p.push(0xFF); + p + }, + Self::Appservice(server) => { + let sigil = b"+"; + let len = sigil + .len() + .saturating_add(server.as_bytes().len()) + .saturating_add(1); + + let mut p = Vec::with_capacity(len); + p.extend_from_slice(sigil); + p.extend_from_slice(server.as_bytes()); + p.push(0xFF); + p + }, + Self::Push(user, pushkey) => { + let sigil = b"$"; + let len = sigil + .len() + .saturating_add(user.as_bytes().len()) + .saturating_add(1) + .saturating_add(pushkey.as_bytes().len()) + .saturating_add(1); + + let mut p = Vec::with_capacity(len); + p.extend_from_slice(sigil); + p.extend_from_slice(user.as_bytes()); + p.push(0xFF); + p.extend_from_slice(pushkey.as_bytes()); + p.push(0xFF); + p + }, + } +} diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index fc32d04f91a62a5be08d05c93147b2fdd96bb0d4..b90ea361846df85ba6baed221fde70a678cb26cc 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -1,5 +1,6 @@ mod appservice; mod data; +mod dest; mod send; mod sender; @@ -9,16 +10,18 @@ use conduit::{err, warn, Result, Server}; use ruma::{ api::{appservice::Registration, OutgoingRequest}, - OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, + 
OwnedServerName, RoomId, ServerName, UserId, }; use tokio::sync::Mutex; +use self::data::Data; +pub use self::dest::Destination; use crate::{account_data, client, globals, presence, pusher, resolver, rooms, users, Dep}; pub struct Service { server: Arc<Server>, services: Services, - pub db: data::Data, + pub db: Data, sender: loole::Sender<Msg>, receiver: Mutex<loole::Receiver<Msg>>, } @@ -46,13 +49,6 @@ struct Msg { queue_id: Vec<u8>, } -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum Destination { - Appservice(String), - Push(OwnedUserId, String), // user and pushkey - Normal(OwnedServerName), -} - #[allow(clippy::module_name_repetitions)] #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum SendingEvent { @@ -82,7 +78,7 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> { appservice: args.depend::<crate::appservice::Service>("appservice"), pusher: args.depend::<pusher::Service>("pusher"), }, - db: data::Data::new(&args), + db: Data::new(&args), sender, receiver: Mutex::new(receiver), })) @@ -280,49 +276,3 @@ fn dispatch(&self, msg: Msg) -> Result<()> { self.sender.send(msg).map_err(|e| err!("{e}")) } } - -impl Destination { - #[must_use] - pub fn get_prefix(&self) -> Vec<u8> { - match self { - Self::Normal(server) => { - let len = server.as_bytes().len().saturating_add(1); - - let mut p = Vec::with_capacity(len); - p.extend_from_slice(server.as_bytes()); - p.push(0xFF); - p - }, - Self::Appservice(server) => { - let sigil = b"+"; - let len = sigil - .len() - .saturating_add(server.as_bytes().len()) - .saturating_add(1); - - let mut p = Vec::with_capacity(len); - p.extend_from_slice(sigil); - p.extend_from_slice(server.as_bytes()); - p.push(0xFF); - p - }, - Self::Push(user, pushkey) => { - let sigil = b"$"; - let len = sigil - .len() - .saturating_add(user.as_bytes().len()) - .saturating_add(1) - .saturating_add(pushkey.as_bytes().len()) - .saturating_add(1); - - let mut p = Vec::with_capacity(len); - p.extend_from_slice(sigil); - p.extend_from_slice(user.as_bytes()); - p.push(0xFF); - p.extend_from_slice(pushkey.as_bytes()); - p.push(0xFF); - p - }, - } - } -} diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs index 8fd467f637f90a38c16f20fc4fa709c74c3ed906..9a8f408b5456cb1bb8578748e7972766cf8b13cc 100644 --- a/src/service/sending/send.rs +++ b/src/service/sending/send.rs @@ -1,7 +1,8 @@ use std::{fmt::Debug, mem}; use conduit::{ - debug, debug_error, debug_warn, err, error::inspect_debug_log, trace, utils::string::EMPTY, Err, Error, Result, + debug, debug_error, debug_info, debug_warn, err, error::inspect_debug_log, trace, utils::string::EMPTY, Err, Error, + Result, }; use http::{header::AUTHORIZATION, HeaderValue}; use ipaddress::IPAddress; @@ -31,6 +32,16 @@ pub async fn send<T>(&self, client: &Client, dest: &ServerName, req: T) -> Resul return Err!(Config("allow_federation", "Federation is disabled.")); } + if self + .server + .config + .forbidden_remote_server_names + .contains(&dest.to_owned()) + { + debug_info!("Refusing to send outbound federation request to {dest}"); + return Err!(Request(Forbidden("Federation with this homeserver is not allowed."))); + } + let actual = self.services.resolver.get_actual_dest(dest).await?; let request = self.prepare::<T>(dest, &actual, req).await?; self.execute::<T>(dest, &actual, request, client).await @@ -56,7 +67,7 @@ async fn prepare<T>(&self, dest: &ServerName, actual: &ActualDest, req: T) -> Re where T: OutgoingRequest + Debug + Send, { - const VERSIONS: [MatrixVersion; 1] = [MatrixVersion::V1_5]; + const 
diff --git a/src/service/service.rs b/src/service/service.rs
index 0b9bc76c7a2a3fd3671c170b26cb4fd48a1b9883..635f782ea6b9525471a46c97092d38e568423a8d 100644
--- a/src/service/service.rs
+++ b/src/service/service.rs
@@ -97,7 +97,10 @@ pub(crate) fn require<T: Send + Sync + 'a + 'static>(&'a self, name: &'static st

 /// Reference a Service by name. Panics if the Service does not exist or was
 /// incorrectly cast.
-pub(crate) fn require<'a, 'b, T: Send + Sync + 'a + 'b + 'static>(map: &'b Map, name: &'a str) -> Arc<T> {
+pub(crate) fn require<'a, 'b, T>(map: &'b Map, name: &'a str) -> Arc<T>
+where
+    T: Send + Sync + 'a + 'b + 'static,
+{
     try_get::<T>(map, name)
         .inspect_err(inspect_log)
         .expect("Failure to reference service required by another service.")
@@ -109,7 +112,10 @@ pub(crate) fn require<'a, 'b, T: Send + Sync + 'a + 'b + 'static>(map: &'b Map,
 /// # Panics
 /// Incorrect type is not a silent failure (None) as the type never has a reason
 /// to be incorrect.
-pub(crate) fn get<'a, 'b, T: Send + Sync + 'a + 'b + 'static>(map: &'b Map, name: &'a str) -> Option<Arc<T>> {
+pub(crate) fn get<'a, 'b, T>(map: &'b Map, name: &'a str) -> Option<Arc<T>>
+where
+    T: Send + Sync + 'a + 'b + 'static,
+{
     map.read()
         .expect("locked for reading")
         .get(name)
@@ -123,7 +129,10 @@ pub(crate) fn get<'a, 'b, T: Send + Sync + 'a + 'b + 'static>(map: &'b Map, name

 /// Reference a Service by name. Returns Err if the Service does not exist or
 /// was incorrectly cast.
-pub(crate) fn try_get<'a, 'b, T: Send + Sync + 'a + 'b + 'static>(map: &'b Map, name: &'a str) -> Result<Arc<T>> {
+pub(crate) fn try_get<'a, 'b, T>(map: &'b Map, name: &'a str) -> Result<Arc<T>>
+where
+    T: Send + Sync + 'a + 'b + 'static,
+{
     map.read()
         .expect("locked for reading")
         .get(name)
diff --git a/src/service/services.rs b/src/service/services.rs
index 2b9b93d457e6a3b478b40ccd36b6d60b16f0b9c1..8e69cdbb622328f301c5e197b344c788f6c9635a 100644
--- a/src/service/services.rs
+++ b/src/service/services.rs
@@ -193,11 +193,17 @@ fn interrupt(&self) {
         }
     }

-    pub fn try_get<'a, 'b, T: Send + Sync + 'a + 'b + 'static>(&'b self, name: &'a str) -> Result<Arc<T>> {
+    pub fn try_get<'a, 'b, T>(&'b self, name: &'a str) -> Result<Arc<T>>
+    where
+        T: Send + Sync + 'a + 'b + 'static,
+    {
         service::try_get::<T>(&self.service, name)
     }

-    pub fn get<'a, 'b, T: Send + Sync + 'a + 'b + 'static>(&'b self, name: &'a str) -> Option<Arc<T>> {
+    pub fn get<'a, 'b, T>(&'b self, name: &'a str) -> Option<Arc<T>>
+    where
+        T: Send + Sync + 'a + 'b + 'static,
+    {
         service::get::<T>(&self.service, name)
     }
 }
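The service.rs and services.rs hunks above are a formatting-only refactor: the inline trait bounds on the generic lookup helpers move into where clauses, which changes how the signatures read but not what they accept. A toy illustration of the same transformation, using a hypothetical Any-based registry (Registry, fetch_inline and fetch_where are made-up names, not conduwuit's Map or its helpers), is sketched below:

use std::{any::Any, collections::BTreeMap, sync::Arc};

// Hypothetical stand-in for a name-to-service registry.
type Registry = BTreeMap<String, Arc<dyn Any + Send + Sync>>;

// Before: bounds inline in the angle brackets.
fn fetch_inline<T: Any + Send + Sync + 'static>(map: &Registry, name: &str) -> Option<Arc<T>> {
    map.get(name).cloned().and_then(|a| a.downcast::<T>().ok())
}

// After: the same bounds expressed in a `where` clause; behaviour is identical.
fn fetch_where<T>(map: &Registry, name: &str) -> Option<Arc<T>>
where
    T: Any + Send + Sync + 'static,
{
    fetch_inline::<T>(map, name)
}

fn main() {
    let mut map: Registry = BTreeMap::new();
    map.insert("answer".to_owned(), Arc::new(42_u32));
    assert_eq!(fetch_where::<u32>(&map, "answer").as_deref(), Some(&42));
}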
{"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/interrupted_connectivity"} @@ -138,43 +138,43 @@ {"Action":"fail","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01"} {"Action":"fail","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} {"Action":"fail","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} -{"Action":"fail","Test":"TestLocalPngThumbnail"} -{"Action":"fail","Test":"TestLocalPngThumbnail/test_/_matrix/client/v1/media_endpoint"} -{"Action":"fail","Test":"TestMediaFilenames"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/ASCII"} +{"Action":"pass","Test":"TestLocalPngThumbnail"} +{"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/client/v1/media_endpoint"} +{"Action":"pass","Test":"TestMediaFilenames"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII"} {"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'ascii'"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'ascii'_over_/_matrix/client/v1/media/download"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'name;with;semicolons'"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'name;with;semicolons'_over_/_matrix/client/v1/media/download"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'name_with_spaces'"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'name_with_spaces'_over_/_matrix/client/v1/media/download"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'ascii'_over_/_matrix/client/v1/media/download"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'name;with;semicolons'"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'name;with;semicolons'_over_/_matrix/client/v1/media/download"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'name_with_spaces'"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'name_with_spaces'_over_/_matrix/client/v1/media/download"} {"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_specifying_a_different_ASCII_file_name"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_specifying_a_different_ASCII_file_name_over__matrix/client/v1/media/download"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_specifying_a_different_ASCII_file_name_over__matrix/client/v1/media/download"} {"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_upload_with_ASCII_file_name"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/Unicode"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_specifying_a_different_Unicode_file_name"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_specifying_a_different_Unicode_file_name_over__matrix/client/v1/media/download"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_locally"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_locally_over__matrix/client/v1/media/download"} 
-{"Action":"fail","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_over_federation"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_over_federation_via__matrix/client/v1/media/download"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_specifying_a_different_Unicode_file_name"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_specifying_a_different_Unicode_file_name_over__matrix/client/v1/media/download"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_locally"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_locally_over__matrix/client/v1/media/download"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_over_federation"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_over_federation_via__matrix/client/v1/media/download"} {"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Can_upload_with_Unicode_file_name"} {"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_as_inline"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_as_inline_via__matrix/client/v1/media/download"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_as_inline_via__matrix/client/v1/media/download"} {"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_with_parameters_as_inline"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_with_parameters_as_inline_via__matrix/client/v1/media/download"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_with_parameters_as_inline_via__matrix/client/v1/media/download"} {"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Will_serve_unsafe_media_types_as_attachments"} -{"Action":"fail","Test":"TestMediaFilenames/Parallel/Unicode/Will_serve_unsafe_media_types_as_attachments_via__matrix/client/v1/media/download"} +{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Will_serve_unsafe_media_types_as_attachments_via__matrix/client/v1/media/download"} {"Action":"pass","Test":"TestMediaWithoutFileName"} {"Action":"pass","Test":"TestMediaWithoutFileName/parallel"} {"Action":"pass","Test":"TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_locally"} {"Action":"pass","Test":"TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_over_federation"} {"Action":"pass","Test":"TestMediaWithoutFileName/parallel/Can_upload_without_a_file_name"} -{"Action":"fail","Test":"TestMediaWithoutFileNameCSMediaV1"} -{"Action":"fail","Test":"TestMediaWithoutFileNameCSMediaV1/parallel"} -{"Action":"fail","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_download_without_a_file_name_locally"} -{"Action":"fail","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_download_without_a_file_name_over_federation"} +{"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1"} +{"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel"} +{"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_download_without_a_file_name_locally"} 
+{"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_download_without_a_file_name_over_federation"} {"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_upload_without_a_file_name"} {"Action":"fail","Test":"TestNetworkPartitionOrdering"} {"Action":"fail","Test":"TestOutboundFederationIgnoresMissingEventWithBadJSONForRoomVersion6"} @@ -182,8 +182,8 @@ {"Action":"pass","Test":"TestOutboundFederationProfile/Outbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestOutboundFederationSend"} {"Action":"pass","Test":"TestRemoteAliasRequestsUnderstandUnicode"} -{"Action":"fail","Test":"TestRemotePngThumbnail"} -{"Action":"fail","Test":"TestRemotePngThumbnail/test_/_matrix/client/v1/media_endpoint"} +{"Action":"pass","Test":"TestRemotePngThumbnail"} +{"Action":"pass","Test":"TestRemotePngThumbnail/test_/_matrix/client/v1/media_endpoint"} {"Action":"fail","Test":"TestRemotePresence"} {"Action":"fail","Test":"TestRemotePresence/Presence_changes_are_also_reported_to_remote_room_members"} {"Action":"fail","Test":"TestRemotePresence/Presence_changes_to_UNAVAILABLE_are_reported_to_remote_room_members"}