diff --git a/Cargo.toml b/Cargo.toml
index eb43da5a88109285bb06ad40a3cd7a8788ea9842..e7ebadf20788995285443cc0a357f52711fb33c9 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -24,8 +24,8 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "b39537812c12caafcbf8b7bd74
 # Used for long polling and federation sender, should be the same as rocket::tokio
 tokio = "1.2.0"
 # Used for storing data permanently
-sled = { version = "0.34.6", features = ["compression", "no_metrics"] }
-rocksdb = { version = "0.16.0", features = ["multi-threaded-cf"] }
+sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true }
+rocksdb = { version = "0.16.0", features = ["multi-threaded-cf"], optional = true }
 #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] }
 
 # Used for the http request / response body type for Ruma endpoints used with reqwest
@@ -75,7 +75,9 @@ opentelemetry-jaeger = "0.11.0"
 pretty_env_logger = "0.4.0"
 
 [features]
-default = ["conduit_bin"]
+default = ["conduit_bin", "backend_sled"]
+backend_sled = ["sled"]
+backend_rocksdb = ["rocksdb"]
 conduit_bin = [] # TODO: add rocket to this when it is optional
 
 [[bin]]
diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs
index 92d7aced8d537637cab390d018aba782b4aaacdf..a3f1389a9d258651fc12ed96cc65ab6dc79a52a2 100644
--- a/src/client_server/membership.rs
+++ b/src/client_server/membership.rs
@@ -621,7 +621,7 @@ async fn join_room_by_id_helper(
             &pdu,
             utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"),
             count,
-            pdu_id.into(),
+            &pdu_id,
             &[pdu.event_id.clone()],
             db,
         )?;
diff --git a/src/database.rs b/src/database.rs
index b5a25eac1445a9b9b5951914c46fb966a5e38e63..e00bdcd521b1e7a36362057fa21cb9eb4260f4fc 100644
--- a/src/database.rs
+++ b/src/database.rs
@@ -77,8 +77,12 @@ fn default_log() -> String {
     "info,state_res=warn,rocket=off,_=off,sled=off".to_owned()
 }
 
+#[cfg(feature = "sled")]
 pub type Engine = abstraction::SledEngine;
 
+#[cfg(feature = "rocksdb")]
+pub type Engine = abstraction::RocksDbEngine;
+
 pub struct Database {
     pub globals: globals::Globals,
     pub users: users::Users,
diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs
index ad032fb3ddc77dbe2638f7311d829035ebb0a3fa..f81c9def2c306aeaf530182f8a45e7588b5e5052 100644
--- a/src/database/abstraction.rs
+++ b/src/database/abstraction.rs
@@ -1,21 +1,19 @@
-use std::{
-    collections::BTreeMap,
-    future::Future,
-    pin::Pin,
-    sync::{Arc, RwLock},
-};
-
-use log::warn;
-use rocksdb::{
-    BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, Direction, MultiThreaded, Options,
-};
-
 use super::Config;
 use crate::{utils, Result};
+use log::warn;
+use std::{future::Future, pin::Pin, sync::Arc};
+
+#[cfg(feature = "rocksdb")]
+use std::{collections::BTreeMap, sync::RwLock};
 
+#[cfg(feature = "sled")]
 pub struct SledEngine(sled::Db);
+#[cfg(feature = "sled")]
 pub struct SledEngineTree(sled::Tree);
-pub struct RocksDbEngine(rocksdb::DBWithThreadMode<MultiThreaded>);
+
+#[cfg(feature = "rocksdb")]
+pub struct RocksDbEngine(rocksdb::DBWithThreadMode<rocksdb::MultiThreaded>);
+#[cfg(feature = "rocksdb")]
 pub struct RocksDbEngineTree<'a> {
     db: Arc<RocksDbEngine>,
     name: &'a str,
@@ -60,6 +58,7 @@ fn clear(&self) -> Result<()> {
     }
 }
 
+#[cfg(feature = "sled")]
 impl DatabaseEngine for SledEngine {
     fn open(config: &Config) -> Result<Arc<Self>> {
         Ok(Arc::new(SledEngine(
@@ -76,6 +75,7 @@ fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>> {
     }
 }
 
+#[cfg(feature = "sled")]
 impl Tree for SledEngineTree {
     fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
         Ok(self.0.get(key)?.map(|v| v.to_vec()))
@@ -165,29 +165,42 @@ fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()>
     }
 }
 
+#[cfg(feature = "rocksdb")]
 impl DatabaseEngine for RocksDbEngine {
     fn open(config: &Config) -> Result<Arc<Self>> {
-        let mut db_opts = Options::default();
+        let mut db_opts = rocksdb::Options::default();
         db_opts.create_if_missing(true);
+        db_opts.set_max_open_files(16);
+        db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
+        db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy);
+        db_opts.set_target_file_size_base(256 << 20);
+        db_opts.set_write_buffer_size(256 << 20);
+
+        let mut block_based_options = rocksdb::BlockBasedOptions::default();
+        block_based_options.set_block_size(512 << 10);
+        db_opts.set_block_based_table_factory(&block_based_options);
 
-        let cfs = DBWithThreadMode::<MultiThreaded>::list_cf(&db_opts, &config.database_path)
-            .unwrap_or_default();
+        let cfs = rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::list_cf(
+            &db_opts,
+            &config.database_path,
+        )
+        .unwrap_or_default();
 
-        let mut options = Options::default();
+        let mut options = rocksdb::Options::default();
         options.set_merge_operator_associative("increment", utils::increment_rocksdb);
 
-        let db = DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(
+        let db = rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::open_cf_descriptors(
             &db_opts,
             &config.database_path,
             cfs.iter()
-                .map(|name| ColumnFamilyDescriptor::new(name, options.clone())),
+                .map(|name| rocksdb::ColumnFamilyDescriptor::new(name, options.clone())),
         )?;
 
         Ok(Arc::new(RocksDbEngine(db)))
     }
 
     fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>> {
-        let mut options = Options::default();
+        let mut options = rocksdb::Options::default();
         options.set_merge_operator_associative("increment", utils::increment_rocksdb);
 
         // Create if it doesn't exist
@@ -201,12 +214,14 @@ fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>> {
     }
 }
 
+#[cfg(feature = "rocksdb")]
 impl RocksDbEngineTree<'_> {
-    fn cf(&self) -> BoundColumnFamily<'_> {
+    fn cf(&self) -> rocksdb::BoundColumnFamily<'_> {
         self.db.0.cf_handle(self.name).unwrap()
     }
 }
 
+#[cfg(feature = "rocksdb")]
 impl Tree for RocksDbEngineTree<'_> {
     fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
         Ok(self.db.0.get_cf(self.cf(), key)?)
@@ -260,15 +275,15 @@ fn iter_from<'a>(
             rocksdb::IteratorMode::From(
                 from,
                 if backwards {
-                    Direction::Reverse
+                    rocksdb::Direction::Reverse
                 } else {
-                    Direction::Forward
+                    rocksdb::Direction::Forward
                 },
             ),
         ))
     }
 
     fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
         // TODO: atomic?
         let old = self.get(key)?;
         let new = utils::increment(old.as_deref()).unwrap();
@@ -285,7 +300,7 @@ fn scan_prefix<'a>(
                 .0
                 .iterator_cf(
                     self.cf(),
-                    rocksdb::IteratorMode::From(&prefix, Direction::Forward),
+                    rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
                 )
                 .take_while(move |(k, _)| k.starts_with(&prefix)),
         )
diff --git a/src/database/rooms.rs b/src/database/rooms.rs
index 0a8239d4fcda315290578873ef461eb4e47678ce..736ff4d85e45eaf46dcb26f480f0038fc974263c 100644
--- a/src/database/rooms.rs
+++ b/src/database/rooms.rs
@@ -19,8 +19,6 @@
     state_res::{self, Event, RoomVersion, StateMap},
     uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
 };
-use sled::IVec;
-
 use std::{
     collections::{BTreeMap, HashMap, HashSet},
     convert::{TryFrom, TryInto},
@@ -34,7 +32,7 @@
 ///
 /// This is created when a state group is added to the database by
 /// hashing the entire state.
-pub type StateHashId = IVec;
+pub type StateHashId = Vec<u8>;
 
 pub struct Rooms {
     pub edus: edus::RoomEdus,
@@ -665,7 +663,7 @@ pub fn append_pdu(
         pdu: &PduEvent,
         mut pdu_json: CanonicalJsonObject,
         count: u64,
-        pdu_id: IVec,
+        pdu_id: &[u8],
         leaves: &[EventId],
         db: &Database,
     ) -> Result<()> {
@@ -713,14 +711,13 @@ pub fn append_pdu(
         self.reset_notification_counts(&pdu.sender, &pdu.room_id)?;
 
         self.pduid_pdu.insert(
-            &pdu_id,
+            pdu_id,
             &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"),
         )?;
 
         // This also replaces the eventid of any outliers with the correct
         // pduid, removing the place holder.
-        self.eventid_pduid
-            .insert(pdu.event_id.as_bytes(), &*pdu_id)?;
+        self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?;
 
         // See if the event matches any known pushers
         for user in db
@@ -1360,7 +1357,7 @@ pub fn build_and_append_pdu(
             &pdu,
             pdu_json,
             count,
-            pdu_id.clone().into(),
+            &pdu_id,
             // Since this PDU references all pdu_leaves we can update the leaves
             // of the room
             &[pdu.event_id.clone()],
diff --git a/src/database/sending.rs b/src/database/sending.rs
index 77f6ed781cade6e526c390d02fed32f08cd980d7..ecf0761828ad938b14d3a2aed2d27ef0900af650 100644
--- a/src/database/sending.rs
+++ b/src/database/sending.rs
@@ -91,8 +91,6 @@ enum TransactionStatus {
 
 impl Sending {
     pub fn start_handler(&self, db: Arc<Database>, mut receiver: mpsc::UnboundedReceiver<Vec<u8>>) {
-        let db = db.clone();
-
         tokio::spawn(async move {
             let mut futures = FuturesUnordered::new();
 
diff --git a/src/error.rs b/src/error.rs
index 10a48b7260ac1b0db2883ebbdc8d67542c74e48f..4f363fff654075e284f57a32414ecd71c57b7dff 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -23,11 +23,13 @@
 
 #[derive(Error, Debug)]
 pub enum Error {
+    #[cfg(feature = "sled")]
     #[error("There was a problem with the connection to the sled database.")]
     SledError {
         #[from]
         source: sled::Error,
     },
+    #[cfg(feature = "rocksdb")]
     #[error("There was a problem with the connection to the rocksdb database: {source}")]
     RocksDbError {
         #[from]
diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs
index ba2c37eddac8ee531799cfac5ee4bf3b9dac3c38..2912a578ac5447ffd8b3fc003d375a0c7e032be1 100644
--- a/src/ruma_wrapper.rs
+++ b/src/ruma_wrapper.rs
@@ -1,14 +1,15 @@
-use crate::{Database, Error};
+use crate::Error;
 use ruma::{
     api::OutgoingResponse,
     identifiers::{DeviceId, UserId},
-    Outgoing,
+    signatures::CanonicalJsonValue,
+    Outgoing, ServerName,
 };
-use std::{ops::Deref, sync::Arc};
+use std::ops::Deref;
 
 #[cfg(feature = "conduit_bin")]
 use {
-    crate::server_server,
+    crate::{server_server, Database},
     log::{debug, warn},
     rocket::{
         data::{self, ByteUnit, Data, FromData},
@@ -18,14 +19,11 @@
         tokio::io::AsyncReadExt,
         Request, State,
     },
-    ruma::{
-        api::{AuthScheme, IncomingRequest},
-        signatures::CanonicalJsonValue,
-        ServerName,
-    },
+    ruma::api::{AuthScheme, IncomingRequest},
     std::collections::BTreeMap,
     std::convert::TryFrom,
     std::io::Cursor,
+    std::sync::Arc,
 };
 
 /// This struct converts rocket requests into ruma structs by converting them into http requests
diff --git a/src/server_server.rs b/src/server_server.rs
index 7a338dc5ccbcb7b86bd0289391e834d61a1b2b7c..2a445c2ba219b5957eb348ed075aab9b35336cf1 100644
--- a/src/server_server.rs
+++ b/src/server_server.rs
@@ -1681,7 +1681,7 @@ pub(crate) fn append_incoming_pdu(
         pdu,
         pdu_json,
         count,
-        pdu_id.clone().into(),
+        &pdu_id,
         &new_room_leaves.into_iter().collect::<Vec<_>>(),
         &db,
     )?;
diff --git a/src/utils.rs b/src/utils.rs
index f59afb3ac90cdbc6ce7003e0bc32c41018d59c9c..0c8fb5ca3f54ea9debc2ed69abfb80ccd86546a4 100644
--- a/src/utils.rs
+++ b/src/utils.rs
@@ -1,7 +1,6 @@
 use argon2::{Config, Variant};
 use cmp::Ordering;
 use rand::prelude::*;
-use rocksdb::MergeOperands;
 use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject};
 use std::{
     cmp,
@@ -16,10 +15,11 @@ pub fn millis_since_unix_epoch() -> u64 {
         .as_millis() as u64
 }
 
+#[cfg(feature = "rocksdb")]
 pub fn increment_rocksdb(
     _new_key: &[u8],
     old: Option<&[u8]>,
-    _operands: &mut MergeOperands,
+    _operands: &mut rocksdb::MergeOperands,
 ) -> Option<Vec<u8>> {
     increment(old)
 }