diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs
index e4acdbb568ef348bd41181a2b21acf9be3c6c1e9..14306e19d78b13c4666fa6ea696dbb4c3b1572a1 100644
--- a/src/database/abstraction/sqlite.rs
+++ b/src/database/abstraction/sqlite.rs
@@ -150,11 +150,16 @@ fn read_lock(&self) -> HoldingConn<'_> {
             }
         }
 
+        log::debug!("read_lock: All permanent readers locked, obtaining spillover reader...");
+
         // We didn't get a connection from the permanent pool, so we'll dumpster-dive for recycled connections.
         // Either we have a connection or we don't; if we don't, we make a new one.
         let conn = match self.spills.try_take() {
             Some(conn) => conn,
-            None => Self::prepare_conn(&self.path, None).unwrap(),
+            None => {
+                log::debug!("read_lock: No recycled connections left, creating a new one...");
+                Self::prepare_conn(&self.path, None).unwrap()
+            }
         };
 
         // Clone the spill Arc to mark how many spilled connections actually exist.
@@ -163,8 +168,6 @@ fn read_lock(&self) -> HoldingConn<'_> {
         // Get a sense of how many connections exist now.
         let now_count = Arc::strong_count(&spill_arc) - 1 /* because one is held by the pool */;
 
-        log::debug!("read_lock: all readers locked, creating spillover reader...");
-
         // If there are more spillover readers than permanent readers, there might be a problem.
         if now_count > self.readers.len() {
             log::warn!(
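
For context, the warning above keys off `Arc::strong_count`: the pool keeps one clone of a marker `Arc`, each live spillover reader holds another, so `strong_count - 1` approximates how many spilled connections are currently outstanding. Below is a minimal, self-contained sketch of that counting pattern; the `SpillTracker`/`SpillGuard` names are hypothetical and not part of this patch.

```rust
// Sketch (not the Conduit code itself): each spillover "connection" holds a
// clone of a shared Arc, so Arc::strong_count on the pool's copy tells us
// how many are still alive.
use std::sync::Arc;

struct SpillTracker {
    // The pool holds one reference; every live spillover holds one more.
    marker: Arc<()>,
}

struct SpillGuard {
    _marker: Arc<()>, // dropped when the spillover connection is released
}

impl SpillTracker {
    fn new() -> Self {
        Self { marker: Arc::new(()) }
    }

    fn take(&self) -> SpillGuard {
        SpillGuard { _marker: Arc::clone(&self.marker) }
    }

    /// Number of spillover guards currently alive; the pool's own copy is
    /// subtracted, mirroring the `- 1` in the patched code.
    fn live(&self) -> usize {
        Arc::strong_count(&self.marker) - 1
    }
}

fn main() {
    let tracker = SpillTracker::new();
    assert_eq!(tracker.live(), 0);

    let a = tracker.take();
    let b = tracker.take();
    assert_eq!(tracker.live(), 2);

    drop(a);
    assert_eq!(tracker.live(), 1);
    drop(b);
    assert_eq!(tracker.live(), 0);
}
```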