diff --git a/changelog.d/10513.feature b/changelog.d/10513.feature
new file mode 100644
index 0000000000000000000000000000000000000000..153b2df7b205001ca5fe8facf001611de5419ab1
--- /dev/null
+++ b/changelog.d/10513.feature
@@ -0,0 +1 @@
+Add a configuration setting for how long a `/sync` response is cached.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 1a217f35dba90794a5e4077543cb24071648f89f..a2efc14100ac52d6371d7d2e2ad78a94c8296fd4 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -711,6 +711,15 @@ caches:
   #
   #expiry_time: 30m
 
+  # Controls how long the results of a /sync request are cached after a
+  # successful response is returned. A higher duration can help clients with
+  # intermittent connections, at the cost of higher memory usage.
+  #
+  # By default, this is zero, which means that sync responses are not cached
+  # at all.
+  #
+  #sync_response_cache_duration: 2m
+
 
 ## Database ##
 
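For illustration only (not part of the patch): enabling the new option in a deployment's homeserver.yaml would look roughly like the fragment below. `2m` is the example value from the sample config above; the default of `0` keeps caching disabled.

    caches:
      # Keep successful /sync responses around for two minutes so that a
      # client which retries after a dropped connection gets the cached
      # result instead of triggering a fresh sync computation.
      sync_response_cache_duration: 2m
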
diff --git a/synapse/config/cache.py b/synapse/config/cache.py
index 8d5f38b5d934abf5913201cce2f0be19c300ea85..d119427ad864a0d979155ef9e43c77ba1a098e8a 100644
--- a/synapse/config/cache.py
+++ b/synapse/config/cache.py
@@ -151,6 +151,15 @@ class CacheConfig(Config):
           # entries are never evicted based on time.
           #
           #expiry_time: 30m
+
+          # Controls how long the results of a /sync request are cached after a
+          # successful response is returned. A higher duration can help clients
+          # with intermittent connections, at the cost of higher memory usage.
+          #
+          # By default, this is zero, which means that sync responses are not cached
+          # at all.
+          #
+          #sync_response_cache_duration: 2m
         """
 
     def read_config(self, config, **kwargs):
@@ -212,6 +221,10 @@ class CacheConfig(Config):
         else:
             self.expiry_time_msec = None
 
+        self.sync_response_cache_duration = self.parse_duration(
+            cache_config.get("sync_response_cache_duration", 0)
+        )
+
         # Resize all caches (if necessary) with the new factors we've loaded
         self.resize_all_caches()
 
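The value stored in `sync_response_cache_duration` comes out of `parse_duration`, which normalises either a bare integer (already milliseconds) or a human-readable string such as "2m" into milliseconds before it is handed to the cache. A minimal sketch of that conversion, with illustrative names and only a subset of the unit suffixes the real helper accepts:

    # Illustrative sketch (not Synapse's implementation) of turning a duration
    # setting into the millisecond value used for the cache timeout.
    _UNITS_MS = {"s": 1_000, "m": 60_000, "h": 3_600_000, "d": 86_400_000}

    def duration_to_ms(value) -> int:
        """Accept an integer (milliseconds) or a string like '30s' or '2m'."""
        if isinstance(value, int):
            return value
        return int(value[:-1]) * _UNITS_MS[value[-1]]

    assert duration_to_ms(0) == 0           # the default: no caching
    assert duration_to_ms("2m") == 120_000  # the documented example: two minutes
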
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index f30bfcc93cf2649c326117df495b2600b1585490..590642f510fe228685976ccada6f74773e781e8f 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -269,14 +269,22 @@ class SyncHandler:
         self.presence_handler = hs.get_presence_handler()
         self.event_sources = hs.get_event_sources()
         self.clock = hs.get_clock()
-        self.response_cache: ResponseCache[SyncRequestKey] = ResponseCache(
-            hs.get_clock(), "sync"
-        )
         self.state = hs.get_state_handler()
         self.auth = hs.get_auth()
         self.storage = hs.get_storage()
         self.state_store = self.storage.state
 
+        # TODO: flush cache entries on subsequent sync request.
+        #    Once we get the next /sync request (i.e. one with the same access token
+        #    that sets 'since' to 'next_batch'), we know that device won't need a
+        #    cached result any more, and we could flush the entry from the cache to save
+        #    memory.
+        self.response_cache: ResponseCache[SyncRequestKey] = ResponseCache(
+            hs.get_clock(),
+            "sync",
+            timeout_ms=hs.config.caches.sync_response_cache_duration,
+        )
+
         # ExpiringCache((User, Device)) -> LruCache(user_id => event_id)
         self.lazy_loaded_members_cache: ExpiringCache[
             Tuple[str, Optional[str]], LruCache[str, str]
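
To make the behavioural change concrete, here is a small, self-contained sketch of the idea the handler now relies on: a response cache that, when given a non-zero `timeout_ms`, keeps a finished result available under its request key for that long, so a client retrying the identical request after a flaky connection gets the stored response back. This is not Synapse's `ResponseCache` (which, among other things, also deduplicates requests that are still in flight); names and structure here are purely illustrative.

    import time
    from typing import Any, Awaitable, Callable, Dict, Tuple

    class TimedResponseCache:
        """Toy model of "cache a successful response for timeout_ms"."""

        def __init__(self, timeout_ms: int) -> None:
            self._timeout_s = timeout_ms / 1000
            self._entries: Dict[Any, Tuple[float, Any]] = {}

        async def wrap(self, key: Any, compute: Callable[[], Awaitable[Any]]) -> Any:
            now = time.monotonic()
            hit = self._entries.get(key)
            if hit is not None and now - hit[0] < self._timeout_s:
                # A retry of the same request within the window: serve the
                # stored response rather than recomputing the sync.
                return hit[1]
            result = await compute()
            if self._timeout_s > 0:
                # With timeout_ms == 0 (the default) nothing is retained,
                # matching "sync responses are not cached at all".
                self._entries[key] = (now, result)
            return result

In the handler the key is the full `SyncRequestKey`, so only a request with identical parameters hits the cache; the follow-up request that advances `since` to the previous `next_batch` always computes fresh results, which is why the TODO above suggests evicting the then-obsolete entry at that point.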