1use std::collections::{HashMap, HashSet};
4use std::path::{Path, PathBuf};
5use std::time::Duration;
6
7use anyhow::{Context as _, Result, bail};
8use rusqlite::{Connection, OpenFlags, Row, config::DbConfig, types::ValueRef};
9use tokio::sync::RwLock;
10
11use crate::blob::BlobObject;
12use crate::config::Config;
13use crate::constants::DC_CHAT_ID_TRASH;
14use crate::context::Context;
15use crate::debug_logging::set_debug_logging_xdc;
16use crate::ephemeral::start_ephemeral_timers;
17use crate::imex::BLOBS_BACKUP_NAME;
18use crate::location;
19use crate::log::{LogExt, warn};
20use crate::message::MsgId;
21use crate::net::dns::prune_dns_cache;
22use crate::net::http::http_cache_cleanup;
23use crate::net::prune_connection_history;
24use crate::param::{Param, Params};
25use crate::tools::{SystemTime, delete_file, time};
26
/// Trait alias for parameters that can be bound to SQL statements
/// and sent across threads, as required by [`params_slice`].
pub trait ToSql: rusqlite::ToSql + Send + Sync {}

/// Blanket implementation: every thread-safe `rusqlite::ToSql` type qualifies.
impl<T: rusqlite::ToSql + Send + Sync> ToSql for T {}
32
/// Builds an array of `&dyn ToSql` trait objects from the given expressions,
/// for passing a homogeneous slice of SQL parameters of mixed Rust types.
#[macro_export]
macro_rules! params_slice {
    ($($param:expr),+) => {
        [$(&$param as &dyn $crate::sql::ToSql),+]
    };
}
44
45mod migrations;
46mod pool;
47
48use pool::{Pool, WalCheckpointStats};
49
/// Wrapper around the SQLite database file and its connection pool.
#[derive(Debug)]
pub struct Sql {
    /// Path to the database file.
    pub(crate) dbfile: PathBuf,

    /// Connection pool; `None` while the database is closed.
    pool: RwLock<Option<Pool>>,

    /// Whether the database is encrypted with a passphrase;
    /// `None` until the database has been opened.
    is_encrypted: RwLock<Option<bool>>,

    /// In-memory cache of rows from the `config` table.
    pub(crate) config_cache: RwLock<HashMap<String, Option<String>>>,
}
66
67impl Sql {
68 pub fn new(dbfile: PathBuf) -> Sql {
70 Self {
71 dbfile,
72 pool: Default::default(),
73 is_encrypted: Default::default(),
74 config_cache: Default::default(),
75 }
76 }
77
78 pub async fn check_passphrase(&self, passphrase: String) -> Result<bool> {
86 if self.is_open().await {
87 bail!("Database is already opened.");
88 }
89
90 let _lock = self.pool.write().await;
92
93 let connection = Connection::open(&self.dbfile)?;
95 if !passphrase.is_empty() {
96 connection
97 .pragma_update(None, "key", &passphrase)
98 .context("Failed to set PRAGMA key")?;
99 }
100 let key_is_correct = connection
101 .query_row("SELECT count(*) FROM sqlite_master", [], |_row| Ok(()))
102 .is_ok();
103
104 Ok(key_is_correct)
105 }
106
107 pub async fn is_open(&self) -> bool {
109 self.pool.read().await.is_some()
110 }
111
112 pub(crate) async fn is_encrypted(&self) -> Option<bool> {
116 *self.is_encrypted.read().await
117 }
118
119 pub(crate) async fn close(&self) {
121 let _ = self.pool.write().await.take();
122 }
124
    /// Imports the database from the backup file at `path`,
    /// decrypting it with `passphrase`, replacing the current contents.
    pub(crate) async fn import(&self, path: &Path, passphrase: String) -> Result<()> {
        let path_str = path
            .to_str()
            .with_context(|| format!("path {path:?} is not valid unicode"))?
            .to_string();

        // The cache refers to the old contents; clear it and keep the
        // write guard for the remainder of the import so readers don't
        // repopulate it from the database being replaced.
        let mut config_cache = self.config_cache.write().await;
        config_cache.clear();

        let query_only = false;
        self.call(query_only, move |conn| {
            // Attach the backup and verify its passphrase before touching `main`.
            conn.execute("ATTACH DATABASE ? AS backup KEY ?", (path_str, passphrase))
                .context("failed to attach backup database")?;
            let res = conn
                .query_row("SELECT count(*) FROM sqlite_master", [], |_row| Ok(()))
                .context("backup passphrase is not correct");

            // NOTE: `.and(...)` (unlike `.and_then`) evaluates its argument
            // unconditionally, so RESET_DATABASE is always unset and the
            // backup is always detached even if an earlier step failed.
            res.and_then(|_| {
                conn.set_db_config(DbConfig::SQLITE_DBCONFIG_RESET_DATABASE, true)
                    .context("failed to set SQLITE_DBCONFIG_RESET_DATABASE")
            })
            .and_then(|_| {
                // With RESET_DATABASE set, VACUUM empties the main database.
                conn.execute("VACUUM", [])
                    .context("failed to vacuum the database")
            })
            .and(
                conn.set_db_config(DbConfig::SQLITE_DBCONFIG_RESET_DATABASE, false)
                    .context("failed to unset SQLITE_DBCONFIG_RESET_DATABASE"),
            )
            .and_then(|_| {
                // Copy all contents of the attached backup into `main`.
                conn.query_row("SELECT sqlcipher_export('main', 'backup')", [], |_row| {
                    Ok(())
                })
                .context("failed to import from attached backup database")
            })
            .and(
                conn.execute("DETACH DATABASE backup", [])
                    .context("failed to detach backup database"),
            )?;
            Ok(())
        })
        .await
    }
176
177 const N_DB_CONNECTIONS: usize = 3;
178
179 fn new_pool(dbfile: &Path, passphrase: String) -> Result<Pool> {
181 let mut connections = Vec::with_capacity(Self::N_DB_CONNECTIONS);
182 for _ in 0..Self::N_DB_CONNECTIONS {
183 let connection = new_connection(dbfile, &passphrase)?;
184 connections.push(connection);
185 }
186
187 let pool = Pool::new(connections);
188 Ok(pool)
189 }
190
191 async fn try_open(&self, context: &Context, dbfile: &Path, passphrase: String) -> Result<()> {
192 *self.pool.write().await = Some(Self::new_pool(dbfile, passphrase.to_string())?);
193
194 if let Err(e) = self.run_migrations(context).await {
195 error!(context, "Running migrations failed: {e:#}");
196 eprintln!("Running migrations failed: {e:#}");
201 context.set_migration_error(&format!("Updating Delta Chat failed. Please send this message to the Delta Chat developers, either at delta@merlinux.eu or at https://support.delta.chat.\n\n{e:#}"));
202 }
207
208 Ok(())
209 }
210
    /// Runs database schema migrations and, if a migration requested it,
    /// re-encodes the self-avatar to the current avatar size.
    pub async fn run_migrations(&self, context: &Context) -> Result<()> {
        // `migrations::run` reports whether the self-avatar must be recoded.
        let recode_avatar = migrations::run(context, self)
            .await
            .context("failed to run migrations")?;

        if recode_avatar && let Some(avatar) = context.get_config(Config::Selfavatar).await? {
            let mut blob = BlobObject::from_path(context, Path::new(&avatar))?;
            match blob.recode_to_avatar_size(context).await {
                Ok(()) => {
                    if let Some(path) = blob.to_abs_path().to_str() {
                        // Store the (possibly changed) path of the recoded avatar.
                        context
                            .set_config_internal(Config::Selfavatar, Some(path))
                            .await?;
                    } else {
                        warn!(context, "Setting selfavatar failed: non-UTF-8 filename");
                    }
                }
                Err(e) => {
                    // Recoding failed; remove the avatar rather than keep a broken one.
                    warn!(context, "Migrations can't recode avatar, removing. {:#}", e);
                    context
                        .set_config_internal(Config::Selfavatar, None)
                        .await?
                }
            }
        }

        Ok(())
    }
247
    /// Opens the database file and initializes the connection pool.
    ///
    /// Fails if the database is already open.
    pub async fn open(&self, context: &Context, passphrase: String) -> Result<()> {
        if self.is_open().await {
            error!(
                context,
                "Cannot open, database \"{:?}\" already opened.", self.dbfile,
            );
            bail!("SQL database is already opened.");
        }

        // Remember before `passphrase` is moved into `try_open`.
        let passphrase_nonempty = !passphrase.is_empty();
        self.try_open(context, &self.dbfile, passphrase).await?;
        info!(context, "Opened database {:?}.", self.dbfile);
        *self.is_encrypted.write().await = Some(passphrase_nonempty);

        // Restore the webxdc debug-logging destination if one was configured.
        if let Some(xdc_id) = self
            .get_raw_config_u32(Config::DebugLogging.as_ref())
            .await?
        {
            set_debug_logging_xdc(context, Some(MsgId::new(xdc_id))).await?;
        }
        Ok(())
    }
273
274 pub async fn change_passphrase(&self, passphrase: String) -> Result<()> {
280 let mut lock = self.pool.write().await;
281
282 let pool = lock.take().context("SQL connection pool is not open")?;
283 let query_only = false;
284 let conn = pool.get(query_only).await?;
285 if !passphrase.is_empty() {
286 conn.pragma_update(None, "rekey", passphrase.clone())
287 .context("Failed to set PRAGMA rekey")?;
288 }
289 drop(pool);
290
291 *lock = Some(Self::new_pool(&self.dbfile, passphrase.to_string())?);
292
293 Ok(())
294 }
295
296 pub async fn call<'a, F, R>(&'a self, query_only: bool, function: F) -> Result<R>
303 where
304 F: 'a + FnOnce(&mut Connection) -> Result<R> + Send,
305 R: Send + 'static,
306 {
307 let lock = self.pool.read().await;
308 let pool = lock.as_ref().context("no SQL connection")?;
309 let mut conn = pool.get(query_only).await?;
310 let res = tokio::task::block_in_place(move || function(&mut conn))?;
311 Ok(res)
312 }
313
314 pub async fn call_write<'a, F, R>(&'a self, function: F) -> Result<R>
319 where
320 F: 'a + FnOnce(&mut Connection) -> Result<R> + Send,
321 R: Send + 'static,
322 {
323 let query_only = false;
324 self.call(query_only, function).await
325 }
326
327 pub async fn execute(
329 &self,
330 query: &str,
331 params: impl rusqlite::Params + Send,
332 ) -> Result<usize> {
333 self.call_write(move |conn| {
334 let res = conn.execute(query, params)?;
335 Ok(res)
336 })
337 .await
338 }
339
340 pub async fn insert(&self, query: &str, params: impl rusqlite::Params + Send) -> Result<i64> {
342 self.call_write(move |conn| {
343 conn.execute(query, params)?;
344 Ok(conn.last_insert_rowid())
345 })
346 .await
347 }
348
349 pub async fn query_map<T, F, G, H>(
353 &self,
354 sql: &str,
355 params: impl rusqlite::Params + Send,
356 f: F,
357 g: G,
358 ) -> Result<H>
359 where
360 F: Send + FnMut(&rusqlite::Row) -> Result<T>,
361 G: Send + FnOnce(rusqlite::AndThenRows<F>) -> Result<H>,
362 H: Send + 'static,
363 {
364 let query_only = true;
365 self.call(query_only, move |conn| {
366 let mut stmt = conn.prepare(sql)?;
367 let res = stmt.query_and_then(params, f)?;
368 g(res)
369 })
370 .await
371 }
372
373 pub async fn query_map_collect<T, C, F>(
377 &self,
378 sql: &str,
379 params: impl rusqlite::Params + Send,
380 f: F,
381 ) -> Result<C>
382 where
383 T: Send + 'static,
384 C: Send + 'static + std::iter::FromIterator<T>,
385 F: Send + FnMut(&rusqlite::Row) -> Result<T>,
386 {
387 self.query_map(sql, params, f, |rows| {
388 rows.collect::<std::result::Result<C, _>>()
389 })
390 .await
391 }
392
393 pub async fn query_map_vec<T, F>(
397 &self,
398 sql: &str,
399 params: impl rusqlite::Params + Send,
400 f: F,
401 ) -> Result<Vec<T>>
402 where
403 T: Send + 'static,
404 F: Send + FnMut(&rusqlite::Row) -> Result<T>,
405 {
406 self.query_map_collect(sql, params, f).await
407 }
408
409 pub async fn count(&self, query: &str, params: impl rusqlite::Params + Send) -> Result<usize> {
411 let count: isize = self.query_row(query, params, |row| row.get(0)).await?;
412 Ok(usize::try_from(count)?)
413 }
414
415 pub async fn exists(&self, sql: &str, params: impl rusqlite::Params + Send) -> Result<bool> {
418 let count = self.count(sql, params).await?;
419 Ok(count > 0)
420 }
421
422 pub async fn query_row<T, F>(
424 &self,
425 query: &str,
426 params: impl rusqlite::Params + Send,
427 f: F,
428 ) -> Result<T>
429 where
430 F: FnOnce(&rusqlite::Row) -> rusqlite::Result<T> + Send,
431 T: Send + 'static,
432 {
433 let query_only = true;
434 self.call(query_only, move |conn| {
435 let res = conn.query_row(query, params, f)?;
436 Ok(res)
437 })
438 .await
439 }
440
441 pub async fn transaction<G, H>(&self, callback: G) -> Result<H>
446 where
447 H: Send + 'static,
448 G: Send + FnOnce(&mut rusqlite::Transaction<'_>) -> Result<H>,
449 {
450 let query_only = false;
451 self.transaction_ex(query_only, callback).await
452 }
453
454 pub async fn transaction_ex<G, H>(&self, query_only: bool, callback: G) -> Result<H>
469 where
470 H: Send + 'static,
471 G: Send + FnOnce(&mut rusqlite::Transaction<'_>) -> Result<H>,
472 {
473 self.call(query_only, move |conn| {
474 let mut transaction = conn.transaction()?;
475 let ret = callback(&mut transaction);
476
477 match ret {
478 Ok(ret) => {
479 transaction.commit()?;
480 Ok(ret)
481 }
482 Err(err) => {
483 transaction.rollback()?;
484 Err(err)
485 }
486 }
487 })
488 .await
489 }
490
491 pub async fn table_exists(&self, name: &str) -> Result<bool> {
493 let query_only = true;
494 self.call(query_only, move |conn| {
495 let mut exists = false;
496 conn.pragma(None, "table_info", name.to_string(), |_row| {
497 exists = true;
499 Ok(())
500 })?;
501
502 Ok(exists)
503 })
504 .await
505 }
506
507 pub async fn col_exists(&self, table_name: &str, col_name: &str) -> Result<bool> {
509 let query_only = true;
510 self.call(query_only, move |conn| {
511 let mut exists = false;
512 conn.pragma(None, "table_info", table_name.to_string(), |row| {
515 let curr_name: String = row.get(1)?;
516 if col_name == curr_name {
517 exists = true;
518 }
519 Ok(())
520 })?;
521
522 Ok(exists)
523 })
524 .await
525 }
526
527 pub async fn query_row_optional<T, F>(
529 &self,
530 sql: &str,
531 params: impl rusqlite::Params + Send,
532 f: F,
533 ) -> Result<Option<T>>
534 where
535 F: Send + FnOnce(&rusqlite::Row) -> rusqlite::Result<T>,
536 T: Send + 'static,
537 {
538 let query_only = true;
539 self.call(query_only, move |conn| {
540 match conn.query_row(sql.as_ref(), params, f) {
541 Ok(res) => Ok(Some(res)),
542 Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
543 Err(err) => Err(err.into()),
544 }
545 })
546 .await
547 }
548
549 pub async fn query_get_value<T>(
552 &self,
553 query: &str,
554 params: impl rusqlite::Params + Send,
555 ) -> Result<Option<T>>
556 where
557 T: rusqlite::types::FromSql + Send + 'static,
558 {
559 self.query_row_optional(query, params, |row| row.get::<_, T>(0))
560 .await
561 }
562
563 pub async fn set_raw_config(&self, key: &str, value: Option<&str>) -> Result<()> {
568 let mut lock = self.config_cache.write().await;
569 if let Some(value) = value {
570 self.execute(
571 "INSERT OR REPLACE INTO config (keyname, value) VALUES (?, ?)",
572 (key, value),
573 )
574 .await?;
575 } else {
576 self.execute("DELETE FROM config WHERE keyname=?", (key,))
577 .await?;
578 }
579 lock.insert(key.to_string(), value.map(|s| s.to_string()));
580 drop(lock);
581
582 Ok(())
583 }
584
    /// Returns a configuration value from the `config` table,
    /// consulting the in-memory cache first.
    pub async fn get_raw_config(&self, key: &str) -> Result<Option<String>> {
        // Fast path: cache hit under the read lock.
        let lock = self.config_cache.read().await;
        let cached = lock.get(key).cloned();
        drop(lock);

        if let Some(c) = cached {
            return Ok(c);
        }

        // Slow path: fetch from the database and populate the cache.
        // The write lock is held across the query so a concurrent
        // `set_raw_config` cannot interleave and leave a stale value cached.
        let mut lock = self.config_cache.write().await;
        let value = self
            .query_get_value("SELECT value FROM config WHERE keyname=?", (key,))
            .await
            .context(format!("failed to fetch raw config: {key}"))?;
        lock.insert(key.to_string(), value.clone());
        drop(lock);

        Ok(value)
    }
605
606 pub(crate) async fn uncache_raw_config(&self, key: &str) {
608 let mut lock = self.config_cache.write().await;
609 lock.remove(key);
610 }
611
612 pub async fn set_raw_config_int(&self, key: &str, value: i32) -> Result<()> {
614 self.set_raw_config(key, Some(&format!("{value}"))).await
615 }
616
617 pub async fn get_raw_config_int(&self, key: &str) -> Result<Option<i32>> {
619 self.get_raw_config(key)
620 .await
621 .map(|s| s.and_then(|s| s.parse().ok()))
622 }
623
624 pub async fn get_raw_config_u32(&self, key: &str) -> Result<Option<u32>> {
626 self.get_raw_config(key)
627 .await
628 .map(|s| s.and_then(|s| s.parse().ok()))
629 }
630
631 pub async fn get_raw_config_bool(&self, key: &str) -> Result<bool> {
633 let res = self.get_raw_config_int(key).await?;
636 Ok(res.unwrap_or_default() > 0)
637 }
638
639 pub async fn set_raw_config_bool(&self, key: &str, value: bool) -> Result<()> {
641 let value = if value { Some("1") } else { None };
642 self.set_raw_config(key, value).await
643 }
644
645 pub async fn set_raw_config_int64(&self, key: &str, value: i64) -> Result<()> {
647 self.set_raw_config(key, Some(&format!("{value}"))).await
648 }
649
650 pub async fn get_raw_config_int64(&self, key: &str) -> Result<Option<i64>> {
652 self.get_raw_config(key)
653 .await
654 .map(|s| s.and_then(|r| r.parse().ok()))
655 }
656
    /// Exposes the in-memory config cache.
    ///
    /// Only available with the `internals` feature enabled.
    #[cfg(feature = "internals")]
    pub fn config_cache(&self) -> &RwLock<HashMap<String, Option<String>>> {
        &self.config_cache
    }
662
    /// Checkpoints the write-ahead log and logs checkpoint statistics.
    ///
    /// No-op if the database is not open.
    pub(crate) async fn wal_checkpoint(&self, context: &Context) -> Result<()> {
        let lock = self.pool.read().await;
        let Some(pool) = lock.as_ref() else {
            // Database is closed; nothing to checkpoint.
            return Ok(());
        };

        let WalCheckpointStats {
            total_duration,
            writers_blocked_duration,
            readers_blocked_duration,
            pages_total,
            pages_checkpointed,
        } = pool.wal_checkpoint().await?;
        // A partial checkpoint usually means another connection is holding
        // a long-running transaction that pins part of the WAL.
        if pages_checkpointed < pages_total {
            warn!(
                context,
                "Cannot checkpoint whole WAL. Pages total: {pages_total}, checkpointed: {pages_checkpointed}. Make sure there are no external connections running transactions.",
            );
        }
        info!(
            context,
            "wal_checkpoint: Total time: {total_duration:?}. Writers blocked for: {writers_blocked_duration:?}. Readers blocked for: {readers_blocked_duration:?}."
        );
        Ok(())
    }
690}
691
/// Opens a new SQLite connection to `path` and applies the passphrase
/// (if non-empty) plus the standard set of PRAGMAs.
///
/// NOTE(review): the PRAGMA batch runs before the `key` PRAGMA —
/// presumably these settings do not require page access; confirm
/// against SQLCipher keying requirements before reordering.
fn new_connection(path: &Path, passphrase: &str) -> Result<Connection> {
    let flags = OpenFlags::SQLITE_OPEN_NO_MUTEX
        | OpenFlags::SQLITE_OPEN_READ_WRITE
        | OpenFlags::SQLITE_OPEN_CREATE;
    let conn = Connection::open_with_flags(path, flags)?;
    conn.execute_batch(
        "PRAGMA cipher_memory_security = OFF; -- Too slow on Android
         PRAGMA secure_delete=on;
         PRAGMA soft_heap_limit = 8388608; -- 8 MiB limit, same as set in Android SQLiteDatabase.
         PRAGMA foreign_keys=on;
         ",
    )?;

    // Keep temp storage in memory except on iOS.
    if cfg!(not(target_os = "ios")) {
        conn.pragma_update(None, "temp_store", "memory")?;
    }

    // iOS gets a long busy timeout; elsewhere busy handling is immediate.
    if cfg!(target_os = "ios") {
        conn.busy_timeout(Duration::new(60, 0))?;
    } else {
        conn.busy_timeout(Duration::ZERO)?;
    }

    if !passphrase.is_empty() {
        conn.pragma_update(None, "key", passphrase)?;
    }
    // Incremental auto-vacuum so free pages can be reclaimed by
    // `PRAGMA incremental_vacuum` during housekeeping.
    conn.pragma_update(None, "auto_vacuum", "INCREMENTAL".to_string())?;

    conn.pragma_update(None, "journal_mode", "WAL".to_string())?;
    conn.pragma_update(None, "synchronous", "NORMAL".to_string())?;

    Ok(conn)
}
751
752#[expect(clippy::arithmetic_side_effects)]
756async fn incremental_vacuum(context: &Context) -> Result<()> {
757 context
758 .sql
759 .call_write(move |conn| {
760 let mut stmt = conn
761 .prepare("PRAGMA incremental_vacuum")
762 .context("Failed to prepare incremental_vacuum statement")?;
763
764 let mut rows = stmt
768 .query(())
769 .context("Failed to run incremental_vacuum statement")?;
770 let mut row_count = 0;
771 while let Some(_row) = rows
772 .next()
773 .context("Failed to step incremental_vacuum statement")?
774 {
775 row_count += 1;
776 }
777 info!(context, "Incremental vacuum freed {row_count} pages.");
778 Ok(())
779 })
780 .await
781}
782
/// Runs periodic database and blobdir cleanup.
///
/// All steps are best-effort: failures are logged and do not abort the
/// remaining tasks. If another housekeeping run is already in progress,
/// this returns immediately without doing anything.
pub async fn housekeeping(context: &Context) -> Result<()> {
    // If the mutex is already taken, another run is in progress; skip.
    let Ok(_housekeeping_lock) = context.housekeeping_mutex.try_lock() else {
        return Ok(());
    };
    // Record the start timestamp first so a failing step below does not
    // cause immediate re-triggering.
    if let Err(e) = context
        .set_config_internal(Config::LastHousekeeping, Some(&time().to_string()))
        .await
    {
        warn!(context, "Can't set config: {e:#}.");
    }

    http_cache_cleanup(context)
        .await
        .context("Failed to cleanup HTTP cache")
        .log_err(context)
        .ok();
    migrations::msgs_to_key_contacts(context)
        .await
        .context("migrations::msgs_to_key_contacts")
        .log_err(context)
        .ok();

    if let Err(err) = remove_unused_files(context).await {
        warn!(
            context,
            "Housekeeping: cannot remove unused files: {:#}.", err
        );
    }

    if let Err(err) = start_ephemeral_timers(context).await {
        warn!(
            context,
            "Housekeeping: cannot start ephemeral timers: {:#}.", err
        );
    }

    if let Err(err) = prune_tombstones(&context.sql).await {
        warn!(
            context,
            "Housekeeping: Cannot prune message tombstones: {:#}.", err
        );
    }

    if let Err(err) = incremental_vacuum(context).await {
        warn!(context, "Failed to run incremental vacuum: {err:#}.");
    }
    if let Err(err) = Sql::wal_checkpoint(&context.sql, context).await {
        warn!(context, "wal_checkpoint() failed: {err:#}.");
        // Checkpoint failures are unexpected; surface them in debug builds.
        debug_assert!(false);
    }

    // Drop MDNs whose message no longer exists outside the trash chat.
    context
        .sql
        .execute(
            "DELETE FROM msgs_mdns WHERE msg_id NOT IN \
             (SELECT id FROM msgs WHERE chat_id!=?)",
            (DC_CHAT_ID_TRASH,),
        )
        .await
        .context("failed to remove old MDNs")
        .log_err(context)
        .ok();

    // Drop webxdc status updates for deleted or trashed messages.
    context
        .sql
        .execute(
            "DELETE FROM msgs_status_updates WHERE msg_id NOT IN \
             (SELECT id FROM msgs WHERE chat_id!=?)",
            (DC_CHAT_ID_TRASH,),
        )
        .await
        .context("failed to remove old webxdc status updates")
        .log_err(context)
        .ok();

    prune_connection_history(context)
        .await
        .context("Failed to prune connection history")
        .log_err(context)
        .ok();
    prune_dns_cache(context)
        .await
        .context("Failed to prune DNS cache")
        .log_err(context)
        .ok();

    context
        .spki_hash_store
        .cleanup(&context.sql)
        .await
        .context("Failed to prune SPKI store")
        .log_err(context)
        .ok();

    // Remove IMAP state belonging to transports that were deleted.
    context
        .sql
        .execute(
            "DELETE FROM imap WHERE transport_id NOT IN (SELECT transports.id FROM transports)",
            (),
        )
        .await
        .log_err(context)
        .ok();
    context.sql.execute(
        "DELETE FROM imap_sync WHERE transport_id NOT IN (SELECT transports.id FROM transports)",
        (),
    ).await.log_err(context).ok();

    location::delete_orphaned_poi(context)
        .await
        .context("Failed to delete orphaned POI locations")
        .log_err(context)
        .ok();

    info!(context, "Housekeeping done.");
    Ok(())
}
914
/// Reads a column value as `Vec<u8>`, tolerating NULL and TEXT storage.
///
/// NULL becomes an empty vector and TEXT its UTF-8 bytes;
/// numeric columns return the original conversion error.
pub fn row_get_vec(row: &Row, idx: usize) -> rusqlite::Result<Vec<u8>> {
    row.get(idx).or_else(|err| match row.get_ref(idx)? {
        ValueRef::Null => Ok(Vec::new()),
        ValueRef::Text(text) => Ok(text.to_vec()),
        ValueRef::Blob(blob) => Ok(blob.to_vec()),
        ValueRef::Integer(_) | ValueRef::Real(_) => Err(err),
    })
}
924
/// Deletes blobdir files that are no longer referenced by any message,
/// chat, contact, config value, or HTTP-cache entry.
///
/// Recently touched files are kept to avoid racing with blobs that were
/// created but not yet referenced in the database.
#[expect(clippy::arithmetic_side_effects)]
pub async fn remove_unused_files(context: &Context) -> Result<()> {
    let mut files_in_use = HashSet::new();
    let mut unreferenced_count = 0;

    info!(context, "Start housekeeping...");
    // Collect every referenced blob name.
    // NOTE(review): chat_id!=3 presumably skips the trash chat and
    // type!=10 text-only messages — confirm against the constants.
    maybe_add_from_param(
        &context.sql,
        &mut files_in_use,
        "SELECT param FROM msgs WHERE chat_id!=3 AND type!=10;",
        Param::File,
    )
    .await?;
    maybe_add_from_param(
        &context.sql,
        &mut files_in_use,
        "SELECT param FROM chats;",
        Param::ProfileImage,
    )
    .await?;
    maybe_add_from_param(
        &context.sql,
        &mut files_in_use,
        "SELECT param FROM contacts;",
        Param::ProfileImage,
    )
    .await?;

    // Config values may reference blobs directly (e.g. the self-avatar).
    context
        .sql
        .query_map(
            "SELECT value FROM config;",
            (),
            |row| {
                let row: String = row.get(0)?;
                Ok(row)
            },
            |rows| {
                for row in rows {
                    maybe_add_file(&mut files_in_use, &row?);
                }
                Ok(())
            },
        )
        .await
        .context("housekeeping: failed to SELECT value FROM config")?;

    context
        .sql
        .query_map(
            "SELECT blobname FROM http_cache",
            (),
            |row| {
                let row: String = row.get(0)?;
                Ok(row)
            },
            |rows| {
                for row in rows {
                    maybe_add_file(&mut files_in_use, &row?);
                }
                Ok(())
            },
        )
        .await
        .context("Failed to SELECT blobname FROM http_cache")?;

    info!(context, "{} files in use.", files_in_use.len());
    // Scan the leftover backup directory first, then the blobdir itself.
    let blobdir = context.get_blobdir();
    for p in [&blobdir.join(BLOBS_BACKUP_NAME), blobdir] {
        match tokio::fs::read_dir(p).await {
            Ok(mut dir_handle) => {
                // Files touched within the last hour are kept: they may
                // belong to an operation that has not stored its reference yet.
                let diff = std::time::Duration::from_secs(60 * 60);
                let keep_files_newer_than = SystemTime::now()
                    .checked_sub(diff)
                    .unwrap_or(SystemTime::UNIX_EPOCH);

                while let Ok(Some(entry)) = dir_handle.next_entry().await {
                    let name_f = entry.file_name();
                    let name_s = name_f.to_string_lossy();

                    // Referenced files, and their derived ".waveform" /
                    // "-preview.jpg" companions, are only spared inside the
                    // blobdir — never in the backup directory.
                    if p == blobdir
                        && (is_file_in_use(&files_in_use, None, &name_s)
                            || is_file_in_use(&files_in_use, Some(".waveform"), &name_s)
                            || is_file_in_use(&files_in_use, Some("-preview.jpg"), &name_s))
                    {
                        continue;
                    }

                    let stats = match tokio::fs::metadata(entry.path()).await {
                        Err(err) => {
                            warn!(
                                context,
                                "Cannot get metadata for {}: {:#}.",
                                entry.path().display(),
                                err
                            );
                            continue;
                        }
                        Ok(stats) => stats,
                    };

                    if stats.is_dir() {
                        // Only empty directories are removed; `remove_dir`
                        // fails (and is merely logged) otherwise.
                        if let Err(e) = tokio::fs::remove_dir(entry.path()).await {
                            info!(
                                context,
                                "Housekeeping: Cannot rmdir {}: {:#}.",
                                entry.path().display(),
                                e
                            );
                        }
                        continue;
                    }

                    unreferenced_count += 1;
                    // Check all three timestamps; not every platform
                    // reliably provides each one.
                    let recently_created = stats.created().is_ok_and(|t| t > keep_files_newer_than);
                    let recently_modified =
                        stats.modified().is_ok_and(|t| t > keep_files_newer_than);
                    let recently_accessed =
                        stats.accessed().is_ok_and(|t| t > keep_files_newer_than);

                    if p == blobdir && (recently_created || recently_modified || recently_accessed)
                    {
                        info!(
                            context,
                            "Housekeeping: Keeping new unreferenced file #{}: {:?}.",
                            unreferenced_count,
                            entry.file_name(),
                        );
                        continue;
                    }

                    info!(
                        context,
                        "Housekeeping: Deleting unreferenced file #{}: {:?}.",
                        unreferenced_count,
                        entry.file_name()
                    );
                    let path = entry.path();
                    if let Err(err) = delete_file(context, &path).await {
                        error!(
                            context,
                            "Failed to delete unused file {}: {:#}.",
                            path.display(),
                            err
                        );
                    }
                }
            }
            Err(err) => {
                // A missing backup directory is expected; only warn for the blobdir.
                if !p.ends_with(BLOBS_BACKUP_NAME) {
                    warn!(
                        context,
                        "Housekeeping: Cannot read dir {}: {:#}.",
                        p.display(),
                        err
                    );
                }
            }
        }
    }

    Ok(())
}
1093
/// Returns true if `name` refers to a file in `files_in_use`.
///
/// With a namespace suffix (e.g. `".waveform"`), only names ending in that
/// suffix qualify; the suffix is stripped before the set lookup.
fn is_file_in_use(files_in_use: &HashSet<String>, namespc_opt: Option<&str>, name: &str) -> bool {
    match namespc_opt {
        Some(suffix) => name
            .strip_suffix(suffix)
            .is_some_and(|base| files_in_use.contains(base)),
        None => files_in_use.contains(name),
    }
}
1105
/// Records `file` in `files_in_use` if it is a blobdir reference.
///
/// Only values starting with `$BLOBDIR/` are tracked; the prefix is
/// stripped so the set holds bare blob file names.
fn maybe_add_file(files_in_use: &mut HashSet<String>, file: &str) {
    let Some(blob_name) = file.strip_prefix("$BLOBDIR/") else {
        return;
    };
    files_in_use.insert(blob_name.to_string());
}
1111
1112async fn maybe_add_from_param(
1113 sql: &Sql,
1114 files_in_use: &mut HashSet<String>,
1115 query: &str,
1116 param_id: Param,
1117) -> Result<()> {
1118 sql.query_map(
1119 query,
1120 (),
1121 |row| {
1122 let row: String = row.get(0)?;
1123 Ok(row)
1124 },
1125 |rows| {
1126 for row in rows {
1127 let param: Params = row?.parse().unwrap_or_default();
1128 if let Some(file) = param.get(param_id) {
1129 maybe_add_file(files_in_use, file);
1130 }
1131 }
1132 Ok(())
1133 },
1134 )
1135 .await
1136 .context(format!("housekeeping: failed to add_from_param {query}"))?;
1137
1138 Ok(())
1139}
1140
1141async fn prune_tombstones(sql: &Sql) -> Result<()> {
1144 let timestamp_max = time().saturating_sub(2 * 24 * 3600);
1146 sql.execute(
1147 "DELETE FROM msgs
1148 WHERE chat_id=?
1149 AND timestamp<=?
1150 AND NOT EXISTS (
1151 SELECT * FROM imap WHERE msgs.rfc724_mid=rfc724_mid AND target!=''
1152 )",
1153 (DC_CHAT_ID_TRASH, timestamp_max),
1154 )
1155 .await?;
1156 Ok(())
1157}
1158
1159#[cfg(test)]
1160mod sql_tests;