//! SQLite wrapper: connection pooling, raw config access and database housekeeping.

use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};

use anyhow::{bail, Context as _, Result};
use rusqlite::{config::DbConfig, types::ValueRef, Connection, OpenFlags, Row};
use tokio::sync::RwLock;

use crate::blob::BlobObject;
use crate::chat::add_device_msg;
use crate::config::Config;
use crate::constants::DC_CHAT_ID_TRASH;
use crate::context::Context;
use crate::debug_logging::set_debug_logging_xdc;
use crate::ephemeral::start_ephemeral_timers;
use crate::imex::BLOBS_BACKUP_NAME;
use crate::location::delete_orphaned_poi_locations;
use crate::log::{error, info, warn, LogExt};
use crate::message::{Message, MsgId};
use crate::net::dns::prune_dns_cache;
use crate::net::http::http_cache_cleanup;
use crate::net::prune_connection_history;
use crate::param::{Param, Params};
use crate::stock_str;
use crate::tools::{delete_file, time, SystemTime};

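/// Alias for [`rusqlite::ToSql`] with additional `Send` and `Sync` bounds,
/// so parameter values can be shared with the async connection pool.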
pub trait ToSql: rusqlite::ToSql + Send + Sync {}

impl<T: rusqlite::ToSql + Send + Sync> ToSql for T {}

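/// Builds an array of `&dyn ToSql` trait-object references from the given
/// expressions, so parameters of different types can be passed together.
///
/// Illustrative expansion (not a doctest):
///
/// ```ignore
/// let id = 42;
/// let name = "alice";
/// let params = crate::params_slice![id, name];
/// // expands to: [&id as &dyn crate::sql::ToSql, &name as &dyn crate::sql::ToSql]
/// ```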
#[macro_export]
macro_rules! params_slice {
    ($($param:expr),+) => {
        [$(&$param as &dyn $crate::sql::ToSql),+]
    };
}

mod migrations;
mod pool;

use pool::Pool;

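/// A wrapper around the SQLite database connection pool together with an
/// in-memory cache of the `config` table.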
#[derive(Debug)]
pub struct Sql {
    /// Path to the database file.
    pub(crate) dbfile: PathBuf,

    /// SQL connection pool; `None` while the database is closed.
    pool: RwLock<Option<Pool>>,

    /// `None` if the database is not open, otherwise whether it was opened
    /// with a non-empty passphrase.
    is_encrypted: RwLock<Option<bool>>,

    /// In-memory cache of the `config` table.
    pub(crate) config_cache: RwLock<HashMap<String, Option<String>>>,
}

impl Sql {
    /// Creates a new `Sql` instance pointing at `dbfile`; the database is not opened yet.
    pub fn new(dbfile: PathBuf) -> Sql {
        Self {
            dbfile,
            pool: Default::default(),
            is_encrypted: Default::default(),
            config_cache: Default::default(),
        }
    }

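    /// Checks whether the given passphrase can unlock the database file,
    /// without leaving it open. Returns an error if the database is already open.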
    pub async fn check_passphrase(&self, passphrase: String) -> Result<bool> {
        if self.is_open().await {
            bail!("Database is already opened.");
        }

        // Hold the pool write lock so the database cannot be opened concurrently
        // while the passphrase is being checked.
        let _lock = self.pool.write().await;

        let connection = Connection::open(&self.dbfile)?;
        if !passphrase.is_empty() {
            connection
                .pragma_update(None, "key", &passphrase)
                .context("Failed to set PRAGMA key")?;
        }
        // Reading the schema succeeds only if the key is correct.
        let key_is_correct = connection
            .query_row("SELECT count(*) FROM sqlite_master", [], |_row| Ok(()))
            .is_ok();

        Ok(key_is_correct)
    }

    /// Returns `true` if the database connection pool is open.
    pub async fn is_open(&self) -> bool {
        self.pool.read().await.is_some()
    }

    /// Returns `None` if the database is not open, otherwise whether it was
    /// opened with a non-empty passphrase.
    pub(crate) async fn is_encrypted(&self) -> Option<bool> {
        *self.is_encrypted.read().await
    }

    /// Closes the database by dropping the connection pool.
    pub(crate) async fn close(&self) {
        let _ = self.pool.write().await.take();
    }

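    /// Imports the database from the (possibly encrypted) file at `path`:
    /// the current database is reset and the contents of the attached backup
    /// are copied over via `sqlcipher_export`.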
    pub(crate) async fn import(&self, path: &Path, passphrase: String) -> Result<()> {
        let path_str = path
            .to_str()
            .with_context(|| format!("path {path:?} is not valid unicode"))?
            .to_string();

        // The cached config values may be outdated after the import, so drop them.
        let mut config_cache = self.config_cache.write().await;
        config_cache.clear();

        let query_only = false;
        self.call(query_only, move |conn| {
            // Attach the backup and check that its passphrase is correct
            // before resetting the current database.
            conn.execute("ATTACH DATABASE ? AS backup KEY ?", (path_str, passphrase))
                .context("failed to attach backup database")?;
            let res = conn
                .query_row("SELECT count(*) FROM sqlite_master", [], |_row| Ok(()))
                .context("backup passphrase is not correct");

            // Reset the main database and copy the backup contents into it.
            // The steps wrapped in `.and(...)` (unsetting the reset flag, detaching the
            // backup) are executed even if an earlier step failed; the earlier error
            // is preserved in that case.
            res.and_then(|_| {
                conn.set_db_config(DbConfig::SQLITE_DBCONFIG_RESET_DATABASE, true)
                    .context("failed to set SQLITE_DBCONFIG_RESET_DATABASE")
            })
            .and_then(|_| {
                conn.execute("VACUUM", [])
                    .context("failed to vacuum the database")
            })
            .and(
                conn.set_db_config(DbConfig::SQLITE_DBCONFIG_RESET_DATABASE, false)
                    .context("failed to unset SQLITE_DBCONFIG_RESET_DATABASE"),
            )
            .and_then(|_| {
                conn.query_row("SELECT sqlcipher_export('main', 'backup')", [], |_row| {
                    Ok(())
                })
                .context("failed to import from attached backup database")
            })
            .and(
                conn.execute("DETACH DATABASE backup", [])
                    .context("failed to detach backup database"),
            )?;
            Ok(())
        })
        .await
    }

    fn new_pool(dbfile: &Path, passphrase: String) -> Result<Pool> {
        let mut connections = Vec::new();
        for _ in 0..3 {
            let connection = new_connection(dbfile, &passphrase)?;
            connections.push(connection);
        }

        let pool = Pool::new(connections);
        Ok(pool)
    }

    async fn try_open(&self, context: &Context, dbfile: &Path, passphrase: String) -> Result<()> {
        *self.pool.write().await = Some(Self::new_pool(dbfile, passphrase.to_string())?);

        if let Err(e) = self.run_migrations(context).await {
            error!(context, "Running migrations failed: {e:#}");
            eprintln!("Running migrations failed: {e:#}");
            context.set_migration_error(&format!("Updating Delta Chat failed. Please send this message to the Delta Chat developers, either at delta@merlinux.eu or at https://support.delta.chat.\n\n{e:#}"));
        }

        Ok(())
    }

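    /// Runs pending database migrations and applies their follow-up actions,
    /// such as disabling automatic server-side deletion or recoding the self-avatar.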
    pub async fn run_migrations(&self, context: &Context) -> Result<()> {
        let (_update_icons, disable_server_delete, recode_avatar) = migrations::run(context, self)
            .await
            .context("failed to run migrations")?;

        if disable_server_delete {
            if context.get_config_delete_server_after().await?.is_some() {
                let mut msg = Message::new_text(stock_str::delete_server_turned_off(context).await);
                add_device_msg(context, None, Some(&mut msg)).await?;
                context
                    .set_config_internal(Config::DeleteServerAfter, Some("0"))
                    .await?;
            }
        }

        if recode_avatar {
            if let Some(avatar) = context.get_config(Config::Selfavatar).await? {
                let mut blob = BlobObject::from_path(context, Path::new(&avatar))?;
                match blob.recode_to_avatar_size(context).await {
                    Ok(()) => {
                        if let Some(path) = blob.to_abs_path().to_str() {
                            context
                                .set_config_internal(Config::Selfavatar, Some(path))
                                .await?;
                        } else {
                            warn!(context, "Setting selfavatar failed: non-UTF-8 filename");
                        }
                    }
                    Err(e) => {
                        warn!(context, "Migrations can't recode avatar, removing. {:#}", e);
                        context
                            .set_config_internal(Config::Selfavatar, None)
                            .await?
                    }
                }
            }
        }

        Ok(())
    }

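    /// Opens the database file with the given passphrase (empty string for an
    /// unencrypted database) and runs pending migrations.
    /// Returns an error if the database is already open.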
    pub async fn open(&self, context: &Context, passphrase: String) -> Result<()> {
        if self.is_open().await {
            error!(
                context,
                "Cannot open, database \"{:?}\" already opened.", self.dbfile,
            );
            bail!("SQL database is already opened.");
        }

        let passphrase_nonempty = !passphrase.is_empty();
        self.try_open(context, &self.dbfile, passphrase).await?;
        info!(context, "Opened database {:?}.", self.dbfile);
        *self.is_encrypted.write().await = Some(passphrase_nonempty);

        if let Some(xdc_id) = self
            .get_raw_config_u32(Config::DebugLogging.as_ref())
            .await?
        {
            set_debug_logging_xdc(context, Some(MsgId::new(xdc_id))).await?;
        }
        Ok(())
    }

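    /// Changes the passphrase of the open database (`PRAGMA rekey`) and
    /// re-creates the connection pool with the new passphrase.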
    pub async fn change_passphrase(&self, passphrase: String) -> Result<()> {
        let mut lock = self.pool.write().await;

        let pool = lock.take().context("SQL connection pool is not open")?;
        let query_only = false;
        let conn = pool.get(query_only).await?;
        if !passphrase.is_empty() {
            conn.pragma_update(None, "rekey", passphrase.clone())
                .context("Failed to set PRAGMA rekey")?;
        }
        drop(pool);

        *lock = Some(Self::new_pool(&self.dbfile, passphrase.to_string())?);

        Ok(())
    }

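    /// Gets a connection from the pool (forwarding the `query_only` flag) and runs
    /// `function` on it inside [`tokio::task::block_in_place`], so the blocking
    /// SQLite call does not stall the async runtime.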
    async fn call<'a, F, R>(&'a self, query_only: bool, function: F) -> Result<R>
    where
        F: 'a + FnOnce(&mut Connection) -> Result<R> + Send,
        R: Send + 'static,
    {
        let lock = self.pool.read().await;
        let pool = lock.as_ref().context("no SQL connection")?;
        let mut conn = pool.get(query_only).await?;
        let res = tokio::task::block_in_place(move || function(&mut conn))?;
        Ok(res)
    }

    pub async fn call_write<'a, F, R>(&'a self, function: F) -> Result<R>
    where
        F: 'a + FnOnce(&mut Connection) -> Result<R> + Send,
        R: Send + 'static,
    {
        let query_only = false;
        self.call(query_only, function).await
    }

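    /// Executes the given statement on a write connection and returns
    /// the number of affected rows.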
    pub async fn execute(
        &self,
        query: &str,
        params: impl rusqlite::Params + Send,
    ) -> Result<usize> {
        self.call_write(move |conn| {
            let res = conn.execute(query, params)?;
            Ok(res)
        })
        .await
    }

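    /// Executes the given insert statement on a write connection and returns
    /// the rowid of the last inserted row.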
    pub async fn insert(&self, query: &str, params: impl rusqlite::Params + Send) -> Result<i64> {
        self.call_write(move |conn| {
            conn.execute(query, params)?;
            Ok(conn.last_insert_rowid())
        })
        .await
    }

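    /// Prepares `sql`, maps each resulting row with `f`, and folds the mapped rows
    /// with `g` into the final result.
    ///
    /// Illustrative usage (not a doctest; `chat_id` is a placeholder):
    ///
    /// ```ignore
    /// let ids: Vec<u32> = sql
    ///     .query_map(
    ///         "SELECT id FROM msgs WHERE chat_id=?",
    ///         (chat_id,),
    ///         |row| row.get(0),
    ///         |rows| rows.collect::<Result<Vec<_>, _>>().map_err(Into::into),
    ///     )
    ///     .await?;
    /// ```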
    pub async fn query_map<T, F, G, H>(
        &self,
        sql: &str,
        params: impl rusqlite::Params + Send,
        f: F,
        mut g: G,
    ) -> Result<H>
    where
        F: Send + FnMut(&rusqlite::Row) -> rusqlite::Result<T>,
        G: Send + FnMut(rusqlite::MappedRows<F>) -> Result<H>,
        H: Send + 'static,
    {
        let query_only = true;
        self.call(query_only, move |conn| {
            let mut stmt = conn.prepare(sql)?;
            let res = stmt.query_map(params, f)?;
            g(res)
        })
        .await
    }

    /// Runs a counting query (e.g. `SELECT COUNT(*) ...`) and returns the
    /// resulting number as `usize`.
    pub async fn count(&self, query: &str, params: impl rusqlite::Params + Send) -> Result<usize> {
        let count: isize = self.query_row(query, params, |row| row.get(0)).await?;
        Ok(usize::try_from(count)?)
    }

    /// Returns `true` if the counting query `sql` yields a count greater than zero.
    pub async fn exists(&self, sql: &str, params: impl rusqlite::Params + Send) -> Result<bool> {
        let count = self.count(sql, params).await?;
        Ok(count > 0)
    }

    /// Runs a query that is expected to return exactly one row and maps it with `f`.
    pub async fn query_row<T, F>(
        &self,
        query: &str,
        params: impl rusqlite::Params + Send,
        f: F,
    ) -> Result<T>
    where
        F: FnOnce(&rusqlite::Row) -> rusqlite::Result<T> + Send,
        T: Send + 'static,
    {
        let query_only = true;
        self.call(query_only, move |conn| {
            let res = conn.query_row(query, params, f)?;
            Ok(res)
        })
        .await
    }

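    /// Runs `callback` inside an SQL transaction on a write connection;
    /// the transaction is committed if the callback returns `Ok` and rolled back otherwise.
    ///
    /// Illustrative usage (not a doctest):
    ///
    /// ```ignore
    /// sql.transaction(|t| {
    ///     t.execute("DELETE FROM config WHERE keyname=?", ("some_key",))?;
    ///     Ok(())
    /// })
    /// .await?;
    /// ```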
    pub async fn transaction<G, H>(&self, callback: G) -> Result<H>
    where
        H: Send + 'static,
        G: Send + FnOnce(&mut rusqlite::Transaction<'_>) -> Result<H>,
    {
        let query_only = false;
        self.transaction_ex(query_only, callback).await
    }

    /// Like [`Self::transaction`], but takes an explicit `query_only` flag
    /// instead of always requesting a write connection.
    pub async fn transaction_ex<G, H>(&self, query_only: bool, callback: G) -> Result<H>
    where
        H: Send + 'static,
        G: Send + FnOnce(&mut rusqlite::Transaction<'_>) -> Result<H>,
    {
        self.call(query_only, move |conn| {
            let mut transaction = conn.transaction()?;
            let ret = callback(&mut transaction);

            match ret {
                Ok(ret) => {
                    transaction.commit()?;
                    Ok(ret)
                }
                Err(err) => {
                    transaction.rollback()?;
                    Err(err)
                }
            }
        })
        .await
    }

    /// Returns `true` if a table with the given `name` exists.
    pub async fn table_exists(&self, name: &str) -> Result<bool> {
        let query_only = true;
        self.call(query_only, move |conn| {
            let mut exists = false;
            conn.pragma(None, "table_info", name.to_string(), |_row| {
                // `PRAGMA table_info` returns one row per column,
                // so getting any row back means the table exists.
                exists = true;
                Ok(())
            })?;

            Ok(exists)
        })
        .await
    }

    /// Returns `true` if the column `col_name` exists in the table `table_name`.
    pub async fn col_exists(&self, table_name: &str, col_name: &str) -> Result<bool> {
        let query_only = true;
        self.call(query_only, move |conn| {
            let mut exists = false;
            conn.pragma(None, "table_info", table_name.to_string(), |row| {
                // Column 1 of the `PRAGMA table_info` result is the column name.
                let curr_name: String = row.get(1)?;
                if col_name == curr_name {
                    exists = true;
                }
                Ok(())
            })?;

            Ok(exists)
        })
        .await
    }

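    /// Like [`Self::query_row`], but returns `Ok(None)` instead of an error
    /// when the query yields no rows.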
    pub async fn query_row_optional<T, F>(
        &self,
        sql: &str,
        params: impl rusqlite::Params + Send,
        f: F,
    ) -> Result<Option<T>>
    where
        F: Send + FnOnce(&rusqlite::Row) -> rusqlite::Result<T>,
        T: Send + 'static,
    {
        let query_only = true;
        self.call(query_only, move |conn| {
            match conn.query_row(sql.as_ref(), params, f) {
                Ok(res) => Ok(Some(res)),
                Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
                Err(err) => Err(err.into()),
            }
        })
        .await
    }

    pub async fn query_get_value<T>(
        &self,
        query: &str,
        params: impl rusqlite::Params + Send,
    ) -> Result<Option<T>>
    where
        T: rusqlite::types::FromSql + Send + 'static,
    {
        self.query_row_optional(query, params, |row| row.get::<_, T>(0))
            .await
    }

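    /// Sets the raw `config` table entry for `key` (or deletes it when `value` is `None`)
    /// and updates the in-memory config cache.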
    pub async fn set_raw_config(&self, key: &str, value: Option<&str>) -> Result<()> {
        let mut lock = self.config_cache.write().await;
        if let Some(value) = value {
            self.execute(
                "INSERT OR REPLACE INTO config (keyname, value) VALUES (?, ?)",
                (key, value),
            )
            .await?;
        } else {
            self.execute("DELETE FROM config WHERE keyname=?", (key,))
                .await?;
        }
        lock.insert(key.to_string(), value.map(|s| s.to_string()));
        drop(lock);

        Ok(())
    }

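    /// Returns the raw `config` table value for `key`, using the in-memory cache
    /// when possible and caching the database result otherwise.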
    pub async fn get_raw_config(&self, key: &str) -> Result<Option<String>> {
        let lock = self.config_cache.read().await;
        let cached = lock.get(key).cloned();
        drop(lock);

        if let Some(c) = cached {
            return Ok(c);
        }

        let mut lock = self.config_cache.write().await;
        let value = self
            .query_get_value("SELECT value FROM config WHERE keyname=?", (key,))
            .await
            .context(format!("failed to fetch raw config: {key}"))?;
        lock.insert(key.to_string(), value.clone());
        drop(lock);

        Ok(value)
    }

    pub async fn set_raw_config_int(&self, key: &str, value: i32) -> Result<()> {
        self.set_raw_config(key, Some(&format!("{value}"))).await
    }

    pub async fn get_raw_config_int(&self, key: &str) -> Result<Option<i32>> {
        self.get_raw_config(key)
            .await
            .map(|s| s.and_then(|s| s.parse().ok()))
    }

    pub async fn get_raw_config_u32(&self, key: &str) -> Result<Option<u32>> {
        self.get_raw_config(key)
            .await
            .map(|s| s.and_then(|s| s.parse().ok()))
    }

    pub async fn get_raw_config_bool(&self, key: &str) -> Result<bool> {
        let res = self.get_raw_config_int(key).await?;
        Ok(res.unwrap_or_default() > 0)
    }

    pub async fn set_raw_config_bool(&self, key: &str, value: bool) -> Result<()> {
        let value = if value { Some("1") } else { None };
        self.set_raw_config(key, value).await
    }

    pub async fn set_raw_config_int64(&self, key: &str, value: i64) -> Result<()> {
        self.set_raw_config(key, Some(&format!("{value}"))).await
    }

    pub async fn get_raw_config_int64(&self, key: &str) -> Result<Option<i64>> {
        self.get_raw_config(key)
            .await
            .map(|s| s.and_then(|r| r.parse().ok()))
    }

    #[cfg(feature = "internals")]
    pub fn config_cache(&self) -> &RwLock<HashMap<String, Option<String>>> {
        &self.config_cache
    }
}

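/// Opens a new SQLite connection to the database at `path`, applies the
/// standard PRAGMA settings and, if `passphrase` is non-empty, unlocks the
/// SQLCipher-encrypted database with it.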
fn new_connection(path: &Path, passphrase: &str) -> Result<Connection> {
    let flags = OpenFlags::SQLITE_OPEN_NO_MUTEX
        | OpenFlags::SQLITE_OPEN_READ_WRITE
        | OpenFlags::SQLITE_OPEN_CREATE;
    let conn = Connection::open_with_flags(path, flags)?;
    conn.execute_batch(
        "PRAGMA cipher_memory_security = OFF; -- Too slow on Android
         PRAGMA secure_delete=on;
         PRAGMA busy_timeout = 0; -- fail immediately
         PRAGMA soft_heap_limit = 8388608; -- 8 MiB limit, same as set in Android SQLiteDatabase.
         PRAGMA foreign_keys=on;
         ",
    )?;

    if cfg!(not(target_os = "ios")) {
        conn.pragma_update(None, "temp_store", "memory")?;
    }

    if !passphrase.is_empty() {
        conn.pragma_update(None, "key", passphrase)?;
    }
    conn.pragma_update(None, "auto_vacuum", "INCREMENTAL".to_string())?;

    conn.pragma_update(None, "journal_mode", "WAL".to_string())?;
    conn.pragma_update(None, "synchronous", "NORMAL".to_string())?;

    Ok(conn)
}

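/// Runs `PRAGMA incremental_vacuum` on a write connection and logs
/// the number of pages that were freed.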
async fn incremental_vacuum(context: &Context) -> Result<()> {
    context
        .sql
        .call_write(move |conn| {
            let mut stmt = conn
                .prepare("PRAGMA incremental_vacuum")
                .context("Failed to prepare incremental_vacuum statement")?;

            let mut rows = stmt
                .query(())
                .context("Failed to run incremental_vacuum statement")?;
            let mut row_count = 0;
            while let Some(_row) = rows
                .next()
                .context("Failed to step incremental_vacuum statement")?
            {
                row_count += 1;
            }
            info!(context, "Incremental vacuum freed {row_count} pages.");
            Ok(())
        })
        .await
}

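/// Performs periodic maintenance: cleans caches, removes unused blob files,
/// starts ephemeral timers, prunes tombstones, old MDNs and webxdc status updates,
/// and runs incremental vacuum.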
pub async fn housekeeping(context: &Context) -> Result<()> {
    if let Err(e) = context
        .set_config_internal(Config::LastHousekeeping, Some(&time().to_string()))
        .await
    {
        warn!(context, "Can't set config: {e:#}.");
    }

    http_cache_cleanup(context)
        .await
        .context("Failed to cleanup HTTP cache")
        .log_err(context)
        .ok();

    if let Err(err) = remove_unused_files(context).await {
        warn!(
            context,
            "Housekeeping: cannot remove unused files: {:#}.", err
        );
    }

    if let Err(err) = start_ephemeral_timers(context).await {
        warn!(
            context,
            "Housekeeping: cannot start ephemeral timers: {:#}.", err
        );
    }

    if let Err(err) = prune_tombstones(&context.sql).await {
        warn!(
            context,
            "Housekeeping: Cannot prune message tombstones: {:#}.", err
        );
    }

    if let Err(err) = incremental_vacuum(context).await {
        warn!(context, "Failed to run incremental vacuum: {err:#}.");
    }

    context
        .sql
        .execute(
            "DELETE FROM msgs_mdns WHERE msg_id NOT IN \
             (SELECT id FROM msgs WHERE chat_id!=?)",
            (DC_CHAT_ID_TRASH,),
        )
        .await
        .context("failed to remove old MDNs")
        .log_err(context)
        .ok();

    context
        .sql
        .execute(
            "DELETE FROM msgs_status_updates WHERE msg_id NOT IN \
             (SELECT id FROM msgs WHERE chat_id!=?)",
            (DC_CHAT_ID_TRASH,),
        )
        .await
        .context("failed to remove old webxdc status updates")
        .log_err(context)
        .ok();

    prune_connection_history(context)
        .await
        .context("Failed to prune connection history")
        .log_err(context)
        .ok();
    prune_dns_cache(context)
        .await
        .context("Failed to prune DNS cache")
        .log_err(context)
        .ok();

    delete_orphaned_poi_locations(context)
        .await
        .context("Failed to delete orphaned POI locations")
        .log_err(context)
        .ok();

    info!(context, "Housekeeping done.");
    Ok(())
}

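/// Reads the value of column `idx` as a byte vector, also accepting `NULL`
/// (returned as an empty vector) and `TEXT` values; numeric values keep the
/// original type error.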
pub fn row_get_vec(row: &Row, idx: usize) -> rusqlite::Result<Vec<u8>> {
    row.get(idx).or_else(|err| match row.get_ref(idx)? {
        ValueRef::Null => Ok(Vec::new()),
        ValueRef::Text(text) => Ok(text.to_vec()),
        ValueRef::Blob(blob) => Ok(blob.to_vec()),
        ValueRef::Integer(_) | ValueRef::Real(_) => Err(err),
    })
}

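/// Removes files from the blob directory that are not referenced by any message,
/// chat, contact, config value or HTTP cache entry, keeping recently touched files.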
pub async fn remove_unused_files(context: &Context) -> Result<()> {
    let mut files_in_use = HashSet::new();
    let mut unreferenced_count = 0;

    info!(context, "Start housekeeping...");
    maybe_add_from_param(
        &context.sql,
        &mut files_in_use,
        "SELECT param FROM msgs WHERE chat_id!=3 AND type!=10;",
        Param::File,
    )
    .await?;
    maybe_add_from_param(
        &context.sql,
        &mut files_in_use,
        "SELECT param FROM chats;",
        Param::ProfileImage,
    )
    .await?;
    maybe_add_from_param(
        &context.sql,
        &mut files_in_use,
        "SELECT param FROM contacts;",
        Param::ProfileImage,
    )
    .await?;

    context
        .sql
        .query_map(
            "SELECT value FROM config;",
            (),
            |row| row.get::<_, String>(0),
            |rows| {
                for row in rows {
                    maybe_add_file(&mut files_in_use, &row?);
                }
                Ok(())
            },
        )
        .await
        .context("housekeeping: failed to SELECT value FROM config")?;

    context
        .sql
        .query_map(
            "SELECT blobname FROM http_cache",
            (),
            |row| row.get::<_, String>(0),
            |rows| {
                for row in rows {
                    maybe_add_file(&mut files_in_use, &row?);
                }
                Ok(())
            },
        )
        .await
        .context("Failed to SELECT blobname FROM http_cache")?;

    info!(context, "{} files in use.", files_in_use.len());
    let blobdir = context.get_blobdir();
    // Clean up the leftover blobs-backup directory (if any) first, then the blob directory itself.
    for p in [&blobdir.join(BLOBS_BACKUP_NAME), blobdir] {
        match tokio::fs::read_dir(p).await {
            Ok(mut dir_handle) => {
                // Files touched within the last hour count as recent and are kept below.
                let diff = std::time::Duration::from_secs(60 * 60);
                let keep_files_newer_than = SystemTime::now()
                    .checked_sub(diff)
                    .unwrap_or(SystemTime::UNIX_EPOCH);

                while let Ok(Some(entry)) = dir_handle.next_entry().await {
                    let name_f = entry.file_name();
                    let name_s = name_f.to_string_lossy();

                    if p == blobdir
                        && (is_file_in_use(&files_in_use, None, &name_s)
                            || is_file_in_use(&files_in_use, Some(".waveform"), &name_s)
                            || is_file_in_use(&files_in_use, Some("-preview.jpg"), &name_s))
                    {
                        continue;
                    }

                    let stats = match tokio::fs::metadata(entry.path()).await {
                        Err(err) => {
                            warn!(
                                context,
                                "Cannot get metadata for {}: {:#}.",
                                entry.path().display(),
                                err
                            );
                            continue;
                        }
                        Ok(stats) => stats,
                    };

                    if stats.is_dir() {
                        if let Err(e) = tokio::fs::remove_dir(entry.path()).await {
                            info!(
                                context,
                                "Housekeeping: Cannot rmdir {}: {:#}.",
                                entry.path().display(),
                                e
                            );
                        }
                        continue;
                    }

                    unreferenced_count += 1;
                    let recently_created = stats.created().is_ok_and(|t| t > keep_files_newer_than);
                    let recently_modified =
                        stats.modified().is_ok_and(|t| t > keep_files_newer_than);
                    let recently_accessed =
                        stats.accessed().is_ok_and(|t| t > keep_files_newer_than);

                    if p == blobdir && (recently_created || recently_modified || recently_accessed)
                    {
                        info!(
                            context,
                            "Housekeeping: Keeping new unreferenced file #{}: {:?}.",
                            unreferenced_count,
                            entry.file_name(),
                        );
                        continue;
                    }

                    info!(
                        context,
                        "Housekeeping: Deleting unreferenced file #{}: {:?}.",
                        unreferenced_count,
                        entry.file_name()
                    );
                    let path = entry.path();
                    if let Err(err) = delete_file(context, &path).await {
                        error!(
                            context,
                            "Failed to delete unused file {}: {:#}.",
                            path.display(),
                            err
                        );
                    }
                }
            }
            Err(err) => {
                if !p.ends_with(BLOBS_BACKUP_NAME) {
                    warn!(
                        context,
                        "Housekeeping: Cannot read dir {}: {:#}.",
                        p.display(),
                        err
                    );
                }
            }
        }
    }

    Ok(())
}

/// Returns `true` if the given file name (after stripping the optional
/// `namespc_opt` suffix, e.g. `".waveform"` or `"-preview.jpg"`) is referenced
/// in `files_in_use`.
fn is_file_in_use(files_in_use: &HashSet<String>, namespc_opt: Option<&str>, name: &str) -> bool {
    let name_to_check = if let Some(namespc) = namespc_opt {
        let Some(name) = name.strip_suffix(namespc) else {
            return false;
        };
        name
    } else {
        name
    };
    files_in_use.contains(name_to_check)
}

/// Adds `file` to `files_in_use` if it references a file in the blob directory
/// (i.e. starts with the `$BLOBDIR/` prefix).
fn maybe_add_file(files_in_use: &mut HashSet<String>, file: &str) {
    if let Some(file) = file.strip_prefix("$BLOBDIR/") {
        files_in_use.insert(file.to_string());
    }
}

/// Runs `query`, parses each returned `param` string and adds the file referenced
/// by `param_id` (if any) to `files_in_use`.
async fn maybe_add_from_param(
    sql: &Sql,
    files_in_use: &mut HashSet<String>,
    query: &str,
    param_id: Param,
) -> Result<()> {
    sql.query_map(
        query,
        (),
        |row| row.get::<_, String>(0),
        |rows| {
            for row in rows {
                let param: Params = row?.parse().unwrap_or_default();
                if let Some(file) = param.get(param_id) {
                    maybe_add_file(files_in_use, file);
                }
            }
            Ok(())
        },
    )
    .await
    .context(format!("housekeeping: failed to add_from_param {query}"))?;

    Ok(())
}

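/// Deletes tombstone rows (messages moved to the trash chat) that are older than
/// two days and are not referenced by an `imap` table row with a non-empty `target`.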
async fn prune_tombstones(sql: &Sql) -> Result<()> {
    let timestamp_max = time().saturating_sub(2 * 24 * 3600);
    sql.execute(
        "DELETE FROM msgs
         WHERE chat_id=?
         AND timestamp<=?
         AND NOT EXISTS (
         SELECT * FROM imap WHERE msgs.rfc724_mid=rfc724_mid AND target!=''
         )",
        (DC_CHAT_ID_TRASH, timestamp_max),
    )
    .await?;
    Ok(())
}

#[cfg(test)]
mod sql_tests;