deltachat/
storage_usage.rs1use crate::{context::Context, message::MsgId};
3use anyhow::Result;
4use humansize::{BINARY, format_size};
5use walkdir::WalkDir;
6
/// Storage usage statistics of a single account,
/// covering both the SQLite database and the blob directory.
#[derive(Debug)]
pub struct StorageUsage {
    /// Size of the database in bytes
    /// (SQLite page size multiplied by the page count).
    pub db_size: u64,

    /// Up to ten largest database tables as
    /// `(table name, size in bytes, row count)`,
    /// sorted by size, descending.
    /// The row count is `None` if counting the rows failed.
    pub largest_tables: Vec<(String, u64, Option<u64>)>,

    /// Up to ten webxdc messages with the largest status-update usage as
    /// `(message id, total size of update items in bytes, number of updates)`,
    /// sorted by size, descending.
    pub largest_webxdc_data: Vec<(MsgId, u64, u64)>,

    /// Total size in bytes of the regular files in the blob directory
    /// (scanned up to two directory levels deep).
    pub blobdir_size: u64,
}
21
22impl std::fmt::Display for StorageUsage {
23 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
24 writeln!(f, "Storage Usage:")?;
25 let blobdir_size = format_size(self.blobdir_size, BINARY);
26 writeln!(f, "[Blob Directory Size]: {blobdir_size}")?;
27 let human_db_size = format_size(self.db_size, BINARY);
28 writeln!(f, "[Database Size]: {human_db_size}")?;
29 writeln!(f, "[Largest Tables]:")?;
30 for (name, size, row_count) in &self.largest_tables {
31 let human_table_size = format_size(*size, BINARY);
32 writeln!(
33 f,
34 " {name:<20} {human_table_size:>10}, {row_count:>6} rows",
35 name = format!("{name}:"),
36 row_count = row_count.map(|c| c.to_string()).unwrap_or("?".to_owned())
37 )?;
38 }
39 writeln!(f, "[Webxdc With Biggest Status Update Space Usage]:")?;
40 for (msg_id, size, update_count) in &self.largest_webxdc_data {
41 let human_size = format_size(*size, BINARY);
42 writeln!(
43 f,
44 " {msg_id:<8} {human_size:>10} across {update_count:>5} updates",
45 msg_id = format!("{msg_id}:")
46 )?;
47 }
48 Ok(())
49 }
50}
51
/// Collects storage usage statistics for the account: database size,
/// the largest tables, the webxdc messages with the most status-update
/// data, and the total blob directory size.
///
/// # Errors
///
/// Returns an error if any SQL query fails or if the blocking
/// blob-directory task cannot be joined.
#[expect(clippy::arithmetic_side_effects)]
pub async fn get_storage_usage(ctx: &Context) -> Result<StorageUsage> {
    // Walking the blob directory is blocking filesystem I/O; start it on the
    // blocking thread pool now so it runs concurrently with the SQL queries
    // below and is only awaited at the end.
    let context_clone = ctx.clone();
    let blobdir_size =
        tokio::task::spawn_blocking(move || get_blobdir_storage_usage(&context_clone));

    // Database file size = page size * page count; `unwrap_or_default()`
    // falls back to 0 if a PRAGMA unexpectedly returns no value.
    let page_size: u64 = ctx
        .sql
        .query_get_value("PRAGMA page_size", ())
        .await?
        .unwrap_or_default();
    let page_count: u64 = ctx
        .sql
        .query_get_value("PRAGMA page_count", ())
        .await?
        .unwrap_or_default();

    // `dbstat` is SQLite's per-page space-usage virtual table.
    // NOTE(review): it requires SQLite built with SQLITE_ENABLE_DBSTAT_VTAB —
    // presumably enabled in the bundled SQLite; confirm.
    // Row counts are filled in below, hence `None` for now.
    let mut largest_tables = ctx
        .sql
        .query_map_vec(
            "SELECT name,
                SUM(pgsize) AS size
            FROM dbstat
            WHERE name IN (SELECT name FROM sqlite_master WHERE type='table')
            GROUP BY name ORDER BY size DESC LIMIT 10",
            (),
            |row| {
                let name: String = row.get(0)?;
                let size: u64 = row.get(1)?;
                Ok((name, size, None))
            },
        )
        .await?;

    // Fetch row counts one table at a time. `name` comes from
    // `sqlite_master` above, not from user input, so interpolating it into
    // the statement is acceptable here. A failed count degrades to `None`
    // instead of failing the whole report.
    for row in &mut largest_tables {
        let name = &row.0;
        let row_count: Result<Option<u64>> = ctx
            .sql
            .query_get_value(&format!("SELECT COUNT(*) FROM {name}"), ())
            .await;
        row.2 = row_count.unwrap_or_default();
    }

    // Aggregate status-update payload sizes per webxdc message.
    let largest_webxdc_data = ctx
        .sql
        .query_map_vec(
            "SELECT msg_id, SUM(length(update_item)) as size, COUNT(*) as update_count
            FROM msgs_status_updates
            GROUP BY msg_id ORDER BY size DESC LIMIT 10",
            (),
            |row| {
                let msg_id: MsgId = row.get(0)?;
                let size: u64 = row.get(1)?;
                let count: u64 = row.get(2)?;

                Ok((msg_id, size, count))
            },
        )
        .await?;

    // Join the blocking task; `?` propagates a JoinError (e.g. panic in the task).
    let blobdir_size = blobdir_size.await?;

    Ok(StorageUsage {
        db_size: page_size * page_count,
        largest_tables,
        largest_webxdc_data,
        blobdir_size,
    })
}
123
124#[expect(clippy::arithmetic_side_effects)]
126pub fn get_blobdir_storage_usage(ctx: &Context) -> u64 {
127 WalkDir::new(ctx.get_blobdir())
128 .max_depth(2)
129 .into_iter()
130 .filter_map(|entry| entry.ok())
131 .filter_map(|entry| entry.metadata().ok())
132 .filter(|metadata| metadata.is_file())
133 .fold(0, |acc, m| acc + m.len())
134}