//! Storage usage statistics (deltachat/storage_usage.rs).

use crate::{context::Context, message::MsgId};

use anyhow::Result;
use humansize::{BINARY, format_size};
use walkdir::WalkDir;

7#[derive(Debug)]
10pub struct StorageUsage {
11 pub db_size: u64,
13 pub largest_tables: Vec<(String, u64, Option<u64>)>,
15 pub largest_webxdc_data: Vec<(MsgId, u64, u64)>,
18 pub blobdir_size: u64,
20}
21
22impl std::fmt::Display for StorageUsage {
23 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
24 writeln!(f, "Storage Usage:")?;
25 let blobdir_size = format_size(self.blobdir_size, BINARY);
26 writeln!(f, "[Blob Directory Size]: {blobdir_size}")?;
27 let human_db_size = format_size(self.db_size, BINARY);
28 writeln!(f, "[Database Size]: {human_db_size}")?;
29 writeln!(f, "[Largest Tables]:")?;
30 for (name, size, row_count) in &self.largest_tables {
31 let human_table_size = format_size(*size, BINARY);
32 writeln!(
33 f,
34 " {name:<20} {human_table_size:>10}, {row_count:>6} rows",
35 name = format!("{name}:"),
36 row_count = row_count.map(|c| c.to_string()).unwrap_or("?".to_owned())
37 )?;
38 }
39 writeln!(f, "[Webxdc With Biggest Status Update Space Usage]:")?;
40 for (msg_id, size, update_count) in &self.largest_webxdc_data {
41 let human_size = format_size(*size, BINARY);
42 writeln!(
43 f,
44 " {msg_id:<8} {human_size:>10} across {update_count:>5} updates",
45 msg_id = format!("{msg_id}:")
46 )?;
47 }
48 Ok(())
49 }
50}
51
52pub async fn get_storage_usage(ctx: &Context) -> Result<StorageUsage> {
54 let context_clone = ctx.clone();
55 let blobdir_size =
56 tokio::task::spawn_blocking(move || get_blobdir_storage_usage(&context_clone));
57
58 let page_size: u64 = ctx
59 .sql
60 .query_get_value("PRAGMA page_size", ())
61 .await?
62 .unwrap_or_default();
63 let page_count: u64 = ctx
64 .sql
65 .query_get_value("PRAGMA page_count", ())
66 .await?
67 .unwrap_or_default();
68
69 let mut largest_tables = ctx
70 .sql
71 .query_map_vec(
72 "SELECT name,
73 SUM(pgsize) AS size
74 FROM dbstat
75 WHERE name IN (SELECT name FROM sqlite_master WHERE type='table')
76 GROUP BY name ORDER BY size DESC LIMIT 10",
77 (),
78 |row| {
79 let name: String = row.get(0)?;
80 let size: u64 = row.get(1)?;
81 Ok((name, size, None))
82 },
83 )
84 .await?;
85
86 for row in &mut largest_tables {
87 let name = &row.0;
88 let row_count: Result<Option<u64>> = ctx
89 .sql
90 .query_get_value(&format!("SELECT COUNT(*) FROM {name}"), ())
92 .await;
93 row.2 = row_count.unwrap_or_default();
94 }
95
96 let largest_webxdc_data = ctx
97 .sql
98 .query_map_vec(
99 "SELECT msg_id, SUM(length(update_item)) as size, COUNT(*) as update_count
100 FROM msgs_status_updates
101 GROUP BY msg_id ORDER BY size DESC LIMIT 10",
102 (),
103 |row| {
104 let msg_id: MsgId = row.get(0)?;
105 let size: u64 = row.get(1)?;
106 let count: u64 = row.get(2)?;
107
108 Ok((msg_id, size, count))
109 },
110 )
111 .await?;
112
113 let blobdir_size = blobdir_size.await?;
114
115 Ok(StorageUsage {
116 db_size: page_size * page_count,
117 largest_tables,
118 largest_webxdc_data,
119 blobdir_size,
120 })
121}
122
123pub fn get_blobdir_storage_usage(ctx: &Context) -> u64 {
125 WalkDir::new(ctx.get_blobdir())
126 .max_depth(2)
127 .into_iter()
128 .filter_map(|entry| entry.ok())
129 .filter_map(|entry| entry.metadata().ok())
130 .filter(|metadata| metadata.is_file())
131 .fold(0, |acc, m| acc + m.len())
132}