1use std::ffi::OsStr;
4use std::path::{Path, PathBuf};
5use std::pin::Pin;
6
7use ::pgp::types::PublicKeyTrait;
8use anyhow::{bail, ensure, format_err, Context as _, Result};
9use futures::TryStreamExt;
10use futures_lite::FutureExt;
11use pin_project::pin_project;
12
13use tokio::fs::{self, File};
14use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
15use tokio_tar::Archive;
16
17use crate::blob::BlobDirContents;
18use crate::chat::delete_and_reset_all_device_msgs;
19use crate::config::Config;
20use crate::context::Context;
21use crate::e2ee;
22use crate::events::EventType;
23use crate::key::{self, DcKey, DcSecretKey, SignedPublicKey, SignedSecretKey};
24use crate::log::LogExt;
25use crate::pgp;
26use crate::qr::DCBACKUP_VERSION;
27use crate::sql;
28use crate::tools::{
29 create_folder, delete_file, get_filesuffix_lc, read_file, time, write_file, TempPathGuard,
30};
31
32mod key_transfer;
33mod transfer;
34
35pub use key_transfer::{continue_key_transfer, initiate_key_transfer};
36pub use transfer::{get_backup, BackupProvider};
37
/// Name under which the database dump is stored inside a backup tar archive.
const DBFILE_BACKUP_NAME: &str = "dc_database_backup.sqlite";
/// Name of the directory inside a backup tar archive that holds the blob files.
pub(crate) const BLOBS_BACKUP_NAME: &str = "blobs_backup";
41
/// Operation performed by [`imex`].
///
/// The explicit discriminants are part of the public FFI/API surface
/// (see `FromPrimitive`/`ToPrimitive` derives) and must not change.
#[derive(Debug, Display, Copy, Clone, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(u32)]
pub enum ImexMode {
    /// Export the user's key pair(s) as ASCII-armored files.
    ExportSelfKeys = 1,

    /// Import secret key(s) from a file or from all `.asc` files in a directory.
    ImportSelfKeys = 2,

    /// Export a full backup (database plus blobs) as a tar file.
    ExportBackup = 11,

    /// Import a full backup from a tar file into an unconfigured account.
    ImportBackup = 12,
}
69
/// Runs the import/export job described by `what` on `path`.
///
/// Registers an "ongoing" process so the job can be canceled, and pauses the
/// scheduler (IO) for the duration of the job. Emits `ImexProgress(0)` on
/// failure and `ImexProgress(1000)` on success.
///
/// `passphrase` is only meaningful for the backup modes; it is forwarded to
/// [`imex_inner`] as-is.
pub async fn imex(
    context: &Context,
    what: ImexMode,
    path: &Path,
    passphrase: Option<String>,
) -> Result<()> {
    // A message received on `cancel` signals that the user aborted the job.
    let cancel = context.alloc_ongoing().await?;

    let res = {
        // Pause IO while the database is exported/imported; the guard resumes
        // the scheduler when it is dropped at the end of this block.
        let _guard = context.scheduler.pause(context.clone()).await?;
        // Race the actual work against cancellation.
        imex_inner(context, what, path, passphrase)
            .race(async {
                cancel.recv().await.ok();
                Err(format_err!("canceled"))
            })
            .await
    };
    context.free_ongoing().await;

    if let Err(err) = res.as_ref() {
        error!(context, "IMEX failed to complete: {:#}", err);
        // Progress 0 signals failure to the UI.
        context.emit_event(EventType::ImexProgress(0));
    } else {
        info!(context, "IMEX successfully completed");
        // Progress 1000 signals success to the UI.
        context.emit_event(EventType::ImexProgress(1000));
    }

    res
}
114
115pub async fn has_backup(_context: &Context, dir_name: &Path) -> Result<String> {
117 let mut dir_iter = tokio::fs::read_dir(dir_name).await?;
118 let mut newest_backup_name = "".to_string();
119 let mut newest_backup_path: Option<PathBuf> = None;
120
121 while let Ok(Some(dirent)) = dir_iter.next_entry().await {
122 let path = dirent.path();
123 let name = dirent.file_name();
124 let name: String = name.to_string_lossy().into();
125 if name.starts_with("delta-chat")
126 && name.ends_with(".tar")
127 && (newest_backup_name.is_empty() || name > newest_backup_name)
128 {
129 newest_backup_path = Some(path);
132 newest_backup_name = name;
133 }
134 }
135
136 match newest_backup_path {
137 Some(path) => Ok(path.to_string_lossy().into_owned()),
138 None => bail!("no backup found in {}", dir_name.display()),
139 }
140}
141
142async fn set_self_key(context: &Context, armored: &str) -> Result<()> {
143 let (private_key, header) = SignedSecretKey::from_asc(armored)?;
145 let public_key = private_key.split_public_key()?;
146 if let Some(preferencrypt) = header.get("Autocrypt-Prefer-Encrypt") {
147 let e2ee_enabled = match preferencrypt.as_str() {
148 "nopreference" => 0,
149 "mutual" => 1,
150 _ => {
151 bail!("invalid Autocrypt-Prefer-Encrypt header: {:?}", header);
152 }
153 };
154 context
155 .sql
156 .set_raw_config_int("e2ee_enabled", e2ee_enabled)
157 .await?;
158 } else {
159 info!(context, "No Autocrypt-Prefer-Encrypt header.");
168 };
169
170 let keypair = pgp::KeyPair {
171 public: public_key,
172 secret: private_key,
173 };
174 key::store_self_keypair(context, &keypair).await?;
175
176 info!(context, "stored self key: {:?}", keypair.secret.key_id());
177 Ok(())
178}
179
180async fn imex_inner(
181 context: &Context,
182 what: ImexMode,
183 path: &Path,
184 passphrase: Option<String>,
185) -> Result<()> {
186 info!(
187 context,
188 "{} path: {}",
189 match what {
190 ImexMode::ExportSelfKeys | ImexMode::ExportBackup => "Export",
191 ImexMode::ImportSelfKeys | ImexMode::ImportBackup => "Import",
192 },
193 path.display()
194 );
195 ensure!(context.sql.is_open().await, "Database not opened.");
196 context.emit_event(EventType::ImexProgress(1));
197
198 if what == ImexMode::ExportBackup || what == ImexMode::ExportSelfKeys {
199 e2ee::ensure_secret_key_exists(context)
201 .await
202 .context("Cannot create private key or private key not available")?;
203
204 create_folder(context, path).await?;
205 }
206
207 match what {
208 ImexMode::ExportSelfKeys => export_self_keys(context, path).await,
209 ImexMode::ImportSelfKeys => import_self_keys(context, path).await,
210
211 ImexMode::ExportBackup => {
212 export_backup(context, path, passphrase.unwrap_or_default()).await
213 }
214 ImexMode::ImportBackup => {
215 import_backup(context, path, passphrase.unwrap_or_default()).await
216 }
217 }
218}
219
220async fn import_backup(
227 context: &Context,
228 backup_to_import: &Path,
229 passphrase: String,
230) -> Result<()> {
231 ensure!(
232 !context.is_configured().await?,
233 "Cannot import backups to accounts in use."
234 );
235 ensure!(
236 !context.scheduler.is_running().await,
237 "cannot import backup, IO is running"
238 );
239
240 let backup_file = File::open(backup_to_import).await?;
241 let file_size = backup_file.metadata().await?.len();
242 info!(
243 context,
244 "Import \"{}\" ({} bytes) to \"{}\".",
245 backup_to_import.display(),
246 file_size,
247 context.get_dbfile().display()
248 );
249
250 import_backup_stream(context, backup_file, file_size, passphrase).await?;
251 Ok(())
252}
253
254pub(crate) async fn import_backup_stream<R: tokio::io::AsyncRead + Unpin>(
268 context: &Context,
269 backup_file: R,
270 file_size: u64,
271 passphrase: String,
272) -> Result<()> {
273 import_backup_stream_inner(context, backup_file, file_size, passphrase)
274 .await
275 .0
276}
277
/// Reader wrapper that emits `ImexProgress` events as data flows through it.
#[pin_project]
struct ProgressReader<R> {
    /// The wrapped reader.
    #[pin]
    inner: R,

    /// Total number of bytes read from `inner` so far.
    read: usize,

    /// Expected total stream size in bytes, used to compute per-mille progress.
    file_size: usize,

    /// Last progress value emitted, to avoid duplicate events.
    last_progress: usize,

    /// Context used to emit the progress events.
    context: Context,
}
298
299impl<R> ProgressReader<R> {
300 fn new(r: R, context: Context, file_size: u64) -> Self {
301 Self {
302 inner: r,
303 read: 0,
304 file_size: file_size as usize,
305 last_progress: 1,
306 context,
307 }
308 }
309}
310
311impl<R> AsyncRead for ProgressReader<R>
312where
313 R: AsyncRead,
314{
315 fn poll_read(
316 self: Pin<&mut Self>,
317 cx: &mut std::task::Context<'_>,
318 buf: &mut ReadBuf<'_>,
319 ) -> std::task::Poll<std::io::Result<()>> {
320 let this = self.project();
321 let before = buf.filled().len();
322 let res = this.inner.poll_read(cx, buf);
323 if let std::task::Poll::Ready(Ok(())) = res {
324 *this.read = this.read.saturating_add(buf.filled().len() - before);
325
326 let progress = std::cmp::min(1000 * *this.read / *this.file_size, 999);
327 if progress > *this.last_progress {
328 this.context.emit_event(EventType::ImexProgress(progress));
329 *this.last_progress = progress;
330 }
331 }
332 res
333 }
334}
335
/// Unpacks a backup tar stream and imports it into the account.
///
/// Returns the result wrapped in a 1-tuple — presumably to prevent accidental
/// early returns via `?`, so the cleanup steps below always run; TODO confirm.
async fn import_backup_stream_inner<R: tokio::io::AsyncRead + Unpin>(
    context: &Context,
    backup_file: R,
    file_size: u64,
    passphrase: String,
) -> (Result<()>,) {
    // Wrap the reader so ImexProgress events are emitted while unpacking.
    let backup_file = ProgressReader::new(backup_file, context.clone(), file_size);
    let mut archive = Archive::new(backup_file);

    let mut entries = match archive.entries() {
        Ok(entries) => entries,
        Err(e) => return (Err(e).context("Failed to get archive entries"),),
    };
    // Blob files extracted so far; removed again if the import fails.
    let mut blobs = Vec::new();
    let mut res: Result<()> = loop {
        let mut f = match entries.try_next().await {
            Ok(Some(f)) => f,
            Ok(None) => break Ok(()),
            Err(e) => break Err(e).context("Failed to get next entry"),
        };

        let path = match f.path() {
            Ok(path) => path.to_path_buf(),
            Err(e) => break Err(e).context("Failed to get entry path"),
        };
        if let Err(e) = f.unpack_in(context.get_blobdir()).await {
            break Err(e).context("Failed to unpack file");
        }
        // The database dump is handled separately after the loop.
        if path.file_name() == Some(OsStr::new(DBFILE_BACKUP_NAME)) {
            continue;
        }
        // Move the unpacked file (possibly extracted into a subdirectory of
        // the blobdir) directly into the blobdir, flattening its path.
        let from_path = context.get_blobdir().join(&path);
        if from_path.is_file() {
            if let Some(name) = from_path.file_name() {
                let to_path = context.get_blobdir().join(name);
                if let Err(e) = fs::rename(&from_path, &to_path).await {
                    blobs.push(from_path);
                    break Err(e).context("Failed to move file to blobdir");
                }
                blobs.push(to_path);
            } else {
                warn!(context, "No file name");
            }
        }
    };
    // On failure, best-effort removal of the blobs extracted so far.
    if res.is_err() {
        for blob in blobs {
            fs::remove_file(&blob).await.log_err(context).ok();
        }
    }

    let unpacked_database = context.get_blobdir().join(DBFILE_BACKUP_NAME);
    if res.is_ok() {
        res = context
            .sql
            .import(&unpacked_database, passphrase.clone())
            .await
            .context("cannot import unpacked database");
    }
    if res.is_ok() {
        res = check_backup_version(context).await;
    }
    if res.is_ok() {
        res = adjust_bcc_self(context).await;
    }
    // Remove the temporary database dump regardless of success.
    fs::remove_file(unpacked_database)
        .await
        .context("cannot remove unpacked database")
        .log_err(context)
        .ok();
    if res.is_ok() {
        // 999 here; 1000 is emitted by `imex()` once everything succeeded.
        context.emit_event(EventType::ImexProgress(999));
        res = context.sql.run_migrations(context).await;
        context.emit_event(EventType::AccountsItemChanged);
    }
    if res.is_ok() {
        delete_and_reset_all_device_msgs(context)
            .await
            .log_err(context)
            .ok();
    }
    (res,)
}
420
421fn get_next_backup_path(
429 folder: &Path,
430 addr: &str,
431 backup_time: i64,
432) -> Result<(PathBuf, PathBuf, PathBuf)> {
433 let folder = PathBuf::from(folder);
434 let stem = chrono::DateTime::<chrono::Utc>::from_timestamp(backup_time, 0)
435 .context("can't get next backup path")?
436 .format("delta-chat-backup-%Y-%m-%d")
438 .to_string();
439
440 for i in 0..64 {
442 let mut tempdbfile = folder.clone();
443 tempdbfile.push(format!("{stem}-{i:02}-{addr}.db"));
444
445 let mut tempfile = folder.clone();
446 tempfile.push(format!("{stem}-{i:02}-{addr}.tar.part"));
447
448 let mut destfile = folder.clone();
449 destfile.push(format!("{stem}-{i:02}-{addr}.tar"));
450
451 if !tempdbfile.exists() && !tempfile.exists() && !destfile.exists() {
452 return Ok((tempdbfile, tempfile, destfile));
453 }
454 }
455 bail!("could not create backup file, disk full?");
456}
457
/// Exports a full backup (database dump plus blobs) as a tar file in `dir`.
///
/// Writes to a `.tar.part` temp file first and renames it into place on
/// success; the `TempPathGuard`s remove leftover temp files on failure.
async fn export_backup(context: &Context, dir: &Path, passphrase: String) -> Result<()> {
    let now = time();
    let self_addr = context.get_primary_self_addr().await?;
    let (temp_db_path, temp_path, dest_path) = get_next_backup_path(dir, &self_addr, now)?;
    let temp_db_path = TempPathGuard::new(temp_db_path);
    let temp_path = TempPathGuard::new(temp_path);

    export_database(context, &temp_db_path, passphrase, now)
        .await
        .context("could not export database")?;

    info!(
        context,
        "Backup '{}' to '{}'.",
        context.get_dbfile().display(),
        dest_path.display(),
    );

    let file = File::create(&temp_path).await?;
    let blobdir = BlobDirContents::new(context).await?;

    // Total payload size (database dump + all blobs), for progress reporting.
    let mut file_size = 0;
    file_size += temp_db_path.metadata()?.len();
    for blob in blobdir.iter() {
        file_size += blob.to_abs_path().metadata()?.len()
    }

    export_backup_stream(context, &temp_db_path, blobdir, file, file_size)
        .await
        .context("Exporting backup to file failed")?;
    fs::rename(temp_path, &dest_path).await?;
    context.emit_event(EventType::ImexFileWritten(dest_path));
    Ok(())
}
496
/// Writer wrapper that emits `ImexProgress` events as data flows through it.
#[pin_project]
struct ProgressWriter<W> {
    /// The wrapped writer.
    #[pin]
    inner: W,

    /// Total number of bytes written to `inner` so far.
    written: usize,

    /// Expected total output size in bytes, used to compute per-mille progress.
    file_size: usize,

    /// Last progress value emitted, to avoid duplicate events.
    last_progress: usize,

    /// Context used to emit the progress events.
    context: Context,
}
517
518impl<W> ProgressWriter<W> {
519 fn new(w: W, context: Context, file_size: u64) -> Self {
520 Self {
521 inner: w,
522 written: 0,
523 file_size: file_size as usize,
524 last_progress: 1,
525 context,
526 }
527 }
528}
529
530impl<W> AsyncWrite for ProgressWriter<W>
531where
532 W: AsyncWrite,
533{
534 fn poll_write(
535 self: Pin<&mut Self>,
536 cx: &mut std::task::Context<'_>,
537 buf: &[u8],
538 ) -> std::task::Poll<Result<usize, std::io::Error>> {
539 let this = self.project();
540 let res = this.inner.poll_write(cx, buf);
541 if let std::task::Poll::Ready(Ok(written)) = res {
542 *this.written = this.written.saturating_add(written);
543
544 let progress = std::cmp::min(1000 * *this.written / *this.file_size, 999);
545 if progress > *this.last_progress {
546 this.context.emit_event(EventType::ImexProgress(progress));
547 *this.last_progress = progress;
548 }
549 }
550 res
551 }
552
553 fn poll_flush(
554 self: Pin<&mut Self>,
555 cx: &mut std::task::Context<'_>,
556 ) -> std::task::Poll<Result<(), std::io::Error>> {
557 self.project().inner.poll_flush(cx)
558 }
559
560 fn poll_shutdown(
561 self: Pin<&mut Self>,
562 cx: &mut std::task::Context<'_>,
563 ) -> std::task::Poll<Result<(), std::io::Error>> {
564 self.project().inner.poll_shutdown(cx)
565 }
566}
567
568pub(crate) async fn export_backup_stream<'a, W>(
570 context: &'a Context,
571 temp_db_path: &Path,
572 blobdir: BlobDirContents<'a>,
573 writer: W,
574 file_size: u64,
575) -> Result<()>
576where
577 W: tokio::io::AsyncWrite + tokio::io::AsyncWriteExt + Unpin + Send + 'static,
578{
579 let writer = ProgressWriter::new(writer, context.clone(), file_size);
580 let mut builder = tokio_tar::Builder::new(writer);
581
582 builder
583 .append_path_with_name(temp_db_path, DBFILE_BACKUP_NAME)
584 .await?;
585
586 for blob in blobdir.iter() {
587 let mut file = File::open(blob.to_abs_path()).await?;
588 let path_in_archive = PathBuf::from(BLOBS_BACKUP_NAME).join(blob.as_name());
589 builder.append_file(path_in_archive, &mut file).await?;
590 }
591
592 builder.finish().await?;
593 Ok(())
594}
595
596async fn import_secret_key(context: &Context, path: &Path) -> Result<()> {
598 let buf = read_file(context, path).await?;
599 let armored = std::string::String::from_utf8_lossy(&buf);
600 set_self_key(context, &armored).await?;
601 Ok(())
602}
603
604async fn import_self_keys(context: &Context, path: &Path) -> Result<()> {
614 let attr = tokio::fs::metadata(path).await?;
615
616 if attr.is_file() {
617 info!(
618 context,
619 "Importing secret key from {} as the default key.",
620 path.display()
621 );
622 import_secret_key(context, path).await?;
623 return Ok(());
624 }
625
626 let mut imported_cnt = 0;
627
628 let mut dir_handle = tokio::fs::read_dir(&path).await?;
629 while let Ok(Some(entry)) = dir_handle.next_entry().await {
630 let entry_fn = entry.file_name();
631 let name_f = entry_fn.to_string_lossy();
632 let path_plus_name = path.join(&entry_fn);
633 if let Some(suffix) = get_filesuffix_lc(&name_f) {
634 if suffix != "asc" {
635 continue;
636 }
637 } else {
638 continue;
639 };
640 info!(
641 context,
642 "Considering key file: {}.",
643 path_plus_name.display()
644 );
645
646 if let Err(err) = import_secret_key(context, &path_plus_name).await {
647 warn!(
648 context,
649 "Failed to import secret key from {}: {:#}.",
650 path_plus_name.display(),
651 err
652 );
653 continue;
654 }
655
656 imported_cnt += 1;
657 }
658 ensure!(
659 imported_cnt > 0,
660 "No private keys found in {}.",
661 path.display()
662 );
663 Ok(())
664}
665
/// Exports all key pairs from the database as ASCII-armored files in `dir`.
///
/// Both the public and the secret part of every key pair are written; any
/// failure to decode or write a key is counted and reported at the end.
async fn export_self_keys(context: &Context, dir: &Path) -> Result<()> {
    let mut export_errors = 0;

    // The fourth column flags whether this row is the default key
    // (its id matches the `key_id` config value).
    let keys = context
        .sql
        .query_map(
            "SELECT id, public_key, private_key, id=(SELECT value FROM config WHERE keyname='key_id') FROM keypairs;",
            (),
            |row| {
                let id = row.get(0)?;
                let public_key_blob: Vec<u8> = row.get(1)?;
                let public_key = SignedPublicKey::from_slice(&public_key_blob);
                let private_key_blob: Vec<u8> = row.get(2)?;
                let private_key = SignedSecretKey::from_slice(&private_key_blob);
                let is_default: i32 = row.get(3)?;

                Ok((id, public_key, private_key, is_default))
            },
            |keys| {
                keys.collect::<std::result::Result<Vec<_>, _>>()
                    .map_err(Into::into)
            },
        )
        .await?;
    let self_addr = context.get_primary_self_addr().await?;
    for (id, public_key, private_key, is_default) in keys {
        // The default key is exported without a numeric id
        // (its file name says "default" instead).
        let id = Some(id).filter(|_| is_default == 0);

        if let Ok(key) = public_key {
            if let Err(err) = export_key_to_asc_file(context, dir, &self_addr, id, &key).await {
                error!(context, "Failed to export public key: {:#}.", err);
                export_errors += 1;
            }
        } else {
            // Key blob failed to decode.
            export_errors += 1;
        }
        if let Ok(key) = private_key {
            if let Err(err) = export_key_to_asc_file(context, dir, &self_addr, id, &key).await {
                error!(context, "Failed to export private key: {:#}.", err);
                export_errors += 1;
            }
        } else {
            // Key blob failed to decode.
            export_errors += 1;
        }
    }

    ensure!(export_errors == 0, "errors while exporting keys");
    Ok(())
}
715
716async fn export_key_to_asc_file<T>(
718 context: &Context,
719 dir: &Path,
720 addr: &str,
721 id: Option<i64>,
722 key: &T,
723) -> Result<String>
724where
725 T: DcKey,
726{
727 let file_name = {
728 let kind = match T::is_private() {
729 false => "public",
730 true => "private",
731 };
732 let id = id.map_or("default".into(), |i| i.to_string());
733 let fp = key.dc_fingerprint().hex();
734 format!("{kind}-key-{addr}-{id}-{fp}.asc")
735 };
736 let path = dir.join(&file_name);
737 info!(
738 context,
739 "Exporting key {:?} to {}.",
740 key.key_id(),
741 path.display()
742 );
743
744 delete_file(context, &path).await.ok();
746
747 let content = key.to_asc(None).into_bytes();
748 write_file(context, &path, &content)
749 .await
750 .with_context(|| format!("cannot write key to {}", path.display()))?;
751 context.emit_event(EventType::ImexFileWritten(path));
752 Ok(file_name)
753}
754
/// Exports the database to `dest`, encrypted with `passphrase` via SQLCipher.
///
/// `timestamp` is stored as `backup_time` in the exported database so the
/// importer can tell when the backup was taken. Must not run while IO is
/// running.
async fn export_database(
    context: &Context,
    dest: &Path,
    passphrase: String,
    timestamp: i64,
) -> Result<()> {
    ensure!(
        !context.scheduler.is_running().await,
        "cannot export backup, IO is running"
    );
    // `set_raw_config_int` takes an i32; reject timestamps that don't fit.
    let timestamp = timestamp.try_into().context("32-bit UNIX time overflow")?;

    // SQLite's ATTACH needs the path as a string.
    let dest = dest
        .to_str()
        .with_context(|| format!("path {} is not valid unicode", dest.display()))?;

    adjust_bcc_self(context).await?;
    context
        .sql
        .set_raw_config_int("backup_time", timestamp)
        .await?;
    context
        .sql
        .set_raw_config_int("backup_version", DCBACKUP_VERSION)
        .await?;
    sql::housekeeping(context).await.log_err(context).ok();
    context
        .sql
        .call_write(|conn| {
            // Shrink the database first; failure is non-fatal.
            conn.execute("VACUUM;", ())
                .map_err(|err| warn!(context, "Vacuum failed, exporting anyway {err}"))
                .ok();
            // Attach the destination as an encrypted database and copy
            // everything over with sqlcipher_export().
            conn.execute("ATTACH DATABASE ? AS backup KEY ?", (dest, passphrase))
                .context("failed to attach backup database")?;
            let res = conn
                .query_row("SELECT sqlcipher_export('backup')", [], |_row| Ok(()))
                .context("failed to export to attached backup database");
            // Reset the verified-chats flag in the exported copy; best-effort.
            conn.execute(
                "UPDATE backup.config SET value='0' WHERE keyname='verified_one_on_one_chats';",
                [],
            )
            .ok(); conn.execute("DETACH DATABASE backup", [])
                .context("failed to detach backup database")?;
            // Detach must happen even if the export failed; surface the
            // export error only afterwards.
            res?;
            Ok(())
        })
        .await
}
811
812async fn adjust_bcc_self(context: &Context) -> Result<()> {
818 if context.is_chatmail().await? && !context.config_exists(Config::BccSelf).await? {
819 context.set_config(Config::BccSelf, Some("1")).await?;
820 }
821 Ok(())
822}
823
824async fn check_backup_version(context: &Context) -> Result<()> {
825 let version = (context.sql.get_raw_config_int("backup_version").await?).unwrap_or(2);
826 ensure!(
827 version <= DCBACKUP_VERSION,
828 "Backup too new, please update Delta Chat"
829 );
830 Ok(())
831}
832
#[cfg(test)]
mod tests {
    use std::time::Duration;

    use tokio::task;

    use super::*;
    use crate::config::Config;
    use crate::test_utils::{alice_keypair, TestContext};

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_public_key_to_asc_file() {
        let context = TestContext::new().await;
        let key = alice_keypair().public;
        let blobdir = Path::new("$BLOBDIR");
        let filename = export_key_to_asc_file(&context.ctx, blobdir, "a@b", None, &key)
            .await
            .unwrap();
        assert!(filename.starts_with("public-key-a@b-default-"));
        assert!(filename.ends_with(".asc"));
        let blobdir = context.ctx.get_blobdir().to_str().unwrap();
        // Read back the exported file and check it round-trips.
        let filename = format!("{blobdir}/{filename}");
        let bytes = tokio::fs::read(&filename).await.unwrap();

        assert_eq!(bytes, key.to_asc(None).into_bytes());
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_import_private_key_exported_to_asc_file() {
        let context = TestContext::new().await;
        let key = alice_keypair().secret;
        let blobdir = Path::new("$BLOBDIR");
        let filename = export_key_to_asc_file(&context.ctx, blobdir, "a@b", None, &key)
            .await
            .unwrap();
        // The fingerprint is embedded in the exported file name.
        let fingerprint = filename
            .strip_prefix("private-key-a@b-default-")
            .unwrap()
            .strip_suffix(".asc")
            .unwrap();
        assert_eq!(fingerprint, key.dc_fingerprint().hex());
        let blobdir = context.ctx.get_blobdir().to_str().unwrap();
        // Read back the exported file and check it round-trips.
        let filename = format!("{blobdir}/{filename}");
        let bytes = tokio::fs::read(&filename).await.unwrap();

        assert_eq!(bytes, key.to_asc(None).into_bytes());

        // The exported file must be importable into a fresh account.
        let alice = &TestContext::new().await;
        if let Err(err) = imex(alice, ImexMode::ImportSelfKeys, Path::new(&filename), None).await {
            panic!("got error on import: {err:#}");
        }
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_and_import_key_from_dir() {
        let export_dir = tempfile::tempdir().unwrap();

        let context = TestContext::new_alice().await;
        if let Err(err) = imex(
            &context.ctx,
            ImexMode::ExportSelfKeys,
            export_dir.path(),
            None,
        )
        .await
        {
            panic!("got error on export: {err:#}");
        }

        let context2 = TestContext::new().await;
        if let Err(err) = imex(
            &context2.ctx,
            ImexMode::ImportSelfKeys,
            export_dir.path(),
            None,
        )
        .await
        {
            panic!("got error on import: {err:#}");
        }
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_import_second_key() -> Result<()> {
        let alice = &TestContext::new_alice().await;
        let chat = alice.create_chat(alice).await;
        let sent = alice.send_text(chat.id, "Encrypted with old key").await;
        let export_dir = tempfile::tempdir().unwrap();

        let alice = &TestContext::new().await;
        alice.configure_addr("alice@example.org").await;
        imex(alice, ImexMode::ExportSelfKeys, export_dir.path(), None).await?;

        let alice = &TestContext::new_alice().await;
        let old_key = key::load_self_secret_key(alice).await?;

        // Importing a second key into a configured account must fail ...
        assert!(
            imex(alice, ImexMode::ImportSelfKeys, export_dir.path(), None)
                .await
                .is_err()
        );

        // ... and must leave the existing key untouched.
        assert_eq!(key::load_self_secret_key(alice).await?, old_key);

        assert_eq!(key::load_self_secret_keyring(alice).await?, vec![old_key]);

        let msg = alice.recv_msg(&sent).await;
        assert!(msg.get_showpadlock());
        assert_eq!(msg.chat_id, alice.get_self_chat().await.id);
        assert_eq!(msg.get_text(), "Encrypted with old key");

        Ok(())
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_and_import_backup() -> Result<()> {
        for set_verified_oneonone_chats in [true, false] {
            let backup_dir = tempfile::tempdir().unwrap();

            let context1 = TestContext::new_alice().await;
            assert!(context1.is_configured().await?);
            if set_verified_oneonone_chats {
                context1
                    .set_config_bool(Config::VerifiedOneOnOneChats, true)
                    .await?;
            }

            let context2 = TestContext::new().await;
            assert!(!context2.is_configured().await?);
            assert!(has_backup(&context2, backup_dir.path()).await.is_err());

            assert!(
                imex(&context1, ImexMode::ExportBackup, backup_dir.path(), None)
                    .await
                    .is_ok()
            );
            let _event = context1
                .evtracker
                .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
                .await;

            let backup = has_backup(&context2, backup_dir.path()).await?;

            // Importing with a wrong passphrase must fail.
            assert!(imex(
                &context2,
                ImexMode::ImportBackup,
                backup.as_ref(),
                Some("foobar".to_string())
            )
            .await
            .is_err());

            assert!(
                imex(&context2, ImexMode::ImportBackup, backup.as_ref(), None)
                    .await
                    .is_ok()
            );
            let _event = context2
                .evtracker
                .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
                .await;

            assert!(context2.is_configured().await?);
            assert_eq!(
                context2.get_config(Config::Addr).await?,
                Some("alice@example.org".to_string())
            );
            // The verified-chats flag is reset in the exported copy only.
            assert_eq!(
                context2
                    .get_config_bool(Config::VerifiedOneOnOneChats)
                    .await?,
                false
            );
            assert_eq!(
                context1
                    .get_config_bool(Config::VerifiedOneOnOneChats)
                    .await?,
                set_verified_oneonone_chats
            );
        }
        Ok(())
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_import_chatmail_backup() -> Result<()> {
        let backup_dir = tempfile::tempdir().unwrap();

        let context1 = &TestContext::new_alice().await;

        assert_eq!(
            context1.get_config(Config::BccSelf).await?,
            Some("1".to_string())
        );
        assert_eq!(
            context1.get_config(Config::DeleteServerAfter).await?,
            Some("0".to_string())
        );
        context1.set_config_bool(Config::IsChatmail, true).await?;
        assert_eq!(
            context1.get_config(Config::BccSelf).await?,
            Some("0".to_string())
        );
        assert_eq!(
            context1.get_config(Config::DeleteServerAfter).await?,
            Some("1".to_string())
        );

        assert_eq!(context1.get_config_delete_server_after().await?, Some(0));
        imex(context1, ImexMode::ExportBackup, backup_dir.path(), None).await?;
        let _event = context1
            .evtracker
            .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
            .await;

        let context2 = &TestContext::new().await;
        let backup = has_backup(context2, backup_dir.path()).await?;
        imex(context2, ImexMode::ImportBackup, backup.as_ref(), None).await?;
        let _event = context2
            .evtracker
            .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
            .await;
        assert!(context2.is_configured().await?);
        assert!(context2.is_chatmail().await?);
        // Exporting runs adjust_bcc_self(), so both sides end up with BccSelf
        // enabled and server-side deletion disabled.
        for ctx in [context1, context2] {
            assert_eq!(
                ctx.get_config(Config::BccSelf).await?,
                Some("1".to_string())
            );
            assert_eq!(
                ctx.get_config(Config::DeleteServerAfter).await?,
                Some("0".to_string())
            );
            assert_eq!(ctx.get_config_delete_server_after().await?, None);
        }
        Ok(())
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_import_backup_reset_config_cache() -> Result<()> {
        let backup_dir = tempfile::tempdir()?;
        let context1 = TestContext::new_alice().await;
        let context2 = TestContext::new().await;
        assert!(!context2.is_configured().await?);

        imex(&context1, ImexMode::ExportBackup, backup_dir.path(), None).await?;

        let backup = has_backup(&context2, backup_dir.path()).await?;
        let context2_cloned = context2.clone();
        let handle = task::spawn(async move {
            imex(
                &context2_cloned,
                ImexMode::ImportBackup,
                backup.as_ref(),
                None,
            )
            .await
            .unwrap();
        });

        // Poll the config cache concurrently with the import to make sure
        // the cache is invalidated once the import completes.
        while !handle.is_finished() {
            context2.is_configured().await.ok();
            tokio::time::sleep(Duration::from_micros(1)).await;
        }

        assert!(context2.is_configured().await?);

        Ok(())
    }
}