use std::ffi::OsStr;
use std::path::{Path, PathBuf};
use std::pin::Pin;

use anyhow::{Context as _, Result, bail, ensure, format_err};
use futures::TryStreamExt;
use futures_lite::FutureExt;
use pin_project::pin_project;

use tokio::fs::{self, File};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio_tar::Archive;

use crate::blob::BlobDirContents;
use crate::chat::delete_and_reset_all_device_msgs;
use crate::config::Config;
use crate::context::Context;
use crate::e2ee;
use crate::events::EventType;
use crate::key::{self, DcKey, DcSecretKey, SignedPublicKey, SignedSecretKey};
use crate::log::{LogExt, error, info, warn};
use crate::pgp;
use crate::qr::DCBACKUP_VERSION;
use crate::sql;
use crate::tools::{
    TempPathGuard, create_folder, delete_file, get_filesuffix_lc, read_file, time, usize_to_u64,
    write_file,
};

mod key_transfer;
mod transfer;

use ::pgp::types::KeyDetails;
pub use key_transfer::{continue_key_transfer, initiate_key_transfer};
pub use transfer::{BackupProvider, get_backup};
/// Name of the database file inside a backup archive.
const DBFILE_BACKUP_NAME: &str = "dc_database_backup.sqlite";

/// Name of the directory holding the blobs inside a backup archive.
pub(crate) const BLOBS_BACKUP_NAME: &str = "blobs_backup";
/// Import/export mode: what to import or export.
#[derive(Debug, Display, Copy, Clone, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(u32)]
pub enum ImexMode {
    /// Export all private and public keys of the user to the directory given as `path`.
    ExportSelfKeys = 1,

    /// Import private keys found in the file or directory given as `path`.
    ImportSelfKeys = 2,

    /// Export a backup (database and blobs) to the directory given as `path`.
    ExportBackup = 11,

    /// Import a backup from the file given as `path`.
    ImportBackup = 12,
}
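/// Runs the import/export job given by `what` on `path`.
///
/// Takes the "ongoing" slot and pauses the scheduler while running, so only one
/// import/export can run at a time. Progress is reported via
/// `EventType::ImexProgress` (0 = error, 1..=999 = running, 1000 = success).
///
/// A minimal sketch of a typical call, assuming `ctx` is an open, configured
/// [`Context`] and `fresh_ctx` an unconfigured one (marked `ignore` because a
/// real account setup is needed):
///
/// ```ignore
/// use std::path::Path;
///
/// // Export a backup (no passphrase) into the given directory:
/// imex(&ctx, ImexMode::ExportBackup, Path::new("/tmp/backups"), None).await?;
///
/// // Later, import the newest backup found there into a fresh account:
/// let newest = has_backup(&fresh_ctx, Path::new("/tmp/backups")).await?;
/// imex(&fresh_ctx, ImexMode::ImportBackup, Path::new(&newest), None).await?;
/// ```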
pub async fn imex(
    context: &Context,
    what: ImexMode,
    path: &Path,
    passphrase: Option<String>,
) -> Result<()> {
    let cancel = context.alloc_ongoing().await?;

    let res = {
        let _guard = context.scheduler.pause(context).await?;
        imex_inner(context, what, path, passphrase)
            .race(async {
                cancel.recv().await.ok();
                Err(format_err!("canceled"))
            })
            .await
    };
    context.free_ongoing().await;

    if let Err(err) = res.as_ref() {
        error!(context, "IMEX failed to complete: {:#}", err);
        context.emit_event(EventType::ImexProgress(0));
    } else {
        info!(context, "IMEX successfully completed");
        context.emit_event(EventType::ImexProgress(1000));
    }

    res
}
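/// Returns the path of the newest backup file found in `dir_name`.
///
/// Backups are recognized by the `delta-chat*.tar` file name pattern; the
/// lexicographically largest name (which encodes the date) wins. Returns an
/// error if no backup is found.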
pub async fn has_backup(_context: &Context, dir_name: &Path) -> Result<String> {
    let mut dir_iter = tokio::fs::read_dir(dir_name).await?;
    let mut newest_backup_name = "".to_string();
    let mut newest_backup_path: Option<PathBuf> = None;

    while let Ok(Some(dirent)) = dir_iter.next_entry().await {
        let path = dirent.path();
        let name = dirent.file_name();
        let name: String = name.to_string_lossy().into();
        if name.starts_with("delta-chat")
            && name.ends_with(".tar")
            && (newest_backup_name.is_empty() || name > newest_backup_name)
        {
            newest_backup_path = Some(path);
            newest_backup_name = name;
        }
    }

    match newest_backup_path {
        Some(path) => Ok(path.to_string_lossy().into_owned()),
        None => bail!("no backup found in {}", dir_name.display()),
    }
}
async fn set_self_key(context: &Context, armored: &str) -> Result<()> {
    let private_key = SignedSecretKey::from_asc(armored)?;
    let public_key = private_key.split_public_key()?;

    let keypair = pgp::KeyPair {
        public: public_key,
        secret: private_key,
    };
    key::store_self_keypair(context, &keypair).await?;

    info!(
        context,
        "stored self key: {:?}",
        keypair.secret.public_key().key_id()
    );
    Ok(())
}
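/// Dispatches to the concrete import/export implementation.
///
/// For the export modes this also makes sure a self key exists and that the
/// target folder is created before anything is written.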
async fn imex_inner(
    context: &Context,
    what: ImexMode,
    path: &Path,
    passphrase: Option<String>,
) -> Result<()> {
    info!(
        context,
        "{} path: {}",
        match what {
            ImexMode::ExportSelfKeys | ImexMode::ExportBackup => "Export",
            ImexMode::ImportSelfKeys | ImexMode::ImportBackup => "Import",
        },
        path.display()
    );
    ensure!(context.sql.is_open().await, "Database not opened.");
    context.emit_event(EventType::ImexProgress(1));

    if what == ImexMode::ExportBackup || what == ImexMode::ExportSelfKeys {
        e2ee::ensure_secret_key_exists(context)
            .await
            .context("Cannot create private key or private key not available")?;

        create_folder(context, path).await?;
    }

    match what {
        ImexMode::ExportSelfKeys => export_self_keys(context, path).await,
        ImexMode::ImportSelfKeys => import_self_keys(context, path).await,

        ImexMode::ExportBackup => {
            export_backup(context, path, passphrase.unwrap_or_default()).await
        }
        ImexMode::ImportBackup => {
            import_backup(context, path, passphrase.unwrap_or_default()).await
        }
    }
}
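/// Imports a backup `.tar` file into the current account.
///
/// Fails if the account is already configured or if IO is running. The actual
/// work is done by [`import_backup_stream`], which uses the file size to scale
/// the progress events.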
async fn import_backup(
    context: &Context,
    backup_to_import: &Path,
    passphrase: String,
) -> Result<()> {
    ensure!(
        !context.is_configured().await?,
        "Cannot import backups to accounts in use."
    );
    ensure!(
        !context.scheduler.is_running().await,
        "cannot import backup, IO is running"
    );

    let backup_file = File::open(backup_to_import).await?;
    let file_size = backup_file.metadata().await?.len();
    info!(
        context,
        "Import \"{}\" ({} bytes) to \"{}\".",
        backup_to_import.display(),
        file_size,
        context.get_dbfile().display()
    );

    import_backup_stream(context, backup_file, file_size, passphrase).await?;
    Ok(())
}
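/// Imports a backup from the given stream of `file_size` bytes.
///
/// `file_size` is only used to scale the progress events emitted while reading.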
pub(crate) async fn import_backup_stream<R: tokio::io::AsyncRead + Unpin>(
    context: &Context,
    backup_file: R,
    file_size: u64,
    passphrase: String,
) -> Result<()> {
    import_backup_stream_inner(context, backup_file, file_size, passphrase)
        .await
        .0
}
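// Reader wrapper that emits `ImexProgress` events while the backup is read.
// Progress is scaled to 0..=1000 as `min(1000 * read / file_size, 999)`; the
// cap at 999 leaves the final 1000 to be emitted by `imex()` on success.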
#[pin_project]
struct ProgressReader<R> {
    /// The wrapped reader.
    #[pin]
    inner: R,

    /// Total number of bytes read so far.
    read: u64,

    /// Expected total size of the backup stream.
    file_size: u64,

    /// Last progress value (0..=1000) emitted as an event.
    last_progress: u16,

    /// Context used to emit progress events.
    context: Context,
}

impl<R> ProgressReader<R> {
    fn new(r: R, context: Context, file_size: u64) -> Self {
        Self {
            inner: r,
            read: 0,
            file_size,
            last_progress: 1,
            context,
        }
    }
}
impl<R> AsyncRead for ProgressReader<R>
where
    R: AsyncRead,
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        let this = self.project();
        let before = buf.filled().len();
        let res = this.inner.poll_read(cx, buf);
        if let std::task::Poll::Ready(Ok(())) = res {
            *this.read = this
                .read
                .saturating_add(usize_to_u64(buf.filled().len() - before));

            let progress = std::cmp::min(1000 * *this.read / *this.file_size, 999) as u16;
            if progress > *this.last_progress {
                this.context.emit_event(EventType::ImexProgress(progress));
                *this.last_progress = progress;
            }
        }
        res
    }
}
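// The result is returned wrapped in a 1-tuple so that `?` cannot be used
// inside: every error has to be handled explicitly, and the cleanup of
// partially unpacked blobs and of the unpacked database below always runs.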
async fn import_backup_stream_inner<R: tokio::io::AsyncRead + Unpin>(
    context: &Context,
    backup_file: R,
    file_size: u64,
    passphrase: String,
) -> (Result<()>,) {
    let backup_file = ProgressReader::new(backup_file, context.clone(), file_size);
    let mut archive = Archive::new(backup_file);

    let mut entries = match archive.entries() {
        Ok(entries) => entries,
        Err(e) => return (Err(e).context("Failed to get archive entries"),),
    };
    let mut blobs = Vec::new();
    // Unpack all entries, remembering the blobs written so they can be removed on failure.
    let mut res: Result<()> = loop {
        let mut f = match entries.try_next().await {
            Ok(Some(f)) => f,
            Ok(None) => break Ok(()),
            Err(e) => break Err(e).context("Failed to get next entry"),
        };

        let path = match f.path() {
            Ok(path) => path.to_path_buf(),
            Err(e) => break Err(e).context("Failed to get entry path"),
        };
        if let Err(e) = f.unpack_in(context.get_blobdir()).await {
            break Err(e).context("Failed to unpack file");
        }
        if path.file_name() == Some(OsStr::new(DBFILE_BACKUP_NAME)) {
            continue;
        }
        // Blobs are unpacked into a subdirectory; move them to the top level of the blobdir.
        let from_path = context.get_blobdir().join(&path);
        if from_path.is_file() {
            if let Some(name) = from_path.file_name() {
                let to_path = context.get_blobdir().join(name);
                if let Err(e) = fs::rename(&from_path, &to_path).await {
                    blobs.push(from_path);
                    break Err(e).context("Failed to move file to blobdir");
                }
                blobs.push(to_path);
            } else {
                warn!(context, "No file name");
            }
        }
    };
    if res.is_err() {
        for blob in blobs {
            fs::remove_file(&blob).await.log_err(context).ok();
        }
    }

    let unpacked_database = context.get_blobdir().join(DBFILE_BACKUP_NAME);
    if res.is_ok() {
        res = context
            .sql
            .import(&unpacked_database, passphrase.clone())
            .await
            .context("cannot import unpacked database");
    }
    if res.is_ok() {
        res = check_backup_version(context).await;
    }
    if res.is_ok() {
        res = context.set_config(Config::BccSelf, Some("1")).await;
    }
    fs::remove_file(unpacked_database)
        .await
        .context("cannot remove unpacked database")
        .log_err(context)
        .ok();
    if res.is_ok() {
        context.emit_event(EventType::ImexProgress(999));
        res = context.sql.run_migrations(context).await;
        context.emit_event(EventType::AccountsItemChanged);
    }
    if res.is_ok() {
        delete_and_reset_all_device_msgs(context)
            .await
            .log_err(context)
            .ok();
    }
    (res,)
}
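/// Returns three free file names in `folder` for one backup attempt:
/// the temporary database export (`.db`), the temporary archive (`.tar.part`)
/// and the final destination (`.tar`), all sharing a
/// `delta-chat-backup-YYYY-MM-DD-NN-{addr}` stem. Up to 64 indices are tried
/// before giving up.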
fn get_next_backup_path(
    folder: &Path,
    addr: &str,
    backup_time: i64,
) -> Result<(PathBuf, PathBuf, PathBuf)> {
    let folder = PathBuf::from(folder);
    let stem = chrono::DateTime::<chrono::Utc>::from_timestamp(backup_time, 0)
        .context("can't get next backup path")?
        .format("delta-chat-backup-%Y-%m-%d")
        .to_string();

    for i in 0..64 {
        let mut tempdbfile = folder.clone();
        tempdbfile.push(format!("{stem}-{i:02}-{addr}.db"));

        let mut tempfile = folder.clone();
        tempfile.push(format!("{stem}-{i:02}-{addr}.tar.part"));

        let mut destfile = folder.clone();
        destfile.push(format!("{stem}-{i:02}-{addr}.tar"));

        if !tempdbfile.exists() && !tempfile.exists() && !destfile.exists() {
            return Ok((tempdbfile, tempfile, destfile));
        }
    }
    bail!("could not create backup file, disk full?");
}
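/// Exports a backup to the directory `dir`.
///
/// The database is first exported (keyed with `passphrase`) to a temporary
/// file, then streamed together with all blobs into a `.tar.part` file which
/// is renamed to the final `.tar` name only after the export succeeded.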
async fn export_backup(context: &Context, dir: &Path, passphrase: String) -> Result<()> {
    let now = time();
    let self_addr = context.get_primary_self_addr().await?;
    let (temp_db_path, temp_path, dest_path) = get_next_backup_path(dir, &self_addr, now)?;
    let temp_db_path = TempPathGuard::new(temp_db_path);
    let temp_path = TempPathGuard::new(temp_path);

    export_database(context, &temp_db_path, passphrase, now)
        .await
        .context("could not export database")?;

    info!(
        context,
        "Backup '{}' to '{}'.",
        context.get_dbfile().display(),
        dest_path.display(),
    );

    let file = File::create(&temp_path).await?;
    let blobdir = BlobDirContents::new(context).await?;

    let mut file_size = 0;
    file_size += temp_db_path.metadata()?.len();
    for blob in blobdir.iter() {
        file_size += blob.to_abs_path().metadata()?.len()
    }

    export_backup_stream(context, &temp_db_path, blobdir, file, file_size)
        .await
        .context("Exporting backup to file failed")?;
    fs::rename(temp_path, &dest_path).await?;
    context.emit_event(EventType::ImexFileWritten(dest_path));
    Ok(())
}
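// Writer-side counterpart of `ProgressReader`: emits `ImexProgress` events
// while the backup archive is written, using the same 0..=999 scaling.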
#[pin_project]
struct ProgressWriter<W> {
    /// The wrapped writer.
    #[pin]
    inner: W,

    /// Total number of bytes written so far.
    written: u64,

    /// Expected total size of the backup stream.
    file_size: u64,

    /// Last progress value (0..=1000) emitted as an event.
    last_progress: u16,

    /// Context used to emit progress events.
    context: Context,
}

impl<W> ProgressWriter<W> {
    fn new(w: W, context: Context, file_size: u64) -> Self {
        Self {
            inner: w,
            written: 0,
            file_size,
            last_progress: 1,
            context,
        }
    }
}
impl<W> AsyncWrite for ProgressWriter<W>
where
    W: AsyncWrite,
{
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &[u8],
    ) -> std::task::Poll<Result<usize, std::io::Error>> {
        let this = self.project();
        let res = this.inner.poll_write(cx, buf);
        if let std::task::Poll::Ready(Ok(written)) = res {
            *this.written = this.written.saturating_add(usize_to_u64(written));

            let progress = std::cmp::min(1000 * *this.written / *this.file_size, 999) as u16;
            if progress > *this.last_progress {
                this.context.emit_event(EventType::ImexProgress(progress));
                *this.last_progress = progress;
            }
        }
        res
    }

    fn poll_flush(
        self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), std::io::Error>> {
        self.project().inner.poll_flush(cx)
    }

    fn poll_shutdown(
        self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), std::io::Error>> {
        self.project().inner.poll_shutdown(cx)
    }
}
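/// Streams the backup as a tar archive into `writer`.
///
/// The database export is stored as [`DBFILE_BACKUP_NAME`], followed by every
/// blob under the [`BLOBS_BACKUP_NAME`] prefix. `file_size` must be the total
/// size of the database plus all blobs and is only used for progress reporting.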
pub(crate) async fn export_backup_stream<'a, W>(
    context: &'a Context,
    temp_db_path: &Path,
    blobdir: BlobDirContents<'a>,
    writer: W,
    file_size: u64,
) -> Result<()>
where
    W: tokio::io::AsyncWrite + tokio::io::AsyncWriteExt + Unpin + Send + 'static,
{
    let writer = ProgressWriter::new(writer, context.clone(), file_size);
    let mut builder = tokio_tar::Builder::new(writer);

    builder
        .append_path_with_name(temp_db_path, DBFILE_BACKUP_NAME)
        .await?;

    for blob in blobdir.iter() {
        let mut file = File::open(blob.to_abs_path()).await?;
        let path_in_archive = PathBuf::from(BLOBS_BACKUP_NAME).join(blob.as_name());
        builder.append_file(path_in_archive, &mut file).await?;
    }

    builder.finish().await?;
    Ok(())
}
async fn import_secret_key(context: &Context, path: &Path) -> Result<()> {
    let buf = read_file(context, path).await?;
    let armored = std::string::String::from_utf8_lossy(&buf);
    set_self_key(context, &armored).await?;
    Ok(())
}
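/// Imports secret keys from the file or directory given as `path`.
///
/// If `path` is a file it is imported directly as the default key; if it is a
/// directory, every `*.asc` file in it is tried. Fails if nothing could be
/// imported.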
async fn import_self_keys(context: &Context, path: &Path) -> Result<()> {
    let attr = tokio::fs::metadata(path).await?;

    if attr.is_file() {
        info!(
            context,
            "Importing secret key from {} as the default key.",
            path.display()
        );
        import_secret_key(context, path).await?;
        return Ok(());
    }

    let mut imported_cnt = 0;

    let mut dir_handle = tokio::fs::read_dir(&path).await?;
    while let Ok(Some(entry)) = dir_handle.next_entry().await {
        let entry_fn = entry.file_name();
        let name_f = entry_fn.to_string_lossy();
        let path_plus_name = path.join(&entry_fn);
        if let Some(suffix) = get_filesuffix_lc(&name_f) {
            if suffix != "asc" {
                continue;
            }
        } else {
            continue;
        };
        info!(
            context,
            "Considering key file: {}.",
            path_plus_name.display()
        );

        if let Err(err) = import_secret_key(context, &path_plus_name).await {
            warn!(
                context,
                "Failed to import secret key from {}: {:#}.",
                path_plus_name.display(),
                err
            );
            continue;
        }

        imported_cnt += 1;
    }
    ensure!(
        imported_cnt > 0,
        "No private keys found in {}.",
        path.display()
    );
    Ok(())
}
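/// Exports all keypairs from the `keypairs` table to ASCII armored files in `dir`.
///
/// Fails if any key could not be decoded or written.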
async fn export_self_keys(context: &Context, dir: &Path) -> Result<()> {
    let mut export_errors = 0;

    let keys = context
        .sql
        .query_map_vec(
            "SELECT id, public_key, private_key, id=(SELECT value FROM config WHERE keyname='key_id') FROM keypairs;",
            (),
            |row| {
                let id = row.get(0)?;
                let public_key_blob: Vec<u8> = row.get(1)?;
                let public_key = SignedPublicKey::from_slice(&public_key_blob);
                let private_key_blob: Vec<u8> = row.get(2)?;
                let private_key = SignedSecretKey::from_slice(&private_key_blob);
                let is_default: i32 = row.get(3)?;

                Ok((id, public_key, private_key, is_default))
            },
        )
        .await?;
    let self_addr = context.get_primary_self_addr().await?;
    for (id, public_key, private_key, is_default) in keys {
        let id = Some(id).filter(|_| is_default == 0);

        if let Ok(key) = public_key {
            if let Err(err) = export_key_to_asc_file(context, dir, &self_addr, id, &key).await {
                error!(context, "Failed to export public key: {:#}.", err);
                export_errors += 1;
            }
        } else {
            export_errors += 1;
        }
        if let Ok(key) = private_key {
            if let Err(err) = export_key_to_asc_file(context, dir, &self_addr, id, &key).await {
                error!(context, "Failed to export private key: {:#}.", err);
                export_errors += 1;
            }
        } else {
            export_errors += 1;
        }
    }

    ensure!(export_errors == 0, "errors while exporting keys");
    Ok(())
}
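/// Writes `key` to an ASCII armored file in `dir` and returns the file name.
///
/// The name has the form
/// `{public|private}-key-{addr}-{id or "default"}-{fingerprint}.asc`;
/// an `ImexFileWritten` event is emitted for the created file.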
async fn export_key_to_asc_file<T>(
    context: &Context,
    dir: &Path,
    addr: &str,
    id: Option<i64>,
    key: &T,
) -> Result<String>
where
    T: DcKey,
{
    let file_name = {
        let kind = match T::is_private() {
            false => "public",
            true => "private",
        };
        let id = id.map_or("default".into(), |i| i.to_string());
        let fp = key.dc_fingerprint().hex();
        format!("{kind}-key-{addr}-{id}-{fp}.asc")
    };
    let path = dir.join(&file_name);
    info!(
        context,
        "Exporting key {:?} to {}.",
        key.key_id(),
        path.display()
    );

    delete_file(context, &path).await.ok();

    let content = key.to_asc(None).into_bytes();
    write_file(context, &path, &content)
        .await
        .with_context(|| format!("cannot write key to {}", path.display()))?;
    context.emit_event(EventType::ImexFileWritten(path));
    Ok(file_name)
}
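/// Exports the database to `dest`, keyed with `passphrase`, using SQLCipher's
/// `sqlcipher_export()`.
///
/// Roughly, the following SQL runs on the write connection (see the body below
/// for the exact statements and error handling):
///
/// ```sql
/// ATTACH DATABASE '<dest>' AS backup KEY '<passphrase>';
/// SELECT sqlcipher_export('backup');
/// DETACH DATABASE backup;
/// ```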
async fn export_database(
    context: &Context,
    dest: &Path,
    passphrase: String,
    timestamp: i64,
) -> Result<()> {
    ensure!(
        !context.scheduler.is_running().await,
        "cannot export backup, IO is running"
    );
    let timestamp = timestamp.try_into().context("32-bit UNIX time overflow")?;

    let dest = dest
        .to_str()
        .with_context(|| format!("path {} is not valid unicode", dest.display()))?;

    context.set_config(Config::BccSelf, Some("1")).await?;
    context
        .sql
        .set_raw_config_int("backup_time", timestamp)
        .await?;
    context
        .sql
        .set_raw_config_int("backup_version", DCBACKUP_VERSION)
        .await?;
    sql::housekeeping(context).await.log_err(context).ok();
    context
        .sql
        .call_write(|conn| {
            conn.execute("VACUUM;", ())
                .map_err(|err| warn!(context, "Vacuum failed, exporting anyway {err}"))
                .ok();
            conn.execute("ATTACH DATABASE ? AS backup KEY ?", (dest, passphrase))
                .context("failed to attach backup database")?;
            let res = conn
                .query_row("SELECT sqlcipher_export('backup')", [], |_row| Ok(()))
                .context("failed to export to attached backup database");
            conn.execute(
                "UPDATE backup.config SET value='0' WHERE keyname='verified_one_on_one_chats';",
                [],
            )
            .ok();
            conn.execute("DETACH DATABASE backup", [])
                .context("failed to detach backup database")?;
            res?;
            Ok(())
        })
        .await
}
async fn check_backup_version(context: &Context) -> Result<()> {
    let version = (context.sql.get_raw_config_int("backup_version").await?).unwrap_or(2);
    ensure!(
        version <= DCBACKUP_VERSION,
        "Backup too new, please update Delta Chat"
    );
    Ok(())
}
#[cfg(test)]
mod tests {
    use std::time::Duration;

    use tokio::task;

    use super::*;
    use crate::config::Config;
    use crate::test_utils::{TestContext, alice_keypair};

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_public_key_to_asc_file() {
        let context = TestContext::new().await;
        let key = alice_keypair().public;
        let blobdir = Path::new("$BLOBDIR");
        let filename = export_key_to_asc_file(&context.ctx, blobdir, "a@b", None, &key)
            .await
            .unwrap();
        assert!(filename.starts_with("public-key-a@b-default-"));
        assert!(filename.ends_with(".asc"));
        let blobdir = context.ctx.get_blobdir().to_str().unwrap();
        let filename = format!("{blobdir}/{filename}");
        let bytes = tokio::fs::read(&filename).await.unwrap();

        assert_eq!(bytes, key.to_asc(None).into_bytes());
    }
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_import_private_key_exported_to_asc_file() {
        let context = TestContext::new().await;
        let key = alice_keypair().secret;
        let blobdir = Path::new("$BLOBDIR");
        let filename = export_key_to_asc_file(&context.ctx, blobdir, "a@b", None, &key)
            .await
            .unwrap();
        let fingerprint = filename
            .strip_prefix("private-key-a@b-default-")
            .unwrap()
            .strip_suffix(".asc")
            .unwrap();
        assert_eq!(fingerprint, key.dc_fingerprint().hex());
        let blobdir = context.ctx.get_blobdir().to_str().unwrap();
        let filename = format!("{blobdir}/{filename}");
        let bytes = tokio::fs::read(&filename).await.unwrap();

        assert_eq!(bytes, key.to_asc(None).into_bytes());

        let alice = &TestContext::new().await;
        if let Err(err) = imex(alice, ImexMode::ImportSelfKeys, Path::new(&filename), None).await {
            panic!("got error on import: {err:#}");
        }
    }
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_and_import_key_from_dir() {
        let export_dir = tempfile::tempdir().unwrap();

        let context = TestContext::new_alice().await;
        if let Err(err) = imex(
            &context.ctx,
            ImexMode::ExportSelfKeys,
            export_dir.path(),
            None,
        )
        .await
        {
            panic!("got error on export: {err:#}");
        }

        let context2 = TestContext::new().await;
        if let Err(err) = imex(
            &context2.ctx,
            ImexMode::ImportSelfKeys,
            export_dir.path(),
            None,
        )
        .await
        {
            panic!("got error on import: {err:#}");
        }
    }
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_import_second_key() -> Result<()> {
        let alice = &TestContext::new_alice().await;
        let chat = alice.create_chat(alice).await;
        let sent = alice.send_text(chat.id, "Encrypted with old key").await;
        let export_dir = tempfile::tempdir().unwrap();

        let alice = &TestContext::new().await;
        alice.configure_addr("alice@example.org").await;
        imex(alice, ImexMode::ExportSelfKeys, export_dir.path(), None).await?;

        let alice = &TestContext::new_alice().await;
        let old_key = key::load_self_secret_key(alice).await?;

        assert!(
            imex(alice, ImexMode::ImportSelfKeys, export_dir.path(), None)
                .await
                .is_err()
        );

        assert_eq!(key::load_self_secret_key(alice).await?, old_key);

        assert_eq!(key::load_self_secret_keyring(alice).await?, vec![old_key]);

        let msg = alice.recv_msg(&sent).await;
        assert!(msg.get_showpadlock());
        assert_eq!(msg.chat_id, alice.get_self_chat().await.id);
        assert_eq!(msg.get_text(), "Encrypted with old key");

        Ok(())
    }
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_and_import_backup() -> Result<()> {
        let backup_dir = tempfile::tempdir().unwrap();

        let context1 = TestContext::new_alice().await;
        assert!(context1.is_configured().await?);

        let context2 = TestContext::new().await;
        assert!(!context2.is_configured().await?);
        assert!(has_backup(&context2, backup_dir.path()).await.is_err());

        // Export a backup.
        assert!(
            imex(&context1, ImexMode::ExportBackup, backup_dir.path(), None)
                .await
                .is_ok()
        );
        let _event = context1
            .evtracker
            .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
            .await;

        let backup = has_backup(&context2, backup_dir.path()).await?;

        // Importing with a wrong passphrase fails.
        assert!(
            imex(
                &context2,
                ImexMode::ImportBackup,
                backup.as_ref(),
                Some("foobar".to_string())
            )
            .await
            .is_err()
        );

        // Importing without a passphrase works.
        assert!(
            imex(&context2, ImexMode::ImportBackup, backup.as_ref(), None)
                .await
                .is_ok()
        );
        let _event = context2
            .evtracker
            .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
            .await;

        assert!(context2.is_configured().await?);
        assert_eq!(
            context2.get_config(Config::Addr).await?,
            Some("alice@example.org".to_string())
        );
        Ok(())
    }
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_import_chatmail_backup() -> Result<()> {
        let backup_dir = tempfile::tempdir().unwrap();

        let context1 = &TestContext::new_alice().await;

        context1.set_config(Config::BccSelf, None).await?;

        assert_eq!(
            context1.get_config(Config::DeleteServerAfter).await?,
            Some("0".to_string())
        );
        context1.set_config_bool(Config::IsChatmail, true).await?;
        assert_eq!(
            context1.get_config(Config::BccSelf).await?,
            Some("0".to_string())
        );
        assert_eq!(
            context1.get_config(Config::DeleteServerAfter).await?,
            Some("1".to_string())
        );

        assert_eq!(context1.get_config_delete_server_after().await?, Some(0));
        imex(context1, ImexMode::ExportBackup, backup_dir.path(), None).await?;
        let _event = context1
            .evtracker
            .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
            .await;

        let context2 = &TestContext::new().await;
        let backup = has_backup(context2, backup_dir.path()).await?;
        imex(context2, ImexMode::ImportBackup, backup.as_ref(), None).await?;
        let _event = context2
            .evtracker
            .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
            .await;
        assert!(context2.is_configured().await?);
        assert!(context2.is_chatmail().await?);
        for ctx in [context1, context2] {
            assert_eq!(
                ctx.get_config(Config::BccSelf).await?,
                Some("1".to_string())
            );
            assert_eq!(
                ctx.get_config(Config::DeleteServerAfter).await?,
                Some("0".to_string())
            );
            assert_eq!(ctx.get_config_delete_server_after().await?, None);
        }
        Ok(())
    }
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_import_backup_reset_config_cache() -> Result<()> {
        let backup_dir = tempfile::tempdir()?;
        let context1 = TestContext::new_alice().await;
        let context2 = TestContext::new().await;
        assert!(!context2.is_configured().await?);

        imex(&context1, ImexMode::ExportBackup, backup_dir.path(), None).await?;

        let backup = has_backup(&context2, backup_dir.path()).await?;
        let context2_cloned = context2.clone();
        let handle = task::spawn(async move {
            imex(
                &context2_cloned,
                ImexMode::ImportBackup,
                backup.as_ref(),
                None,
            )
            .await
            .unwrap();
        });

        // Poll `is_configured()` while the import is running to ensure the
        // config cache is properly reset once the import finishes.
        while !handle.is_finished() {
            context2.is_configured().await.ok();
            tokio::time::sleep(Duration::from_micros(1)).await;
        }

        assert!(context2.is_configured().await?);

        Ok(())
    }
}