deltachat/
imex.rs

1//! # Import/export module.
2
3use std::ffi::OsStr;
4use std::path::{Path, PathBuf};
5use std::pin::Pin;
6
7use anyhow::{bail, ensure, format_err, Context as _, Result};
8use futures::TryStreamExt;
9use futures_lite::FutureExt;
10use pin_project::pin_project;
11
12use tokio::fs::{self, File};
13use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
14use tokio_tar::Archive;
15
16use crate::blob::BlobDirContents;
17use crate::chat::delete_and_reset_all_device_msgs;
18use crate::config::Config;
19use crate::context::Context;
20use crate::e2ee;
21use crate::events::EventType;
22use crate::key::{self, DcKey, DcSecretKey, SignedPublicKey, SignedSecretKey};
23use crate::log::LogExt;
24use crate::pgp;
25use crate::qr::DCBACKUP_VERSION;
26use crate::sql;
27use crate::tools::{
28    create_folder, delete_file, get_filesuffix_lc, read_file, time, write_file, TempPathGuard,
29};
30
31mod key_transfer;
32mod transfer;
33
34use ::pgp::types::KeyDetails;
35pub use key_transfer::{continue_key_transfer, initiate_key_transfer};
36pub use transfer::{get_backup, BackupProvider};
37
/// Name of the database file stored at the root of the backup tar archive.
const DBFILE_BACKUP_NAME: &str = "dc_database_backup.sqlite";
/// Name of the directory inside the backup tar archive that contains the blob files.
pub(crate) const BLOBS_BACKUP_NAME: &str = "blobs_backup";
41
/// Import/export command.
#[derive(Debug, Display, Copy, Clone, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(u32)]
pub enum ImexMode {
    /// Export all private keys and all public keys of the user to the
    /// directory given as `path`. The default key is written to the files
    /// `{public,private}-key-<addr>-default-<fingerprint>.asc`, if there are more keys, they are
    /// written to files as `{public,private}-key-<addr>-<id>-<fingerprint>.asc`.
    ExportSelfKeys = 1,

    /// Import private keys found in `path` if it is a directory, otherwise import a private key
    /// from `path`.
    /// The last imported key is made the default key unless its name contains the string `legacy`.
    /// Public keys are not imported.
    ImportSelfKeys = 2,

    /// Export a backup to the directory given as `path` with the given `passphrase`.
    /// The backup contains all contacts, chats, images and other data and device independent settings.
    /// The backup does not contain device dependent settings as ringtones or LED notification settings.
    /// The name of the backup is `delta-chat-backup-<day>-<number>-<addr>.tar`.
    ExportBackup = 11,

    /// `path` is the file (not: directory) to import. The file is normally
    /// created by DC_IMEX_EXPORT_BACKUP and detected by imex_has_backup(). Importing a backup
    /// is only possible as long as the context is not configured or used in another way.
    ImportBackup = 12,
}
69
/// Import/export things.
///
/// What to do is defined by the `what` parameter.
///
/// During execution of the job,
/// some events are sent out:
///
/// - A number of `DC_EVENT_IMEX_PROGRESS` events are sent and may be used to create
///   a progress bar or stuff like that. Moreover, you'll be informed when the imex-job is done.
///
/// - For each file written on export, the function sends `DC_EVENT_IMEX_FILE_WRITTEN`
///
/// Only one import-/export-progress can run at the same time.
/// To cancel an import-/export-progress, drop the future returned by this function.
pub async fn imex(
    context: &Context,
    what: ImexMode,
    path: &Path,
    passphrase: Option<String>,
) -> Result<()> {
    // Reserve the single "ongoing process" slot; `cancel` resolves
    // when cancellation of the ongoing process is requested.
    let cancel = context.alloc_ongoing().await?;

    let res = {
        // Pause the scheduler while the job runs
        // (the guard presumably resumes it on drop at the end of this scope).
        let _guard = context.scheduler.pause(context.clone()).await?;
        // Race the actual work against the cancellation signal:
        // whichever finishes first determines the result.
        imex_inner(context, what, path, passphrase)
            .race(async {
                cancel.recv().await.ok();
                Err(format_err!("canceled"))
            })
            .await
    };
    context.free_ongoing().await;

    if let Err(err) = res.as_ref() {
        // We are using Anyhow's .context() and to show the inner error, too, we need the {:#}:
        error!(context, "IMEX failed to complete: {:#}", err);
        // Progress 0 signals failure to the UI.
        context.emit_event(EventType::ImexProgress(0));
    } else {
        info!(context, "IMEX successfully completed");
        // Progress 1000 signals successful completion to the UI.
        context.emit_event(EventType::ImexProgress(1000));
    }

    res
}
114
115/// Returns the filename of the backup found (otherwise an error)
116pub async fn has_backup(_context: &Context, dir_name: &Path) -> Result<String> {
117    let mut dir_iter = tokio::fs::read_dir(dir_name).await?;
118    let mut newest_backup_name = "".to_string();
119    let mut newest_backup_path: Option<PathBuf> = None;
120
121    while let Ok(Some(dirent)) = dir_iter.next_entry().await {
122        let path = dirent.path();
123        let name = dirent.file_name();
124        let name: String = name.to_string_lossy().into();
125        if name.starts_with("delta-chat")
126            && name.ends_with(".tar")
127            && (newest_backup_name.is_empty() || name > newest_backup_name)
128        {
129            // We just use string comparison to determine which backup is newer.
130            // This works fine because the filenames have the form `delta-chat-backup-2023-10-18-00-foo@example.com.tar`
131            newest_backup_path = Some(path);
132            newest_backup_name = name;
133        }
134    }
135
136    match newest_backup_path {
137        Some(path) => Ok(path.to_string_lossy().into_owned()),
138        None => bail!("no backup found in {}", dir_name.display()),
139    }
140}
141
142async fn set_self_key(context: &Context, armored: &str) -> Result<()> {
143    // try hard to only modify key-state
144    let (private_key, header) = SignedSecretKey::from_asc(armored)?;
145    let public_key = private_key.split_public_key()?;
146    if let Some(preferencrypt) = header.get("Autocrypt-Prefer-Encrypt") {
147        let e2ee_enabled = match preferencrypt.as_str() {
148            "nopreference" => 0,
149            "mutual" => 1,
150            _ => {
151                bail!("invalid Autocrypt-Prefer-Encrypt header: {:?}", header);
152            }
153        };
154        context
155            .sql
156            .set_raw_config_int("e2ee_enabled", e2ee_enabled)
157            .await?;
158    } else {
159        // `Autocrypt-Prefer-Encrypt` is not included
160        // in keys exported to file.
161        //
162        // `Autocrypt-Prefer-Encrypt` also SHOULD be sent
163        // in Autocrypt Setup Message according to Autocrypt specification,
164        // but K-9 6.802 does not include this header.
165        //
166        // We keep current setting in this case.
167        info!(context, "No Autocrypt-Prefer-Encrypt header.");
168    };
169
170    let keypair = pgp::KeyPair {
171        public: public_key,
172        secret: private_key,
173    };
174    key::store_self_keypair(context, &keypair).await?;
175
176    info!(
177        context,
178        "stored self key: {:?}",
179        keypair.secret.public_key().key_id()
180    );
181    Ok(())
182}
183
184async fn imex_inner(
185    context: &Context,
186    what: ImexMode,
187    path: &Path,
188    passphrase: Option<String>,
189) -> Result<()> {
190    info!(
191        context,
192        "{} path: {}",
193        match what {
194            ImexMode::ExportSelfKeys | ImexMode::ExportBackup => "Export",
195            ImexMode::ImportSelfKeys | ImexMode::ImportBackup => "Import",
196        },
197        path.display()
198    );
199    ensure!(context.sql.is_open().await, "Database not opened.");
200    context.emit_event(EventType::ImexProgress(1));
201
202    if what == ImexMode::ExportBackup || what == ImexMode::ExportSelfKeys {
203        // before we export anything, make sure the private key exists
204        e2ee::ensure_secret_key_exists(context)
205            .await
206            .context("Cannot create private key or private key not available")?;
207
208        create_folder(context, path).await?;
209    }
210
211    match what {
212        ImexMode::ExportSelfKeys => export_self_keys(context, path).await,
213        ImexMode::ImportSelfKeys => import_self_keys(context, path).await,
214
215        ImexMode::ExportBackup => {
216            export_backup(context, path, passphrase.unwrap_or_default()).await
217        }
218        ImexMode::ImportBackup => {
219            import_backup(context, path, passphrase.unwrap_or_default()).await
220        }
221    }
222}
223
/// Imports backup into the currently open database.
///
/// The contents of the currently open database will be lost.
///
/// `passphrase` is the passphrase used to open backup database. If backup is unencrypted, pass
/// empty string here.
async fn import_backup(
    context: &Context,
    backup_to_import: &Path,
    passphrase: String,
) -> Result<()> {
    // Importing over a configured account would destroy its data,
    // so importing is only allowed into unconfigured accounts.
    ensure!(
        !context.is_configured().await?,
        "Cannot import backups to accounts in use."
    );
    ensure!(
        !context.scheduler.is_running().await,
        "cannot import backup, IO is running"
    );

    // The file size is passed along for progress reporting.
    let backup_file = File::open(backup_to_import).await?;
    let file_size = backup_file.metadata().await?.len();
    info!(
        context,
        "Import \"{}\" ({} bytes) to \"{}\".",
        backup_to_import.display(),
        file_size,
        context.get_dbfile().display()
    );

    import_backup_stream(context, backup_file, file_size, passphrase).await?;
    Ok(())
}
257
258/// Imports backup by reading a tar file from a stream.
259///
260/// `file_size` is used to calculate the progress
261/// and emit progress events.
262/// Ideally it is the sum of the entry
263/// sizes without the header overhead,
264/// but can be estimated as tar file size
265/// in which case the progress is underestimated
266/// and may not reach 99.9% by the end of import.
267/// Underestimating is better than
268/// overestimating because the progress
269/// jumps to 100% instead of getting stuck at 99.9%
270/// for some time.
271pub(crate) async fn import_backup_stream<R: tokio::io::AsyncRead + Unpin>(
272    context: &Context,
273    backup_file: R,
274    file_size: u64,
275    passphrase: String,
276) -> Result<()> {
277    import_backup_stream_inner(context, backup_file, file_size, passphrase)
278        .await
279        .0
280}
281
/// Reader that emits progress events as bytes are read from it.
///
/// Wraps the backup `.tar` stream during import and converts the number of
/// bytes read so far into `ImexProgress` events (see the `AsyncRead` impl).
#[pin_project]
struct ProgressReader<R> {
    /// Wrapped reader.
    #[pin]
    inner: R,

    /// Number of bytes successfully read from the internal reader.
    read: usize,

    /// Total size of the backup .tar file expected to be read from the reader.
    /// Used to calculate the progress.
    file_size: usize,

    /// Last progress emitted to avoid emitting the same progress value twice.
    last_progress: usize,

    /// Context for emitting progress events.
    context: Context,
}
302
303impl<R> ProgressReader<R> {
304    fn new(r: R, context: Context, file_size: u64) -> Self {
305        Self {
306            inner: r,
307            read: 0,
308            file_size: file_size as usize,
309            last_progress: 1,
310            context,
311        }
312    }
313}
314
315impl<R> AsyncRead for ProgressReader<R>
316where
317    R: AsyncRead,
318{
319    fn poll_read(
320        self: Pin<&mut Self>,
321        cx: &mut std::task::Context<'_>,
322        buf: &mut ReadBuf<'_>,
323    ) -> std::task::Poll<std::io::Result<()>> {
324        let this = self.project();
325        let before = buf.filled().len();
326        let res = this.inner.poll_read(cx, buf);
327        if let std::task::Poll::Ready(Ok(())) = res {
328            *this.read = this.read.saturating_add(buf.filled().len() - before);
329
330            let progress = std::cmp::min(1000 * *this.read / *this.file_size, 999);
331            if progress > *this.last_progress {
332                this.context.emit_event(EventType::ImexProgress(progress));
333                *this.last_progress = progress;
334            }
335        }
336        res
337    }
338}
339
/// Unpacks the backup tar archive and imports the database and blob files.
///
/// The result is wrapped in a one-element tuple;
/// NOTE(review): presumably to prevent accidental use of `?`,
/// as error paths here must fall through to the cleanup code below — confirm.
async fn import_backup_stream_inner<R: tokio::io::AsyncRead + Unpin>(
    context: &Context,
    backup_file: R,
    file_size: u64,
    passphrase: String,
) -> (Result<()>,) {
    // Wrap the stream so progress events are emitted while the archive is read.
    let backup_file = ProgressReader::new(backup_file, context.clone(), file_size);
    let mut archive = Archive::new(backup_file);

    let mut entries = match archive.entries() {
        Ok(entries) => entries,
        Err(e) => return (Err(e).context("Failed to get archive entries"),),
    };
    // Blob paths created so far; used to roll back on error.
    let mut blobs = Vec::new();
    // The loop breaks with the first error, or Ok(()) when the archive is exhausted.
    let mut res: Result<()> = loop {
        let mut f = match entries.try_next().await {
            Ok(Some(f)) => f,
            Ok(None) => break Ok(()),
            Err(e) => break Err(e).context("Failed to get next entry"),
        };

        let path = match f.path() {
            Ok(path) => path.to_path_buf(),
            Err(e) => break Err(e).context("Failed to get entry path"),
        };
        if let Err(e) = f.unpack_in(context.get_blobdir()).await {
            break Err(e).context("Failed to unpack file");
        }
        // The database file is imported separately below; don't treat it as a blob.
        if path.file_name() == Some(OsStr::new(DBFILE_BACKUP_NAME)) {
            continue;
        }
        // async_tar unpacked to $BLOBDIR/BLOBS_BACKUP_NAME/, so we move the file afterwards.
        let from_path = context.get_blobdir().join(&path);
        if from_path.is_file() {
            if let Some(name) = from_path.file_name() {
                let to_path = context.get_blobdir().join(name);
                if let Err(e) = fs::rename(&from_path, &to_path).await {
                    // Track the unmoved file too so the rollback below removes it.
                    blobs.push(from_path);
                    break Err(e).context("Failed to move file to blobdir");
                }
                blobs.push(to_path);
            } else {
                warn!(context, "No file name");
            }
        }
    };
    // Roll back: remove all blobs unpacked so far if anything failed.
    if res.is_err() {
        for blob in blobs {
            fs::remove_file(&blob).await.log_err(context).ok();
        }
    }

    let unpacked_database = context.get_blobdir().join(DBFILE_BACKUP_NAME);
    if res.is_ok() {
        res = context
            .sql
            .import(&unpacked_database, passphrase.clone())
            .await
            .context("cannot import unpacked database");
    }
    if res.is_ok() {
        res = check_backup_version(context).await;
    }
    if res.is_ok() {
        res = adjust_bcc_self(context).await;
    }
    // The unpacked database copy is removed regardless of success.
    fs::remove_file(unpacked_database)
        .await
        .context("cannot remove unpacked database")
        .log_err(context)
        .ok();
    if res.is_ok() {
        // 999 here; the final 1000 is emitted by `imex()` itself.
        context.emit_event(EventType::ImexProgress(999));
        res = context.sql.run_migrations(context).await;
        context.emit_event(EventType::AccountsItemChanged);
    }
    if res.is_ok() {
        // NOTE(review): resets device messages after import;
        // see `chat::delete_and_reset_all_device_msgs` for the exact semantics.
        delete_and_reset_all_device_msgs(context)
            .await
            .log_err(context)
            .ok();
    }
    (res,)
}
424
425/*******************************************************************************
426 * Export backup
427 ******************************************************************************/
428
429/// Returns Ok((temp_db_path, temp_path, dest_path)) on success. Unencrypted database can be
430/// written to temp_db_path. The backup can then be written to temp_path. If the backup succeeded,
431/// it can be renamed to dest_path. This guarantees that the backup is complete.
432fn get_next_backup_path(
433    folder: &Path,
434    addr: &str,
435    backup_time: i64,
436) -> Result<(PathBuf, PathBuf, PathBuf)> {
437    let folder = PathBuf::from(folder);
438    let stem = chrono::DateTime::<chrono::Utc>::from_timestamp(backup_time, 0)
439        .context("can't get next backup path")?
440        // Don't change this file name format, in `dc_imex_has_backup` we use string comparison to determine which backup is newer:
441        .format("delta-chat-backup-%Y-%m-%d")
442        .to_string();
443
444    // 64 backup files per day should be enough for everyone
445    for i in 0..64 {
446        let mut tempdbfile = folder.clone();
447        tempdbfile.push(format!("{stem}-{i:02}-{addr}.db"));
448
449        let mut tempfile = folder.clone();
450        tempfile.push(format!("{stem}-{i:02}-{addr}.tar.part"));
451
452        let mut destfile = folder.clone();
453        destfile.push(format!("{stem}-{i:02}-{addr}.tar"));
454
455        if !tempdbfile.exists() && !tempfile.exists() && !destfile.exists() {
456            return Ok((tempdbfile, tempfile, destfile));
457        }
458    }
459    bail!("could not create backup file, disk full?");
460}
461
/// Exports the database to a separate file with the given passphrase.
///
/// Set passphrase to empty string to export the database unencrypted.
async fn export_backup(context: &Context, dir: &Path, passphrase: String) -> Result<()> {
    // get a fine backup file name (the name includes the date so that multiple backup instances are possible)
    let now = time();
    let self_addr = context.get_primary_self_addr().await?;
    let (temp_db_path, temp_path, dest_path) = get_next_backup_path(dir, &self_addr, now)?;
    // Guard the temporary paths so they are cleaned up if the export fails
    // (NOTE(review): presumably `TempPathGuard` deletes the path on drop — see tools).
    let temp_db_path = TempPathGuard::new(temp_db_path);
    let temp_path = TempPathGuard::new(temp_path);

    export_database(context, &temp_db_path, passphrase, now)
        .await
        .context("could not export database")?;

    info!(
        context,
        "Backup '{}' to '{}'.",
        context.get_dbfile().display(),
        dest_path.display(),
    );

    let file = File::create(&temp_path).await?;
    let blobdir = BlobDirContents::new(context).await?;

    // Estimate the final tar size (database + blobs) for progress reporting.
    let mut file_size = 0;
    file_size += temp_db_path.metadata()?.len();
    for blob in blobdir.iter() {
        file_size += blob.to_abs_path().metadata()?.len()
    }

    export_backup_stream(context, &temp_db_path, blobdir, file, file_size)
        .await
        .context("Exporting backup to file failed")?;
    // Rename to the final name only after the export succeeded,
    // so a `.tar` file is always a complete backup.
    fs::rename(temp_path, &dest_path).await?;
    context.emit_event(EventType::ImexFileWritten(dest_path));
    Ok(())
}
500
/// Writer that emits progress events as bytes are written into it.
///
/// Wraps the backup `.tar` output during export and converts the number of
/// bytes written so far into `ImexProgress` events (see the `AsyncWrite` impl).
#[pin_project]
struct ProgressWriter<W> {
    /// Wrapped writer.
    #[pin]
    inner: W,

    /// Number of bytes successfully written into the internal writer.
    written: usize,

    /// Total size of the backup .tar file expected to be written into the writer.
    /// Used to calculate the progress.
    file_size: usize,

    /// Last progress emitted to avoid emitting the same progress value twice.
    last_progress: usize,

    /// Context for emitting progress events.
    context: Context,
}
521
522impl<W> ProgressWriter<W> {
523    fn new(w: W, context: Context, file_size: u64) -> Self {
524        Self {
525            inner: w,
526            written: 0,
527            file_size: file_size as usize,
528            last_progress: 1,
529            context,
530        }
531    }
532}
533
534impl<W> AsyncWrite for ProgressWriter<W>
535where
536    W: AsyncWrite,
537{
538    fn poll_write(
539        self: Pin<&mut Self>,
540        cx: &mut std::task::Context<'_>,
541        buf: &[u8],
542    ) -> std::task::Poll<Result<usize, std::io::Error>> {
543        let this = self.project();
544        let res = this.inner.poll_write(cx, buf);
545        if let std::task::Poll::Ready(Ok(written)) = res {
546            *this.written = this.written.saturating_add(written);
547
548            let progress = std::cmp::min(1000 * *this.written / *this.file_size, 999);
549            if progress > *this.last_progress {
550                this.context.emit_event(EventType::ImexProgress(progress));
551                *this.last_progress = progress;
552            }
553        }
554        res
555    }
556
557    fn poll_flush(
558        self: Pin<&mut Self>,
559        cx: &mut std::task::Context<'_>,
560    ) -> std::task::Poll<Result<(), std::io::Error>> {
561        self.project().inner.poll_flush(cx)
562    }
563
564    fn poll_shutdown(
565        self: Pin<&mut Self>,
566        cx: &mut std::task::Context<'_>,
567    ) -> std::task::Poll<Result<(), std::io::Error>> {
568        self.project().inner.poll_shutdown(cx)
569    }
570}
571
572/// Exports the database and blobs into a stream.
573pub(crate) async fn export_backup_stream<'a, W>(
574    context: &'a Context,
575    temp_db_path: &Path,
576    blobdir: BlobDirContents<'a>,
577    writer: W,
578    file_size: u64,
579) -> Result<()>
580where
581    W: tokio::io::AsyncWrite + tokio::io::AsyncWriteExt + Unpin + Send + 'static,
582{
583    let writer = ProgressWriter::new(writer, context.clone(), file_size);
584    let mut builder = tokio_tar::Builder::new(writer);
585
586    builder
587        .append_path_with_name(temp_db_path, DBFILE_BACKUP_NAME)
588        .await?;
589
590    for blob in blobdir.iter() {
591        let mut file = File::open(blob.to_abs_path()).await?;
592        let path_in_archive = PathBuf::from(BLOBS_BACKUP_NAME).join(blob.as_name());
593        builder.append_file(path_in_archive, &mut file).await?;
594    }
595
596    builder.finish().await?;
597    Ok(())
598}
599
600/// Imports secret key from a file.
601async fn import_secret_key(context: &Context, path: &Path) -> Result<()> {
602    let buf = read_file(context, path).await?;
603    let armored = std::string::String::from_utf8_lossy(&buf);
604    set_self_key(context, &armored).await?;
605    Ok(())
606}
607
608/// Imports secret keys from the provided file or directory.
609///
610/// If provided path is a file, ASCII-armored secret key is read from the file
611/// and set as the default key.
612///
613/// If provided path is a directory, all files with .asc extension
614/// containing secret keys are imported and the last successfully
615/// imported which does not contain "legacy" in its filename
616/// is set as the default.
617async fn import_self_keys(context: &Context, path: &Path) -> Result<()> {
618    let attr = tokio::fs::metadata(path).await?;
619
620    if attr.is_file() {
621        info!(
622            context,
623            "Importing secret key from {} as the default key.",
624            path.display()
625        );
626        import_secret_key(context, path).await?;
627        return Ok(());
628    }
629
630    let mut imported_cnt = 0;
631
632    let mut dir_handle = tokio::fs::read_dir(&path).await?;
633    while let Ok(Some(entry)) = dir_handle.next_entry().await {
634        let entry_fn = entry.file_name();
635        let name_f = entry_fn.to_string_lossy();
636        let path_plus_name = path.join(&entry_fn);
637        if let Some(suffix) = get_filesuffix_lc(&name_f) {
638            if suffix != "asc" {
639                continue;
640            }
641        } else {
642            continue;
643        };
644        info!(
645            context,
646            "Considering key file: {}.",
647            path_plus_name.display()
648        );
649
650        if let Err(err) = import_secret_key(context, &path_plus_name).await {
651            warn!(
652                context,
653                "Failed to import secret key from {}: {:#}.",
654                path_plus_name.display(),
655                err
656            );
657            continue;
658        }
659
660        imported_cnt += 1;
661    }
662    ensure!(
663        imported_cnt > 0,
664        "No private keys found in {}.",
665        path.display()
666    );
667    Ok(())
668}
669
670async fn export_self_keys(context: &Context, dir: &Path) -> Result<()> {
671    let mut export_errors = 0;
672
673    let keys = context
674        .sql
675        .query_map(
676            "SELECT id, public_key, private_key, id=(SELECT value FROM config WHERE keyname='key_id') FROM keypairs;",
677            (),
678            |row| {
679                let id = row.get(0)?;
680                let public_key_blob: Vec<u8> = row.get(1)?;
681                let public_key = SignedPublicKey::from_slice(&public_key_blob);
682                let private_key_blob: Vec<u8> = row.get(2)?;
683                let private_key = SignedSecretKey::from_slice(&private_key_blob);
684                let is_default: i32 = row.get(3)?;
685
686                Ok((id, public_key, private_key, is_default))
687            },
688            |keys| {
689                keys.collect::<std::result::Result<Vec<_>, _>>()
690                    .map_err(Into::into)
691            },
692        )
693        .await?;
694    let self_addr = context.get_primary_self_addr().await?;
695    for (id, public_key, private_key, is_default) in keys {
696        let id = Some(id).filter(|_| is_default == 0);
697
698        if let Ok(key) = public_key {
699            if let Err(err) = export_key_to_asc_file(context, dir, &self_addr, id, &key).await {
700                error!(context, "Failed to export public key: {:#}.", err);
701                export_errors += 1;
702            }
703        } else {
704            export_errors += 1;
705        }
706        if let Ok(key) = private_key {
707            if let Err(err) = export_key_to_asc_file(context, dir, &self_addr, id, &key).await {
708                error!(context, "Failed to export private key: {:#}.", err);
709                export_errors += 1;
710            }
711        } else {
712            export_errors += 1;
713        }
714    }
715
716    ensure!(export_errors == 0, "errors while exporting keys");
717    Ok(())
718}
719
720/// Returns the exported key file name inside `dir`.
721async fn export_key_to_asc_file<T>(
722    context: &Context,
723    dir: &Path,
724    addr: &str,
725    id: Option<i64>,
726    key: &T,
727) -> Result<String>
728where
729    T: DcKey,
730{
731    let file_name = {
732        let kind = match T::is_private() {
733            false => "public",
734            true => "private",
735        };
736        let id = id.map_or("default".into(), |i| i.to_string());
737        let fp = key.dc_fingerprint().hex();
738        format!("{kind}-key-{addr}-{id}-{fp}.asc")
739    };
740    let path = dir.join(&file_name);
741    info!(
742        context,
743        "Exporting key {:?} to {}.",
744        key.key_id(),
745        path.display()
746    );
747
748    // Delete the file if it already exists.
749    delete_file(context, &path).await.ok();
750
751    let content = key.to_asc(None).into_bytes();
752    write_file(context, &path, &content)
753        .await
754        .with_context(|| format!("cannot write key to {}", path.display()))?;
755    context.emit_event(EventType::ImexFileWritten(path));
756    Ok(file_name)
757}
758
/// Exports the database to *dest*, encrypted using *passphrase*.
///
/// The directory of *dest* must already exist, if *dest* itself exists it will be
/// overwritten.
///
/// This also verifies that IO is not running during the export.
async fn export_database(
    context: &Context,
    dest: &Path,
    passphrase: String,
    timestamp: i64,
) -> Result<()> {
    ensure!(
        !context.scheduler.is_running().await,
        "cannot export backup, IO is running"
    );
    // The config stores a 32-bit value; reject timestamps that do not fit.
    let timestamp = timestamp.try_into().context("32-bit UNIX time overflow")?;

    // TODO: Maybe introduce camino crate for UTF-8 paths where we need them.
    let dest = dest
        .to_str()
        .with_context(|| format!("path {} is not valid unicode", dest.display()))?;

    adjust_bcc_self(context).await?;
    // Record the time and format version of this backup inside the database
    // so the importing side can check compatibility (see `check_backup_version`).
    context
        .sql
        .set_raw_config_int("backup_time", timestamp)
        .await?;
    context
        .sql
        .set_raw_config_int("backup_version", DCBACKUP_VERSION)
        .await?;
    sql::housekeeping(context).await.log_err(context).ok();
    context
        .sql
        .call_write(|conn| {
            // Compact the database first; a failed VACUUM is not fatal.
            conn.execute("VACUUM;", ())
                .map_err(|err| warn!(context, "Vacuum failed, exporting anyway {err}"))
                .ok();
            // Write an (optionally encrypted) copy of the database to `dest`
            // via SQLCipher's sqlcipher_export().
            conn.execute("ATTACH DATABASE ? AS backup KEY ?", (dest, passphrase))
                .context("failed to attach backup database")?;
            // Defer the export result so the database is detached in any case.
            let res = conn
                .query_row("SELECT sqlcipher_export('backup')", [], |_row| Ok(()))
                .context("failed to export to attached backup database");
            conn.execute(
                "UPDATE backup.config SET value='0' WHERE keyname='verified_one_on_one_chats';",
                [],
            )
            .ok(); // If verified_one_on_one_chats was not set, this errors, which we ignore
            conn.execute("DETACH DATABASE backup", [])
                .context("failed to detach backup database")?;
            res?;
            Ok(())
        })
        .await
}
815
816/// Sets `Config::BccSelf` (and `DeleteServerAfter` to "never" in effect) if needed so that new
817/// messages are present on the server after a backup restoration or available for all devices in
818/// multi-device case. NB: Calling this after a backup import isn't reliable as we can crash in
819/// between, but this is a problem only for old backups, new backups already have `BccSelf` set if
820/// necessary.
821async fn adjust_bcc_self(context: &Context) -> Result<()> {
822    if context.is_chatmail().await? && !context.config_exists(Config::BccSelf).await? {
823        context.set_config(Config::BccSelf, Some("1")).await?;
824    }
825    Ok(())
826}
827
828async fn check_backup_version(context: &Context) -> Result<()> {
829    let version = (context.sql.get_raw_config_int("backup_version").await?).unwrap_or(2);
830    ensure!(
831        version <= DCBACKUP_VERSION,
832        "Backup too new, please update Delta Chat"
833    );
834    Ok(())
835}
836
#[cfg(test)]
mod tests {
    use std::time::Duration;

    use tokio::task;

    use super::*;
    use crate::config::Config;
    use crate::test_utils::{alice_keypair, TestContext};

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_public_key_to_asc_file() {
        let context = TestContext::new().await;
        let key = alice_keypair().public;
        // "$BLOBDIR" is resolved by export_key_to_asc_file to the context's
        // actual blob directory.
        let blobdir = Path::new("$BLOBDIR");
        let filename = export_key_to_asc_file(&context.ctx, blobdir, "a@b", None, &key)
            .await
            .unwrap();
        assert!(filename.starts_with("public-key-a@b-default-"));
        assert!(filename.ends_with(".asc"));
        // Read the exported file back from the real blob directory and check
        // that it contains exactly the ASCII-armored key.
        let blobdir = context.ctx.get_blobdir().to_str().unwrap();
        let filename = format!("{blobdir}/{filename}");
        let bytes = tokio::fs::read(&filename).await.unwrap();

        assert_eq!(bytes, key.to_asc(None).into_bytes());
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_import_private_key_exported_to_asc_file() {
        let context = TestContext::new().await;
        let key = alice_keypair().secret;
        let blobdir = Path::new("$BLOBDIR");
        let filename = export_key_to_asc_file(&context.ctx, blobdir, "a@b", None, &key)
            .await
            .unwrap();
        // The exported filename embeds the key fingerprint between a fixed
        // prefix and the ".asc" suffix; extract and verify it.
        let fingerprint = filename
            .strip_prefix("private-key-a@b-default-")
            .unwrap()
            .strip_suffix(".asc")
            .unwrap();
        assert_eq!(fingerprint, key.dc_fingerprint().hex());
        let blobdir = context.ctx.get_blobdir().to_str().unwrap();
        let filename = format!("{blobdir}/{filename}");
        let bytes = tokio::fs::read(&filename).await.unwrap();

        assert_eq!(bytes, key.to_asc(None).into_bytes());

        // A fresh context must be able to import the exported key file.
        let alice = &TestContext::new().await;
        if let Err(err) = imex(alice, ImexMode::ImportSelfKeys, Path::new(&filename), None).await {
            panic!("got error on import: {err:#}");
        }
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_and_import_key_from_dir() {
        let export_dir = tempfile::tempdir().unwrap();

        // Export Alice's keys into a directory ...
        let context = TestContext::new_alice().await;
        if let Err(err) = imex(
            &context.ctx,
            ImexMode::ExportSelfKeys,
            export_dir.path(),
            None,
        )
        .await
        {
            panic!("got error on export: {err:#}");
        }

        // ... and import them into a fresh, unconfigured context.
        let context2 = TestContext::new().await;
        if let Err(err) = imex(
            &context2.ctx,
            ImexMode::ImportSelfKeys,
            export_dir.path(),
            None,
        )
        .await
        {
            panic!("got error on import: {err:#}");
        }
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_import_second_key() -> Result<()> {
        let alice = &TestContext::new_alice().await;
        let chat = alice.create_chat(alice).await;
        let sent = alice.send_text(chat.id, "Encrypted with old key").await;
        let export_dir = tempfile::tempdir().unwrap();

        // Export the keys of a different account with the same address.
        let alice = &TestContext::new().await;
        alice.configure_addr("alice@example.org").await;
        imex(alice, ImexMode::ExportSelfKeys, export_dir.path(), None).await?;

        let alice = &TestContext::new_alice().await;
        let old_key = key::load_self_secret_key(alice).await?;

        assert!(
            imex(alice, ImexMode::ImportSelfKeys, export_dir.path(), None)
                .await
                .is_err()
        );

        // Importing a second key is not allowed anymore,
        // even as a non-default key.
        assert_eq!(key::load_self_secret_key(alice).await?, old_key);

        assert_eq!(key::load_self_secret_keyring(alice).await?, vec![old_key]);

        // Messages encrypted to the original key must still be readable.
        let msg = alice.recv_msg(&sent).await;
        assert!(msg.get_showpadlock());
        assert_eq!(msg.chat_id, alice.get_self_chat().await.id);
        assert_eq!(msg.get_text(), "Encrypted with old key");

        Ok(())
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_and_import_backup() -> Result<()> {
        for set_verified_oneonone_chats in [true, false] {
            let backup_dir = tempfile::tempdir().unwrap();

            let context1 = TestContext::new_alice().await;
            assert!(context1.is_configured().await?);
            if set_verified_oneonone_chats {
                context1
                    .set_config_bool(Config::VerifiedOneOnOneChats, true)
                    .await?;
            }

            let context2 = TestContext::new().await;
            assert!(!context2.is_configured().await?);
            assert!(has_backup(&context2, backup_dir.path()).await.is_err());

            // export from context1
            assert!(
                imex(&context1, ImexMode::ExportBackup, backup_dir.path(), None)
                    .await
                    .is_ok()
            );
            let _event = context1
                .evtracker
                .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
                .await;

            // import to context2
            let backup = has_backup(&context2, backup_dir.path()).await?;

            // Import of unencrypted backup with incorrect "foobar" backup passphrase fails.
            assert!(imex(
                &context2,
                ImexMode::ImportBackup,
                backup.as_ref(),
                Some("foobar".to_string())
            )
            .await
            .is_err());

            assert!(
                imex(&context2, ImexMode::ImportBackup, backup.as_ref(), None)
                    .await
                    .is_ok()
            );
            let _event = context2
                .evtracker
                .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
                .await;

            assert!(context2.is_configured().await?);
            assert_eq!(
                context2.get_config(Config::Addr).await?,
                Some("alice@example.org".to_string())
            );
            // VerifiedOneOnOneChats is deliberately not carried over by the
            // backup, while the exporting context keeps its own setting.
            assert_eq!(
                context2
                    .get_config_bool(Config::VerifiedOneOnOneChats)
                    .await?,
                false
            );
            assert_eq!(
                context1
                    .get_config_bool(Config::VerifiedOneOnOneChats)
                    .await?,
                set_verified_oneonone_chats
            );
        }
        Ok(())
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_import_chatmail_backup() -> Result<()> {
        let backup_dir = tempfile::tempdir().unwrap();

        let context1 = &TestContext::new_alice().await;

        // Check that the settings are displayed correctly.
        assert_eq!(
            context1.get_config(Config::BccSelf).await?,
            Some("1".to_string())
        );
        assert_eq!(
            context1.get_config(Config::DeleteServerAfter).await?,
            Some("0".to_string())
        );
        // Enabling chatmail flips the displayed defaults for BccSelf and
        // DeleteServerAfter.
        context1.set_config_bool(Config::IsChatmail, true).await?;
        assert_eq!(
            context1.get_config(Config::BccSelf).await?,
            Some("0".to_string())
        );
        assert_eq!(
            context1.get_config(Config::DeleteServerAfter).await?,
            Some("1".to_string())
        );

        assert_eq!(context1.get_config_delete_server_after().await?, Some(0));
        imex(context1, ImexMode::ExportBackup, backup_dir.path(), None).await?;
        let _event = context1
            .evtracker
            .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
            .await;

        let context2 = &TestContext::new().await;
        let backup = has_backup(context2, backup_dir.path()).await?;
        imex(context2, ImexMode::ImportBackup, backup.as_ref(), None).await?;
        let _event = context2
            .evtracker
            .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
            .await;
        assert!(context2.is_configured().await?);
        assert!(context2.is_chatmail().await?);
        // After a backup roundtrip both contexts report the post-import
        // defaults for these settings.
        for ctx in [context1, context2] {
            assert_eq!(
                ctx.get_config(Config::BccSelf).await?,
                Some("1".to_string())
            );
            assert_eq!(
                ctx.get_config(Config::DeleteServerAfter).await?,
                Some("0".to_string())
            );
            assert_eq!(ctx.get_config_delete_server_after().await?, None);
        }
        Ok(())
    }

    /// This is a regression test for
    /// https://github.com/deltachat/deltachat-android/issues/2263
    /// where the config cache wasn't reset properly after a backup.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_import_backup_reset_config_cache() -> Result<()> {
        let backup_dir = tempfile::tempdir()?;
        let context1 = TestContext::new_alice().await;
        let context2 = TestContext::new().await;
        assert!(!context2.is_configured().await?);

        // export from context1
        imex(&context1, ImexMode::ExportBackup, backup_dir.path(), None).await?;

        // import to context2
        let backup = has_backup(&context2, backup_dir.path()).await?;
        let context2_cloned = context2.clone();
        let handle = task::spawn(async move {
            imex(
                &context2_cloned,
                ImexMode::ImportBackup,
                backup.as_ref(),
                None,
            )
            .await
            .unwrap();
        });

        while !handle.is_finished() {
            // The database is still unconfigured;
            // fill the config cache with the old value.
            context2.is_configured().await.ok();
            tokio::time::sleep(Duration::from_micros(1)).await;
        }

        // Assert that the config cache has the new value now.
        assert!(context2.is_configured().await?);

        Ok(())
    }
}