// deltachat/imex.rs

1//! # Import/export module.
2
3use std::ffi::OsStr;
4use std::path::{Path, PathBuf};
5use std::pin::Pin;
6
7use ::pgp::types::PublicKeyTrait;
8use anyhow::{bail, ensure, format_err, Context as _, Result};
9use futures::TryStreamExt;
10use futures_lite::FutureExt;
11use pin_project::pin_project;
12
13use tokio::fs::{self, File};
14use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
15use tokio_tar::Archive;
16
17use crate::blob::BlobDirContents;
18use crate::chat::delete_and_reset_all_device_msgs;
19use crate::config::Config;
20use crate::context::Context;
21use crate::e2ee;
22use crate::events::EventType;
23use crate::key::{self, DcKey, DcSecretKey, SignedPublicKey, SignedSecretKey};
24use crate::log::LogExt;
25use crate::pgp;
26use crate::qr::DCBACKUP_VERSION;
27use crate::sql;
28use crate::tools::{
29    create_folder, delete_file, get_filesuffix_lc, read_file, time, write_file, TempPathGuard,
30};
31
32mod key_transfer;
33mod transfer;
34
35pub use key_transfer::{continue_key_transfer, initiate_key_transfer};
36pub use transfer::{get_backup, BackupProvider};
37
38// Name of the database file in the backup.
39const DBFILE_BACKUP_NAME: &str = "dc_database_backup.sqlite";
40pub(crate) const BLOBS_BACKUP_NAME: &str = "blobs_backup";
41
/// Import/export command.
///
/// Discriminants are explicit and converted via `FromPrimitive`/`ToPrimitive`,
/// presumably because they are stable identifiers used across the FFI —
/// NOTE(review): do not renumber existing variants.
#[derive(Debug, Display, Copy, Clone, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(u32)]
pub enum ImexMode {
    /// Export all private keys and all public keys of the user to the
    /// directory given as `path`. The default key is written to the files
    /// `{public,private}-key-<addr>-default-<fingerprint>.asc`, if there are more keys, they are
    /// written to files as `{public,private}-key-<addr>-<id>-<fingerprint>.asc`.
    ExportSelfKeys = 1,

    /// Import private keys found in `path` if it is a directory, otherwise import a private key
    /// from `path`.
    /// The last imported key is made the default keys unless its name contains the string `legacy`.
    /// Public keys are not imported.
    ImportSelfKeys = 2,

    /// Export a backup to the directory given as `path` with the given `passphrase`.
    /// The backup contains all contacts, chats, images and other data and device independent settings.
    /// The backup does not contain device dependent settings as ringtones or LED notification settings.
    /// The name of the backup is `delta-chat-backup-<day>-<number>-<addr>.tar`.
    ExportBackup = 11,

    /// `path` is the file (not: directory) to import. The file is normally
    /// created by DC_IMEX_EXPORT_BACKUP and detected by imex_has_backup(). Importing a backup
    /// is only possible as long as the context is not configured or used in another way.
    ImportBackup = 12,
}
69
70/// Import/export things.
71///
72/// What to do is defined by the `what` parameter.
73///
74/// During execution of the job,
75/// some events are sent out:
76///
77/// - A number of `DC_EVENT_IMEX_PROGRESS` events are sent and may be used to create
78///   a progress bar or stuff like that. Moreover, you'll be informed when the imex-job is done.
79///
80/// - For each file written on export, the function sends `DC_EVENT_IMEX_FILE_WRITTEN`
81///
82/// Only one import-/export-progress can run at the same time.
83/// To cancel an import-/export-progress, drop the future returned by this function.
84pub async fn imex(
85    context: &Context,
86    what: ImexMode,
87    path: &Path,
88    passphrase: Option<String>,
89) -> Result<()> {
90    let cancel = context.alloc_ongoing().await?;
91
92    let res = {
93        let _guard = context.scheduler.pause(context.clone()).await?;
94        imex_inner(context, what, path, passphrase)
95            .race(async {
96                cancel.recv().await.ok();
97                Err(format_err!("canceled"))
98            })
99            .await
100    };
101    context.free_ongoing().await;
102
103    if let Err(err) = res.as_ref() {
104        // We are using Anyhow's .context() and to show the inner error, too, we need the {:#}:
105        error!(context, "IMEX failed to complete: {:#}", err);
106        context.emit_event(EventType::ImexProgress(0));
107    } else {
108        info!(context, "IMEX successfully completed");
109        context.emit_event(EventType::ImexProgress(1000));
110    }
111
112    res
113}
114
115/// Returns the filename of the backup found (otherwise an error)
116pub async fn has_backup(_context: &Context, dir_name: &Path) -> Result<String> {
117    let mut dir_iter = tokio::fs::read_dir(dir_name).await?;
118    let mut newest_backup_name = "".to_string();
119    let mut newest_backup_path: Option<PathBuf> = None;
120
121    while let Ok(Some(dirent)) = dir_iter.next_entry().await {
122        let path = dirent.path();
123        let name = dirent.file_name();
124        let name: String = name.to_string_lossy().into();
125        if name.starts_with("delta-chat")
126            && name.ends_with(".tar")
127            && (newest_backup_name.is_empty() || name > newest_backup_name)
128        {
129            // We just use string comparison to determine which backup is newer.
130            // This works fine because the filenames have the form `delta-chat-backup-2023-10-18-00-foo@example.com.tar`
131            newest_backup_path = Some(path);
132            newest_backup_name = name;
133        }
134    }
135
136    match newest_backup_path {
137        Some(path) => Ok(path.to_string_lossy().into_owned()),
138        None => bail!("no backup found in {}", dir_name.display()),
139    }
140}
141
/// Replaces the self key with the given ASCII-armored secret key.
///
/// If the armor carries an `Autocrypt-Prefer-Encrypt` header, the
/// `e2ee_enabled` raw config is updated accordingly; otherwise the
/// current setting is kept.
async fn set_self_key(context: &Context, armored: &str) -> Result<()> {
    // try hard to only modify key-state
    let (private_key, header) = SignedSecretKey::from_asc(armored)?;
    let public_key = private_key.split_public_key()?;
    if let Some(preferencrypt) = header.get("Autocrypt-Prefer-Encrypt") {
        // Map the header value onto the integer config; any other value is rejected.
        let e2ee_enabled = match preferencrypt.as_str() {
            "nopreference" => 0,
            "mutual" => 1,
            _ => {
                bail!("invalid Autocrypt-Prefer-Encrypt header: {:?}", header);
            }
        };
        context
            .sql
            .set_raw_config_int("e2ee_enabled", e2ee_enabled)
            .await?;
    } else {
        // `Autocrypt-Prefer-Encrypt` is not included
        // in keys exported to file.
        //
        // `Autocrypt-Prefer-Encrypt` also SHOULD be sent
        // in Autocrypt Setup Message according to Autocrypt specification,
        // but K-9 6.802 does not include this header.
        //
        // We keep current setting in this case.
        info!(context, "No Autocrypt-Prefer-Encrypt header.");
    };

    // Store the secret key together with its derived public half as the self keypair.
    let keypair = pgp::KeyPair {
        public: public_key,
        secret: private_key,
    };
    key::store_self_keypair(context, &keypair).await?;

    info!(context, "stored self key: {:?}", keypair.secret.key_id());
    Ok(())
}
179
180async fn imex_inner(
181    context: &Context,
182    what: ImexMode,
183    path: &Path,
184    passphrase: Option<String>,
185) -> Result<()> {
186    info!(
187        context,
188        "{} path: {}",
189        match what {
190            ImexMode::ExportSelfKeys | ImexMode::ExportBackup => "Export",
191            ImexMode::ImportSelfKeys | ImexMode::ImportBackup => "Import",
192        },
193        path.display()
194    );
195    ensure!(context.sql.is_open().await, "Database not opened.");
196    context.emit_event(EventType::ImexProgress(1));
197
198    if what == ImexMode::ExportBackup || what == ImexMode::ExportSelfKeys {
199        // before we export anything, make sure the private key exists
200        e2ee::ensure_secret_key_exists(context)
201            .await
202            .context("Cannot create private key or private key not available")?;
203
204        create_folder(context, path).await?;
205    }
206
207    match what {
208        ImexMode::ExportSelfKeys => export_self_keys(context, path).await,
209        ImexMode::ImportSelfKeys => import_self_keys(context, path).await,
210
211        ImexMode::ExportBackup => {
212            export_backup(context, path, passphrase.unwrap_or_default()).await
213        }
214        ImexMode::ImportBackup => {
215            import_backup(context, path, passphrase.unwrap_or_default()).await
216        }
217    }
218}
219
220/// Imports backup into the currently open database.
221///
222/// The contents of the currently open database will be lost.
223///
224/// `passphrase` is the passphrase used to open backup database. If backup is unencrypted, pass
225/// empty string here.
226async fn import_backup(
227    context: &Context,
228    backup_to_import: &Path,
229    passphrase: String,
230) -> Result<()> {
231    ensure!(
232        !context.is_configured().await?,
233        "Cannot import backups to accounts in use."
234    );
235    ensure!(
236        !context.scheduler.is_running().await,
237        "cannot import backup, IO is running"
238    );
239
240    let backup_file = File::open(backup_to_import).await?;
241    let file_size = backup_file.metadata().await?.len();
242    info!(
243        context,
244        "Import \"{}\" ({} bytes) to \"{}\".",
245        backup_to_import.display(),
246        file_size,
247        context.get_dbfile().display()
248    );
249
250    import_backup_stream(context, backup_file, file_size, passphrase).await?;
251    Ok(())
252}
253
254/// Imports backup by reading a tar file from a stream.
255///
256/// `file_size` is used to calculate the progress
257/// and emit progress events.
258/// Ideally it is the sum of the entry
259/// sizes without the header overhead,
260/// but can be estimated as tar file size
261/// in which case the progress is underestimated
262/// and may not reach 99.9% by the end of import.
263/// Underestimating is better than
264/// overestimating because the progress
265/// jumps to 100% instead of getting stuck at 99.9%
266/// for some time.
267pub(crate) async fn import_backup_stream<R: tokio::io::AsyncRead + Unpin>(
268    context: &Context,
269    backup_file: R,
270    file_size: u64,
271    passphrase: String,
272) -> Result<()> {
273    import_backup_stream_inner(context, backup_file, file_size, passphrase)
274        .await
275        .0
276}
277
/// Reader that emits progress events as bytes are read from it.
///
/// Progress is reported via `EventType::ImexProgress` in permille,
/// capped at 999 so completion is only signalled by the caller.
#[pin_project]
struct ProgressReader<R> {
    /// Wrapped reader.
    #[pin]
    inner: R,

    /// Number of bytes successfully read from the internal reader.
    read: usize,

    /// Total size of the backup .tar file expected to be read from the reader.
    /// Used to calculate the progress.
    file_size: usize,

    /// Last progress emitted to avoid emitting the same progress value twice.
    last_progress: usize,

    /// Context for emitting progress events.
    context: Context,
}
298
299impl<R> ProgressReader<R> {
300    fn new(r: R, context: Context, file_size: u64) -> Self {
301        Self {
302            inner: r,
303            read: 0,
304            file_size: file_size as usize,
305            last_progress: 1,
306            context,
307        }
308    }
309}
310
311impl<R> AsyncRead for ProgressReader<R>
312where
313    R: AsyncRead,
314{
315    fn poll_read(
316        self: Pin<&mut Self>,
317        cx: &mut std::task::Context<'_>,
318        buf: &mut ReadBuf<'_>,
319    ) -> std::task::Poll<std::io::Result<()>> {
320        let this = self.project();
321        let before = buf.filled().len();
322        let res = this.inner.poll_read(cx, buf);
323        if let std::task::Poll::Ready(Ok(())) = res {
324            *this.read = this.read.saturating_add(buf.filled().len() - before);
325
326            let progress = std::cmp::min(1000 * *this.read / *this.file_size, 999);
327            if progress > *this.last_progress {
328                this.context.emit_event(EventType::ImexProgress(progress));
329                *this.last_progress = progress;
330            }
331        }
332        res
333    }
334}
335
/// Unpacks the backup tar into the blobdir, imports the database and
/// performs post-import fixups (version check, `BccSelf`, migrations,
/// device-message reset).
///
/// On a failed unpack, blobs moved so far are removed again; the unpacked
/// database file is removed after the import attempt in any case.
///
/// The result is wrapped in a 1-tuple, presumably to prevent accidental use
/// of the `?` operator inside, which would skip cleanup — NOTE(review): confirm.
async fn import_backup_stream_inner<R: tokio::io::AsyncRead + Unpin>(
    context: &Context,
    backup_file: R,
    file_size: u64,
    passphrase: String,
) -> (Result<()>,) {
    // Wrap the stream so progress events are emitted as bytes are consumed.
    let backup_file = ProgressReader::new(backup_file, context.clone(), file_size);
    let mut archive = Archive::new(backup_file);

    let mut entries = match archive.entries() {
        Ok(entries) => entries,
        Err(e) => return (Err(e).context("Failed to get archive entries"),),
    };
    // Paths of blobs moved into the blobdir so far; removed again on failure.
    let mut blobs = Vec::new();
    let mut res: Result<()> = loop {
        let mut f = match entries.try_next().await {
            Ok(Some(f)) => f,
            Ok(None) => break Ok(()),
            Err(e) => break Err(e).context("Failed to get next entry"),
        };

        let path = match f.path() {
            Ok(path) => path.to_path_buf(),
            Err(e) => break Err(e).context("Failed to get entry path"),
        };
        if let Err(e) = f.unpack_in(context.get_blobdir()).await {
            break Err(e).context("Failed to unpack file");
        }
        // The database file is handled separately below, not moved as a blob.
        if path.file_name() == Some(OsStr::new(DBFILE_BACKUP_NAME)) {
            continue;
        }
        // async_tar unpacked to $BLOBDIR/BLOBS_BACKUP_NAME/, so we move the file afterwards.
        let from_path = context.get_blobdir().join(&path);
        if from_path.is_file() {
            if let Some(name) = from_path.file_name() {
                let to_path = context.get_blobdir().join(name);
                if let Err(e) = fs::rename(&from_path, &to_path).await {
                    blobs.push(from_path);
                    break Err(e).context("Failed to move file to blobdir");
                }
                blobs.push(to_path);
            } else {
                warn!(context, "No file name");
            }
        }
    };
    // Roll back partially-imported blobs so a failed import leaves no debris.
    if res.is_err() {
        for blob in blobs {
            fs::remove_file(&blob).await.log_err(context).ok();
        }
    }

    let unpacked_database = context.get_blobdir().join(DBFILE_BACKUP_NAME);
    if res.is_ok() {
        res = context
            .sql
            .import(&unpacked_database, passphrase.clone())
            .await
            .context("cannot import unpacked database");
    }
    if res.is_ok() {
        res = check_backup_version(context).await;
    }
    if res.is_ok() {
        res = adjust_bcc_self(context).await;
    }
    // The unpacked database copy is removed regardless of success or failure.
    fs::remove_file(unpacked_database)
        .await
        .context("cannot remove unpacked database")
        .log_err(context)
        .ok();
    if res.is_ok() {
        context.emit_event(EventType::ImexProgress(999));
        res = context.sql.run_migrations(context).await;
        context.emit_event(EventType::AccountsItemChanged);
    }
    if res.is_ok() {
        delete_and_reset_all_device_msgs(context)
            .await
            .log_err(context)
            .ok();
    }
    (res,)
}
420
421/*******************************************************************************
422 * Export backup
423 ******************************************************************************/
424
425/// Returns Ok((temp_db_path, temp_path, dest_path)) on success. Unencrypted database can be
426/// written to temp_db_path. The backup can then be written to temp_path. If the backup succeeded,
427/// it can be renamed to dest_path. This guarantees that the backup is complete.
428fn get_next_backup_path(
429    folder: &Path,
430    addr: &str,
431    backup_time: i64,
432) -> Result<(PathBuf, PathBuf, PathBuf)> {
433    let folder = PathBuf::from(folder);
434    let stem = chrono::DateTime::<chrono::Utc>::from_timestamp(backup_time, 0)
435        .context("can't get next backup path")?
436        // Don't change this file name format, in `dc_imex_has_backup` we use string comparison to determine which backup is newer:
437        .format("delta-chat-backup-%Y-%m-%d")
438        .to_string();
439
440    // 64 backup files per day should be enough for everyone
441    for i in 0..64 {
442        let mut tempdbfile = folder.clone();
443        tempdbfile.push(format!("{stem}-{i:02}-{addr}.db"));
444
445        let mut tempfile = folder.clone();
446        tempfile.push(format!("{stem}-{i:02}-{addr}.tar.part"));
447
448        let mut destfile = folder.clone();
449        destfile.push(format!("{stem}-{i:02}-{addr}.tar"));
450
451        if !tempdbfile.exists() && !tempfile.exists() && !destfile.exists() {
452            return Ok((tempdbfile, tempfile, destfile));
453        }
454    }
455    bail!("could not create backup file, disk full?");
456}
457
458/// Exports the database to a separate file with the given passphrase.
459///
460/// Set passphrase to empty string to export the database unencrypted.
461async fn export_backup(context: &Context, dir: &Path, passphrase: String) -> Result<()> {
462    // get a fine backup file name (the name includes the date so that multiple backup instances are possible)
463    let now = time();
464    let self_addr = context.get_primary_self_addr().await?;
465    let (temp_db_path, temp_path, dest_path) = get_next_backup_path(dir, &self_addr, now)?;
466    let temp_db_path = TempPathGuard::new(temp_db_path);
467    let temp_path = TempPathGuard::new(temp_path);
468
469    export_database(context, &temp_db_path, passphrase, now)
470        .await
471        .context("could not export database")?;
472
473    info!(
474        context,
475        "Backup '{}' to '{}'.",
476        context.get_dbfile().display(),
477        dest_path.display(),
478    );
479
480    let file = File::create(&temp_path).await?;
481    let blobdir = BlobDirContents::new(context).await?;
482
483    let mut file_size = 0;
484    file_size += temp_db_path.metadata()?.len();
485    for blob in blobdir.iter() {
486        file_size += blob.to_abs_path().metadata()?.len()
487    }
488
489    export_backup_stream(context, &temp_db_path, blobdir, file, file_size)
490        .await
491        .context("Exporting backup to file failed")?;
492    fs::rename(temp_path, &dest_path).await?;
493    context.emit_event(EventType::ImexFileWritten(dest_path));
494    Ok(())
495}
496
/// Writer that emits progress events as bytes are written into it.
///
/// Progress is reported via `EventType::ImexProgress` in permille,
/// capped at 999 so completion is only signalled by the caller.
#[pin_project]
struct ProgressWriter<W> {
    /// Wrapped writer.
    #[pin]
    inner: W,

    /// Number of bytes successfully written into the internal writer.
    written: usize,

    /// Total size of the backup .tar file expected to be written into the writer.
    /// Used to calculate the progress.
    file_size: usize,

    /// Last progress emitted to avoid emitting the same progress value twice.
    last_progress: usize,

    /// Context for emitting progress events.
    context: Context,
}
517
518impl<W> ProgressWriter<W> {
519    fn new(w: W, context: Context, file_size: u64) -> Self {
520        Self {
521            inner: w,
522            written: 0,
523            file_size: file_size as usize,
524            last_progress: 1,
525            context,
526        }
527    }
528}
529
530impl<W> AsyncWrite for ProgressWriter<W>
531where
532    W: AsyncWrite,
533{
534    fn poll_write(
535        self: Pin<&mut Self>,
536        cx: &mut std::task::Context<'_>,
537        buf: &[u8],
538    ) -> std::task::Poll<Result<usize, std::io::Error>> {
539        let this = self.project();
540        let res = this.inner.poll_write(cx, buf);
541        if let std::task::Poll::Ready(Ok(written)) = res {
542            *this.written = this.written.saturating_add(written);
543
544            let progress = std::cmp::min(1000 * *this.written / *this.file_size, 999);
545            if progress > *this.last_progress {
546                this.context.emit_event(EventType::ImexProgress(progress));
547                *this.last_progress = progress;
548            }
549        }
550        res
551    }
552
553    fn poll_flush(
554        self: Pin<&mut Self>,
555        cx: &mut std::task::Context<'_>,
556    ) -> std::task::Poll<Result<(), std::io::Error>> {
557        self.project().inner.poll_flush(cx)
558    }
559
560    fn poll_shutdown(
561        self: Pin<&mut Self>,
562        cx: &mut std::task::Context<'_>,
563    ) -> std::task::Poll<Result<(), std::io::Error>> {
564        self.project().inner.poll_shutdown(cx)
565    }
566}
567
568/// Exports the database and blobs into a stream.
569pub(crate) async fn export_backup_stream<'a, W>(
570    context: &'a Context,
571    temp_db_path: &Path,
572    blobdir: BlobDirContents<'a>,
573    writer: W,
574    file_size: u64,
575) -> Result<()>
576where
577    W: tokio::io::AsyncWrite + tokio::io::AsyncWriteExt + Unpin + Send + 'static,
578{
579    let writer = ProgressWriter::new(writer, context.clone(), file_size);
580    let mut builder = tokio_tar::Builder::new(writer);
581
582    builder
583        .append_path_with_name(temp_db_path, DBFILE_BACKUP_NAME)
584        .await?;
585
586    for blob in blobdir.iter() {
587        let mut file = File::open(blob.to_abs_path()).await?;
588        let path_in_archive = PathBuf::from(BLOBS_BACKUP_NAME).join(blob.as_name());
589        builder.append_file(path_in_archive, &mut file).await?;
590    }
591
592    builder.finish().await?;
593    Ok(())
594}
595
596/// Imports secret key from a file.
597async fn import_secret_key(context: &Context, path: &Path) -> Result<()> {
598    let buf = read_file(context, path).await?;
599    let armored = std::string::String::from_utf8_lossy(&buf);
600    set_self_key(context, &armored).await?;
601    Ok(())
602}
603
604/// Imports secret keys from the provided file or directory.
605///
606/// If provided path is a file, ASCII-armored secret key is read from the file
607/// and set as the default key.
608///
609/// If provided path is a directory, all files with .asc extension
610/// containing secret keys are imported and the last successfully
611/// imported which does not contain "legacy" in its filename
612/// is set as the default.
613async fn import_self_keys(context: &Context, path: &Path) -> Result<()> {
614    let attr = tokio::fs::metadata(path).await?;
615
616    if attr.is_file() {
617        info!(
618            context,
619            "Importing secret key from {} as the default key.",
620            path.display()
621        );
622        import_secret_key(context, path).await?;
623        return Ok(());
624    }
625
626    let mut imported_cnt = 0;
627
628    let mut dir_handle = tokio::fs::read_dir(&path).await?;
629    while let Ok(Some(entry)) = dir_handle.next_entry().await {
630        let entry_fn = entry.file_name();
631        let name_f = entry_fn.to_string_lossy();
632        let path_plus_name = path.join(&entry_fn);
633        if let Some(suffix) = get_filesuffix_lc(&name_f) {
634            if suffix != "asc" {
635                continue;
636            }
637        } else {
638            continue;
639        };
640        info!(
641            context,
642            "Considering key file: {}.",
643            path_plus_name.display()
644        );
645
646        if let Err(err) = import_secret_key(context, &path_plus_name).await {
647            warn!(
648                context,
649                "Failed to import secret key from {}: {:#}.",
650                path_plus_name.display(),
651                err
652            );
653            continue;
654        }
655
656        imported_cnt += 1;
657    }
658    ensure!(
659        imported_cnt > 0,
660        "No private keys found in {}.",
661        path.display()
662    );
663    Ok(())
664}
665
666async fn export_self_keys(context: &Context, dir: &Path) -> Result<()> {
667    let mut export_errors = 0;
668
669    let keys = context
670        .sql
671        .query_map(
672            "SELECT id, public_key, private_key, id=(SELECT value FROM config WHERE keyname='key_id') FROM keypairs;",
673            (),
674            |row| {
675                let id = row.get(0)?;
676                let public_key_blob: Vec<u8> = row.get(1)?;
677                let public_key = SignedPublicKey::from_slice(&public_key_blob);
678                let private_key_blob: Vec<u8> = row.get(2)?;
679                let private_key = SignedSecretKey::from_slice(&private_key_blob);
680                let is_default: i32 = row.get(3)?;
681
682                Ok((id, public_key, private_key, is_default))
683            },
684            |keys| {
685                keys.collect::<std::result::Result<Vec<_>, _>>()
686                    .map_err(Into::into)
687            },
688        )
689        .await?;
690    let self_addr = context.get_primary_self_addr().await?;
691    for (id, public_key, private_key, is_default) in keys {
692        let id = Some(id).filter(|_| is_default == 0);
693
694        if let Ok(key) = public_key {
695            if let Err(err) = export_key_to_asc_file(context, dir, &self_addr, id, &key).await {
696                error!(context, "Failed to export public key: {:#}.", err);
697                export_errors += 1;
698            }
699        } else {
700            export_errors += 1;
701        }
702        if let Ok(key) = private_key {
703            if let Err(err) = export_key_to_asc_file(context, dir, &self_addr, id, &key).await {
704                error!(context, "Failed to export private key: {:#}.", err);
705                export_errors += 1;
706            }
707        } else {
708            export_errors += 1;
709        }
710    }
711
712    ensure!(export_errors == 0, "errors while exporting keys");
713    Ok(())
714}
715
716/// Returns the exported key file name inside `dir`.
717async fn export_key_to_asc_file<T>(
718    context: &Context,
719    dir: &Path,
720    addr: &str,
721    id: Option<i64>,
722    key: &T,
723) -> Result<String>
724where
725    T: DcKey,
726{
727    let file_name = {
728        let kind = match T::is_private() {
729            false => "public",
730            true => "private",
731        };
732        let id = id.map_or("default".into(), |i| i.to_string());
733        let fp = key.dc_fingerprint().hex();
734        format!("{kind}-key-{addr}-{id}-{fp}.asc")
735    };
736    let path = dir.join(&file_name);
737    info!(
738        context,
739        "Exporting key {:?} to {}.",
740        key.key_id(),
741        path.display()
742    );
743
744    // Delete the file if it already exists.
745    delete_file(context, &path).await.ok();
746
747    let content = key.to_asc(None).into_bytes();
748    write_file(context, &path, &content)
749        .await
750        .with_context(|| format!("cannot write key to {}", path.display()))?;
751    context.emit_event(EventType::ImexFileWritten(path));
752    Ok(file_name)
753}
754
/// Exports the database to *dest*, encrypted using *passphrase*.
///
/// The directory of *dest* must already exist, if *dest* itself exists it will be
/// overwritten.
///
/// This also verifies that IO is not running during the export.
async fn export_database(
    context: &Context,
    dest: &Path,
    passphrase: String,
    timestamp: i64,
) -> Result<()> {
    ensure!(
        !context.scheduler.is_running().await,
        "cannot export backup, IO is running"
    );
    let timestamp = timestamp.try_into().context("32-bit UNIX time overflow")?;

    // TODO: Maybe introduce camino crate for UTF-8 paths where we need them.
    let dest = dest
        .to_str()
        .with_context(|| format!("path {} is not valid unicode", dest.display()))?;

    adjust_bcc_self(context).await?;
    // Record export time and backup format version inside the database
    // so they are part of the backup itself.
    context
        .sql
        .set_raw_config_int("backup_time", timestamp)
        .await?;
    context
        .sql
        .set_raw_config_int("backup_version", DCBACKUP_VERSION)
        .await?;
    sql::housekeeping(context).await.log_err(context).ok();
    context
        .sql
        .call_write(|conn| {
            // Compacting first keeps the backup small; failure is non-fatal.
            conn.execute("VACUUM;", ())
                .map_err(|err| warn!(context, "Vacuum failed, exporting anyway {err}"))
                .ok();
            // Attach the (optionally encrypted) destination database and copy
            // everything into it via SQLCipher's export function.
            conn.execute("ATTACH DATABASE ? AS backup KEY ?", (dest, passphrase))
                .context("failed to attach backup database")?;
            let res = conn
                .query_row("SELECT sqlcipher_export('backup')", [], |_row| Ok(()))
                .context("failed to export to attached backup database");
            conn.execute(
                "UPDATE backup.config SET value='0' WHERE keyname='verified_one_on_one_chats';",
                [],
            )
            .ok(); // If verified_one_on_one_chats was not set, this errors, which we ignore
            // Detach before propagating any export error so the connection is
            // not left with the backup database attached.
            conn.execute("DETACH DATABASE backup", [])
                .context("failed to detach backup database")?;
            res?;
            Ok(())
        })
        .await
}
811
812/// Sets `Config::BccSelf` (and `DeleteServerAfter` to "never" in effect) if needed so that new
813/// messages are present on the server after a backup restoration or available for all devices in
814/// multi-device case. NB: Calling this after a backup import isn't reliable as we can crash in
815/// between, but this is a problem only for old backups, new backups already have `BccSelf` set if
816/// necessary.
817async fn adjust_bcc_self(context: &Context) -> Result<()> {
818    if context.is_chatmail().await? && !context.config_exists(Config::BccSelf).await? {
819        context.set_config(Config::BccSelf, Some("1")).await?;
820    }
821    Ok(())
822}
823
824async fn check_backup_version(context: &Context) -> Result<()> {
825    let version = (context.sql.get_raw_config_int("backup_version").await?).unwrap_or(2);
826    ensure!(
827        version <= DCBACKUP_VERSION,
828        "Backup too new, please update Delta Chat"
829    );
830    Ok(())
831}
832
#[cfg(test)]
mod tests {
    use std::time::Duration;

    use tokio::task;

    use super::*;
    use crate::config::Config;
    use crate::test_utils::{alice_keypair, TestContext};

    /// Exports Alice's public key to an `.asc` file and checks both the
    /// generated filename pattern and that the file contents are exactly
    /// the ASCII-armored key.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_public_key_to_asc_file() {
        let context = TestContext::new().await;
        let key = alice_keypair().public;
        let blobdir = Path::new("$BLOBDIR");
        let filename = export_key_to_asc_file(&context.ctx, blobdir, "a@b", None, &key)
            .await
            .unwrap();
        assert!(filename.starts_with("public-key-a@b-default-"));
        assert!(filename.ends_with(".asc"));
        let blobdir = context.ctx.get_blobdir().to_str().unwrap();
        // Read the exported file back from the real blob directory.
        let filename = format!("{blobdir}/{filename}");
        let bytes = tokio::fs::read(&filename).await.unwrap();

        assert_eq!(bytes, key.to_asc(None).into_bytes());
    }

    /// Exports Alice's private key to an `.asc` file, verifies filename,
    /// fingerprint and file contents, then imports it into a fresh context.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_import_private_key_exported_to_asc_file() {
        let context = TestContext::new().await;
        let key = alice_keypair().secret;
        let blobdir = Path::new("$BLOBDIR");
        let filename = export_key_to_asc_file(&context.ctx, blobdir, "a@b", None, &key)
            .await
            .unwrap();
        // The fingerprint is embedded in the exported filename.
        let fingerprint = filename
            .strip_prefix("private-key-a@b-default-")
            .unwrap()
            .strip_suffix(".asc")
            .unwrap();
        assert_eq!(fingerprint, key.dc_fingerprint().hex());
        let blobdir = context.ctx.get_blobdir().to_str().unwrap();
        // Read the exported file back from the real blob directory.
        let filename = format!("{blobdir}/{filename}");
        let bytes = tokio::fs::read(&filename).await.unwrap();

        assert_eq!(bytes, key.to_asc(None).into_bytes());

        let alice = &TestContext::new().await;
        if let Err(err) = imex(alice, ImexMode::ImportSelfKeys, Path::new(&filename), None).await {
            panic!("got error on import: {err:#}");
        }
    }

    /// Round-trips self keys through a directory: export from one context,
    /// import into another.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_and_import_key_from_dir() {
        let export_dir = tempfile::tempdir().unwrap();

        let context = TestContext::new_alice().await;
        if let Err(err) = imex(
            &context.ctx,
            ImexMode::ExportSelfKeys,
            export_dir.path(),
            None,
        )
        .await
        {
            panic!("got error on export: {err:#}");
        }

        let context2 = TestContext::new().await;
        if let Err(err) = imex(
            &context2.ctx,
            ImexMode::ImportSelfKeys,
            export_dir.path(),
            None,
        )
        .await
        {
            panic!("got error on import: {err:#}");
        }
    }

    /// Checks that importing a second key into an already-configured context
    /// fails, keeps the old key, and that messages encrypted with the old key
    /// remain readable.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_import_second_key() -> Result<()> {
        let alice = &TestContext::new_alice().await;
        let chat = alice.create_chat(alice).await;
        let sent = alice.send_text(chat.id, "Encrypted with old key").await;
        let export_dir = tempfile::tempdir().unwrap();

        // Export keys from a different account with the same address.
        let alice = &TestContext::new().await;
        alice.configure_addr("alice@example.org").await;
        imex(alice, ImexMode::ExportSelfKeys, export_dir.path(), None).await?;

        let alice = &TestContext::new_alice().await;
        let old_key = key::load_self_secret_key(alice).await?;

        assert!(
            imex(alice, ImexMode::ImportSelfKeys, export_dir.path(), None)
                .await
                .is_err()
        );

        // Importing a second key is not allowed anymore,
        // even as a non-default key.
        assert_eq!(key::load_self_secret_key(alice).await?, old_key);

        assert_eq!(key::load_self_secret_keyring(alice).await?, vec![old_key]);

        let msg = alice.recv_msg(&sent).await;
        assert!(msg.get_showpadlock());
        assert_eq!(msg.chat_id, alice.get_self_chat().await.id);
        assert_eq!(msg.get_text(), "Encrypted with old key");

        Ok(())
    }

    /// Exports a backup from a configured context and imports it into an
    /// unconfigured one, checking progress events, passphrase rejection and
    /// that config values survive (or deliberately do not survive) the trip.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_and_import_backup() -> Result<()> {
        for set_verified_oneonone_chats in [true, false] {
            let backup_dir = tempfile::tempdir().unwrap();

            let context1 = TestContext::new_alice().await;
            assert!(context1.is_configured().await?);
            if set_verified_oneonone_chats {
                context1
                    .set_config_bool(Config::VerifiedOneOnOneChats, true)
                    .await?;
            }

            let context2 = TestContext::new().await;
            assert!(!context2.is_configured().await?);
            assert!(has_backup(&context2, backup_dir.path()).await.is_err());

            // export from context1
            assert!(
                imex(&context1, ImexMode::ExportBackup, backup_dir.path(), None)
                    .await
                    .is_ok()
            );
            let _event = context1
                .evtracker
                .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
                .await;

            // import to context2
            let backup = has_backup(&context2, backup_dir.path()).await?;

            // Import of unencrypted backup with incorrect "foobar" backup passphrase fails.
            assert!(imex(
                &context2,
                ImexMode::ImportBackup,
                backup.as_ref(),
                Some("foobar".to_string())
            )
            .await
            .is_err());

            assert!(
                imex(&context2, ImexMode::ImportBackup, backup.as_ref(), None)
                    .await
                    .is_ok()
            );
            let _event = context2
                .evtracker
                .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
                .await;

            assert!(context2.is_configured().await?);
            assert_eq!(
                context2.get_config(Config::Addr).await?,
                Some("alice@example.org".to_string())
            );
            // VerifiedOneOnOneChats is not transferred by backup import ...
            assert_eq!(
                context2
                    .get_config_bool(Config::VerifiedOneOnOneChats)
                    .await?,
                false
            );
            // ... but stays as set on the exporting context.
            assert_eq!(
                context1
                    .get_config_bool(Config::VerifiedOneOnOneChats)
                    .await?,
                set_verified_oneonone_chats
            );
        }
        Ok(())
    }

    /// Checks that chatmail-specific config defaults (BccSelf,
    /// DeleteServerAfter) are displayed correctly and reset as expected after
    /// exporting and importing a backup of a chatmail account.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_export_import_chatmail_backup() -> Result<()> {
        let backup_dir = tempfile::tempdir().unwrap();

        let context1 = &TestContext::new_alice().await;

        // Check that the settings are displayed correctly.
        assert_eq!(
            context1.get_config(Config::BccSelf).await?,
            Some("1".to_string())
        );
        assert_eq!(
            context1.get_config(Config::DeleteServerAfter).await?,
            Some("0".to_string())
        );
        context1.set_config_bool(Config::IsChatmail, true).await?;
        assert_eq!(
            context1.get_config(Config::BccSelf).await?,
            Some("0".to_string())
        );
        assert_eq!(
            context1.get_config(Config::DeleteServerAfter).await?,
            Some("1".to_string())
        );

        assert_eq!(context1.get_config_delete_server_after().await?, Some(0));
        imex(context1, ImexMode::ExportBackup, backup_dir.path(), None).await?;
        let _event = context1
            .evtracker
            .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
            .await;

        let context2 = &TestContext::new().await;
        let backup = has_backup(context2, backup_dir.path()).await?;
        imex(context2, ImexMode::ImportBackup, backup.as_ref(), None).await?;
        let _event = context2
            .evtracker
            .get_matching(|evt| matches!(evt, EventType::ImexProgress(1000)))
            .await;
        assert!(context2.is_configured().await?);
        assert!(context2.is_chatmail().await?);
        for ctx in [context1, context2] {
            assert_eq!(
                ctx.get_config(Config::BccSelf).await?,
                Some("1".to_string())
            );
            assert_eq!(
                ctx.get_config(Config::DeleteServerAfter).await?,
                Some("0".to_string())
            );
            assert_eq!(ctx.get_config_delete_server_after().await?, None);
        }
        Ok(())
    }

    /// This is a regression test for
    /// https://github.com/deltachat/deltachat-android/issues/2263
    /// where the config cache wasn't reset properly after a backup.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn test_import_backup_reset_config_cache() -> Result<()> {
        let backup_dir = tempfile::tempdir()?;
        let context1 = TestContext::new_alice().await;
        let context2 = TestContext::new().await;
        assert!(!context2.is_configured().await?);

        // export from context1
        imex(&context1, ImexMode::ExportBackup, backup_dir.path(), None).await?;

        // import to context2
        let backup = has_backup(&context2, backup_dir.path()).await?;
        let context2_cloned = context2.clone();
        let handle = task::spawn(async move {
            imex(
                &context2_cloned,
                ImexMode::ImportBackup,
                backup.as_ref(),
                None,
            )
            .await
            .unwrap();
        });

        // Race the import task: repeatedly populate the config cache with
        // the stale (unconfigured) value while the import is in progress.
        while !handle.is_finished() {
            // The database is still unconfigured;
            // fill the config cache with the old value.
            context2.is_configured().await.ok();
            tokio::time::sleep(Duration::from_micros(1)).await;
        }

        // Assert that the config cache has the new value now.
        assert!(context2.is_configured().await?);

        Ok(())
    }
}