deltachat/scheduler.rs

use std::cmp;
use std::num::NonZeroUsize;

use anyhow::{Context as _, Error, Result, bail};
use async_channel::{self as channel, Receiver, Sender};
use futures::future::try_join_all;
use futures_lite::FutureExt;
use tokio::sync::{RwLock, oneshot};
use tokio::task;
use tokio_util::sync::CancellationToken;
use tokio_util::task::TaskTracker;

pub(crate) use self::connectivity::ConnectivityStore;
use crate::config::{self, Config};
use crate::contact::{ContactId, RecentlySeenLoop};
use crate::context::Context;
use crate::download::{DownloadState, download_msg};
use crate::ephemeral::{self, delete_expired_imap_messages};
use crate::events::EventType;
use crate::imap::{FolderMeaning, Imap, session::Session};
use crate::location;
use crate::log::{LogExt, warn};
use crate::message::MsgId;
use crate::smtp::{Smtp, send_smtp_messages};
use crate::sql;
use crate::stats::maybe_send_stats;
use crate::tools::{self, duration_to_str, maybe_add_time_based_warnings, time, time_elapsed};
use crate::transport::ConfiguredLoginParam;
use crate::{constants, stats};

pub(crate) mod connectivity;

/// State of the IO scheduler, as stored on the [`Context`].
///
/// The IO scheduler can be stopped or started, but the core can also pause it.  After a pause
/// the IO scheduler is restarted only if it was running before being paused or if
/// [`Context::start_io`] was called in the meantime while it was paused.
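///
/// A minimal sketch of the basic transitions, assuming a configured [`Context`]
/// named `ctx` (illustrative only, not a runnable doctest):
///
/// ```ignore
/// ctx.scheduler.start(&ctx).await;           // Stopped -> Started
/// assert!(ctx.scheduler.is_running().await);
/// ctx.scheduler.stop(&ctx).await;            // Started -> Stopped
/// assert!(!ctx.scheduler.is_running().await);
/// ```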
#[derive(Debug, Default)]
pub(crate) struct SchedulerState {
    inner: RwLock<InnerSchedulerState>,
}

impl SchedulerState {
    pub(crate) fn new() -> Self {
        Default::default()
    }

    /// Whether the scheduler is currently running.
    pub(crate) async fn is_running(&self) -> bool {
        let inner = self.inner.read().await;
        matches!(*inner, InnerSchedulerState::Started(_))
    }

    /// Starts the scheduler if it is not yet started.
    pub(crate) async fn start(&self, context: &Context) {
        let mut inner = self.inner.write().await;
        match *inner {
            InnerSchedulerState::Started(_) => (),
            InnerSchedulerState::Stopped => Self::do_start(&mut inner, context).await,
            InnerSchedulerState::Paused {
                ref mut started, ..
            } => *started = true,
        }
        context.update_connectivities(&inner);
    }

    /// Starts the scheduler if it is not yet started.
    async fn do_start(inner: &mut InnerSchedulerState, context: &Context) {
        info!(context, "starting IO");

        // Notify message processing loop
        // to allow processing old messages after restart.
        context.new_msgs_notify.notify_one();

        match Scheduler::start(context).await {
            Ok(scheduler) => {
                *inner = InnerSchedulerState::Started(scheduler);
                context.emit_event(EventType::ConnectivityChanged);
            }
            Err(err) => error!(context, "Failed to start IO: {:#}", err),
        }
    }

    /// Stops the scheduler if it is currently running.
    pub(crate) async fn stop(&self, context: &Context) {
        let mut inner = self.inner.write().await;
        match *inner {
            InnerSchedulerState::Started(_) => {
                Self::do_stop(&mut inner, context, InnerSchedulerState::Stopped).await
            }
            InnerSchedulerState::Stopped => (),
            InnerSchedulerState::Paused {
                ref mut started, ..
            } => *started = false,
        }
        context.update_connectivities(&inner);
    }

    /// Stops the scheduler if it is currently running.
    async fn do_stop(
        inner: &mut InnerSchedulerState,
        context: &Context,
        new_state: InnerSchedulerState,
    ) {
        // Sending an event wakes up event pollers (get_next_event)
        // so the caller of stop_io() can arrange for proper termination.
        // For this, the caller needs to instruct the event poller
        // to terminate on receiving the next event and then call stop_io()
        // which will emit the below event(s)
        info!(context, "stopping IO");

        // Wake up message processing loop even if there are no messages
        // to allow for clean shutdown.
        context.new_msgs_notify.notify_one();

        let debug_logging = context
            .debug_logging
            .write()
            .expect("RwLock is poisoned")
            .take();
        if let Some(debug_logging) = debug_logging {
            debug_logging.loop_handle.abort();
            debug_logging.loop_handle.await.ok();
        }
        let prev_state = std::mem::replace(inner, new_state);
        context.emit_event(EventType::ConnectivityChanged);
        match prev_state {
            InnerSchedulerState::Started(scheduler) => scheduler.stop(context).await,
            InnerSchedulerState::Stopped | InnerSchedulerState::Paused { .. } => (),
        }
    }

    /// Pauses the IO scheduler.
    ///
    /// If it is currently running the scheduler will be stopped.  When the
    /// [`IoPausedGuard`] is dropped the scheduler is started again.
    ///
    /// If [`SchedulerState::start`] or [`SchedulerState::stop`] is called in the meantime,
    /// resuming does the right thing and restores the scheduler to the state requested by
    /// the last call.
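    ///
    /// A minimal sketch of that interplay, assuming a configured [`Context`] named `ctx`
    /// (illustrative only, not a runnable doctest):
    ///
    /// ```ignore
    /// let guard = ctx.scheduler.pause(&ctx).await?; // a running scheduler is stopped
    /// ctx.scheduler.stop(&ctx).await;               // only records that stop was requested
    /// drop(guard);                                  // scheduler stays stopped, as last requested
    /// ```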
    pub(crate) async fn pause(&'_ self, context: &Context) -> Result<IoPausedGuard> {
        {
            let mut inner = self.inner.write().await;
            match *inner {
                InnerSchedulerState::Started(_) => {
                    let new_state = InnerSchedulerState::Paused {
                        started: true,
                        pause_guards_count: NonZeroUsize::MIN,
                    };
                    Self::do_stop(&mut inner, context, new_state).await;
                }
                InnerSchedulerState::Stopped => {
                    *inner = InnerSchedulerState::Paused {
                        started: false,
                        pause_guards_count: NonZeroUsize::MIN,
                    };
                }
                InnerSchedulerState::Paused {
                    ref mut pause_guards_count,
                    ..
                } => {
                    *pause_guards_count = pause_guards_count
                        .checked_add(1)
                        .ok_or_else(|| Error::msg("Too many pause guards active"))?
                }
            }
            context.update_connectivities(&inner);
        }

        let (tx, rx) = oneshot::channel();
        let context = context.clone();
        tokio::spawn(async move {
            rx.await.ok();
            let mut inner = context.scheduler.inner.write().await;
            match *inner {
                InnerSchedulerState::Started(_) => {
                    warn!(&context, "IoPausedGuard resume: started instead of paused");
                }
                InnerSchedulerState::Stopped => {
                    warn!(&context, "IoPausedGuard resume: stopped instead of paused");
                }
                InnerSchedulerState::Paused {
                    ref started,
                    ref mut pause_guards_count,
                } => {
                    if *pause_guards_count == NonZeroUsize::MIN {
                        match *started {
                            true => SchedulerState::do_start(&mut inner, &context).await,
                            false => *inner = InnerSchedulerState::Stopped,
                        }
                    } else {
                        let new_count = pause_guards_count.get() - 1;
                        // SAFETY: Value was >=2 before due to if condition
                        *pause_guards_count = NonZeroUsize::new(new_count).unwrap();
                    }
                }
            }
            context.update_connectivities(&inner);
        });
        Ok(IoPausedGuard { sender: Some(tx) })
    }

    /// Restarts the scheduler, only if it is running.
    pub(crate) async fn restart(&self, context: &Context) {
        info!(context, "restarting IO");
        if self.is_running().await {
            self.stop(context).await;
            self.start(context).await;
        }
    }

    /// Indicate that the network likely has come back.
    pub(crate) async fn maybe_network(&self) {
        let inner = self.inner.read().await;
        let (inboxes, oboxes) = match *inner {
            InnerSchedulerState::Started(ref scheduler) => {
                scheduler.maybe_network();
                let inboxes = scheduler
                    .inboxes
                    .iter()
                    .map(|b| b.conn_state.state.connectivity.clone())
                    .collect::<Vec<_>>();
                let oboxes = scheduler
                    .oboxes
                    .iter()
                    .map(|b| b.conn_state.state.connectivity.clone())
                    .collect::<Vec<_>>();
                (inboxes, oboxes)
            }
            _ => return,
        };
        drop(inner);
        connectivity::idle_interrupted(inboxes, oboxes);
    }

    /// Indicate that the network likely is lost.
    pub(crate) async fn maybe_network_lost(&self, context: &Context) {
        let inner = self.inner.read().await;
        let stores = match *inner {
            InnerSchedulerState::Started(ref scheduler) => {
                scheduler.maybe_network_lost();
                scheduler
                    .boxes()
                    .map(|b| b.conn_state.state.connectivity.clone())
                    .collect()
            }
            _ => return,
        };
        drop(inner);
        connectivity::maybe_network_lost(context, stores);
    }

    pub(crate) async fn interrupt_inbox(&self) {
        let inner = self.inner.read().await;
        if let InnerSchedulerState::Started(ref scheduler) = *inner {
            scheduler.interrupt_inbox();
        }
    }

    /// Interrupt optional boxes (mvbox currently) loops.
    pub(crate) async fn interrupt_oboxes(&self) {
        let inner = self.inner.read().await;
        if let InnerSchedulerState::Started(ref scheduler) = *inner {
            scheduler.interrupt_oboxes();
        }
    }

    pub(crate) async fn interrupt_smtp(&self) {
        let inner = self.inner.read().await;
        if let InnerSchedulerState::Started(ref scheduler) = *inner {
            scheduler.interrupt_smtp();
        }
    }

    pub(crate) async fn interrupt_ephemeral_task(&self) {
        let inner = self.inner.read().await;
        if let InnerSchedulerState::Started(ref scheduler) = *inner {
            scheduler.interrupt_ephemeral_task();
        }
    }

    pub(crate) async fn interrupt_location(&self) {
        let inner = self.inner.read().await;
        if let InnerSchedulerState::Started(ref scheduler) = *inner {
            scheduler.interrupt_location();
        }
    }

    pub(crate) async fn interrupt_recently_seen(&self, contact_id: ContactId, timestamp: i64) {
        let inner = self.inner.read().await;
        if let InnerSchedulerState::Started(ref scheduler) = *inner {
            scheduler.interrupt_recently_seen(contact_id, timestamp);
        }
    }
}

#[derive(Debug, Default)]
pub(crate) enum InnerSchedulerState {
    Started(Scheduler),
    #[default]
    Stopped,
    Paused {
        started: bool,
        pause_guards_count: NonZeroUsize,
    },
}

/// Guard to make sure the IO Scheduler is resumed.
///
/// Returned by [`SchedulerState::pause`].  To resume the IO scheduler simply drop this
/// guard.
#[derive(Default, Debug)]
pub(crate) struct IoPausedGuard {
    sender: Option<oneshot::Sender<()>>,
}

impl Drop for IoPausedGuard {
    fn drop(&mut self) {
        if let Some(sender) = self.sender.take() {
            // Can only fail if receiver is dropped, but then we're already resumed.
            sender.send(()).ok();
        }
    }
}

#[derive(Debug)]
struct SchedBox {
    /// Hostname of the chatmail/email relay in use.
    host: String,
    meaning: FolderMeaning,
    conn_state: ImapConnectionState,

    /// IMAP loop task handle.
    handle: task::JoinHandle<()>,
}

/// Job and connection scheduler.
#[derive(Debug)]
pub(crate) struct Scheduler {
    /// Inboxes, one per transport.
    inboxes: Vec<SchedBox>,
    /// Optional boxes -- mvbox.
    oboxes: Vec<SchedBox>,
    smtp: SmtpConnectionState,
    smtp_handle: task::JoinHandle<()>,
    ephemeral_handle: task::JoinHandle<()>,
    ephemeral_interrupt_send: Sender<()>,
    location_handle: task::JoinHandle<()>,
    location_interrupt_send: Sender<()>,

    recently_seen_loop: RecentlySeenLoop,
}

async fn download_msgs(context: &Context, session: &mut Session) -> Result<()> {
    let msg_ids = context
        .sql
        .query_map_vec("SELECT msg_id FROM download", (), |row| {
            let msg_id: MsgId = row.get(0)?;
            Ok(msg_id)
        })
        .await?;

    for msg_id in msg_ids {
        if let Err(err) = download_msg(context, msg_id, session).await {
            warn!(context, "Failed to download message {msg_id}: {:#}.", err);

            // Update download state to failure
            // so it can be retried.
            //
            // On success update_download_state() is not needed
            // as receive_imf() already
            // set the state and emitted the event.
            msg_id
                .update_download_state(context, DownloadState::Failure)
                .await?;
        }
        context
            .sql
            .execute("DELETE FROM download WHERE msg_id=?", (msg_id,))
            .await?;
    }

    Ok(())
}

async fn inbox_loop(
    ctx: Context,
    started: oneshot::Sender<()>,
    inbox_handlers: ImapConnectionHandlers,
) {
    use futures::future::FutureExt;

    info!(ctx, "Starting inbox loop.");
    let ImapConnectionHandlers {
        mut connection,
        stop_token,
    } = inbox_handlers;

    let ctx1 = ctx.clone();
    let fut = async move {
        let ctx = ctx1;
        if let Err(()) = started.send(()) {
            warn!(ctx, "Inbox loop, missing started receiver.");
            return;
        };

        let mut old_session: Option<Session> = None;
        loop {
            let session = if let Some(session) = old_session.take() {
                session
            } else {
                info!(ctx, "Preparing new IMAP session for inbox.");
                match connection.prepare(&ctx).await {
                    Err(err) => {
                        warn!(ctx, "Failed to prepare inbox connection: {err:#}.");
                        continue;
                    }
                    Ok(session) => session,
                }
            };

            match inbox_fetch_idle(&ctx, &mut connection, session).await {
                Err(err) => warn!(ctx, "Failed inbox fetch_idle: {err:#}."),
                Ok(session) => {
                    info!(
                        ctx,
                        "IMAP loop iteration for inbox finished, keeping the session."
                    );
                    old_session = Some(session);
                }
            }
        }
    };

    stop_token
        .cancelled()
        .map(|_| {
            info!(ctx, "Shutting down inbox loop.");
        })
        .race(fut)
        .await;
}

/// Converts a folder meaning into the folder configuration key and folder name;
/// used internally by [fetch_idle] and [Context::background_fetch].
///
/// Returns the folder configuration key and folder name
/// if such a folder is configured, `Ok(None)` otherwise.
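///
/// A minimal usage sketch, assuming a configured [`Context`] named `ctx`
/// (illustrative only, not a runnable doctest):
///
/// ```ignore
/// if let Some((folder_config, folder_name)) =
///     convert_folder_meaning(&ctx, FolderMeaning::Inbox).await?
/// {
///     // For the inbox this yields `Config::ConfiguredInboxFolder` and the
///     // IMAP folder name to fetch from and watch.
/// }
/// ```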
pub async fn convert_folder_meaning(
    ctx: &Context,
    folder_meaning: FolderMeaning,
) -> Result<Option<(Config, String)>> {
    let folder_config = match folder_meaning.to_config() {
        Some(c) => c,
        None => {
            // Such folder cannot be configured,
            // e.g. a `FolderMeaning::Spam` folder.
            return Ok(None);
        }
    };

    let folder = ctx
        .get_config(folder_config)
        .await
        .with_context(|| format!("Failed to retrieve {folder_config} folder"))?;

    if let Some(watch_folder) = folder {
        Ok(Some((folder_config, watch_folder)))
    } else {
        Ok(None)
    }
}

async fn inbox_fetch_idle(ctx: &Context, imap: &mut Imap, mut session: Session) -> Result<Session> {
    if !ctx.get_config_bool(Config::FixIsChatmail).await? {
        ctx.set_config_internal(
            Config::IsChatmail,
            crate::config::from_bool(session.is_chatmail()),
        )
        .await?;
    }

    // Update quota no more than once a minute.
    if ctx.quota_needs_update(session.transport_id(), 60).await
        && let Err(err) = ctx.update_recent_quota(&mut session).await
    {
        warn!(ctx, "Failed to update quota: {:#}.", err);
    }

    if let Ok(()) = imap.resync_request_receiver.try_recv()
        && let Err(err) = session.resync_folders(ctx).await
    {
        warn!(ctx, "Failed to resync folders: {:#}.", err);
        imap.resync_request_sender.try_send(()).ok();
    }

    maybe_add_time_based_warnings(ctx).await;

    match ctx.get_config_i64(Config::LastHousekeeping).await {
        Ok(last_housekeeping_time) => {
            let next_housekeeping_time =
                last_housekeeping_time.saturating_add(constants::HOUSEKEEPING_PERIOD);
            if next_housekeeping_time <= time() {
                sql::housekeeping(ctx).await.log_err(ctx).ok();
            }
        }
        Err(err) => {
            warn!(ctx, "Failed to get last housekeeping time: {}", err);
        }
    };

    maybe_send_stats(ctx).await.log_err(ctx).ok();
    match ctx.get_config_bool(Config::FetchedExistingMsgs).await {
        Ok(fetched_existing_msgs) => {
            if !fetched_existing_msgs {
                // Consider it done even if we fail.
                //
                // This operation is not critical enough to retry,
                // especially if the error is persistent.
                if let Err(err) = ctx
                    .set_config_internal(Config::FetchedExistingMsgs, config::from_bool(true))
                    .await
                {
                    warn!(ctx, "Can't set Config::FetchedExistingMsgs: {:#}", err);
                }

                if let Err(err) = imap.fetch_existing_msgs(ctx, &mut session).await {
                    warn!(ctx, "Failed to fetch existing messages: {:#}", err);
                }
            }
        }
        Err(err) => {
            warn!(ctx, "Can't get Config::FetchedExistingMsgs: {:#}", err);
        }
    }

    download_msgs(ctx, &mut session)
        .await
        .context("Failed to download messages")?;
    session
        .update_metadata(ctx)
        .await
        .context("update_metadata")?;
    session
        .register_token(ctx)
        .await
        .context("Failed to register push token")?;

    let session = fetch_idle(ctx, imap, session, FolderMeaning::Inbox).await?;
    Ok(session)
}

/// Implements a single iteration of the IMAP loop.
///
/// This function performs all IMAP operations on a single folder, selecting it if necessary and
/// handling all the errors. In case of an error, the error is returned and the session is dropped;
/// otherwise the session is returned so that it can be reused.
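///
/// The loops in this module drive it roughly like this (a sketch, assuming an already
/// prepared `connection: Imap` and `session: Session`):
///
/// ```ignore
/// match fetch_idle(&ctx, &mut connection, session, folder_meaning).await {
///     // Keep the session for the next iteration.
///     Ok(session) => old_session = Some(session),
///     // The session was dropped; a fresh one is prepared on the next iteration.
///     Err(err) => warn!(ctx, "Failed fetch_idle: {err:#}"),
/// }
/// ```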
async fn fetch_idle(
    ctx: &Context,
    connection: &mut Imap,
    mut session: Session,
    folder_meaning: FolderMeaning,
) -> Result<Session> {
    let Some((folder_config, watch_folder)) = convert_folder_meaning(ctx, folder_meaning).await?
    else {
        // The folder is not configured.
        // For example, this happens if the server does not have Sent folder
        // but watching Sent folder is enabled.
        connection.connectivity.set_not_configured(ctx);
        connection.idle_interrupt_receiver.recv().await.ok();
        bail!("Cannot fetch folder {folder_meaning} because it is not configured");
    };

    if folder_config == Config::ConfiguredInboxFolder {
        session
            .store_seen_flags_on_imap(ctx)
            .await
            .context("store_seen_flags_on_imap")?;
    }

    if !ctx.should_delete_to_trash().await?
        || ctx
            .get_config(Config::ConfiguredTrashFolder)
            .await?
            .is_some()
    {
        // Fetch the watched folder.
        connection
            .fetch_move_delete(ctx, &mut session, &watch_folder, folder_meaning)
            .await
            .context("fetch_move_delete")?;

        // Mark expired messages for deletion. Marked messages will be deleted from the server
        // on the next iteration of `fetch_move_delete`. `delete_expired_imap_messages` is not
        // called right before `fetch_move_delete` because it is not well optimized and would
        // otherwise slow down message fetching.
        delete_expired_imap_messages(ctx)
            .await
            .context("delete_expired_imap_messages")?;
    } else if folder_config == Config::ConfiguredInboxFolder {
        session.last_full_folder_scan.lock().await.take();
    }

    // Scan additional folders only after finishing fetching the watched folder.
    //
    // On iOS the application has strictly limited time to work in background, so we may not
    // be able to scan all folders before time is up if there are many of them.
    if folder_config == Config::ConfiguredInboxFolder {
        // Only scan on the Inbox thread in order to prevent parallel scans, which might lead to duplicate messages
        match connection
            .scan_folders(ctx, &mut session)
            .await
            .context("scan_folders")
        {
            Err(err) => {
                // Don't reconnect: if there is a problem with the connection we will notice it when IDLEing,
                // but maybe just one folder can't be selected or something.
                warn!(ctx, "{:#}", err);
            }
            Ok(true) => {
                // Fetch the watched folder again in case scanning other folders moved messages
                // there.
                //
                // In most cases this will select the watched folder and return because there are
                // no new messages. We want to select the watched folder anyway before going IDLE
                // there, so this does not take an additional protocol round-trip.
                connection
                    .fetch_move_delete(ctx, &mut session, &watch_folder, folder_meaning)
                    .await
                    .context("fetch_move_delete after scan_folders")?;
            }
            Ok(false) => {}
        }
    }

    // Synchronize Seen flags.
    session
        .sync_seen_flags(ctx, &watch_folder)
        .await
        .context("sync_seen_flags")
        .log_err(ctx)
        .ok();

    connection.connectivity.set_idle(ctx);

    ctx.emit_event(EventType::ImapInboxIdle);

    if !session.can_idle() {
        info!(
            ctx,
            "IMAP session does not support IDLE, going to fake idle."
        );
        connection.fake_idle(ctx, watch_folder).await?;
        return Ok(session);
    }

    if ctx
        .get_config_bool(Config::DisableIdle)
        .await
        .context("Failed to get disable_idle config")
        .log_err(ctx)
        .unwrap_or_default()
    {
        info!(ctx, "IMAP IDLE is disabled, going to fake idle.");
        connection.fake_idle(ctx, watch_folder).await?;
        return Ok(session);
    }

    info!(
        ctx,
        "IMAP session in folder {watch_folder:?} supports IDLE, using it."
    );
    let session = session
        .idle(
            ctx,
            connection.idle_interrupt_receiver.clone(),
            &watch_folder,
        )
        .await
        .context("idle")?;

    Ok(session)
}

async fn simple_imap_loop(
    ctx: Context,
    started: oneshot::Sender<()>,
    inbox_handlers: ImapConnectionHandlers,
    folder_meaning: FolderMeaning,
) {
    use futures::future::FutureExt;

    info!(ctx, "Starting simple loop for {folder_meaning}.");
    let ImapConnectionHandlers {
        mut connection,
        stop_token,
    } = inbox_handlers;

    let ctx1 = ctx.clone();

    let fut = async move {
        let ctx = ctx1;
        if let Err(()) = started.send(()) {
            warn!(
                ctx,
                "Simple imap loop for {folder_meaning}, missing started receiver."
            );
            return;
        }

        let mut old_session: Option<Session> = None;
        loop {
            let session = if let Some(session) = old_session.take() {
                session
            } else {
                info!(ctx, "Preparing new IMAP session for {folder_meaning}.");
                match connection.prepare(&ctx).await {
                    Err(err) => {
                        warn!(
                            ctx,
                            "Failed to prepare {folder_meaning} connection: {err:#}."
                        );
                        continue;
                    }
                    Ok(session) => session,
                }
            };

            match fetch_idle(&ctx, &mut connection, session, folder_meaning).await {
                Err(err) => warn!(ctx, "Failed fetch_idle: {err:#}"),
                Ok(session) => {
                    info!(
                        ctx,
                        "IMAP loop iteration for {folder_meaning} finished, keeping the session"
                    );
                    old_session = Some(session);
                }
            }
        }
    };

    stop_token
        .cancelled()
        .map(|_| {
            info!(ctx, "Shutting down IMAP loop for {folder_meaning}.");
        })
        .race(fut)
        .await;
}

async fn smtp_loop(
    ctx: Context,
    started: oneshot::Sender<()>,
    smtp_handlers: SmtpConnectionHandlers,
) {
    use futures::future::FutureExt;

    info!(ctx, "Starting SMTP loop.");
    let SmtpConnectionHandlers {
        mut connection,
        stop_token,
        idle_interrupt_receiver,
    } = smtp_handlers;

    let ctx1 = ctx.clone();
    let fut = async move {
        let ctx = ctx1;
        if let Err(()) = started.send(()) {
            warn!(&ctx, "SMTP loop, missing started receiver.");
            return;
        }

        let mut timeout = None;
        loop {
            if let Err(err) = send_smtp_messages(&ctx, &mut connection).await {
                warn!(ctx, "send_smtp_messages failed: {:#}.", err);
                timeout = Some(timeout.unwrap_or(30));
            } else {
                timeout = None;
                let duration_until_can_send = ctx.ratelimit.read().await.until_can_send();
                if !duration_until_can_send.is_zero() {
                    info!(
                        ctx,
                        "smtp got rate limited, waiting for {} until can send again",
                        duration_to_str(duration_until_can_send)
                    );
                    tokio::time::sleep(duration_until_can_send).await;
                    continue;
                }
            }

            stats::maybe_update_message_stats(&ctx)
                .await
                .log_err(&ctx)
                .ok();

            // Fake Idle
            info!(ctx, "SMTP fake idle started.");
            match &connection.last_send_error {
                None => connection.connectivity.set_idle(&ctx),
                Some(err) => connection.connectivity.set_err(&ctx, err),
            }

            // If send_smtp_messages() failed, we set a timeout for the fake-idle so that
            // sending is retried (at the latest) after the timeout. If sending fails
            // again, we increase the timeout exponentially, in order not to do lots of
            // unnecessary retries.
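            //
            // A hypothetical example of the growth below: with t = 30 and an
            // uninterrupted sleep of about 30 seconds, the next timeout is drawn
            // from roughly 45..=60 seconds, so repeated failures back off geometrically.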
            if let Some(t) = timeout {
                let now = tools::Time::now();
                info!(
                    ctx,
                    "SMTP has messages to retry, planning to retry {t} seconds later."
                );
                let duration = std::time::Duration::from_secs(t);
                tokio::time::timeout(duration, async {
                    idle_interrupt_receiver.recv().await.unwrap_or_default()
                })
                .await
                .unwrap_or_default();
                let slept = time_elapsed(&now).as_secs();
                timeout = Some(cmp::max(
                    t,
                    slept.saturating_add(rand::random_range((slept / 2)..=slept)),
                ));
            } else {
                info!(ctx, "SMTP has no messages to retry, waiting for interrupt.");
                idle_interrupt_receiver.recv().await.unwrap_or_default();
            };

            info!(ctx, "SMTP fake idle interrupted.")
        }
    };

    stop_token
        .cancelled()
        .map(|_| {
            info!(ctx, "Shutting down SMTP loop.");
        })
        .race(fut)
        .await;
}

impl Scheduler {
    /// Start the scheduler.
    pub async fn start(ctx: &Context) -> Result<Self> {
        let (smtp, smtp_handlers) = SmtpConnectionState::new();

        let (smtp_start_send, smtp_start_recv) = oneshot::channel();
        let (ephemeral_interrupt_send, ephemeral_interrupt_recv) = channel::bounded(1);
        let (location_interrupt_send, location_interrupt_recv) = channel::bounded(1);

        let mut inboxes = Vec::new();
        let mut oboxes = Vec::new();
        let mut start_recvs = Vec::new();

        for (transport_id, configured_login_param) in ConfiguredLoginParam::load_all(ctx).await? {
            let (conn_state, inbox_handlers) =
                ImapConnectionState::new(ctx, transport_id, configured_login_param.clone()).await?;
            let (inbox_start_send, inbox_start_recv) = oneshot::channel();
            let handle = {
                let ctx = ctx.clone();
                task::spawn(inbox_loop(ctx, inbox_start_send, inbox_handlers))
            };
            let host = configured_login_param
                .addr
                .split("@")
                .last()
                .context("address has no host")?
                .to_owned();
            let inbox = SchedBox {
                host: host.clone(),
                meaning: FolderMeaning::Inbox,
                conn_state,
                handle,
            };
            inboxes.push(inbox);
            start_recvs.push(inbox_start_recv);

            if ctx.should_watch_mvbox().await? {
                let (conn_state, handlers) =
                    ImapConnectionState::new(ctx, transport_id, configured_login_param).await?;
                let (start_send, start_recv) = oneshot::channel();
                let ctx = ctx.clone();
                let meaning = FolderMeaning::Mvbox;
                let handle = task::spawn(simple_imap_loop(ctx, start_send, handlers, meaning));
                oboxes.push(SchedBox {
                    host,
                    meaning,
                    conn_state,
                    handle,
                });
                start_recvs.push(start_recv);
            }
        }

        let smtp_handle = {
            let ctx = ctx.clone();
            task::spawn(smtp_loop(ctx, smtp_start_send, smtp_handlers))
        };
        start_recvs.push(smtp_start_recv);

        let ephemeral_handle = {
            let ctx = ctx.clone();
            task::spawn(async move {
                ephemeral::ephemeral_loop(&ctx, ephemeral_interrupt_recv).await;
            })
        };

        let location_handle = {
            let ctx = ctx.clone();
            task::spawn(async move {
                location::location_loop(&ctx, location_interrupt_recv).await;
            })
        };

        let recently_seen_loop = RecentlySeenLoop::new(ctx.clone());

        let res = Self {
            inboxes,
            oboxes,
            smtp,
            smtp_handle,
            ephemeral_handle,
            ephemeral_interrupt_send,
            location_handle,
            location_interrupt_send,
            recently_seen_loop,
        };

        // wait for all loops to be started
        if let Err(err) = try_join_all(start_recvs).await {
            bail!("failed to start scheduler: {err}");
        }

        info!(ctx, "scheduler is running");
        Ok(res)
    }

    fn boxes(&self) -> impl Iterator<Item = &SchedBox> {
        self.inboxes.iter().chain(self.oboxes.iter())
    }

    fn maybe_network(&self) {
        for b in self.boxes() {
            b.conn_state.interrupt();
        }
        self.interrupt_smtp();
    }

    fn maybe_network_lost(&self) {
        for b in self.boxes() {
            b.conn_state.interrupt();
        }
        self.interrupt_smtp();
    }

    fn interrupt_inbox(&self) {
        for b in &self.inboxes {
            b.conn_state.interrupt();
        }
    }

    fn interrupt_oboxes(&self) {
        for b in &self.oboxes {
            b.conn_state.interrupt();
        }
    }

    fn interrupt_smtp(&self) {
        self.smtp.interrupt();
    }

    fn interrupt_ephemeral_task(&self) {
        self.ephemeral_interrupt_send.try_send(()).ok();
    }

    fn interrupt_location(&self) {
        self.location_interrupt_send.try_send(()).ok();
    }

    fn interrupt_recently_seen(&self, contact_id: ContactId, timestamp: i64) {
        self.recently_seen_loop.try_interrupt(contact_id, timestamp);
    }

    /// Halt the scheduler.
    ///
    /// It consumes the scheduler and never fails to stop it. In the worst case, long-running tasks
    /// are forcefully terminated if they cannot shut down within the timeout.
    pub(crate) async fn stop(self, context: &Context) {
        // Send stop signals to tasks so they can shut down cleanly.
        for b in self.boxes() {
            b.conn_state.stop();
        }
        self.smtp.stop();

        // Actually shut down tasks.
        let timeout_duration = std::time::Duration::from_secs(30);

        let tracker = TaskTracker::new();
        for b in self.inboxes.into_iter().chain(self.oboxes.into_iter()) {
            let context = context.clone();
            tracker.spawn(async move {
                tokio::time::timeout(timeout_duration, b.handle)
                    .await
                    .log_err(&context)
            });
        }
        {
            let context = context.clone();
            tracker.spawn(async move {
                tokio::time::timeout(timeout_duration, self.smtp_handle)
                    .await
                    .log_err(&context)
            });
        }
        tracker.close();
        tracker.wait().await;

        // Abort tasks, then await them to ensure the `Future` is dropped.
        // Just aborting a task may keep resources such as the `Context` clone
        // moved into it alive indefinitely, resulting in the database not being
        // closed etc.
        self.ephemeral_handle.abort();
        self.ephemeral_handle.await.ok();
        self.location_handle.abort();
        self.location_handle.await.ok();
        self.recently_seen_loop.abort().await;
    }
}

/// Connection state logic shared between imap and smtp connections.
#[derive(Debug)]
struct ConnectionState {
    /// Cancellation token to interrupt the whole connection.
    stop_token: CancellationToken,
    /// Channel to interrupt idle.
    idle_interrupt_sender: Sender<()>,
    /// Mutex to pass connectivity info between IMAP/SMTP threads and the API
    connectivity: ConnectivityStore,
}

impl ConnectionState {
    /// Shut down this connection completely.
    fn stop(&self) {
        // Trigger shutdown of the run loop.
        self.stop_token.cancel();
    }

    fn interrupt(&self) {
        // Use try_send to avoid blocking on interrupts.
        self.idle_interrupt_sender.try_send(()).ok();
    }
}

#[derive(Debug)]
pub(crate) struct SmtpConnectionState {
    state: ConnectionState,
}

impl SmtpConnectionState {
    fn new() -> (Self, SmtpConnectionHandlers) {
        let stop_token = CancellationToken::new();
        let (idle_interrupt_sender, idle_interrupt_receiver) = channel::bounded(1);

        let handlers = SmtpConnectionHandlers {
            connection: Smtp::new(),
            stop_token: stop_token.clone(),
            idle_interrupt_receiver,
        };

        let state = ConnectionState {
            stop_token,
            idle_interrupt_sender,
            connectivity: handlers.connection.connectivity.clone(),
        };

        let conn = SmtpConnectionState { state };

        (conn, handlers)
    }

    /// Interrupt any form of idle.
    fn interrupt(&self) {
        self.state.interrupt();
    }

    /// Shut down this connection completely.
    fn stop(&self) {
        self.state.stop();
    }
}

struct SmtpConnectionHandlers {
    connection: Smtp,
    stop_token: CancellationToken,
    idle_interrupt_receiver: Receiver<()>,
}

#[derive(Debug)]
pub(crate) struct ImapConnectionState {
    state: ConnectionState,
}

impl ImapConnectionState {
    /// Construct a new connection.
    async fn new(
        context: &Context,
        transport_id: u32,
        login_param: ConfiguredLoginParam,
    ) -> Result<(Self, ImapConnectionHandlers)> {
        let stop_token = CancellationToken::new();
        let (idle_interrupt_sender, idle_interrupt_receiver) = channel::bounded(1);

        let handlers = ImapConnectionHandlers {
            connection: Imap::new(context, transport_id, login_param, idle_interrupt_receiver)
                .await?,
            stop_token: stop_token.clone(),
        };

        let state = ConnectionState {
            stop_token,
            idle_interrupt_sender,
            connectivity: handlers.connection.connectivity.clone(),
        };

        let conn = ImapConnectionState { state };

        Ok((conn, handlers))
    }

    /// Interrupt any form of idle.
    fn interrupt(&self) {
        self.state.interrupt();
    }

    /// Shut down this connection completely.
    fn stop(&self) {
        self.state.stop();
    }
}

#[derive(Debug)]
struct ImapConnectionHandlers {
    connection: Imap,
    stop_token: CancellationToken,
}