1use std::cmp;
2use std::num::NonZeroUsize;
3
4use anyhow::{Context as _, Error, Result, bail};
5use async_channel::{self as channel, Receiver, Sender};
6use futures::future::try_join_all;
7use futures_lite::FutureExt;
8use tokio::sync::{RwLock, oneshot};
9use tokio::task;
10use tokio_util::sync::CancellationToken;
11use tokio_util::task::TaskTracker;
12
13pub(crate) use self::connectivity::ConnectivityStore;
14use crate::config::Config;
15use crate::contact::{ContactId, RecentlySeenLoop};
16use crate::context::Context;
17use crate::download::{download_known_post_messages_without_pre_message, download_msgs};
18use crate::ephemeral::{self, delete_expired_imap_messages};
19use crate::events::EventType;
20use crate::imap::{FolderMeaning, Imap, session::Session};
21use crate::location;
22use crate::log::{LogExt, warn};
23use crate::smtp::{Smtp, send_smtp_messages};
24use crate::sql;
25use crate::stats::maybe_send_stats;
26use crate::tools::{self, duration_to_str, maybe_add_time_based_warnings, time, time_elapsed};
27use crate::transport::ConfiguredLoginParam;
28use crate::{constants, stats};
29
30pub(crate) mod connectivity;
31
/// Thread-safe handle to the scheduler state.
///
/// Tracks whether background IO (IMAP/SMTP loops and helper tasks)
/// is started, stopped or paused.
#[derive(Debug, Default)]
pub(crate) struct SchedulerState {
    // Inner state guarded by an async `RwLock`, so cheap read-only
    // queries like `is_running` do not contend with state changes.
    inner: RwLock<InnerSchedulerState>,
}
41
impl SchedulerState {
    /// Creates a new scheduler state in the `Stopped` state.
    pub(crate) fn new() -> Self {
        Default::default()
    }

    /// Returns whether background IO is currently running.
    pub(crate) async fn is_running(&self) -> bool {
        let inner = self.inner.read().await;
        matches!(*inner, InnerSchedulerState::Started(_))
    }

    /// Starts the IO scheduler unless it is already started.
    ///
    /// If the scheduler is paused, only records that IO should be
    /// started once the last pause guard is dropped.
    pub(crate) async fn start(&self, context: &Context) {
        let mut inner = self.inner.write().await;
        match *inner {
            InnerSchedulerState::Started(_) => (),
            InnerSchedulerState::Stopped => Self::do_start(&mut inner, context).await,
            InnerSchedulerState::Paused {
                ref mut started, ..
            } => *started = true,
        }
        context.update_connectivities(&inner);
    }

    /// Actually starts the scheduler and transitions `inner` to `Started`.
    ///
    /// Called with the state write lock held. On failure the state is
    /// left unchanged and the error is only logged.
    async fn do_start(inner: &mut InnerSchedulerState, context: &Context) {
        info!(context, "starting IO");

        // Wake up any task waiting for new messages so it re-checks state.
        context.new_msgs_notify.notify_one();

        match Scheduler::start(context).await {
            Ok(scheduler) => {
                *inner = InnerSchedulerState::Started(scheduler);
                context.emit_event(EventType::ConnectivityChanged);
            }
            Err(err) => error!(context, "Failed to start IO: {:#}", err),
        }
    }

    /// Stops the IO scheduler if it is running.
    ///
    /// If the scheduler is paused, only records that IO should stay
    /// stopped once the last pause guard is dropped.
    pub(crate) async fn stop(&self, context: &Context) {
        let mut inner = self.inner.write().await;
        match *inner {
            InnerSchedulerState::Started(_) => {
                Self::do_stop(&mut inner, context, InnerSchedulerState::Stopped).await
            }
            InnerSchedulerState::Stopped => (),
            InnerSchedulerState::Paused {
                ref mut started, ..
            } => *started = false,
        }
        context.update_connectivities(&inner);
    }

    /// Actually stops the scheduler, replacing `inner` with `new_state`.
    ///
    /// Called with the state write lock held. Also aborts the debug
    /// logging loop, if any, before shutting down the scheduler tasks.
    async fn do_stop(
        inner: &mut InnerSchedulerState,
        context: &Context,
        new_state: InnerSchedulerState,
    ) {
        info!(context, "stopping IO");

        // Wake up any task waiting for new messages so it notices the shutdown.
        context.new_msgs_notify.notify_one();

        // Take the debug logging handle out under the (sync) lock first,
        // then await its termination without holding that lock.
        let debug_logging = context
            .debug_logging
            .write()
            .expect("RwLock is poisoned")
            .take();
        if let Some(debug_logging) = debug_logging {
            debug_logging.loop_handle.abort();
            debug_logging.loop_handle.await.ok();
        }
        let prev_state = std::mem::replace(inner, new_state);
        context.emit_event(EventType::ConnectivityChanged);
        match prev_state {
            InnerSchedulerState::Started(scheduler) => scheduler.stop(context).await,
            InnerSchedulerState::Stopped | InnerSchedulerState::Paused { .. } => (),
        }
    }

    /// Pauses the IO scheduler and returns a guard.
    ///
    /// While at least one [`IoPausedGuard`] is alive the scheduler stays
    /// paused. Dropping the last guard restores the remembered state:
    /// IO is restarted if `started` is true, otherwise it stays stopped.
    pub(crate) async fn pause(&'_ self, context: &Context) -> Result<IoPausedGuard> {
        {
            let mut inner = self.inner.write().await;
            match *inner {
                InnerSchedulerState::Started(_) => {
                    // Remember that IO was running so it is resumed later.
                    let new_state = InnerSchedulerState::Paused {
                        started: true,
                        pause_guards_count: NonZeroUsize::MIN,
                    };
                    Self::do_stop(&mut inner, context, new_state).await;
                }
                InnerSchedulerState::Stopped => {
                    *inner = InnerSchedulerState::Paused {
                        started: false,
                        pause_guards_count: NonZeroUsize::MIN,
                    };
                }
                InnerSchedulerState::Paused {
                    ref mut pause_guards_count,
                    ..
                } => {
                    // Already paused, just count one more guard.
                    *pause_guards_count = pause_guards_count
                        .checked_add(1)
                        .ok_or_else(|| Error::msg("Too many pause guards active"))?
                }
            }
            context.update_connectivities(&inner);
        }

        // Spawn the resume task; the guard signals it via the oneshot
        // channel when dropped (or when the sender is simply dropped).
        let (tx, rx) = oneshot::channel();
        let context = context.clone();
        tokio::spawn(async move {
            rx.await.ok();
            let mut inner = context.scheduler.inner.write().await;
            match *inner {
                InnerSchedulerState::Started(_) => {
                    warn!(&context, "IoPausedGuard resume: started instead of paused");
                }
                InnerSchedulerState::Stopped => {
                    warn!(&context, "IoPausedGuard resume: stopped instead of paused");
                }
                InnerSchedulerState::Paused {
                    ref started,
                    ref mut pause_guards_count,
                } => {
                    if *pause_guards_count == NonZeroUsize::MIN {
                        // Last guard gone: restore the remembered state.
                        match *started {
                            true => SchedulerState::do_start(&mut inner, &context).await,
                            false => *inner = InnerSchedulerState::Stopped,
                        }
                    } else {
                        // Count is > 1 here, so the decremented value is
                        // still nonzero and the unwrap cannot fail.
                        let new_count = pause_guards_count.get() - 1;
                        *pause_guards_count = NonZeroUsize::new(new_count).unwrap();
                    }
                }
            }
            context.update_connectivities(&inner);
        });
        Ok(IoPausedGuard { sender: Some(tx) })
    }

    /// Restarts the scheduler, but only if it is currently running.
    pub(crate) async fn restart(&self, context: &Context) {
        info!(context, "restarting IO");
        if self.is_running().await {
            self.stop(context).await;
            self.start(context).await;
        }
    }

    /// Indicates that the network likely became available again.
    ///
    /// Interrupts all IMAP and SMTP loops and notifies their
    /// connectivity stores, if the scheduler is started.
    pub(crate) async fn maybe_network(&self) {
        let inner = self.inner.read().await;
        let (inboxes, oboxes) = match *inner {
            InnerSchedulerState::Started(ref scheduler) => {
                scheduler.maybe_network();
                let inboxes = scheduler
                    .inboxes
                    .iter()
                    .map(|b| b.conn_state.state.connectivity.clone())
                    .collect::<Vec<_>>();
                let oboxes = scheduler
                    .oboxes
                    .iter()
                    .map(|b| b.conn_state.state.connectivity.clone())
                    .collect::<Vec<_>>();
                (inboxes, oboxes)
            }
            _ => return,
        };
        // Release the read lock before touching the connectivity stores.
        drop(inner);
        connectivity::idle_interrupted(inboxes, oboxes);
    }

    /// Indicates that the network connection was likely lost.
    pub(crate) async fn maybe_network_lost(&self, context: &Context) {
        let inner = self.inner.read().await;
        let stores = match *inner {
            InnerSchedulerState::Started(ref scheduler) => {
                scheduler.maybe_network_lost();
                scheduler
                    .boxes()
                    .map(|b| b.conn_state.state.connectivity.clone())
                    .collect()
            }
            _ => return,
        };
        // Release the read lock before touching the connectivity stores.
        drop(inner);
        connectivity::maybe_network_lost(context, stores);
    }

    /// Interrupts IDLE on all inbox connections, if running.
    pub(crate) async fn interrupt_inbox(&self) {
        let inner = self.inner.read().await;
        if let InnerSchedulerState::Started(ref scheduler) = *inner {
            scheduler.interrupt_inbox();
        }
    }

    /// Interrupts the SMTP loop's wait, if running.
    pub(crate) async fn interrupt_smtp(&self) {
        let inner = self.inner.read().await;
        if let InnerSchedulerState::Started(ref scheduler) = *inner {
            scheduler.interrupt_smtp();
        }
    }

    /// Interrupts the ephemeral-messages task, if running.
    pub(crate) async fn interrupt_ephemeral_task(&self) {
        let inner = self.inner.read().await;
        if let InnerSchedulerState::Started(ref scheduler) = *inner {
            scheduler.interrupt_ephemeral_task();
        }
    }

    /// Interrupts the location-streaming task, if running.
    pub(crate) async fn interrupt_location(&self) {
        let inner = self.inner.read().await;
        if let InnerSchedulerState::Started(ref scheduler) = *inner {
            scheduler.interrupt_location();
        }
    }

    /// Interrupts the recently-seen loop with a new timestamp, if running.
    pub(crate) async fn interrupt_recently_seen(&self, contact_id: ContactId, timestamp: i64) {
        let inner = self.inner.read().await;
        if let InnerSchedulerState::Started(ref scheduler) = *inner {
            scheduler.interrupt_recently_seen(contact_id, timestamp);
        }
    }
}
287
/// The actual state behind [`SchedulerState`].
#[derive(Debug, Default)]
pub(crate) enum InnerSchedulerState {
    /// IO is running; holds the running scheduler.
    Started(Scheduler),
    /// IO is stopped. This is the default state.
    #[default]
    Stopped,
    /// IO is temporarily paused by one or more [`IoPausedGuard`]s.
    Paused {
        /// Whether IO should be (re)started when the last guard is dropped.
        started: bool,
        /// Number of currently active pause guards (always at least one).
        pause_guards_count: NonZeroUsize,
    },
}
298
/// Guard returned by [`SchedulerState::pause`].
///
/// While the guard is alive, IO stays paused. Dropping it signals the
/// resume task spawned in `pause`, which restores the previous state
/// once all guards are gone.
#[derive(Default, Debug)]
pub(crate) struct IoPausedGuard {
    // Oneshot sender used to notify the resume task on drop;
    // `None` after the notification has been sent.
    sender: Option<oneshot::Sender<()>>,
}
307
308impl Drop for IoPausedGuard {
309 fn drop(&mut self) {
310 if let Some(sender) = self.sender.take() {
311 sender.send(()).ok();
313 }
314 }
315}
316
/// A single watched IMAP folder together with its worker task.
#[derive(Debug)]
struct SchedBox {
    // Address of the account this folder belongs to.
    addr: String,
    // Which kind of folder this is (e.g. inbox or mvbox).
    meaning: FolderMeaning,
    // Connection state shared with the worker loop.
    conn_state: ImapConnectionState,

    // Handle of the spawned IMAP loop task for this folder.
    handle: task::JoinHandle<()>,
}
327
/// Running state of the IO scheduler: all spawned loops and the
/// channels used to interrupt them.
#[derive(Debug)]
pub(crate) struct Scheduler {
    // One inbox loop per configured transport.
    inboxes: Vec<SchedBox>,
    // Additional watched folders (e.g. mvbox) per transport.
    oboxes: Vec<SchedBox>,
    // State of the SMTP connection shared with the SMTP loop.
    smtp: SmtpConnectionState,
    // Handle of the spawned SMTP loop task.
    smtp_handle: task::JoinHandle<()>,
    // Handle of the ephemeral-messages task.
    ephemeral_handle: task::JoinHandle<()>,
    // Channel used to wake the ephemeral-messages task.
    ephemeral_interrupt_send: Sender<()>,
    // Handle of the location-streaming task.
    location_handle: task::JoinHandle<()>,
    // Channel used to wake the location-streaming task.
    location_interrupt_send: Sender<()>,

    // Loop tracking when contacts were recently seen.
    recently_seen_loop: RecentlySeenLoop,
}
344
345async fn inbox_loop(
346 ctx: Context,
347 started: oneshot::Sender<()>,
348 inbox_handlers: ImapConnectionHandlers,
349) {
350 use futures::future::FutureExt;
351
352 info!(ctx, "Starting inbox loop.");
353 let ImapConnectionHandlers {
354 mut connection,
355 stop_token,
356 } = inbox_handlers;
357
358 let ctx1 = ctx.clone();
359 let fut = async move {
360 let ctx = ctx1;
361 if let Err(()) = started.send(()) {
362 warn!(ctx, "Inbox loop, missing started receiver.");
363 return;
364 };
365
366 let mut old_session: Option<Session> = None;
367 loop {
368 let session = if let Some(session) = old_session.take() {
369 session
370 } else {
371 info!(ctx, "Preparing new IMAP session for inbox.");
372 match connection.prepare(&ctx).await {
373 Err(err) => {
374 warn!(ctx, "Failed to prepare inbox connection: {err:#}.");
375 continue;
376 }
377 Ok(session) => session,
378 }
379 };
380
381 match inbox_fetch_idle(&ctx, &mut connection, session).await {
382 Err(err) => warn!(ctx, "Failed inbox fetch_idle: {err:#}."),
383 Ok(session) => {
384 info!(
385 ctx,
386 "IMAP loop iteration for inbox finished, keeping the session."
387 );
388 old_session = Some(session);
389 }
390 }
391 }
392 };
393
394 stop_token
395 .cancelled()
396 .map(|_| {
397 info!(ctx, "Shutting down inbox loop.");
398 })
399 .race(fut)
400 .await;
401}
402
403pub async fn convert_folder_meaning(
409 ctx: &Context,
410 folder_meaning: FolderMeaning,
411) -> Result<Option<(Config, String)>> {
412 let folder_config = match folder_meaning.to_config() {
413 Some(c) => c,
414 None => {
415 return Ok(None);
418 }
419 };
420
421 let folder = ctx
422 .get_config(folder_config)
423 .await
424 .with_context(|| format!("Failed to retrieve {folder_config} folder"))?;
425
426 if let Some(watch_folder) = folder {
427 Ok(Some((folder_config, watch_folder)))
428 } else {
429 Ok(None)
430 }
431}
432
/// Runs one fetch/idle iteration for the inbox, plus periodic
/// housekeeping that is only done on the inbox connection.
///
/// Returns the session on success so the caller can reuse it.
async fn inbox_fetch_idle(ctx: &Context, imap: &mut Imap, mut session: Session) -> Result<Session> {
    // Refresh quota information if it is older than 60 seconds;
    // failures are only logged.
    if ctx.quota_needs_update(session.transport_id(), 60).await
        && let Err(err) = ctx.update_recent_quota(&mut session).await
    {
        warn!(ctx, "Failed to update quota: {:#}.", err);
    }

    // Perform a requested folder resync; on failure re-queue the
    // request so it is retried on the next iteration.
    if let Ok(()) = imap.resync_request_receiver.try_recv()
        && let Err(err) = session.resync_folders(ctx).await
    {
        warn!(ctx, "Failed to resync folders: {:#}.", err);
        imap.resync_request_sender.try_send(()).ok();
    }

    maybe_add_time_based_warnings(ctx).await;

    // Run database housekeeping if the configured period has elapsed.
    match ctx.get_config_i64(Config::LastHousekeeping).await {
        Ok(last_housekeeping_time) => {
            let next_housekeeping_time =
                last_housekeeping_time.saturating_add(constants::HOUSEKEEPING_PERIOD);
            if next_housekeeping_time <= time() {
                sql::housekeeping(ctx).await.log_err(ctx).ok();
            }
        }
        Err(err) => {
            warn!(ctx, "Failed to get last housekeeping time: {}", err);
        }
    };

    maybe_send_stats(ctx).await.log_err(ctx).ok();

    session
        .update_metadata(ctx)
        .await
        .context("update_metadata")?;
    session
        .register_token(ctx)
        .await
        .context("Failed to register push token")?;

    // Finally do the generic fetch + IDLE cycle on the inbox folder.
    let session = fetch_idle(ctx, imap, session, FolderMeaning::Inbox).await?;
    Ok(session)
}
477
/// Fetches new messages from the folder identified by `folder_meaning`
/// and then waits (IDLE or fake idle) until something happens.
///
/// Returns the session on success so the caller can reuse it.
async fn fetch_idle(
    ctx: &Context,
    connection: &mut Imap,
    mut session: Session,
    folder_meaning: FolderMeaning,
) -> Result<Session> {
    let Some((folder_config, watch_folder)) = convert_folder_meaning(ctx, folder_meaning).await?
    else {
        // Folder is not configured: report it and block until the next
        // interrupt so we do not busy-loop on the error below.
        connection.connectivity.set_not_configured(ctx);
        connection.idle_interrupt_receiver.recv().await.ok();
        bail!("Cannot fetch folder {folder_meaning} because it is not configured");
    };

    // Seen flags are only uploaded from the inbox connection.
    if folder_config == Config::ConfiguredInboxFolder {
        session
            .store_seen_flags_on_imap(ctx)
            .await
            .context("store_seen_flags_on_imap")?;
    }

    connection
        .fetch_move_delete(ctx, &mut session, &watch_folder, folder_meaning)
        .await
        .context("fetch_move_delete")?;

    delete_expired_imap_messages(ctx)
        .await
        .context("delete_expired_imap_messages")?;

    download_known_post_messages_without_pre_message(ctx, &mut session).await?;
    download_msgs(ctx, &mut session)
        .await
        .context("download_msgs")?;

    // Syncing seen flags is best-effort; failures are only logged.
    session
        .sync_seen_flags(ctx, &watch_folder)
        .await
        .context("sync_seen_flags")
        .log_err(ctx)
        .ok();

    connection.connectivity.set_idle(ctx);

    ctx.emit_event(EventType::ImapInboxIdle);

    // Fall back to fake idle when the server lacks IDLE support.
    if !session.can_idle() {
        info!(
            ctx,
            "IMAP session does not support IDLE, going to fake idle."
        );
        connection.fake_idle(ctx, watch_folder).await?;
        return Ok(session);
    }

    // Fake idle can also be forced via configuration.
    if ctx
        .get_config_bool(Config::DisableIdle)
        .await
        .context("Failed to get disable_idle config")
        .log_err(ctx)
        .unwrap_or_default()
    {
        info!(ctx, "IMAP IDLE is disabled, going to fake idle.");
        connection.fake_idle(ctx, watch_folder).await?;
        return Ok(session);
    }

    info!(
        ctx,
        "IMAP session in folder {watch_folder:?} supports IDLE, using it."
    );
    let session = session
        .idle(
            ctx,
            connection.idle_interrupt_receiver.clone(),
            &watch_folder,
        )
        .await
        .context("idle")?;

    Ok(session)
}
573
574async fn simple_imap_loop(
576 ctx: Context,
577 started: oneshot::Sender<()>,
578 inbox_handlers: ImapConnectionHandlers,
579 folder_meaning: FolderMeaning,
580) {
581 use futures::future::FutureExt;
582
583 info!(ctx, "Starting simple loop for {folder_meaning}.");
584 let ImapConnectionHandlers {
585 mut connection,
586 stop_token,
587 } = inbox_handlers;
588
589 let ctx1 = ctx.clone();
590
591 let fut = async move {
592 let ctx = ctx1;
593 if let Err(()) = started.send(()) {
594 warn!(
595 ctx,
596 "Simple imap loop for {folder_meaning}, missing started receiver."
597 );
598 return;
599 }
600
601 let mut old_session: Option<Session> = None;
602 loop {
603 let session = if let Some(session) = old_session.take() {
604 session
605 } else {
606 info!(ctx, "Preparing new IMAP session for {folder_meaning}.");
607 match connection.prepare(&ctx).await {
608 Err(err) => {
609 warn!(
610 ctx,
611 "Failed to prepare {folder_meaning} connection: {err:#}."
612 );
613 continue;
614 }
615 Ok(session) => session,
616 }
617 };
618
619 match fetch_idle(&ctx, &mut connection, session, folder_meaning).await {
620 Err(err) => warn!(ctx, "Failed fetch_idle: {err:#}"),
621 Ok(session) => {
622 info!(
623 ctx,
624 "IMAP loop iteration for {folder_meaning} finished, keeping the session"
625 );
626 old_session = Some(session);
627 }
628 }
629 }
630 };
631
632 stop_token
633 .cancelled()
634 .map(|_| {
635 info!(ctx, "Shutting down IMAP loop for {folder_meaning}.");
636 })
637 .race(fut)
638 .await;
639}
640
/// Worker loop for sending messages over SMTP.
///
/// Sends pending messages, then waits for an interrupt. When sending
/// fails, retries with an increasing, randomized timeout. Runs until
/// `stop_token` is cancelled.
async fn smtp_loop(
    ctx: Context,
    started: oneshot::Sender<()>,
    smtp_handlers: SmtpConnectionHandlers,
) {
    use futures::future::FutureExt;

    info!(ctx, "Starting SMTP loop.");
    let SmtpConnectionHandlers {
        mut connection,
        stop_token,
        idle_interrupt_receiver,
    } = smtp_handlers;

    let ctx1 = ctx.clone();
    let fut = async move {
        let ctx = ctx1;
        // Tell Scheduler::start() that the loop is up and running.
        if let Err(()) = started.send(()) {
            warn!(&ctx, "SMTP loop, missing started receiver.");
            return;
        }

        // Retry timeout in seconds; `None` means nothing to retry.
        let mut timeout = None;
        loop {
            if let Err(err) = send_smtp_messages(&ctx, &mut connection).await {
                warn!(ctx, "send_smtp_messages failed: {:#}.", err);
                // Start with a 30 s retry timeout; keep the current
                // (possibly larger) timeout on repeated failures.
                timeout = Some(timeout.unwrap_or(30));
            } else {
                timeout = None;
                // Respect the rate limiter: sleep until sending is
                // allowed again, then retry immediately.
                let duration_until_can_send = ctx.ratelimit.read().await.until_can_send();
                if !duration_until_can_send.is_zero() {
                    info!(
                        ctx,
                        "smtp got rate limited, waiting for {} until can send again",
                        duration_to_str(duration_until_can_send)
                    );
                    tokio::time::sleep(duration_until_can_send).await;
                    continue;
                }
            }

            stats::maybe_update_message_stats(&ctx)
                .await
                .log_err(&ctx)
                .ok();

            info!(ctx, "SMTP fake idle started.");
            // Publish the result of the last send attempt as connectivity.
            match &connection.last_send_error {
                None => connection.connectivity.set_idle(&ctx),
                Some(err) => connection.connectivity.set_err(&ctx, err),
            }

            if let Some(t) = timeout {
                let now = tools::Time::now();
                info!(
                    ctx,
                    "SMTP has messages to retry, planning to retry {t} seconds later."
                );
                // Wait up to `t` seconds, but an interrupt can end the
                // wait early.
                let duration = std::time::Duration::from_secs(t);
                tokio::time::timeout(duration, async {
                    idle_interrupt_receiver.recv().await.unwrap_or_default()
                })
                .await
                .unwrap_or_default();
                let slept = time_elapsed(&now).as_secs();
                // Grow the next timeout based on the actual time slept,
                // with a random component; never shrink below `t`.
                // NOTE(review): presumably intended as randomized
                // exponential-style backoff based on `slept` rather than
                // `t` — confirm against the retry policy.
                timeout = Some(cmp::max(
                    t,
                    slept.saturating_add(rand::random_range((slept / 2)..=slept)),
                ));
            } else {
                info!(ctx, "SMTP has no messages to retry, waiting for interrupt.");
                idle_interrupt_receiver.recv().await.unwrap_or_default();
            };

            info!(ctx, "SMTP fake idle interrupted.")
        }
    };

    // Run the loop until cancellation wins the race.
    stop_token
        .cancelled()
        .map(|_| {
            info!(ctx, "Shutting down SMTP loop.");
        })
        .race(fut)
        .await;
}
732
impl Scheduler {
    /// Starts all IO loops (IMAP per transport, SMTP, ephemeral,
    /// location, recently-seen) and waits until each loop has
    /// confirmed startup via its oneshot channel.
    pub async fn start(ctx: &Context) -> Result<Self> {
        let (smtp, smtp_handlers) = SmtpConnectionState::new();

        let (smtp_start_send, smtp_start_recv) = oneshot::channel();
        let (ephemeral_interrupt_send, ephemeral_interrupt_recv) = channel::bounded(1);
        let (location_interrupt_send, location_interrupt_recv) = channel::bounded(1);

        let mut inboxes = Vec::new();
        let mut oboxes = Vec::new();
        let mut start_recvs = Vec::new();

        // One inbox loop per configured transport, plus an optional
        // mvbox loop when folder watching is enabled.
        for (transport_id, configured_login_param) in ConfiguredLoginParam::load_all(ctx).await? {
            let (conn_state, inbox_handlers) =
                ImapConnectionState::new(ctx, transport_id, configured_login_param.clone()).await?;
            let (inbox_start_send, inbox_start_recv) = oneshot::channel();
            let handle = {
                let ctx = ctx.clone();
                task::spawn(inbox_loop(ctx, inbox_start_send, inbox_handlers))
            };
            let addr = configured_login_param.addr.clone();
            let inbox = SchedBox {
                addr: addr.clone(),
                meaning: FolderMeaning::Inbox,
                conn_state,
                handle,
            };
            inboxes.push(inbox);
            start_recvs.push(inbox_start_recv);

            if ctx.should_watch_mvbox().await? {
                let (conn_state, handlers) =
                    ImapConnectionState::new(ctx, transport_id, configured_login_param).await?;
                let (start_send, start_recv) = oneshot::channel();
                let ctx = ctx.clone();
                let meaning = FolderMeaning::Mvbox;
                let handle = task::spawn(simple_imap_loop(ctx, start_send, handlers, meaning));
                oboxes.push(SchedBox {
                    addr,
                    meaning,
                    conn_state,
                    handle,
                });
                start_recvs.push(start_recv);
            }
        }

        let smtp_handle = {
            let ctx = ctx.clone();
            task::spawn(smtp_loop(ctx, smtp_start_send, smtp_handlers))
        };
        start_recvs.push(smtp_start_recv);

        let ephemeral_handle = {
            let ctx = ctx.clone();
            task::spawn(async move {
                ephemeral::ephemeral_loop(&ctx, ephemeral_interrupt_recv).await;
            })
        };

        let location_handle = {
            let ctx = ctx.clone();
            task::spawn(async move {
                location::location_loop(&ctx, location_interrupt_recv).await;
            })
        };

        let recently_seen_loop = RecentlySeenLoop::new(ctx.clone());

        let res = Self {
            inboxes,
            oboxes,
            smtp,
            smtp_handle,
            ephemeral_handle,
            ephemeral_interrupt_send,
            location_handle,
            location_interrupt_send,
            recently_seen_loop,
        };

        // Wait for all loops to signal that they are running; a dropped
        // sender means a loop failed to start.
        if let Err(err) = try_join_all(start_recvs).await {
            bail!("failed to start scheduler: {err}");
        }

        info!(ctx, "scheduler is running");
        Ok(res)
    }

    /// Iterates over all watched folders (inboxes first, then oboxes).
    fn boxes(&self) -> impl Iterator<Item = &SchedBox> {
        self.inboxes.iter().chain(self.oboxes.iter())
    }

    /// Interrupts all IMAP and SMTP waits, e.g. after the network came back.
    fn maybe_network(&self) {
        for b in self.boxes() {
            b.conn_state.interrupt();
        }
        self.interrupt_smtp();
    }

    /// Interrupts all IMAP and SMTP waits, e.g. after the network was lost.
    fn maybe_network_lost(&self) {
        for b in self.boxes() {
            b.conn_state.interrupt();
        }
        self.interrupt_smtp();
    }

    /// Interrupts the IDLE wait of all inbox connections.
    fn interrupt_inbox(&self) {
        for b in &self.inboxes {
            b.conn_state.interrupt();
        }
    }

    /// Interrupts the SMTP loop's wait.
    fn interrupt_smtp(&self) {
        self.smtp.interrupt();
    }

    /// Wakes up the ephemeral-messages task.
    fn interrupt_ephemeral_task(&self) {
        self.ephemeral_interrupt_send.try_send(()).ok();
    }

    /// Wakes up the location-streaming task.
    fn interrupt_location(&self) {
        self.location_interrupt_send.try_send(()).ok();
    }

    /// Notifies the recently-seen loop about a contact seen at `timestamp`.
    fn interrupt_recently_seen(&self, contact_id: ContactId, timestamp: i64) {
        self.recently_seen_loop.try_interrupt(contact_id, timestamp);
    }

    /// Stops all loops, waiting up to 30 seconds for the IMAP and SMTP
    /// tasks to finish gracefully; the helper tasks are aborted.
    pub(crate) async fn stop(self, context: &Context) {
        // Signal cancellation to all IMAP loops and the SMTP loop first.
        for b in self.boxes() {
            b.conn_state.stop();
        }
        self.smtp.stop();

        let timeout_duration = std::time::Duration::from_secs(30);

        // Await the IMAP and SMTP tasks concurrently with a timeout each.
        let tracker = TaskTracker::new();
        for b in self.inboxes.into_iter().chain(self.oboxes.into_iter()) {
            let context = context.clone();
            tracker.spawn(async move {
                tokio::time::timeout(timeout_duration, b.handle)
                    .await
                    .log_err(&context)
            });
        }
        {
            let context = context.clone();
            tracker.spawn(async move {
                tokio::time::timeout(timeout_duration, self.smtp_handle)
                    .await
                    .log_err(&context)
            });
        }
        tracker.close();
        tracker.wait().await;

        // The helper tasks have no graceful shutdown path; abort them.
        self.ephemeral_handle.abort();
        self.ephemeral_handle.await.ok();
        self.location_handle.abort();
        self.location_handle.await.ok();
        self.recently_seen_loop.abort().await;
    }
}
909
/// Shared state between a connection's owner and its worker loop.
#[derive(Debug)]
struct ConnectionState {
    // Cancelling this token requests the worker loop to shut down.
    stop_token: CancellationToken,
    // Channel used to interrupt the loop's idle wait.
    idle_interrupt_sender: Sender<()>,
    // Connectivity store shared with the connection for status reporting.
    connectivity: ConnectivityStore,
}
920
impl ConnectionState {
    /// Requests the worker loop to shut down by cancelling its token.
    fn stop(&self) {
        self.stop_token.cancel();
    }

    /// Interrupts the loop's idle wait; a full channel means an
    /// interrupt is already pending, so the error is ignored.
    fn interrupt(&self) {
        self.idle_interrupt_sender.try_send(()).ok();
    }
}
933
/// Owner-side state of the SMTP connection.
#[derive(Debug)]
pub(crate) struct SmtpConnectionState {
    // Shared stop/interrupt/connectivity state for the SMTP loop.
    state: ConnectionState,
}
938
939impl SmtpConnectionState {
940 fn new() -> (Self, SmtpConnectionHandlers) {
941 let stop_token = CancellationToken::new();
942 let (idle_interrupt_sender, idle_interrupt_receiver) = channel::bounded(1);
943
944 let handlers = SmtpConnectionHandlers {
945 connection: Smtp::new(),
946 stop_token: stop_token.clone(),
947 idle_interrupt_receiver,
948 };
949
950 let state = ConnectionState {
951 stop_token,
952 idle_interrupt_sender,
953 connectivity: handlers.connection.connectivity.clone(),
954 };
955
956 let conn = SmtpConnectionState { state };
957
958 (conn, handlers)
959 }
960
961 fn interrupt(&self) {
963 self.state.interrupt();
964 }
965
966 fn stop(&self) {
968 self.state.stop();
969 }
970}
971
/// Loop-side handlers for the SMTP connection, consumed by `smtp_loop`.
struct SmtpConnectionHandlers {
    // The SMTP connection the loop sends with.
    connection: Smtp,
    // Cancelled by the owner to request shutdown.
    stop_token: CancellationToken,
    // Receiving end of the idle-interrupt channel.
    idle_interrupt_receiver: Receiver<()>,
}
977
/// Owner-side state of one IMAP connection.
#[derive(Debug)]
pub(crate) struct ImapConnectionState {
    // Shared stop/interrupt/connectivity state for the IMAP loop.
    state: ConnectionState,
}
982
983impl ImapConnectionState {
984 async fn new(
986 context: &Context,
987 transport_id: u32,
988 login_param: ConfiguredLoginParam,
989 ) -> Result<(Self, ImapConnectionHandlers)> {
990 let stop_token = CancellationToken::new();
991 let (idle_interrupt_sender, idle_interrupt_receiver) = channel::bounded(1);
992
993 let handlers = ImapConnectionHandlers {
994 connection: Imap::new(context, transport_id, login_param, idle_interrupt_receiver)
995 .await?,
996 stop_token: stop_token.clone(),
997 };
998
999 let state = ConnectionState {
1000 stop_token,
1001 idle_interrupt_sender,
1002 connectivity: handlers.connection.connectivity.clone(),
1003 };
1004
1005 let conn = ImapConnectionState { state };
1006
1007 Ok((conn, handlers))
1008 }
1009
1010 fn interrupt(&self) {
1012 self.state.interrupt();
1013 }
1014
1015 fn stop(&self) {
1017 self.state.stop();
1018 }
1019}
1020
/// Loop-side handlers for one IMAP connection, consumed by the IMAP loops.
#[derive(Debug)]
struct ImapConnectionHandlers {
    // The IMAP connection the loop fetches with.
    connection: Imap,
    // Cancelled by the owner to request shutdown.
    stop_token: CancellationToken,
}