@bobfrankston/mailx 1.0.437 → 1.0.439
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/mailx.js +5 -0
- package/client/app.js +43 -0
- package/client/components/message-viewer.js +50 -3
- package/client/compose/compose.html +1 -0
- package/client/compose/compose.js +49 -1
- package/client/index.html +6 -0
- package/client/lib/api-client.js +9 -0
- package/client/lib/mailxapi.js +9 -0
- package/client/styles/components.css +23 -0
- package/package.json +3 -3
- package/packages/mailx-imap/index.d.ts +79 -37
- package/packages/mailx-imap/index.js +356 -499
- package/packages/mailx-service/index.d.ts +64 -0
- package/packages/mailx-service/index.js +272 -4
- package/packages/mailx-service/jsonrpc.js +6 -0
- package/packages/mailx-settings/index.d.ts +2 -0
- package/packages/mailx-settings/index.js +10 -0
@@ -145,9 +145,11 @@ export class ImapManager extends EventEmitter {
     inboxSyncing = false;
     /** Use native IMAP client instead of imapflow. Set to true to enable. */
     useNativeClient = false;
-
-
-    //
+    // Connection management: see withConnection() below.
+    // Cap-hit backoff machinery removed — bounded per-account concurrency
+    // (one ops socket + one IDLE socket) keeps mailx well under any
+    // reasonable server cap, so the recovery timer was dead weight that
+    // mostly served to lock the UI for minutes after a transient failure.
     /** Per-account health counters. Incremented when the server misbehaves
      * in ways that suggest a problem the user should know about (inactivity
      * timeouts, connection-cap hits, rate-limit waits). Surfaced via a
@@ -258,68 +260,51 @@ export class ImapManager extends EventEmitter {
     }
     /** Delete a message directly on the IMAP server (for stuck outbox messages not in local DB) */
     async deleteOnServer(accountId, folderPath, uid) {
-
-        try {
+        return this.withConnection(accountId, async (client) => {
             await client.deleteMessageByUid(folderPath, uid);
             console.log(` Deleted UID ${uid} from ${folderPath} on server`);
-        }
-        finally {
-            try {
-                await client.logout();
-            }
-            catch { /* ignore */ }
-        }
+        });
     }
     /** Search messages on the IMAP server — returns matching UIDs */
     async searchOnServer(accountId, mailboxPath, criteria) {
-
-        try {
+        return this.withConnection(accountId, async (client) => {
             return await client.searchMessages(mailboxPath, criteria);
-        }
-        finally {
-            try {
-                await client.logout();
-            }
-            catch { /* ignore */ }
-        }
+        });
     }
     /** Server-side search that also materializes any UIDs we don't yet have
      * locally. Returns the full result after upsert, so the caller can
-     * render hits that fall outside the history window.
+     * render hits that fall outside the history window. The fetch loop
+     * can be long for big hit-sets, so this runs on the slow lane and
+     * yields between chunks (each chunk is a separate withConnection)
+     * so an interactive body fetch can interleave. */
     async searchAndFetchOnServer(accountId, folderId, mailboxPath, criteria) {
-        const
-
-
-
-
-
-
-
-
-
-
-
-
-
+        const uids = await this.withConnection(accountId, async (client) => {
+            return await client.searchMessages(mailboxPath, criteria);
+        });
+        if (uids.length === 0)
+            return [];
+        const have = new Set(this.db.getUidsForFolder(accountId, folderId));
+        const missing = uids.filter(u => !have.has(u));
+        if (missing.length > 0) {
+            const folder = this.db.getFolders(accountId).find(f => f.id === folderId);
+            if (folder) {
+                const CHUNK = 500;
+                for (let i = 0; i < missing.length; i += CHUNK) {
+                    const range = missing.slice(i, i + CHUNK).join(",");
+                    await this.withConnection(accountId, async (client) => {
                         const fetched = await client.fetchMessages(mailboxPath, range, { source: false });
                         if (fetched?.length) {
                             await this.storeMessages(accountId, folderId, folder, fetched, 0);
                         }
-                }
-                this.db.recalcFolderCounts(folderId);
+                    }, { slow: true });
                 }
+                this.db.recalcFolderCounts(folderId);
             }
-            return uids;
-        }
-        finally {
-            try {
-                await client.logout();
-            }
-            catch { /* ignore */ }
         }
+        return uids;
     }
     /** Create a fresh IMAP client for an account (public access for API endpoints) */
-    createPublicClient(accountId) {
+    async createPublicClient(accountId) {
        return this.createClient(accountId);
    }
    // Legacy fallback disabled — was doubling connections without helping.
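
Note on the chunking pattern above: each 500-UID slice is a separate withConnection turn, which is what lets an interactive request interleave between chunks. A minimal runnable sketch of that shape follows — the withConnection stand-in here is hypothetical (it simply invokes the task), where mailx's real method, shown later in this diff, queues it on the account's two-lane queue:

const CHUNK = 500;
async function withConnection(accountId, fn, opts = {}) {
    // Stand-in only — mailx's real withConnection serializes tasks per
    // account. The fake client echoes back envelopes for the UID range.
    const fakeClient = {
        fetchMessages: async (mailbox, range) =>
            range.split(",").map(uid => ({ uid: Number(uid) })),
    };
    return fn(fakeClient);
}
async function fetchMissing(accountId, mailboxPath, missing) {
    const fetched = [];
    for (let i = 0; i < missing.length; i += CHUNK) {
        const range = missing.slice(i, i + CHUNK).join(",");
        // One queue turn per chunk — a fast-lane task queued meanwhile
        // would run between iterations.
        await withConnection(accountId, async (client) => {
            fetched.push(...await client.fetchMessages(mailboxPath, range));
        }, { slow: true });
    }
    return fetched;
}
fetchMissing("demo", "INBOX", Array.from({ length: 1200 }, (_, i) => i + 1))
    .then(m => console.log(`fetched ${m.length} envelopes in 3 chunks`));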
@@ -329,18 +314,23 @@ export class ImapManager extends EventEmitter {
     // All operations on an account are serialized through an operation queue.
     // No semaphore, no pool, no per-operation connect/disconnect.
     // IDLE uses a separate connection (see startWatching).
-    /** Persistent operational connections — one per account, reused for all operations
+    /** Persistent operational connections — one per account, reused for all operations.
+     * Body fetch, sync, prefetch, outbox-append, flag/move all serialize through
+     * this single client per account via withConnection(). The priority lane
+     * in the queue lets interactive clicks jump ahead of background prefetch. */
     opsClients = new Map();
-    /**
+    /** Two-lane operation queue per account — interactive ops (body fetch on
+     * click, flag toggle) drain before background ops (sync, prefetch). FIFO
+     * within each lane. The single ops connection means there's never a race
+     * on which folder is SELECTed; commands run strictly sequentially. */
     opsQueues = new Map();
-    /**
-     *
-     *
-
-
-
-
-    bodyBackoff = new Map();
+    /** Per-host semaphore — caps simultaneous IMAP socket opens to one server.
+     * Defensive guardrail: with the single-ops-per-account model an individual
+     * user's mailx never hits more than (#accounts × 2) sockets per host, well
+     * under any reasonable server cap. Exists for the multi-account-on-one-host
+     * case (e.g. bobma + bobma2 both on imap.iecc.com). */
+    hostSemaphores = new Map();
+    static HOST_PERMITS = 4;
     /** Get (or create) the persistent operational connection for an account.
      * logout() is wrapped as a no-op so legacy callers don't close it. */
     async getOpsClient(accountId) {
@@ -364,7 +354,7 @@ export class ImapManager extends EventEmitter {
            console.log(` [conn] ${accountId}: stale ops client detected in getOpsClient — reconnecting`);
            client = undefined;
        }
-        client = this.newClient(accountId, "ops");
+        client = await this.newClient(accountId, "ops");
        // Wrap logout as no-op — this is a persistent connection. The
        // newClient wrapper's close-counter runs on `_realLogout`.
        const realLogout = client.logout.bind(client);
@@ -374,63 +364,124 @@ export class ImapManager extends EventEmitter {
        return client;
    }
    /** Run an operation on the account's connection — queued, sequential, no concurrency */
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    /** Run an operation against the account's single ops connection. Tasks
+     * queue strictly sequentially per account — only one IMAP command in
+     * flight at a time. This eliminates the SELECT-races and "stale client
+     * recovery" paths the old multi-client design needed.
+     *
+     * Default lane is `fast` — covers virtually everything (body fetch,
+     * flag toggle, move, incremental sync). Pass `slow: true` only for
+     * operations the caller knows will take a long time and shouldn't
+     * block the user (multi-folder prefetch batches, large backfills).
+     * When both lanes have tasks, fast drains first.
+     *
+     * Within a lane, FIFO. The running task always finishes — IMAP can't
+     * preempt a command mid-flight. */
+    async withConnection(accountId, fn, opts = {}) {
+        let queue = this.opsQueues.get(accountId);
+        if (!queue) {
+            queue = { fast: [], slow: [], running: false };
+            this.opsQueues.set(accountId, queue);
+        }
+        return new Promise((resolve, reject) => {
+            const task = async () => {
+                try {
+                    const client = await this.getOpsClient(accountId);
+                    resolve(await fn(client));
                }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                catch (e) {
+                    // Discard client on any error — keeping a half-broken
+                    // socket poisoned every subsequent request.
+                    const stale = this.opsClients.get(accountId);
+                    this.opsClients.delete(accountId);
+                    if (stale) {
+                        try {
+                            await (stale._realLogout || stale.logout)();
+                        }
+                        catch { /* */ }
+                        try {
+                            stale.destroy?.();
+                        }
+                        catch { /* */ }
                    }
-
+                    reject(e);
                }
-
-
+            };
+            (opts.slow ? queue.slow : queue.fast).push(task);
+            this.drainOpsQueue(accountId);
+        });
+    }
+    /** Run the next queued task. Fast lane drains before slow.
+     * Idempotent — safe to call after each task completes; the running
+     * flag prevents reentrant draining. */
+    drainOpsQueue(accountId) {
+        const queue = this.opsQueues.get(accountId);
+        if (!queue || queue.running)
+            return;
+        const next = queue.fast.shift() || queue.slow.shift();
+        if (!next)
+            return;
+        queue.running = true;
+        next().finally(() => {
+            queue.running = false;
+            this.drainOpsQueue(accountId);
+        });
+    }
+    /** Acquire one slot of the per-host connection semaphore. Returns a release
+     * function — call exactly once when the socket is closed. Used by
+     * newClient to cap simultaneous IMAP connections to a single server
+     * across all mailx accounts. */
+    acquireHostSlot(host) {
+        let sem = this.hostSemaphores.get(host);
+        if (!sem) {
+            sem = { permits: ImapManager.HOST_PERMITS, waiters: [] };
+            this.hostSemaphores.set(host, sem);
+        }
+        const semRef = sem;
+        return new Promise(resolve => {
+            const grant = () => {
+                semRef.permits--;
+                let released = false;
+                resolve(() => {
+                    if (released)
+                        return;
+                    released = true;
+                    semRef.permits++;
+                    const next = semRef.waiters.shift();
+                    if (next)
+                        next();
+                });
+            };
+            if (semRef.permits > 0)
+                grant();
+            else
+                semRef.waiters.push(grant);
        });
-        this.opsQueues.set(accountId, next.catch(() => { }));
-        return next;
    }
    /** Open IMAP clients per account, used to trace who's opening sockets
     * when we hit the Dovecot per-user+IP connection cap. */
    openClients = new Map();
    /** Create a new IMAP client (internal — callers use getOpsClient or withConnection).
-     *
-     *
-
+     * Acquires one slot of the per-host semaphore before constructing the
+     * client; the slot is released when logout() or destroy() runs.
+     * `purpose` is a short tag printed alongside the `[conn+]` log so we can
+     * tell which code path (ops/idle/etc.) opened each connection. */
+    async newClient(accountId, purpose = "?") {
        if (this.reauthenticating.has(accountId))
            throw new Error(`Account ${accountId} is re-authenticating`);
-        const backoffUntil = this.connectionBackoff.get(accountId);
-        if (backoffUntil && Date.now() < backoffUntil) {
-            throw new Error(`Account ${accountId} in connection backoff (${Math.round((backoffUntil - Date.now()) / 1000)}s remaining)`);
-        }
        const config = this.configs.get(accountId);
        if (!config)
            throw new Error(`No config for account ${accountId}`);
-        const
+        const host = config.server || accountId;
+        const releaseHostSlot = await this.acquireHostSlot(host);
+        let client;
+        try {
+            client = new CompatImapClient(config, this.transportFactory);
+        }
+        catch (e) {
+            releaseHostSlot();
+            throw e;
+        }
        let open = this.openClients.get(accountId);
        if (!open) {
            open = new Set();
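
The scheduling behavior encoded by withConnection/drainOpsQueue, reduced to a self-contained sketch (not mailx's class — just the drain order the two lanes produce):

const queue = { fast: [], slow: [], running: false };
function enqueue(lane, label) {
    return new Promise(resolve => {
        queue[lane].push(async () => {
            console.log(`running ${label} (${lane})`);
            resolve();
        });
        drain();
    });
}
function drain() {
    if (queue.running) return;
    const next = queue.fast.shift() || queue.slow.shift(); // fast lane first
    if (!next) return;
    queue.running = true;
    next().finally(() => { queue.running = false; drain(); });
}
// Queue two slow background tasks, then a fast interactive one. The fast
// task runs after the *currently running* slow task (a command in flight
// can't be preempted) but before the second slow task.
enqueue("slow", "prefetch folder A");
enqueue("slow", "prefetch folder B");
enqueue("fast", "body fetch (user click)");
// Prints: prefetch folder A, body fetch (user click), prefetch folder B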
@@ -444,6 +495,7 @@ export class ImapManager extends EventEmitter {
                return;
            closed = true;
            open.delete(client);
+            releaseHostSlot();
            console.log(` [conn-] ${accountId} (${purpose}/${how}) — ${open.size} open`);
        };
        const origLogout = client.logout?.bind(client);
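
The release function handed back by acquireHostSlot has to be idempotent because both logout() and destroy() may fire for one socket. The same permit pattern as a standalone sketch, with hypothetical names:

function makeSemaphore(permits) {
    const sem = { permits, waiters: [] };
    return function acquire() {
        return new Promise(resolve => {
            const grant = () => {
                sem.permits--;
                let released = false;
                resolve(() => {
                    if (released) return;   // double-release guard
                    released = true;
                    sem.permits++;
                    const next = sem.waiters.shift();
                    if (next) next();
                });
            };
            if (sem.permits > 0) grant();
            else sem.waiters.push(grant);
        });
    };
}
// Usage: cap "sockets" to one host at 2 and open 4 — the last two wait
// until a release wakes them in FIFO order.
const acquire = makeSemaphore(2);
async function openSocket(n) {
    const release = await acquire();
    console.log(`socket ${n} open`);
    setTimeout(release, 100 * n);   // close later, waking a waiter
}
for (let n = 1; n <= 4; n++) openSocket(n);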
@@ -470,38 +522,10 @@ export class ImapManager extends EventEmitter {
        }
        return client;
    }
-    /**
-     *
-
-
-        if (client)
-            return client;
-        client = this.newClient(accountId, "body");
-        const realLogout = client.logout.bind(client);
-        client.logout = async () => { };
-        client._realLogout = realLogout;
-        this.bodyClients.set(accountId, client);
-        return client;
-    }
-    /** Drop the body-fetch connection (e.g. after a socket error). */
-    async dropBodyClient(accountId) {
-        const client = this.bodyClients.get(accountId);
-        if (!client)
-            return;
-        this.bodyClients.delete(accountId);
-        try {
-            await (client._realLogout || client.logout)();
-        }
-        catch { /* */ }
-        try {
-            client.destroy?.();
-        }
-        catch { /* */ }
-    }
-    /** Force-close every pooled client for an account — ops, body, any
-     * lingering ones in openClients. Used when the server reports its
-     * connection cap is hit so our slot count drops to zero on the
-     * server side before backoff expires. */
+    /** Force-close every IMAP socket for an account — ops + any lingering
+     * ones in openClients (e.g. an IDLE watcher in flight). Used during
+     * account removal and disconnectOps so the server's connection slots
+     * free immediately rather than waiting for socket idle timeouts. */
    async closeAllClients(accountId) {
        const ops = this.opsClients.get(accountId);
        this.opsClients.delete(accountId);
@@ -515,18 +539,6 @@ export class ImapManager extends EventEmitter {
            }
            catch { /* */ }
        }
-        const body = this.bodyClients.get(accountId);
-        this.bodyClients.delete(accountId);
-        if (body) {
-            try {
-                await (body._realLogout || body.logout)();
-            }
-            catch { /* */ }
-            try {
-                body.destroy?.();
-            }
-            catch { /* */ }
-        }
        const open = this.openClients.get(accountId);
        if (open) {
            for (const c of Array.from(open)) {
@@ -561,14 +573,16 @@ export class ImapManager extends EventEmitter {
            console.log(` [conn] ${accountId}: disconnected`);
        }
    }
-    /** Legacy
-     *
-     *
+    /** Legacy entry: returns the shared persistent ops client. Most callers
+     * should be using `withConnection()` instead — that gives proper
+     * queueing and lets fast operations jump ahead of slow ones. */
    async createClientWithLimit(accountId) {
        return this.getOpsClient(accountId);
    }
-
-
+    /** Disposable fresh client — only used by the IDLE watcher, which holds
+     * its own socket so the fast/slow ops queue isn't blocked by IDLE
+     * parking the connection in a wait-for-server state. */
+    async createClient(accountId, purpose = "misc") {
        return this.newClient(accountId, purpose);
    }
    trackLogout(_accountId) { }
@@ -699,7 +713,7 @@ export class ImapManager extends EventEmitter {
    /** Sync folder list for an account */
    async syncFolders(accountId, client) {
        if (!client)
-            client = this.
+            client = await this.getOpsClient(accountId);
        this.emit("syncProgress", accountId, "folders", 0);
        const t0 = Date.now();
        console.log(` [diag] ${accountId}: getFolderList starting...`);
@@ -831,7 +845,7 @@ export class ImapManager extends EventEmitter {
    /** Sync messages for a specific folder */
    async syncFolder(accountId, folderId, client) {
        if (!client)
-            client = this.
+            client = await this.getOpsClient(accountId);
        const prefetch = getPrefetch();
        const folders = this.db.getFolders(accountId);
        const folder = folders.find(f => f.id === folderId);
@@ -1503,20 +1517,13 @@ export class ImapManager extends EventEmitter {
        }
        console.log(` [conn] ${accountId}: reconnecting`);
    }
-    /** Handle sync errors — classify and emit appropriate UI events
+    /** Handle sync errors — classify and emit appropriate UI events.
+     * The connection-cap branch was removed: with the unified ops queue +
+     * per-host semaphore, mailx alone can't exceed the server cap. If the
+     * cap *is* hit, that means another client (Thunderbird, phone, sibling
+     * process) is holding slots — punishing mailx with a multi-minute
+     * blackout doesn't help the user, the next sync tick will retry. */
    handleSyncError(accountId, errMsg) {
-        if (errMsg.includes("max_userip_connections") || errMsg.includes("Too many simultaneous")) {
-            // Dovecot connection cap hit. 60s was too short — the server
-            // tracks slots with a decay window, and mailx was racing right
-            // back into the cap every time. Extend to 5 min AND close all
-            // pooled clients so the server's count drops to zero. Also
-            // mark all of this account's folder-cooldowns so prefetch
-            // doesn't try to reopen during the backoff.
-            const BACKOFF_MS = 5 * 60_000;
-            this.connectionBackoff.set(accountId, Date.now() + BACKOFF_MS);
-            this.closeAllClients(accountId).catch(() => { });
-            console.warn(` [conn] ${accountId}: server connection cap hit — closing all clients + ${BACKOFF_MS / 1000}s backoff`);
-        }
        const config = this.configs.get(accountId);
        const isOAuth = !!config?.tokenProvider;
        const isTransient = /timeout|ECONNREFUSED|ECONNRESET|ETIMEDOUT|ENETUNREACH|Too many/i.test(errMsg);
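
The transient classifier that remains is a plain regex. For reference, the same expression (copied verbatim from the context line above) applied to a few representative messages:

const isTransient = (msg) =>
    /timeout|ECONNREFUSED|ECONNRESET|ETIMEDOUT|ENETUNREACH|Too many/i.test(msg);
console.log(isTransient("connect ECONNREFUSED 192.0.2.1:993")); // true — retry quietly
console.log(isTransient("Too many simultaneous connections"));   // true — another client holds slots
console.log(isTransient("Invalid credentials (Failure)"));       // false — surfaced to the user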
@@ -1669,7 +1676,7 @@ export class ImapManager extends EventEmitter {
        let client = null;
        try {
            await this.quickCheck(accountId, async () => {
-                client = this.newClient(accountId);
+                client = await this.newClient(accountId, "quickCheck");
                return await client.getMessagesCount("INBOX");
            }, async (count, prev) => {
                if (prev !== undefined)
@@ -1818,9 +1825,11 @@ export class ImapManager extends EventEmitter {
            if (this.watchers.has(accountId))
                continue;
            try {
-                // IDLE
-                //
-
+                // IDLE keeps its own dedicated socket — once the connection
+                // is parked in IDLE, it's unusable for any other command, so
+                // it can't share the ops queue. Counts against the per-host
+                // semaphore (one slot for the IDLE socket).
+                const watchClient = await this.createClient(accountId, "idle");
                const stop = await watchClient.watchMailbox("INBOX", (newCount) => {
                    console.log(` [idle] ${accountId}: ${newCount} new message(s)`);
                    // Fetch only the new UIDs — the heavyweight gap/reconcile
@@ -1849,10 +1858,6 @@ export class ImapManager extends EventEmitter {
        }
        this.watchers.clear();
    }
-    /** Per-account fetch queue — serializes body fetches so only one IMAP command runs at a time.
-     * The persistent fetchClient can only handle one command at a time (IMAP protocol limitation). */
-    fetchQueues = new Map();
-    /** Serialize body fetch operations per account — prevents concurrent IMAP commands on same connection */
    /** Unlink the on-disk body file for a message by reading its `body_path`
     * from the DB. Safe to call either before or after `db.deleteMessage`
     * — read body_path first, store it, then unlink whenever. */
@@ -1865,23 +1870,22 @@ export class ImapManager extends EventEmitter {
        }
        catch { /* row already gone / file already gone — both fine */ }
    }
-    enqueueFetch(accountId, fn) {
-        const prev = this.fetchQueues.get(accountId) || Promise.resolve();
-        const next = prev.then(fn, fn); // run fn after previous completes (regardless of success/failure)
-        this.fetchQueues.set(accountId, next);
-        return next;
-    }
-    // Body fetch uses withConnection — no separate client needed
    /** Fetch a single message body on demand, caching in the store.
-     *
+     *
+     * Cache lookup is folder-agnostic: when a UID exists in multiple folders
+     * (Gmail labels, copy-instead-of-move) the prefetcher may have populated
+     * body_path on only one row. Looking up by (account, uid) without the
+     * folder filter finds the cached `.eml` regardless of which folder
+     * context the UI passed.
+     *
+     * Server fetch goes through the unified ops queue on the fast lane —
+     * the user clicked, they're waiting, this jumps ahead of any background
+     * prefetch sitting in the slow lane. */
    async fetchMessageBody(accountId, folderId, uid) {
-        // Already cached? Read the DB row's `body_path` and check the file
-        // exists there. No more `(folderId, uid)` path reconstruction — that
-        // was the source of the S49 comingling bug (UID reuse + folder move
-        // pointing two messages at one file). `body_path` is the sole
-        // authority on where a given message's body lives on disk.
        const envelope = this.db.getMessageByUid(accountId, uid, folderId);
-
+        let storedPath = envelope?.bodyPath || "";
+        if (!storedPath)
+            storedPath = this.db.getMessageBodyPath(accountId, uid) || "";
        if (storedPath && await this.bodyStore.hasByPath(storedPath)) {
            return this.bodyStore.readByPath(storedPath);
        }
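
What a folder-agnostic lookup like db.getMessageBodyPath could look like underneath — a hypothetical sketch only, since this diff shows just the call site; the table and column names below (a `messages` table in a sqlite-style store) are assumptions, though `body_path` itself appears in the comments above:

import Database from "better-sqlite3"; // assumption: a sqlite-backed store

const db = new Database(":memory:");
db.exec(`CREATE TABLE messages (account_id TEXT, folder_id INTEGER, uid INTEGER, body_path TEXT)`);
db.prepare(`INSERT INTO messages VALUES ('acct', 7, 4242, '/bodies/acct/4242.eml')`).run();

// Folder-agnostic: filter on (account_id, uid) only, so a body cached
// under any folder's row satisfies the viewer.
function getMessageBodyPath(accountId, uid) {
    const row = db.prepare(
        `SELECT body_path FROM messages
          WHERE account_id = ? AND uid = ? AND body_path IS NOT NULL LIMIT 1`
    ).get(accountId, uid);
    return row?.body_path ?? null;
}
console.log(getMessageBodyPath("acct", 4242)); // "/bodies/acct/4242.eml"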
@@ -1890,102 +1894,35 @@ export class ImapManager extends EventEmitter {
        const folder = this.db.getFolders(accountId).find(f => f.id === folderId);
        if (!folder)
            return null;
-        // Gmail:
+        // Gmail: REST API, no IMAP connection involved.
        if (this.isGmailAccount(accountId)) {
            return this.fetchMessageBodyViaApi(accountId, folderId, uid, folder.path);
        }
-        // IMAP:
-        //
-        //
-
-
-        // Cap-error backoff: when Dovecot has rejected us recently with
-        // "Maximum number of connections", short-circuit instead of
-        // queueing every body request behind a doomed login attempt.
-        const backoffUntil = this.bodyBackoff.get(accountId) || 0;
-        if (backoffUntil > Date.now()) {
-            const wait = Math.round((backoffUntil - Date.now()) / 1000);
-            console.warn(` Body fetch (${accountId}/${uid}) skipped — server connection cap, retry in ${wait}s`);
-            return null;
-        }
-        const attempt = async () => {
-            const client = await this.getBodyClient(accountId);
+        // IMAP: fast lane on the ops queue. One try; if the socket is stale,
+        // withConnection's discard-on-error logic drops the client so the
+        // next attempt (caller-driven retry) gets a fresh one.
+        try {
+            const raw = await this.withConnection(accountId, async (client) => {
                const msg = await client.fetchMessageByUid(folder.path, uid, { source: true });
                if (!msg)
                    throw makeNotFoundError(accountId, folderId, uid);
                if (!msg.source)
                    return null;
                return Buffer.from(msg.source, "utf-8");
-        };
-        const classify = (msg) => ({
-            staleSocket: /Not connected|ECONNRESET|socket hang up|EPIPE|write after end|ended|closed/i.test(msg),
-            connCap: /UNAVAILABLE|Maximum number of connections|too many connections/i.test(msg),
            });
-        let raw;
-        try {
-            raw = await attempt();
-        }
-        catch (e) {
-            if (e?.isNotFound)
-                throw e;
-            const msg = e?.message || "";
-            const { staleSocket, connCap } = classify(msg);
-            // Always drop the cached client on any failure — keeping a
-            // half-broken client poisoned every subsequent request.
-            await this.dropBodyClient(accountId);
-            if (connCap) {
-                // The dedicated body socket is locked out by the server's
-                // per-user+IP cap (something else holds the slots). Fall
-                // back to the already-open ops connection — slower (queues
-                // behind sync commands), but actually works.
-                this.bodyBackoff.set(accountId, Date.now() + 30_000);
-                console.warn(` Body fetch (${accountId}/${uid}): connection cap — falling back to ops connection`);
-                try {
-                    const fallbackRaw = await this.withConnection(accountId, async (client) => {
-                        const msg = await client.fetchMessageByUid(folder.path, uid, { source: true });
-                        if (!msg)
-                            throw makeNotFoundError(accountId, folderId, uid);
-                        if (!msg.source)
-                            return null;
-                        return Buffer.from(msg.source, "utf-8");
-                    });
-                    if (!fallbackRaw)
-                        return null;
-                    const bodyPath = await this.bodyStore.putMessage(accountId, folderId, uid, fallbackRaw);
-                    this.db.updateBodyPath(accountId, uid, bodyPath);
-                    this.emit("bodyCached", accountId, uid);
-                    return fallbackRaw;
-                }
-                catch (e3) {
-                    if (e3?.isNotFound)
-                        throw e3;
-                    console.error(` Body fetch fallback failed (${accountId}/${uid}): ${e3?.message}`);
-                    return null;
-                }
-            }
-            if (!staleSocket) {
-                console.error(` Body fetch error (${accountId}/${uid}): ${msg}`);
-                return null;
-            }
-            // Stale socket — try once more with a fresh client.
-            try {
-                raw = await attempt();
-            }
-            catch (e2) {
-                if (e2?.isNotFound)
-                    throw e2;
-                await this.dropBodyClient(accountId);
-                console.error(` Body fetch error (${accountId}/${uid}) after reconnect: ${e2?.message}`);
-                return null;
-            }
-        }
            if (!raw)
                return null;
            const bodyPath = await this.bodyStore.putMessage(accountId, folderId, uid, raw);
            this.db.updateBodyPath(accountId, uid, bodyPath);
            this.emit("bodyCached", accountId, uid);
            return raw;
-    }
+        }
+        catch (e) {
+            if (e?.isNotFound)
+                throw e;
+            console.error(` Body fetch error (${accountId}/${uid}): ${e?.message || e}`);
+            return null;
+        }
    }
    /** Fetch message body via Gmail/Outlook API.
     * Throws `MessageNotFoundError` when the server says the message is gone
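
What the "caller-driven retry" mentioned in the new comment could look like at a call site — a hypothetical wrapper, not code from this package; fetchMessageBody itself now makes exactly one attempt, and withConnection discards the stale socket on failure so a second call gets a fresh one:

async function fetchBodyWithRetry(imap, accountId, folderId, uid, tries = 2) {
    for (let attempt = 1; attempt <= tries; attempt++) {
        // isNotFound errors propagate — the message really is gone.
        const raw = await imap.fetchMessageBody(accountId, folderId, uid);
        if (raw) return raw;              // cached, or fetched fresh
        if (attempt < tries)
            console.warn(`body ${accountId}/${uid}: retrying on a fresh socket`);
    }
    return null;                           // surfaced to the UI as "body unavailable"
}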
@@ -2191,7 +2128,11 @@ export class ImapManager extends EventEmitter {
            }
        }
        else {
-            // IMAP batch path:
+            // IMAP batch path: one UID FETCH per folder, each on its own
+            // turn through the slow lane. Yielding between folders is
+            // crucial — a click-to-view body should jump ahead of the
+            // next folder's batch via the fast lane, not wait for all
+            // folders to finish.
            const byFolder = new Map();
            for (const m of missing) {
                let arr = byFolder.get(m.folderId);
@@ -2202,37 +2143,30 @@ export class ImapManager extends EventEmitter {
                arr.push(m.uid);
            }
            const folders = this.db.getFolders(accountId);
-
-
-
-
-
-            const
-
-
-
-
-
-
-
-
-
-
-
-
-
-            // onBody fires synchronously as each message streams in from the server.
-            // Disk/DB writes are kicked off fire-and-forget; we await them after the
-            // batch command finishes. This keeps streaming throughput high while
-            // still giving us a single await point for progress accounting.
-            const pending = [];
-            let batchSucceeded = false;
-            try {
+            // INBOX-first ordering so the folder the user actually looks at
+            // gets its bodies even if a later folder eats the error budget.
+            const orderedFolders = Array.from(byFolder.entries()).sort(([aid], [bid]) => {
+                const af = folders.find(f => f.id === aid);
+                const bf = folders.find(f => f.id === bid);
+                const ai = af?.specialUse === "inbox" ? 0 : 1;
+                const bi = bf?.specialUse === "inbox" ? 0 : 1;
+                return ai - bi;
+            });
+            for (const [folderId, uids] of orderedFolders) {
+                const folder = folders.find(f => f.id === folderId);
+                if (!folder)
+                    continue;
+                if (this.shouldSkipFolder(accountId, folder.path)) {
+                    console.log(` [prefetch] ${accountId}: skipping ${folder.path} (recent timeouts — cooling down)`);
+                    continue;
+                }
+                const received = new Set();
+                let batchSucceeded = false;
+                try {
+                    // Slow lane: prefetch is the textbook "this might take
+                    // a while" case — let interactive ops slip ahead.
+                    await this.withConnection(accountId, async (client) => {
+                        const pending = [];
                        await client.fetchBodiesBatch(folder.path, uids, (uid, source) => {
                            received.add(uid);
                            pending.push((async () => {
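
The INBOX-first comparator above, in isolation: it only distinguishes inbox from everything else, and since Array.prototype.sort is stable, non-inbox folders keep their relative order:

const folders = [
    { id: 1, specialUse: "sent" },
    { id: 2, specialUse: "inbox" },
    { id: 3 },                         // no specialUse at all
];
const order = [...folders].sort((a, b) =>
    (a.specialUse === "inbox" ? 0 : 1) - (b.specialUse === "inbox" ? 0 : 1));
console.log(order.map(f => f.id));     // [2, 1, 3]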
@@ -2245,57 +2179,38 @@ export class ImapManager extends EventEmitter {
                                    madeProgress = true;
                                }
                                catch (e) {
-                                    // EBUSY / disk error — non-fatal per message
                                    console.error(` [prefetch] ${accountId}/${uid}: store write failed: ${e.message}`);
                                }
                            })());
                        });
-
-
-
-
-            catch (e) {
-                const msg = String(e?.message || "");
-                console.error(` [prefetch] ${accountId} folder ${folder.path}: batch fetch failed: ${msg}`);
-                counters.errors++;
-                this.recordFolderError(accountId, folder.path);
-                // Server connection cap hit during prefetch — this is why
-                // bobma log shows "100+ bodies to fetch" with no follow-up
-                // "done": subsequent folders also hit the cap, burn the
-                // budget, and nothing progresses. Route through the
-                // sync-error handler so backoff + closeAllClients kick in.
-                if (/max_userip_connections|Too many simultaneous/i.test(msg)) {
-                    this.handleSyncError(accountId, msg);
-                    break;
-                }
-                if (counters.errors >= ERROR_BUDGET)
-                    break;
-            }
-            await Promise.all(pending);
-            // CRITICAL: only prune when the batch actually completed.
-            // A thrown batch means NOTHING was received and we must
-            // not treat absence-from-received as server-deletion.
-            if (batchSucceeded)
-                for (const uid of uids) {
-                    if (received.has(uid))
-                        continue;
-                    try {
-                        this.unlinkBodyFile(accountId, uid, folderId).catch(() => { });
-                        this.db.deleteMessage(accountId, uid);
-                        counters.deleted++;
-                        madeProgress = true;
-                    }
-                    catch { /* ignore */ }
-                }
+                        await Promise.all(pending);
+                    }, { slow: true });
+                    batchSucceeded = true;
+                    this.clearFolderErrors(accountId, folder.path);
                }
-
-
-
-
-
-
-
+                catch (e) {
+                    const msg = String(e?.message || "");
+                    console.error(` [prefetch] ${accountId} folder ${folder.path}: batch fetch failed: ${msg}`);
+                    counters.errors++;
+                    this.recordFolderError(accountId, folder.path);
+                    if (counters.errors >= ERROR_BUDGET)
+                        break;
                }
+                // CRITICAL: only prune when the batch actually completed.
+                // A thrown batch means NOTHING was received; treating
+                // absence as server-deletion lost 296 messages once.
+                if (batchSucceeded)
+                    for (const uid of uids) {
+                        if (received.has(uid))
+                            continue;
+                        try {
+                            this.unlinkBodyFile(accountId, uid, folderId).catch(() => { });
+                            this.db.deleteMessage(accountId, uid);
+                            counters.deleted++;
+                            madeProgress = true;
+                        }
+                        catch { /* ignore */ }
+                    }
            }
            if (counters.errors >= ERROR_BUDGET) {
                console.error(` [prefetch] ${accountId}: stopping after ${counters.errors} errors (${counters.totalFetched} cached, ${counters.deleted} pruned)`);
@@ -2411,27 +2326,20 @@ export class ImapManager extends EventEmitter {
        const toFolder = toFolders.find(f => f.id === toFolderId);
        if (!toFolder)
            throw new Error(`Target folder ${toFolderId} not found`);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        }
-        catch { /* ignore */ }
-        try {
-            await targetClient.logout();
-        }
-        catch { /* ignore */ }
-    }
+        // Two accounts, two ops connections. Cross-account move is rare
+        // and requires both sockets to be live concurrently (we APPEND to
+        // target while still authenticated to source), so this can't fold
+        // into a single withConnection call.
+        await this.withConnection(fromAccountId, async (sourceClient) => {
+            await this.withConnection(toAccountId, async (targetClient) => {
+                const msg = await sourceClient.fetchMessageByUid(fromFolder.path, uid, { source: true });
+                if (!msg)
+                    throw new Error(`Message UID ${uid} not found in ${fromFolder.path}`);
+                await sourceClient.moveMessageToServer(msg, fromFolder.path, targetClient, toFolder.path);
+                this.db.deleteMessage(fromAccountId, uid);
+                console.log(` Cross-account move: ${fromAccountId}/${fromFolder.path} UID ${uid} → ${toAccountId}/${toFolder.path}`);
+            });
+        });
    }
    /** Undelete — move from Trash back to original folder */
    async undeleteMessage(accountId, uid, originalFolderId) {
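
Why the nesting above is safe: each account has its own two-lane queue, so the inner withConnection(toAccountId, …) drains independently of the outer one. Nesting on the *same* account would deadlock — the inner task queues behind the outer one, which can't finish until the inner resolves. A sketch of the distinction (mgr stands in for the ImapManager instance):

async function crossAccountMoveSafe(mgr, fromId, toId) {
    await mgr.withConnection(fromId, async () => {
        await mgr.withConnection(toId, async () => { /* ok: different queue */ });
    });
}
async function sameAccountDeadlock(mgr, id) {
    await mgr.withConnection(id, async () => {
        // BUG (illustrative): queued on the same single-runner queue while
        // that runner is busy with this very task — it never gets drained.
        await mgr.withConnection(id, async () => {});
    });
}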
@@ -2676,17 +2584,10 @@ export class ImapManager extends EventEmitter {
            console.error(` [sent] No Sent folder found for ${accountId}`);
            return;
        }
-
-        try {
+        await this.withConnection(accountId, async (client) => {
            await client.appendMessage(sent.path, rawMessage, ["\\Seen"]);
            console.log(` [sent] Copied to ${sent.path}`);
-        }
-        finally {
-            try {
-                await client.logout();
-            }
-            catch { /* ignore */ }
-        }
+        });
    }
    /** Save a draft to the Drafts folder via IMAP APPEND.
     * Returns the UID of the saved draft (for replacing on next save). */
@@ -2696,8 +2597,7 @@ export class ImapManager extends EventEmitter {
            console.error(` [drafts] No Drafts folder found for ${accountId}`);
            return null;
        }
-
-        try {
+        return this.withConnection(accountId, async (client) => {
            // Delete previous draft — try UID first (fast path), and ALWAYS also try
            // searchByHeader(X-Mailx-Draft-ID) as a safety net. Running both catches
            // orphans from a crash-mid-save or a UID delete that failed silently.
@@ -2727,10 +2627,7 @@ export class ImapManager extends EventEmitter {
            }
            // Append new draft. If the server returns [TRYCREATE] (RFC 3501 §7.1),
            // the folder doesn't exist on the server even though mailx's DB has
-            // it
-            // special-folder detection latched onto a path that doesn't match
-            // the server's actual name. Create it then retry. Logs the path so
-            // we can diagnose a mis-detected Drafts folder.
+            // it. Create it and retry once.
            let result;
            try {
                result = await client.appendMessage(drafts.path, rawMessage, ["\\Draft", "\\Seen"]);
@@ -2743,7 +2640,6 @@ export class ImapManager extends EventEmitter {
                    await client.createmailbox(drafts.path);
                }
                catch (ce) {
-                    // "already exists" is benign; others we surface
                    if (!/already exists/i.test(String(ce?.message || ""))) {
                        console.error(` [drafts] Folder create failed for "${drafts.path}": ${ce.message}`);
                    }
@@ -2754,16 +2650,9 @@ export class ImapManager extends EventEmitter {
                    throw e;
                }
            }
-            // APPENDUID returns the UID directly; imapflow returns { destination, uid }
            const uid = typeof result === "number" ? result : result?.uid || null;
            return uid;
-        }
-        finally {
-            try {
-                await client.logout();
-            }
-            catch { /* ignore */ }
-        }
+        });
    }
    /** Delete a draft (or all drafts with a stable X-Mailx-Draft-ID) after successful send.
     * Tries the specific UID first, then falls back to searchByHeader so orphaned copies
@@ -2774,8 +2663,7 @@ export class ImapManager extends EventEmitter {
            return;
        if (!draftUid && !draftId)
            return;
-
-        try {
+        await this.withConnection(accountId, async (client) => {
            if (draftUid) {
                try {
                    await client.deleteMessageByUid(drafts.path, draftUid);
@@ -2801,13 +2689,7 @@ export class ImapManager extends EventEmitter {
                    console.error(` [drafts] searchByHeader for ${draftId} failed: ${e.message}`);
                }
            }
-        }
-        finally {
-            try {
-                await client.logout();
-            }
-            catch { /* ignore */ }
-        }
+        });
    }
    /** Queue outgoing message locally — never fails, worker handles IMAP+SMTP.
     * Single path: write `~/.mailx/outbox/<acct>/*.ltr` synchronously, then
@@ -2994,20 +2876,14 @@ export class ImapManager extends EventEmitter {
        const existing = folders.find(f => f.path.toLowerCase() === "outbox");
        if (existing)
            return existing.path;
-        // Create it
-        const client = this.createClient(accountId);
        try {
-            await
-
-
-
+            await this.withConnection(accountId, async (client) => {
+                await client.createmailbox("Outbox");
+                await this.syncFolders(accountId, client);
+            });
        }
        catch (e) {
-
-            await client.logout();
-            }
-            catch { /* ignore */ }
-            // Might already exist
+            // Might already exist — benign
            if (!e.message?.includes("already exists"))
                throw e;
        }
@@ -3040,23 +2916,15 @@ export class ImapManager extends EventEmitter {
        // - ~/.mailx/outbox/<acct>/*.ltr (fallback when IMAP is unreachable)
        try {
            const outboxPath = await this.ensureOutbox(accountId);
-
-            try {
+            await this.withConnection(accountId, async (client) => {
                await client.appendMessage(outboxPath, rawMessage, ["\\Seen"]);
                console.log(` [outbox] Queued message in ${outboxPath}`);
-
-
-
-
-                }
-                return;
-            }
-            finally {
-                try {
-                    await client.logout();
-                }
-                catch { /* ignore */ }
+            });
+            const outboxFolder = this.findFolder(accountId, "outbox");
+            if (outboxFolder) {
+                this.syncFolder(accountId, outboxFolder.id).catch(() => { });
            }
+            return;
        }
        catch (e) {
            console.error(` [outbox] IMAP queue failed: ${e.message} — saving locally`);
@@ -3317,14 +3185,14 @@ export class ImapManager extends EventEmitter {
        }
        console.log(` [smtp] ${accountId}: sent to ${recipients.join(", ")}`);
    }
-    /** Process Outbox — send pending messages with flag-based interlock
+    /** Process Outbox — send pending messages with flag-based interlock.
+     * Each per-UID step is its own withConnection({slow}) call so the queue
+     * yields between messages: a click-to-view body in the middle of a
+     * 10-message outbox drain doesn't wait for all 10 to finish. */
    async processOutbox(accountId) {
        const outboxFolder = this.findFolder(accountId, "outbox");
        if (!outboxFolder)
            return;
-        // Skip if this account's sync is failing — don't pile up connections
-        if (this.connectionBackoff.has(accountId) && Date.now() < (this.connectionBackoff.get(accountId) || 0))
-            return;
        // Gmail: skip IMAP outbox check — sending handled by processLocalQueue which sends directly via SMTP
        if (this.isGmailAccount(accountId))
            return;
@@ -3332,32 +3200,23 @@ export class ImapManager extends EventEmitter {
        const account = settings.accounts.find(a => a.id === accountId);
        if (!account)
            return;
-
-
-
-
-
-
-
-
-
-
-
-        //
-
-        // message forever. Sweep flags older than STALE_CLAIM_MS first.
-        const STALE_CLAIM_MS = 3600_000; // 1 hour — far longer than any reasonable SMTP send
-        const nowSec = Math.floor(Date.now() / 1000);
-        // Encode our claim with a seconds-since-epoch timestamp so peers
-        // (and our own restart sweeper) can identify stale entries.
-        const sendingFlag = `$Sending-${this.hostname}-${nowSec}`;
-        for (const uid of uids) {
-            // Check flags — skip if already being sent or permanently failed
+        // List UIDs first — quick command, fast lane.
+        const uids = await this.withConnection(accountId, (client) => client.getUids(outboxFolder.path));
+        if (uids.length === 0)
+            return;
+        const STALE_CLAIM_MS = 3600_000; // 1 hour — longer than any reasonable SMTP send
+        const nowSec = Math.floor(Date.now() / 1000);
+        const sendingFlag = `$Sending-${this.hostname}-${nowSec}`;
+        const sentFolder = this.findFolder(accountId, "sent");
+        for (const uid of uids) {
+            // Each iteration is one slow-lane turn — fast-lane work can run
+            // between iterations, so a body click during a long outbox drain
+            // gets serviced promptly.
+            const result = await this.withConnection(accountId, async (client) => {
                const flags = await client.getFlags(outboxFolder.path, uid);
-            // Sweep stale claims.
-            //
-            //
-            // with a fresh timestamped flag on its next tick.
+                // Sweep stale claims. $Sending-<host>-<sec> with old timestamp,
+                // or legacy $Sending-<host> without timestamp (treated as
+                // stale; if the real owner is alive it'll re-claim next tick).
                const claimFlags = flags.filter((f) => f.startsWith("$Sending"));
                for (const cf of claimFlags) {
                    const m = cf.match(/^\$Sending-(.+?)(?:-(\d+))?$/);
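
The claim-flag scheme in isolation: `$Sending-<host>-<epochSeconds>`, with the timestamp group optional so legacy `$Sending-<host>` flags still parse. The regex is copied verbatim from the context line above, and STALE_CLAIM_MS from processOutbox:

const STALE_CLAIM_MS = 3600_000;
function isStaleClaim(flag, now = Date.now()) {
    const m = flag.match(/^\$Sending-(.+?)(?:-(\d+))?$/);
    if (!m) return false;                      // not a claim flag at all
    if (!m[2]) return true;                    // legacy, untimestamped → treat as stale
    const claimedAtMs = Number(m[2]) * 1000;
    return now - claimedAtMs > STALE_CLAIM_MS;
}
console.log(isStaleClaim(`$Sending-myhost-${Math.floor(Date.now() / 1000)}`)); // false — fresh
console.log(isStaleClaim("$Sending-myhost-1600000000"));                        // true — hours old
console.log(isStaleClaim("$Sending-otherhost"));                                // true — legacy form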
@@ -3373,78 +3232,76 @@ export class ImapManager extends EventEmitter {
                    catch { /* ignore */ }
                }
            }
-            // Re-read flags after sweep
                const flagsNow = (claimFlags.length > 0)
                    ? await client.getFlags(outboxFolder.path, uid)
                    : flags;
                if (flagsNow.some((f) => f.startsWith("$Sending")))
-
+                    return { skip: true };
                if (flagsNow.includes("$PermanentFailure"))
-
+                    return { skip: true };
                if (flagsNow.includes("$Failed")) {
-                // Retry: remove failed flag
                    await client.removeFlags(outboxFolder.path, uid, ["$Failed"]);
                }
-            // Claim this message
                await client.addFlags(outboxFolder.path, uid, [sendingFlag]);
-            //
-            //
-            //
-            // tick one wins.)
+                // TOCTOU re-check: if two devices addFlags concurrently both
+                // see ≥2 sending flags. Fail-safe: both back off, next tick
+                // one wins.
                const flagsAfter = await client.getFlags(outboxFolder.path, uid);
                const sendingFlags = flagsAfter.filter((f) => f.startsWith("$Sending"));
                if (sendingFlags.length > 1 || (sendingFlags.length === 1 && sendingFlags[0] !== sendingFlag)) {
-                // Another machine claimed it — back off
                    await client.removeFlags(outboxFolder.path, uid, [sendingFlag]);
-
+                    return { skip: true };
                }
-            // Fetch the raw message
                const msg = await client.fetchMessageByUid(outboxFolder.path, uid, { source: true });
                if (!msg?.source) {
                    await client.removeFlags(outboxFolder.path, uid, [sendingFlag]);
-
+                    return { skip: true };
                }
-
-
-
-
-
-
-
+                return { source: msg.source };
+            }, { slow: true });
+            if (result.skip)
+                continue;
+            const source = result.source;
+            // SMTP send is its own connection — not an IMAP op, doesn't go
+            // through withConnection.
+            try {
+                await this.sendRawViaSMTP(accountId, source);
+                console.log(` [outbox] Sent UID ${uid}`);
+                // Delete from Outbox + copy to Sent. Done in two separate
+                // withConnection calls so other work can interleave.
+                await this.withConnection(accountId, async (client) => {
+                    // Delete FIRST to prevent double-send if Sent-copy fails.
                    await client.deleteMessageByUid(outboxFolder.path, uid);
-
-
-
-
-                await client.appendMessage(sentFolder.path,
-
-            }
-            catch (sentErr) {
-                console.error(` [outbox] Failed to copy to Sent: ${sentErr.message} — message was sent successfully`);
-            }
-            this.syncFolder(accountId, outboxFolder.id).catch(() => { });
+                }, { slow: true });
+                if (sentFolder) {
+                    try {
+                        await this.withConnection(accountId, async (client) => {
+                            await client.appendMessage(sentFolder.path, source, ["\\Seen"]);
+                        }, { slow: true });
+                        this.syncFolder(accountId, sentFolder.id).catch(() => { });
                    }
-
-
-                const errMsg = e.message || String(e);
-                console.error(` [outbox] Send failed UID ${uid}: ${errMsg}`);
-                await client.removeFlags(outboxFolder.path, uid, [sendingFlag]);
-                await client.addFlags(outboxFolder.path, uid, ["$Failed"]);
-                // Notify UI about the send failure
-                this.emit("accountError", accountId, `Send failed: ${errMsg}`, "Message kept in Outbox", false);
-                // Auth failures should not retry — back off this account
-                if (/auth|login|credential|invalid/i.test(errMsg)) {
-                    this.outboxBackoff.set(accountId, Date.now() + 3600000); // 1 hour
-                    console.error(` [outbox] Auth failure for ${accountId} — outbox paused for 1 hour`);
+                    catch (sentErr) {
+                        console.error(` [outbox] Failed to copy to Sent: ${sentErr.message} — message was sent successfully`);
                    }
+                this.syncFolder(accountId, outboxFolder.id).catch(() => { });
                }
            }
-
-
-
-
+            catch (e) {
+                const errMsg = e.message || String(e);
+                console.error(` [outbox] Send failed UID ${uid}: ${errMsg}`);
+                try {
+                    await this.withConnection(accountId, async (client) => {
+                        await client.removeFlags(outboxFolder.path, uid, [sendingFlag]);
+                        await client.addFlags(outboxFolder.path, uid, ["$Failed"]);
+                    }, { slow: true });
+                }
+                catch { /* best-effort */ }
+                this.emit("accountError", accountId, `Send failed: ${errMsg}`, "Message kept in Outbox", false);
+                if (/auth|login|credential|invalid/i.test(errMsg)) {
+                    this.outboxBackoff.set(accountId, Date.now() + 3600000); // 1 hour
+                    console.error(` [outbox] Auth failure for ${accountId} — outbox paused for 1 hour`);
                }
            }
-            catch { /* ignore */ }
        }
    }
    /** Start background Outbox worker — runs immediately then every 10 seconds */
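
The claim/verify handshake in the hunk above, reduced to its essentials: add your own flag, re-read, and back off if any other claim is visible. A simulated flag store stands in for the IMAP mailbox here (the real check runs over getFlags/addFlags/removeFlags as shown):

const flagStore = new Set();
async function tryClaim(myFlag) {
    flagStore.add(myFlag);                                    // addFlags
    const sending = [...flagStore].filter(f => f.startsWith("$Sending")); // getFlags
    if (sending.length > 1 || sending[0] !== myFlag) {
        flagStore.delete(myFlag);                             // removeFlags — back off
        return false;
    }
    return true;
}
tryClaim("$Sending-hostA-1700000000").then(ok => console.log("A claimed:", ok)); // true
tryClaim("$Sending-hostB-1700000001").then(ok => console.log("B claimed:", ok)); // false
// With truly concurrent writers both can observe two flags and both back
// off — that's the fail-safe; the next 10-second outbox tick re-attempts
// and one side wins.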
|