actual-mcp-server 0.6.1 → 0.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/package.json +2 -2
- package/dist/src/index.js +8 -1
- package/dist/src/lib/ActualConnectionPool.js +23 -1
- package/dist/src/lib/actual-adapter.js +378 -8
- package/dist/src/lib/apiState.js +26 -0
- package/dist/src/lib/requestContext.js +17 -0
- package/dist/src/server/httpServer.js +13 -4
- package/package.json +2 -2
package/README.md
CHANGED
package/dist/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "actual-mcp-server",
   "displayName": "Actual MCP Server",
-  "version": "0.6.1",
+  "version": "0.6.2",
   "engines": {
     "node": ">=20.0.0",
     "npm": ">=10.0.0"
@@ -30,7 +30,7 @@
     "verify-tools": "npm run build && node scripts/verify-tools.js",
     "check:coverage": "node scripts/list-actual-api-methods.mjs",
     "direct-sync": "node scripts/direct-sync/bank-sync-direct.mjs",
-    "test:unit-js": "node tests/unit/transactions_create.test.js && node tests/unit/generated_tools.smoke.test.js && node tests/unit/schema_validation.test.js && node tests/unit/auth-acl.test.js && node tests/unit/bug76.test.js && node tests/unit/budgets_setAmount.test.js && node tests/unit/transactions_uncategorized.test.js",
+    "test:unit-js": "node tests/unit/transactions_create.test.js && node tests/unit/generated_tools.smoke.test.js && node tests/unit/schema_validation.test.js && node tests/unit/auth-acl.test.js && node tests/unit/bug76.test.js && node tests/unit/budgets_setAmount.test.js && node tests/unit/transactions_uncategorized.test.js && node tests/unit/httpServer_session_init.test.js && node tests/unit/manual_mcp_client_retry.test.js && node tests/unit/manual_mcp_client_session.test.js && node tests/unit/manual_mcp_client_circuit.test.js && node tests/unit/manual_runner_killswitch.test.js && node tests/unit/adapter_auth_rate_limit.test.js && node tests/unit/adapter_session_reuse.test.js",
     "test:adapter": "npm run build && node dist/src/tests_adapter_runner.js",
     "test:e2e": "npx playwright test",
     "test:e2e:docker": "./tests/e2e/run-docker-e2e.sh",
package/dist/src/index.js
CHANGED
@@ -34,7 +34,14 @@ process.on('unhandledRejection', (reason, promise) => {
         reasonStr.includes('Rate limit exceeded') ||
         reasonStr.includes('Failed syncing account') ||
         reasonStr.includes('GoCardless') ||
-        reasonStr.includes('SimpleFIN')) {
+        reasonStr.includes('SimpleFIN') ||
+        // Actual API auth failures (network-failure, too-many-requests, invalid-password,
+        // etc.) can escape as unhandled rejections from session-init code paths that
+        // create a deferred Promise but only conditionally await it (see #132). The
+        // primary fix lives in httpServer.ts (.catch on initPromise); this allow-list
+        // entry is defence-in-depth so any future deferred-promise leak in the same
+        // family also fails non-fatally.
+        reasonStr.includes('Authentication failed:')) {
         console.error('⚠️ Known Actual API domain error escaped to unhandledRejection:');
         console.error('⚠️ ' + reasonStr);
         console.error('⚠️ Server will continue running. The caller received an error response.');
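The allow-list above sits inside the process-level unhandledRejection handler in index.js. A minimal sketch of the pattern for reference; the handler shape, the way reasonStr is derived, and the fatal branch for unknown rejections are assumptions, while the matched strings and the console.error lines come from the diff above.

process.on('unhandledRejection', (reason) => {
  const reasonStr = reason instanceof Error ? (reason.stack || reason.message) : String(reason);
  const isKnownDomainError =
    reasonStr.includes('Rate limit exceeded') ||
    reasonStr.includes('Failed syncing account') ||
    reasonStr.includes('Authentication failed:'); // the entry added in 0.6.2
  if (isKnownDomainError) {
    console.error('⚠️ Known Actual API domain error escaped to unhandledRejection:');
    console.error('⚠️ ' + reasonStr);
    return; // server keeps running; the caller already received an error response
  }
  console.error('Unhandled rejection (unknown):', reason);
  process.exit(1); // assumed: unknown rejections stay fatal
});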
package/dist/src/lib/ActualConnectionPool.js
CHANGED
@@ -16,6 +16,7 @@ import config from '../config.js';
 import path from 'path';
 import os from 'os';
 import fs from 'fs';
+import { setApiInitialized } from './apiState.js';
 const DEFAULT_DATA_DIR = path.resolve(os.homedir() || '.', '.actual');
 class ActualConnectionPool {
     connections = new Map();
@@ -118,6 +119,9 @@ class ActualConnectionPool {
            serverURL: SERVER_URL,
            password: PASSWORD,
        });
+       // Mark the singleton as live so the adapter's pool-cooperation branch
+       // (withActualApi in actual-adapter.ts) can safely skip its per-op init.
+       setApiInitialized(true);
        logger.info(`[ConnectionPool] Downloading budget for session: ${sessionId}`);
        if (BUDGET_PASSWORD) {
            const apiWithOptions = api;
@@ -149,6 +153,8 @@ class ActualConnectionPool {
        catch (cleanupErr) {
            logger.debug(`[ConnectionPool] Error during cleanup (ignoring): ${cleanupErr}`);
        }
+       // Singleton is back to torn-down state regardless of cleanup outcome.
+       setApiInitialized(false);
        // Ensure this session is not in the connections map
        this.connections.delete(sessionId);
        throw err;
@@ -177,6 +183,7 @@ class ActualConnectionPool {
            serverURL: SERVER_URL,
            password: PASSWORD,
        });
+       setApiInitialized(true);
        if (BUDGET_PASSWORD) {
            const apiWithOptions = api;
            await apiWithOptions.downloadBudget(BUDGET_SYNC_ID, { password: BUDGET_PASSWORD });
@@ -194,6 +201,7 @@ class ActualConnectionPool {
        }
        catch (err) {
            logger.error('[ConnectionPool] Failed to initialize shared connection:', err);
+           setApiInitialized(false);
            throw err;
        }
    }
@@ -213,12 +221,15 @@ class ActualConnectionPool {
            }
            conn.initialized = false;
            this.connections.delete(sessionId);
+           setApiInitialized(false);
            // NOTE: We do NOT delete the data directory because it's shared across all sessions
            // Deleting it would cause data loss for other active sessions
            logger.info(`[ConnectionPool] Connection shutdown complete for session: ${sessionId}`);
        }
        catch (err) {
            logger.error(`[ConnectionPool] Error shutting down connection for session ${sessionId}:`, err);
+           // Even on error, the singleton is in an unknown state — don't reuse.
+           setApiInitialized(false);
        }
    }
    /**
@@ -236,10 +247,12 @@ class ActualConnectionPool {
            }
            this.sharedConnection.initialized = false;
            this.sharedConnection = null;
+           setApiInitialized(false);
            logger.info('[ConnectionPool] Shared connection shutdown complete');
        }
        catch (err) {
            logger.error('[ConnectionPool] Error shutting down shared connection:', err);
+           setApiInitialized(false);
        }
    }
    /**
@@ -265,12 +278,21 @@ class ActualConnectionPool {
        this.sharedConnection = null;
    }
    /**
-    * Start periodic cleanup of idle connections
+    * Start periodic cleanup of idle connections.
+    *
+    * The interval is `unref()`d so it does not keep the Node event loop alive
+    * on its own. Without this, importing the pool from a one-shot script
+    * (e.g. unit test, `--test-actual-connection`) would prevent natural
+    * process exit. The interval still fires while the server runs because
+    * other handles (HTTP listener, stdio transport, etc.) keep the loop alive.
     */
    startCleanupTimer() {
        this.cleanupInterval = setInterval(() => {
            this.cleanupIdleConnections();
        }, this.CLEANUP_INTERVAL);
+       if (typeof this.cleanupInterval.unref === 'function') {
+           this.cleanupInterval.unref();
+       }
    }
    /**
     * Clean up idle connections that haven't been used recently
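The unref() call added to startCleanupTimer relies on standard Node.js timer semantics rather than anything pool-specific. A minimal stand-alone sketch, not taken from the package, of why a one-shot script can now exit naturally:

// One-shot script: the only pending handle is this cleanup interval.
const interval = setInterval(() => {
  console.log('cleanup tick'); // only ever runs if something else keeps the loop alive
}, 60_000);

// Same guard as the diff above: some timer-like test doubles lack unref().
if (typeof interval.unref === 'function') {
  interval.unref(); // the interval no longer counts as a reason to keep the process alive
}

// With no other handles open, the process exits immediately instead of waiting
// 60 seconds for the first tick; a long-running server still gets the ticks
// because its HTTP listener (or stdio transport) keeps the event loop alive.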
package/dist/src/lib/actual-adapter.js
CHANGED
@@ -15,6 +15,9 @@ import retry from './retry.js';
 import logger from '../logger.js';
 import config from '../config.js';
 import { parseBudgetRegistry } from './budget-registry.js';
+import { requestContext } from './requestContext.js';
+import { connectionPool } from './ActualConnectionPool.js';
+import { isApiInitialized, setApiInitialized } from './apiState.js';
 /**
  * Budget registry — all budgets configured via ACTUAL_* and BUDGET_n_* env vars.
  * Built once at startup; used by every withActualApi call.
@@ -53,12 +56,118 @@ function withApiLock(fn) {
     _apiSessionLock = new Promise(resolve => { release = resolve; });
     return prevLock.then(() => fn()).finally(() => release());
 }
+// ----------------------------------------------------------------------------
+// Per-session pool cooperation — issue #134
+// ----------------------------------------------------------------------------
+// Pre-#134, every adapter call did api.init() + op + api.shutdown(). With many
+// tool calls in quick succession this produced a burst of upstream logins and
+// tripped Actual's auth rate-limiter (#127's root cause).
+//
+// Post-#134, when an MCP session has already initialised a per-session
+// connection via ActualConnectionPool (httpServer.ts wires this on session
+// open), withActualApi reuses that connection: no init, no shutdown. Writes
+// commit via api.sync() (the same pattern processWriteQueue already uses).
+// The pool tears down once at session close.
+//
+// Fallback: when there is no sessionId in AsyncLocalStorage (e.g. startup
+// health checks, internal calls outside any MCP session, stdio transport
+// callers that don't run inside requestContext.run), or when there is a
+// sessionId but the pool has no initialised connection for it, withActualApi
+// falls back to the legacy init+shutdown path so non-MCP callers keep working.
+let connectionReuseCount = 0;
+// The "is the @actual-app/api singleton currently live?" flag lives in
+// src/lib/apiState.ts so both this module and ActualConnectionPool can
+// update it without a circular import. The pool's hasConnection() returns
+// true based on its own per-session record; this flag is the second guard
+// — the singleton's actual state. Both must agree before reuse is safe.
+function _resolveSessionId() {
+    return requestContext.getStore()?.sessionId;
+}
+function _hasPooledConnection(sessionId) {
+    if (!sessionId)
+        return false;
+    if (!isApiInitialized())
+        return false; // singleton was shut down by some other path
+    return connectionPool.hasConnection(sessionId);
+}
+/**
+ * Decide whether an error from the wrapped operation suggests the api
+ * singleton is in a corrupted state and the pool's session connection should
+ * be released so the next call re-inits cleanly.
+ *
+ * **Drop on**: infrastructure-level errors that imply the api singleton, the
+ * upstream connection, or process-level resources are no longer usable.
+ *
+ * **Keep on**: user-input validation errors, domain errors ("not found",
+ * "does not exist"), Zod schema failures — these don't corrupt the api
+ * singleton, so dropping the pool would discard a perfectly good connection
+ * and force every retry through the legacy init+shutdown path (which is
+ * exactly the auth-burst pattern #134 is trying to eliminate).
+ *
+ * Default: keep. We err on the side of preserving pool reuse — if the api is
+ * actually corrupted but the error pattern doesn't match, the next call's op
+ * will surface the same root cause and we'll catch it then.
+ */
+function _shouldDropPoolOnError(err) {
+    if (!(err instanceof Error))
+        return false;
+    const msg = err.message || '';
+    return (msg.includes('Authentication failed') ||
+        msg.includes('ECONNRESET') ||
+        msg.includes('ECONNREFUSED') ||
+        msg.includes('socket hang up') ||
+        msg.includes('ETIMEDOUT') ||
+        msg.includes('out of memory') ||
+        msg.includes('ENOMEM'));
+}
 /**
- * Helper to
- *
- *
+ * Helper to run an operation with the Actual API ready, deciding the lifecycle
+ * mode automatically:
+ *
+ * - **Pooled mode** (preferred): when an MCP session is in the AsyncLocalStorage
+ *   context AND the connection pool has an initialised connection for it.
+ *   The operation runs against the existing connection. No init, no shutdown.
+ *   If the operation throws, the pool's connection for that session is
+ *   released so the next call gets a fresh init.
+ *
+ * - **Legacy mode** (fallback): the original per-op init → op → shutdown
+ *   cycle. Used when there is no sessionId in context, or the pool has no
+ *   connection for the sessionId. Preserves the original tombstone /
+ *   persistence semantics for non-MCP callers.
+ *
+ * In either mode `withApiLock` serialises against concurrent callers because
+ * `@actual-app/api` is a process-wide singleton.
  */
-async function withActualApi(operation) {
+export async function withActualApi(operation) {
+    const sessionId = _resolveSessionId();
+    if (_hasPooledConnection(sessionId)) {
+        // Pooled mode: skip init+shutdown.
+        return withApiLock(async () => {
+            try {
+                connectionReuseCount++;
+                logger.debug(`[ADAPTER] Reusing pool connection for session ${sessionId} (reuses=${connectionReuseCount})`);
+                return await operation();
+            }
+            catch (err) {
+                // Only drop the pool connection on errors that suggest the api
+                // singleton itself is in a bad state. User-input validation /
+                // domain errors leave the connection fine and dropping it would
+                // re-introduce the auth-burst pattern #134 is fixing.
+                if (_shouldDropPoolOnError(err)) {
+                    logger.warn(`[ADAPTER] Releasing pool connection for session ${sessionId} after infrastructure-level error`);
+                    try {
+                        await connectionPool.shutdownConnection(sessionId);
+                    }
+                    catch (_e) { /* swallow */ }
+                }
+                throw err;
+            }
+        });
+    }
+    if (sessionId) {
+        logger.warn(`[ADAPTER] Pool miss for session ${sessionId}; falling back to per-op init`);
+    }
+    // Legacy mode: init+shutdown around every operation.
     return withApiLock(async () => {
         try {
             await initActualApiForOperation();
@@ -69,20 +178,225 @@ async function withActualApi(operation) {
         }
     });
 }
+/**
+ * Variant of `withActualApi` for write operations. Identical to `withActualApi`
+ * except that, in pooled mode, it explicitly calls `api.sync()` after the
+ * operation succeeds so writes propagate to the upstream Actual server (and so
+ * tombstones for deletes propagate). In legacy mode the existing
+ * `shutdownActualApi()` already handles the persistence flush — no extra sync
+ * call needed there.
+ *
+ * Pattern source: `processWriteQueue` already uses `api.sync()` between writes
+ * within a batch (without shutdown), so this is the same proven approach
+ * generalised to single-write call sites.
+ */
+export async function withActualApiWrite(operation) {
+    const sessionId = _resolveSessionId();
+    if (_hasPooledConnection(sessionId)) {
+        return withApiLock(async () => {
+            try {
+                connectionReuseCount++;
+                logger.debug(`[ADAPTER] Reusing pool connection for write session ${sessionId} (reuses=${connectionReuseCount})`);
+                const result = await operation();
+                // Propagate the write to the server so other clients (and our next
+                // read) see it. Pre-#134 this happened implicitly via api.shutdown().
+                try {
+                    const apiAny = api;
+                    if (typeof apiAny.sync === 'function') {
+                        await apiAny.sync();
+                    }
+                }
+                catch (syncErr) {
+                    // Sync failure on a write IS infrastructure-level — drop the pool
+                    // connection so the next call re-inits, then surface the error.
+                    logger.error(`[ADAPTER] api.sync() failed after write in session ${sessionId}; releasing pool connection`);
+                    try {
+                        await connectionPool.shutdownConnection(sessionId);
+                    }
+                    catch (_e) { /* swallow */ }
+                    throw syncErr;
+                }
+                return result;
+            }
+            catch (err) {
+                // Same policy as withActualApi: only drop the pool on errors that
+                // suggest the api singleton is corrupted. User-input / domain errors
+                // leave the connection fine.
+                if (_shouldDropPoolOnError(err)) {
+                    logger.warn(`[ADAPTER] Releasing pool connection for write session ${sessionId} after infrastructure-level error`);
+                    try {
+                        await connectionPool.shutdownConnection(sessionId);
+                    }
+                    catch (_e) { /* swallow */ }
+                }
+                throw err;
+            }
+        });
+    }
+    if (sessionId) {
+        logger.warn(`[ADAPTER] Pool miss for session ${sessionId}; falling back to per-op init (write)`);
+    }
+    return withApiLock(async () => {
+        try {
+            await initActualApiForOperation();
+            return await operation();
+        }
+        finally {
+            await shutdownActualApi();
+        }
+    });
+}
+/**
+ * Test-only: reset the connection-reuse counter. NOT exported via the package
+ * public surface — only used by unit tests.
+ */
+export function _resetConnectionReuseCounterForTests() {
+    connectionReuseCount = 0;
+}
+/**
+ * Test-only: directly set the api-initialised flag. Lets unit tests exercise
+ * the pool-cooperation branch without driving a real api.init() against the
+ * upstream. NOT exported via the package public surface.
+ */
+export function _setApiInitializedForTests(value) {
+    setApiInitialized(value);
+}
+/**
+ * Test-only: short-circuit `initActualApiForOperation` and `shutdownActualApi`
+ * so the legacy fallback path can run without making network calls against a
+ * real upstream Actual server. Used by unit tests that want to verify the
+ * branch decision in `withActualApi` (pool vs legacy) without hanging on the
+ * real api.init network handshake.
+ *
+ * NOT exported via the package public surface.
+ */
+let _skipApiInitForTests = false;
+export function _setSkipApiInitForTests(value) {
+    _skipApiInitForTests = value;
+}
+// ----------------------------------------------------------------------------
+// Auth-rate-limit retry — issue #127
+// ----------------------------------------------------------------------------
+// The Actual Budget server returns "Authentication failed: too-many-requests"
+// when many MCP sessions log in in quick succession (e.g. a burst of E2E
+// tests). Without a retry-with-backoff at the adapter layer, the very first
+// burst spike fails through to the test runner and cascades into the bearer
+// MCP container's session-init crash (see #132).
+//
+// We retry only on errors known to be transient at the auth layer
+// (too-many-requests, network-failure). invalid-password and other terminal
+// errors propagate immediately so callers see the real cause.
+//
+// The retry budget is bounded so a rate-limited init cannot indefinitely
+// hold the API mutex (withApiLock) and starve other operations.
+// ----------------------------------------------------------------------------
+let authRetryCount = 0; // monotonic, observability
+let authRetryFailureCount = 0; // increments only when retry budget exhausted
+// The auth-rate-limit path uses a deliberately LARGER backoff than the generic
+// retry helper because Actual Budget's auth rate-limiter operates on a multi-
+// second sliding window, not a per-request burst. The generic 200ms base
+// would exhaust within 1.4s — well inside the upstream's window.
+//
+// Empirically (2026-05-06, #127):
+// - 200ms base = 1.4s total: too short, every retry hits the throttle.
+// - 2000ms base = 14s total: insufficient under heavy auth pressure
+//   (e.g. 10 rapid logins before a tool call still throttle 14s+).
+// - 5000ms base = 5s + 10s + 10s = 25s total (each step capped by
+//   MAX_RETRY_DELAY_MS): clears the rate-limit window in light-pressure
+//   scenarios (3 rapid logins) without holding the API mutex unreasonably
+//   long.
+//
+// Beyond 25s, blocking the API mutex starts to harm tail latency for
+// unrelated tool calls. The proper long-term fix for sustained-pressure
+// scenarios is session reuse (avoid init+shutdown per op) — out of scope for
+// this ticket; tracked as a follow-up.
+const AUTH_RETRY_BASE_BACKOFF_MS = 5000;
+export function isRetryableAuthError(err) {
+    if (!(err instanceof Error))
+        return false;
+    return (err.message.includes('Authentication failed: too-many-requests') ||
+        err.message.includes('Authentication failed: network-failure'));
+}
+/**
+ * Wrap an operation with retry-on-rate-limit. Used to wrap api.init() so
+ * transient too-many-requests errors are absorbed transparently. The retry
+ * budget is capped at DEFAULT_RETRY_ATTEMPTS (3) attempts and total wallclock
+ * is bounded by MAX_RETRY_DELAY_MS via exponential backoff.
+ *
+ * Test-friendly: opts.maxRetries / opts.baseBackoffMs override the defaults
+ * so unit tests can run fast.
+ *
+ * Log hygiene: this function never logs the upstream URL, password, or any
+ * config-derived value — only the error class and the Actual error code
+ * (extracted from the message) plus the attempt counter.
+ */
+export async function withAuthRetry(operation, opts) {
+    const maxRetries = opts?.maxRetries ?? DEFAULT_RETRY_ATTEMPTS;
+    const baseBackoffMs = opts?.baseBackoffMs ?? AUTH_RETRY_BASE_BACKOFF_MS;
+    let attempt = 0;
+    while (true) {
+        try {
+            return await operation();
+        }
+        catch (err) {
+            if (!isRetryableAuthError(err))
+                throw err;
+            attempt++;
+            if (attempt > maxRetries) {
+                // Budget exhausted: log + bump failure counter, but do NOT bump
+                // authRetryCount — that counter measures successful retry-and-sleep
+                // cycles, not failed final attempts.
+                authRetryFailureCount++;
+                const code = (err instanceof Error ? err.message.match(/Authentication failed: (\S+)/)?.[1] : null) || 'unknown';
+                logger.error(`[ADAPTER] Auth retry exhausted after ${maxRetries} retries (last code: ${code})`);
+                throw err;
+            }
+            // We're going to retry — count it and sleep with exponential backoff.
+            authRetryCount++;
+            const delay = Math.min(baseBackoffMs * Math.pow(2, attempt - 1), MAX_RETRY_DELAY_MS);
+            const code = (err instanceof Error ? err.message.match(/Authentication failed: (\S+)/)?.[1] : null) || 'unknown';
+            logger.debug(`[ADAPTER] Auth retry ${attempt}/${maxRetries} (code: ${code}) after ${delay}ms`);
+            await new Promise(r => setTimeout(r, delay));
+        }
+    }
+}
+/**
+ * Test-only: reset the auth retry observability counters. NOT exported via
+ * the package public surface — only used by unit tests.
+ */
+export function _resetAuthRetryCountersForTests() {
+    authRetryCount = 0;
+    authRetryFailureCount = 0;
+}
 /**
  * Initialize Actual API - based on s-stefanov/actual-mcp pattern
  * This calls api.init() and api.downloadBudget() for each operation
  */
 async function initActualApiForOperation() {
+    if (_skipApiInitForTests) {
+        setApiInitialized(true);
+        return;
+    }
+    // If the api singleton is already live (e.g. the connection pool initialised
+    // it at MCP session open), don't redundantly call api.init() again — that
+    // would trigger an extra upstream login and reintroduce the auth-burst
+    // pattern #134 is fixing. The pool keeps the singleton alive across writes;
+    // we just join in.
+    if (isApiInitialized()) {
+        logger.debug('[ADAPTER] api already initialised; skipping redundant init');
+        return;
+    }
     try {
         const budget = getActiveBudgetConfig();
         const DATA_DIR = config.MCP_BRIDGE_DATA_DIR;
         logger.debug(`[ADAPTER] Initializing Actual API for operation (budget: "${budget.name}", server: ${budget.serverUrl})`);
-        await api.init({
+        // Wrap api.init in auth-rate-limit retry so a transient too-many-requests
+        // doesn't surface to the caller (and doesn't trigger #132's crash path).
+        await withAuthRetry(() => api.init({
             dataDir: DATA_DIR,
             serverURL: budget.serverUrl,
             password: budget.password || '',
-        });
+        }));
         logger.debug('[ADAPTER] Downloading budget');
         if (budget.encryptionPassword) {
             const apiWithOptions = api;
@@ -91,6 +405,7 @@ async function initActualApiForOperation() {
         else {
             await api.downloadBudget(budget.syncId);
         }
+        setApiInitialized(true);
         logger.debug('[ADAPTER] Actual API initialized for operation');
     }
     catch (err) {
@@ -99,6 +414,38 @@ async function initActualApiForOperation() {
     }
 }
 async function shutdownActualApi() {
+    if (_skipApiInitForTests) {
+        setApiInitialized(false);
+        return;
+    }
+    // If the connection pool currently has any active per-session connections,
+    // those sessions own the api singleton's lifecycle — tearing it down here
+    // would invalidate every active session's pool entry and force the next
+    // tool call back through legacy init+shutdown (the very pattern #134 is
+    // eliminating). Instead, just sync (the persistence guarantee that
+    // shutdown was previously providing implicitly) and leave the singleton
+    // alive for the pool to manage.
+    try {
+        const stats = connectionPool.getStats();
+        if (stats.activeSessions > 0) {
+            try {
+                const apiAny = api;
+                if (typeof apiAny.sync === 'function') {
+                    await apiAny.sync();
+                    logger.debug('[ADAPTER] api.sync() instead of shutdown (pool has active sessions)');
+                }
+            }
+            catch (syncErr) {
+                logger.error('[ADAPTER] sync-without-shutdown failed:', syncErr);
+                // Don't propagate — shutdown was best-effort anyway.
+            }
+            return;
+        }
+    }
+    catch (statsErr) {
+        // Pool not available (e.g. early startup) — fall through to legacy shutdown.
+        logger.debug('[ADAPTER] could not read pool stats; defaulting to full shutdown:', statsErr);
+    }
     try {
         const maybeApi = api;
         if (typeof maybeApi.shutdown === 'function') {
@@ -109,8 +456,14 @@ async function shutdownActualApi() {
     catch (err) {
         logger.error('[ADAPTER] Error during Actual API shutdown:', err);
     }
+    finally {
+        // Always reset the flag — even if shutdown threw, the api singleton is
+        // no longer in a known-good state, so pool reuse must NOT be attempted
+        // until something explicitly re-inits.
+        setApiInitialized(false);
+    }
 }
-import { BANK_SYNC_SETTLE_MS, DEFAULT_CONCURRENCY_LIMIT, WRITE_SESSION_DELAY_MS } from './constants.js';
+import { BANK_SYNC_SETTLE_MS, DEFAULT_CONCURRENCY_LIMIT, DEFAULT_RETRY_ATTEMPTS, MAX_RETRY_DELAY_MS, WRITE_SESSION_DELAY_MS } from './constants.js';
 /**
  * Very small concurrency limiter for adapter calls. This prevents bursts from
  * overloading the actual server. It's intentionally tiny and in-memory; replace
@@ -247,7 +600,24 @@ function withConcurrency(fn) {
 }
 // Expose some helpers for testing concurrency
 export function getConcurrencyState() {
-    return {
+    return {
+        running,
+        queueLength: queue.length,
+        maxConcurrency: MAX_CONCURRENCY,
+        // Auth-retry observability — issue #127. authRetries is monotonic over the
+        // process lifetime; authRetryFailures only increments when retry budget
+        // exhausted. A jump in authRetries without a matching jump in
+        // authRetryFailures means the retry-with-backoff is absorbing rate-limit
+        // pressure (healthy). Both jumping = upstream genuinely overloaded.
+        authRetries: authRetryCount,
+        authRetryFailures: authRetryFailureCount,
+        // Pool-cooperation observability — issue #134. connectionReuses increments
+        // every time withActualApi reused an existing per-session pool connection
+        // instead of running its own init+shutdown cycle. Pre-#134 this was
+        // structurally always 0; post-#134 it should grow at least linearly with
+        // tool-call volume on healthy MCP sessions.
+        connectionReuses: connectionReuseCount,
+    };
 }
 /**
  * Sync local changes to the Actual Budget server.
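Worked through, the tuning comments above appear to imply MAX_RETRY_DELAY_MS is 10 000 ms: with DEFAULT_RETRY_ATTEMPTS = 3 and the 5000 ms base, the sleeps are min(5000 * 2^0, 10000) = 5 s, then 10 s, then 10 s, giving the quoted 25 s of total backoff before the last error is rethrown. A hypothetical usage sketch of the new withAuthRetry export wrapping api.init(); the import path, data directory, server URL, and password below are placeholders, while the function names and options come from the diff:

import * as api from '@actual-app/api';
import { withAuthRetry } from 'actual-mcp-server/dist/src/lib/actual-adapter.js'; // illustrative path

await withAuthRetry(
  () => api.init({
    dataDir: '/tmp/actual-data',            // placeholder
    serverURL: 'http://localhost:5006',     // placeholder
    password: process.env.ACTUAL_PASSWORD,  // placeholder
  }),
  // Defaults written out explicitly; unit tests can pass much smaller values here.
  { maxRetries: 3, baseBackoffMs: 5000 }
);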
package/dist/src/lib/apiState.js
ADDED
@@ -0,0 +1,26 @@
+/**
+ * Shared module-level state for the @actual-app/api singleton's "live" flag.
+ *
+ * @actual-app/api is a process-wide singleton that gets `init()`d and
+ * `shutdown()`d by multiple paths in this codebase: the connection pool's
+ * per-session init (`ActualConnectionPool.getConnection`), the adapter's
+ * legacy per-op cycle (`initActualApiForOperation` / `shutdownActualApi`),
+ * and the write queue (`processWriteQueue`).
+ *
+ * The adapter's pool-cooperation logic (`withActualApi` in actual-adapter.ts)
+ * needs to know whether the singleton is currently live so it can safely
+ * skip the per-op init when the pool already has a connection. This module
+ * exposes a tiny shared flag that all init/shutdown paths update, so any
+ * caller can probe the truth without having to know about every path.
+ *
+ * Lives in src/lib/ rather than inside actual-adapter.ts so the connection
+ * pool can update it without creating a circular import (the pool is itself
+ * imported by the adapter).
+ */
+let _apiInitialized = false;
+export function isApiInitialized() {
+    return _apiInitialized;
+}
+export function setApiInitialized(value) {
+    _apiInitialized = value;
+}
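A hypothetical unit-test sketch for the flag; the import specifier and the assertions are illustrative, while isApiInitialized / setApiInitialized are the functions added in this file:

import assert from 'node:assert';
import { isApiInitialized, setApiInitialized } from './apiState.js'; // illustrative relative path

assert.strictEqual(isApiInitialized(), false); // default: singleton not live
setApiInitialized(true);                       // what the pool does after a successful api.init()
assert.strictEqual(isApiInitialized(), true);
setApiInitialized(false);                      // what every shutdown / error path does
assert.strictEqual(isApiInitialized(), false);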
package/dist/src/lib/requestContext.js
ADDED
@@ -0,0 +1,17 @@
+import { AsyncLocalStorage } from 'async_hooks';
+/**
+ * Per-request AsyncLocalStorage. Carries the active MCP sessionId across
+ * async boundaries so adapter / tool code can identify which session is
+ * making the call without threading an argument through every layer.
+ *
+ * Producer: src/server/httpServer.ts wraps each `transport.handleRequest()`
+ * call in `requestContext.run({ sessionId }, …)`.
+ *
+ * Consumer: src/lib/actual-adapter.ts uses `requestContext.getStore()?.sessionId`
+ * to decide whether the session has an initialised pool connection it can
+ * reuse (eliminating the per-op login burst — see #134).
+ *
+ * Lives in src/lib/ rather than src/server/ to avoid the circular import that
+ * would otherwise exist between httpServer.ts and actual-adapter.ts.
+ */
+export const requestContext = new AsyncLocalStorage();
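The producer/consumer split described in the docstring is plain AsyncLocalStorage usage. A simplified stand-alone sketch; handleRequest and adapterCall below are stand-ins for the real httpServer.ts / actual-adapter.ts code, not excerpts from it:

import { AsyncLocalStorage } from 'async_hooks';

const requestContext = new AsyncLocalStorage();

// Producer (httpServer side): wrap request handling in run() so everything
// downstream of this call, however deeply async, can read the sessionId.
function handleRequest(sessionId, handler) {
  return requestContext.run({ sessionId }, () => handler());
}

// Consumer (adapter side): read the sessionId without it being threaded
// through every intermediate layer as an argument.
async function adapterCall() {
  const sessionId = requestContext.getStore()?.sessionId;
  return sessionId ? `pooled mode for ${sessionId}` : 'legacy init+shutdown mode';
}

console.log(await handleRequest('abc-123', adapterCall)); // "pooled mode for abc-123"
console.log(await adapterCall());                         // "legacy init+shutdown mode"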
package/dist/src/server/httpServer.js
CHANGED
@@ -1,5 +1,3 @@
-// src/server/httpServer.ts
-import { AsyncLocalStorage } from 'async_hooks';
 import express from 'express';
 import { randomUUID } from 'crypto';
 import { Server } from '@modelcontextprotocol/sdk/server/index.js';
@@ -16,8 +14,12 @@ import { createMcpAuth } from '../auth/setup.js';
 import { budgetAclMiddleware } from '../auth/budget-acl.js';
 import * as https from 'node:https';
 import * as fs from 'node:fs';
-// AsyncLocalStorage for request context
-export const requestContext = new AsyncLocalStorage();
+// AsyncLocalStorage for request context — moved to src/lib/requestContext.ts
+// so adapter code can import it without a circular dependency on httpServer.
+// Re-exported here for backward compatibility with any callers that imported
+// `requestContext` from this module.
+import { requestContext } from '../lib/requestContext.js';
+export { requestContext };
 export async function startHttpServer(mcp, port, httpPath, capabilities, // was passed by index.ts
 implementedTools, // was passed by index.ts
 serverDescription, // was passed by index.ts
@@ -313,6 +315,13 @@ bindHost = 'localhost', advertisedUrl) {
         resolveInit = resolve;
         rejectInit = reject;
     });
+    // Always-on safety net: if no concurrent code path is awaiting initPromise
+    // when rejectInit fires (e.g. session init fails before any tools/call
+    // arrives), the rejection would otherwise hit process.on('unhandledRejection')
+    // and exit the server. The original error is already logged inside the
+    // onsessioninitialized catch block below — we deliberately do NOT re-log here
+    // to avoid duplicate noise and any risk of leaking credentials.
+    initPromise.catch(() => { });
     const transport = new StreamableHTTPServerTransport({
         sessionIdGenerator: () => randomUUID(),
         enableJsonResponse: true,
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "actual-mcp-server",
   "displayName": "Actual MCP Server",
-  "version": "0.6.1",
+  "version": "0.6.2",
   "engines": {
     "node": ">=20.0.0",
     "npm": ">=10.0.0"
@@ -30,7 +30,7 @@
     "verify-tools": "npm run build && node scripts/verify-tools.js",
     "check:coverage": "node scripts/list-actual-api-methods.mjs",
     "direct-sync": "node scripts/direct-sync/bank-sync-direct.mjs",
-    "test:unit-js": "node tests/unit/transactions_create.test.js && node tests/unit/generated_tools.smoke.test.js && node tests/unit/schema_validation.test.js && node tests/unit/auth-acl.test.js && node tests/unit/bug76.test.js && node tests/unit/budgets_setAmount.test.js && node tests/unit/transactions_uncategorized.test.js",
+    "test:unit-js": "node tests/unit/transactions_create.test.js && node tests/unit/generated_tools.smoke.test.js && node tests/unit/schema_validation.test.js && node tests/unit/auth-acl.test.js && node tests/unit/bug76.test.js && node tests/unit/budgets_setAmount.test.js && node tests/unit/transactions_uncategorized.test.js && node tests/unit/httpServer_session_init.test.js && node tests/unit/manual_mcp_client_retry.test.js && node tests/unit/manual_mcp_client_session.test.js && node tests/unit/manual_mcp_client_circuit.test.js && node tests/unit/manual_runner_killswitch.test.js && node tests/unit/adapter_auth_rate_limit.test.js && node tests/unit/adapter_session_reuse.test.js",
     "test:adapter": "npm run build && node dist/src/tests_adapter_runner.js",
     "test:e2e": "npx playwright test",
     "test:e2e:docker": "./tests/e2e/run-docker-e2e.sh",