@hotmeshio/long-tail 0.1.11 → 0.1.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -730,7 +730,12 @@ async function resolveEscalation(input, auth) {
  const handle = await client.workflow.getHandle(signalRouting.taskQueue, signalRouting.workflowType, signalRouting.workflowId);
  await handle.signal(signalRouting.signalId, signalPayload);
  }
- await escalationService.resolveEscalation(escalation.id, resolverPayload);
+ // For YAML workflows, the resolve worker inside the workflow calls
+ // claim_and_resolve to close the escalation transactionally. Only resolve
+ // here for Durable workflows that lack an in-workflow resolve step.
+ if (signalRouting.engine !== 'yaml') {
+ await escalationService.resolveEscalation(escalation.id, resolverPayload);
+ }
  (0, publish_1.publishEscalationEvent)({
  type: 'escalation.resolved',
  source: 'api',
@@ -740,7 +745,7 @@ async function resolveEscalation(input, auth) {
  taskId: escalation.task_id,
  escalationId: escalation.id,
  originId: escalation.origin_id ?? undefined,
- status: 'resolved',
+ status: signalRouting.engine === 'yaml' ? 'signaled' : 'resolved',
  });
  return {
  status: 200,
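The practical effect of these two hunks: when the escalation is routed by a YAML workflow (`signalRouting.engine === 'yaml'`), the API now only signals the workflow and leaves the transactional close to the in-workflow `claim_and_resolve` step, and the published event reports `status: 'signaled'` instead of `'resolved'`. A minimal consumer sketch — the event type and handler below are illustrative, not exports of this package:

```typescript
// Hypothetical subscriber to the escalation event published above.
type EscalationResolvedEvent = {
  type: 'escalation.resolved';
  escalationId: string;
  status: 'signaled' | 'resolved'; // 'signaled' is new in 0.1.12 for yaml-engine escalations
};

function onEscalationResolved(evt: EscalationResolvedEvent) {
  if (evt.status === 'signaled') {
    // yaml engine: the workflow's own resolve worker (claim_and_resolve) closes it
    console.log(`escalation ${evt.escalationId} signaled; awaiting in-workflow resolve`);
  } else {
    // durable engine: already resolved by the API handler
    console.log(`escalation ${evt.escalationId} resolved`);
  }
}
```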
@@ -5,6 +5,19 @@ import type { LTMcpServerRecord, LTMcpToolManifest } from '../../../types';
  * when callServerTool is invoked with its name.
  */
  export declare function registerBuiltinServer(name: string, factory: () => Promise<any>): void;
+ /**
+ * Dispatch a tool call directly to a built-in server's handler,
+ * bypassing MCP Client/Transport entirely. Returns null if the server
+ * or tool is not a built-in — caller should fall through to MCP transport.
+ *
+ * Each built-in server is lazily instantiated once and cached. Tool handlers
+ * are called via server._registeredTools[toolName].handler(args). This
+ * eliminates the InMemoryTransport bottleneck under concurrent load.
+ */
+ export declare function dispatchBuiltinTool(serverId: string, toolName: string, args: Record<string, any>): Promise<{
+ dispatched: true;
+ result: any;
+ } | null>;
  /**
  * Connect to a registered MCP server.
  * Creates the appropriate transport based on transport_type,
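The declaration spells out a dispatch-or-fall-through contract: a non-null return means the call was handled in-process, `null` means the caller should continue down the normal MCP transport path. A rough usage sketch, assuming a `callViaTransport` fallback of your own and an illustrative import path (neither is a package export):

```typescript
import { dispatchBuiltinTool } from './connection'; // path is an assumption

declare function callViaTransport(
  serverId: string,
  toolName: string,
  args: Record<string, any>
): Promise<any>;

async function callTool(serverId: string, toolName: string, args: Record<string, any>) {
  // Built-in servers: direct handler invocation, no Client/Transport round trip.
  const builtin = await dispatchBuiltinTool(serverId, toolName, args);
  if (builtin) {
    return builtin.result; // already parsed from the MCP-shaped response
  }
  // Not a built-in (or tool not found on it): fall through to the transport path.
  return callViaTransport(serverId, toolName, args);
}
```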
@@ -34,6 +34,7 @@ var __importStar = (this && this.__importStar) || (function () {
  })();
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.registerBuiltinServer = registerBuiltinServer;
+ exports.dispatchBuiltinTool = dispatchBuiltinTool;
  exports.connectToServer = connectToServer;
  exports.disconnectFromServer = disconnectFromServer;
  exports.resolveClient = resolveClient;
@@ -58,6 +59,12 @@ const clients = new Map();
  * rather than external stdio/SSE connections.
  */
  const builtinFactories = new Map();
+ /**
+ * Cached built-in McpServer instances -- keyed by canonical server name.
+ * Used by dispatchBuiltinTool() to call tool handlers directly without
+ * going through MCP Client/Transport. One instance per server.
+ */
+ const builtinServers = new Map();
  /**
  * Register a built-in server factory so it can be auto-connected
  * when callServerTool is invoked with its name.
@@ -65,6 +72,60 @@ const builtinFactories = new Map();
  function registerBuiltinServer(name, factory) {
  builtinFactories.set(name, factory);
  }
+ /**
+ * Dispatch a tool call directly to a built-in server's handler,
+ * bypassing MCP Client/Transport entirely. Returns null if the server
+ * or tool is not a built-in — caller should fall through to MCP transport.
+ *
+ * Each built-in server is lazily instantiated once and cached. Tool handlers
+ * are called via server._registeredTools[toolName].handler(args). This
+ * eliminates the InMemoryTransport bottleneck under concurrent load.
+ */
+ async function dispatchBuiltinTool(serverId, toolName, args) {
+ // Normalize and match against builtin factories
+ const norm = (s) => s.replace(/[^a-zA-Z0-9]/g, '').toLowerCase();
+ const normId = norm(serverId);
+ let matchedName = null;
+ for (const [name] of builtinFactories) {
+ const normName = norm(name);
+ if (normName === normId || normName.includes(normId) || normId.includes(normName)) {
+ matchedName = name;
+ break;
+ }
+ }
+ if (!matchedName)
+ return null;
+ // Lazily create and cache the server instance
+ if (!builtinServers.has(matchedName)) {
+ const factory = builtinFactories.get(matchedName);
+ const server = await factory();
+ builtinServers.set(matchedName, server);
+ logger_1.loggerRegistry.info(`[lt-mcp:builtin] ${matchedName} ready (direct dispatch)`);
+ }
+ const server = builtinServers.get(matchedName);
+ const tool = server._registeredTools?.[toolName];
+ if (!tool?.handler)
+ return null;
+ // Call the handler directly — no transport, no JSON-RPC.
+ // Tool handlers return MCP-shaped responses: { content: [{ type: 'text', text: '...' }] }
+ // Parse the text content the same way callServerTool does.
+ const mcpResponse = await tool.handler(args);
+ let parsed = mcpResponse;
+ if (mcpResponse && Array.isArray(mcpResponse.content)) {
+ const textContent = mcpResponse.content.find((c) => c.type === 'text');
+ if (textContent && 'text' in textContent) {
+ try {
+ parsed = JSON.parse(textContent.text);
+ }
+ catch {
+ parsed = mcpResponse.isError ? { error: textContent.text } : textContent.text;
+ }
+ }
+ }
+ const isError = parsed && typeof parsed === 'object' && 'error' in parsed;
+ logger_1.loggerRegistry.debug(`[lt-mcp:builtin] ${matchedName}/${toolName} ok=${!isError} resultKeys=[${typeof parsed === 'object' && parsed ? Object.keys(parsed).join(',') : 'raw'}]`);
+ return { dispatched: true, result: parsed };
+ }
  /**
  * Connect to a registered MCP server.
  * Creates the appropriate transport based on transport_type,
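Note how loose the matcher is: both sides are stripped to lowercase alphanumerics, and a hit is either an exact match or a substring match in either direction, so a display name and a slugged id can still find each other. A small sketch of the same normalization with made-up server names:

```typescript
// Same normalization rule as dispatchBuiltinTool above; the names are hypothetical.
const norm = (s: string) => s.replace(/[^a-zA-Z0-9]/g, '').toLowerCase();

const registered = ['lt-escalations', 'lt-tasks']; // pretend built-in factories
const requested = 'LT Escalations Server';          // id as a caller might send it

const matched = registered.find((name) => {
  const a = norm(name);      // 'ltescalations'
  const b = norm(requested); // 'ltescalationsserver'
  return a === b || a.includes(b) || b.includes(a);
});

console.log(matched); // 'lt-escalations' — substring match after normalization
```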
@@ -330,4 +391,5 @@ async function testConnection(transportType, transportConfig) {
  */
  function clear() {
  clients.clear();
+ builtinServers.clear();
  }
@@ -59,13 +59,8 @@ function deriveAuthFromToolContext() {
  */
  async function callServerTool(serverId, toolName, args, authContext) {
  logger_1.loggerRegistry.debug(`[lt-mcp:call] entering ${serverId}/${toolName} argKeys=[${Object.keys(args).join(',')}]`);
- const client = await (0, connection_1.resolveClient)(serverId);
- if (!client) {
- throw new Error(`MCP server ${serverId} is not connected`);
- }
- // Resolve auth: explicit authContext > ambient ToolContext > none
+ // Resolve auth context before dispatch — both paths need it
  const resolvedAuth = authContext ?? deriveAuthFromToolContext();
- // Inject auth context as a hidden _auth argument when available
  const toolArgs = resolvedAuth?.userId || resolvedAuth?.delegationToken
  ? { ...args, _auth: { userId: resolvedAuth.userId, token: resolvedAuth.delegationToken } }
  : args;
@@ -74,7 +69,25 @@ async function callServerTool(serverId, toolName, args, authContext) {
  if (ctx?.principal.id) {
  logger_1.loggerRegistry.debug(`[lt-mcp:audit] ${toolName} on ${serverId} by ${ctx.principal.type}:${ctx.principal.id}`);
  }
- const result = await client.callTool({ name: toolName, arguments: toolArgs }, undefined, { timeout: defaults_1.MCP_TOOL_TIMEOUT_MS });
+ // Direct dispatch for built-in servers bypasses MCP Client/Transport.
+ // Each built-in server is a cached singleton; tool handlers are called
+ // as plain functions. No transport contention under concurrent load.
+ const builtin = await (0, connection_1.dispatchBuiltinTool)(serverId, toolName, toolArgs);
+ if (builtin) {
+ logger_1.loggerRegistry.debug(`[lt-mcp:call] leaving ${serverId}/${toolName} (builtin) resultKeys=[${typeof builtin.result === 'object' && builtin.result ? Object.keys(builtin.result).join(',') : 'raw'}]`);
+ return builtin.result;
+ }
+ // External servers — use MCP Client/Transport with timeout guard
+ const client = await (0, connection_1.resolveClient)(serverId);
+ if (!client) {
+ throw new Error(`MCP server ${serverId} is not connected`);
+ }
+ // Guard against hung transports: the MCP SDK timeout relies on the transport
+ // to respond, which fails when InMemoryTransport is saturated under concurrency.
+ // Promise.race ensures we throw on timeout regardless of transport state.
+ const callPromise = client.callTool({ name: toolName, arguments: toolArgs }, undefined, { timeout: defaults_1.MCP_TOOL_TIMEOUT_MS });
+ const timeoutPromise = new Promise((_, reject) => setTimeout(() => reject(new Error(`MCP tool ${serverId}/${toolName} timed out after ${defaults_1.MCP_TOOL_TIMEOUT_MS}ms`)), defaults_1.MCP_TOOL_TIMEOUT_MS));
+ const result = await Promise.race([callPromise, timeoutPromise]);
  // Extract text content from MCP response
  if (Array.isArray(result.content)) {
  const textContent = result.content.find((c) => c.type === 'text');
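The new code keeps the SDK's own `timeout` option but no longer trusts it alone: a sibling timer races the call so a wedged transport cannot hang the caller. The same guard can be written as a small helper; a sketch (not a package export) that also clears the timer once the race settles:

```typescript
// Generic restatement of the Promise.race guard used above.
async function withTimeout<T>(work: Promise<T>, ms: number, label: string): Promise<T> {
  let timer: NodeJS.Timeout | undefined;
  const timeout = new Promise<never>((_, reject) => {
    timer = setTimeout(() => reject(new Error(`${label} timed out after ${ms}ms`)), ms);
  });
  try {
    // Whichever settles first wins, regardless of transport state.
    return await Promise.race([work, timeout]);
  } finally {
    if (timer) clearTimeout(timer);
  }
}

// e.g. await withTimeout(client.callTool({ name, arguments: args }), MCP_TOOL_TIMEOUT_MS, `${serverId}/${name}`);
```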
@@ -40,6 +40,7 @@ const db_1 = require("../../../lib/db");
  const logger_1 = require("../../../lib/logger");
  const ephemeral_1 = require("../../iam/ephemeral");
  const mcpClient = __importStar(require("../../mcp/client"));
+ const connection_1 = require("../../mcp/client/connection");
  const yamlDb = __importStar(require("../db"));
  const scope_1 = require("./scope");
  const callbacks_1 = require("./callbacks");
@@ -167,14 +168,18 @@ async function registerWorkersForWorkflow(workflow) {
  if (!serverId)
  continue;
  const storedArgs = activity.tool_arguments;
- const yamlHookTopic = hookTopicByEscalationTool.get(activity.activity_id);
+ // For escalate_and_wait, resolve hookTopic at runtime from the activity
+ // context — multiple escalation workers share a single registered callback,
+ // so we look up by activity_id from the incoming metadata, not the static
+ // activity captured at registration time.
+ const staticHookTopic = hookTopicByEscalationTool.get(activity.activity_id);
  // Identify keys that are wired via input_mappings. When a wired key
  // resolves to nothing (upstream step failed/returned null), we must
  // NOT fall back to stored tool_arguments — that would leak hardcoded
  // values from the original execution trace.
  const wiredKeys = new Set(Object.keys(activity.input_mappings || {}).filter(k => k !== '_scope' && k !== 'workflowName'));
  if (toolName === 'escalate_and_wait') {
- logger_1.loggerRegistry.info(`[yaml-workflow] escalate_and_wait worker: activityId=${activity.activity_id}, hookTopic=${yamlHookTopic || 'NONE'}, mapKeys=[${[...hookTopicByEscalationTool.keys()].join(',')}]`);
+ logger_1.loggerRegistry.info(`[yaml-workflow] escalate_and_wait worker: activityId=${activity.activity_id}, hookTopic=${staticHookTopic || 'NONE'}, mapKeys=[${[...hookTopicByEscalationTool.keys()].join(',')}]`);
  }
  workerConfigs.push({
  topic: activity.topic,
@@ -201,7 +206,13 @@ async function registerWorkersForWorkflow(workflow) {
  }
  logger_1.loggerRegistry.debug(`[yaml-workflow:worker] merged mcp/${toolName} wf=${wfName} mergedKeys=[${Object.keys(mergedArgs).join(',')}]`);
  // For escalate_and_wait: inject YAML signal routing so the MCP tool
- // stores engine:'yaml' + hookTopic + jobId in the escalation metadata
+ // stores engine:'yaml' + hookTopic + jobId in the escalation metadata.
+ // Resolve hookTopic at runtime — multiple escalation workers share
+ // this callback, so we look up by the current activity_id from metadata.
+ const runtimeActivityId = data.metadata?.aid;
+ const yamlHookTopic = runtimeActivityId
+ ? hookTopicByEscalationTool.get(runtimeActivityId) || staticHookTopic
+ : staticHookTopic;
  if (yamlHookTopic) {
  const jid = data.metadata?.jid;
  mergedArgs._yaml_signal_routing = {
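The comment is worth unpacking: the registered callback closes over whichever `activity` the registration loop was on, so a hookTopic captured at registration time is wrong for every other escalation activity that shares the callback. Looking it up by the caller's `metadata.aid` fixes that. A toy sketch of the stale-closure problem and the runtime lookup (names and values are illustrative, not the package's actual types):

```typescript
// Illustrative only — mirrors the pattern above.
const hookTopicByEscalationTool = new Map<string, string>([
  ['escalate_step_a', 'hook.topic.a'],
  ['escalate_step_b', 'hook.topic.b'],
]);

// Captured once while registering workers: the same value for every later call.
const staticHookTopic = hookTopicByEscalationTool.get('escalate_step_b');

function resolveHookTopic(metadata?: { aid?: string }) {
  // Runtime lookup keyed by the invoking activity id, with the static value as fallback.
  const runtimeActivityId = metadata?.aid;
  return runtimeActivityId
    ? hookTopicByEscalationTool.get(runtimeActivityId) || staticHookTopic
    : staticHookTopic;
}

console.log(resolveHookTopic({ aid: 'escalate_step_a' })); // 'hook.topic.a', not the captured 'hook.topic.b'
```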
@@ -216,7 +227,16 @@ async function registerWorkersForWorkflow(workflow) {
  }
  const exchangedArgs = await (0, ephemeral_1.exchangeTokensInArgs)(mergedArgs);
  const coercedArgs = coerceNumericObjects(exchangedArgs);
- const result = await mcpClient.callServerTool(serverId, toolName, coercedArgs);
+ // Try direct dispatch for built-in servers (bypasses MCP transport).
+ // Falls through to mcpClient.callServerTool() for external servers.
+ let result;
+ const builtin = await (0, connection_1.dispatchBuiltinTool)(serverId, toolName, coercedArgs);
+ if (builtin) {
+ result = builtin.result;
+ }
+ else {
+ result = await mcpClient.callServerTool(serverId, toolName, coercedArgs);
+ }
  if (result && typeof result === 'object' && 'error' in result) {
  logger_1.loggerRegistry.error(`[yaml-workflow:worker] ${toolName} error: ${JSON.stringify(result).slice(0, 200)}`);
  }
package/docs/cloud.md CHANGED
@@ -270,3 +270,126 @@ services:
  ```
 
  The combined `index.js` entry point (used in development and the demo) calls `start()` with both server and workers enabled. In production, split them into `api.js` and `worker.js` with different `start()` configs.
+
+ ## PostgreSQL Performance Tuning
+
+ HotMesh's durable execution model is write-heavy. Every workflow creates a `jobs` row, and every field mutation creates rows in `jobs_attributes`. A simple 3-step workflow generates ~100 attribute rows per execution. At 1,000 concurrent workflows, that's 300K+ inserts in seconds.
+
+ The default Postgres configuration is tuned for mixed workloads on modest hardware. For Long Tail, the write-heavy profile needs specific adjustments.
+
+ ### Determining Your Profile
+
+ Run a baseline throughput test to understand your bottleneck:
+
+ ```bash
+ # Submit 100 minimal workflows, measure submit rate
+ time for i in $(seq 1 100); do
+ curl -s -X POST http://localhost:3000/api/workflows/basicEcho/invoke \
+ -H "Authorization: Bearer $TOKEN" \
+ -H 'Content-Type: application/json' \
+ -d '{"data":{"message":"test","sleepSeconds":0}}' > /dev/null
+ done
+ ```
+
+ Then check where Postgres is spending time:
+
+ ```sql
+ -- Check for write pressure (high buffers_checkpoint = WAL bottleneck)
+ SELECT * FROM pg_stat_bgwriter;
+
+ -- Check for connection saturation
+ SELECT count(*) as active, max_conn
+ FROM pg_stat_activity, (SELECT setting::int as max_conn FROM pg_settings WHERE name = 'max_connections') mc
+ WHERE state = 'active'
+ GROUP BY max_conn;
+
+ -- Check table bloat after burst writes
+ SELECT relname, n_live_tup, n_dead_tup,
+ round(100.0 * n_dead_tup / nullif(n_live_tup + n_dead_tup, 0), 1) as dead_pct
+ FROM pg_stat_user_tables
+ WHERE n_live_tup > 1000
+ ORDER BY n_dead_tup DESC LIMIT 10;
+ ```
+
+ ### Recommended Settings
+
+ | Parameter | Default | Recommended | Why |
+ |-----------|---------|-------------|-----|
+ | `shared_buffers` | 128MB | 25% of RAM (256MB–1GB) | Cache hot pages — `jobs_attributes` partitions are read/written constantly |
+ | `work_mem` | 4MB | 16MB | Workflow queries join across partitions; larger sort memory avoids disk spills |
+ | `maintenance_work_mem` | 64MB | 128MB–256MB | Speeds VACUUM on large `jobs_attributes` tables after burst writes |
+ | `wal_buffers` | -1 (auto) | 16MB | Write-heavy workloads saturate the default 8MB WAL buffer |
+ | `max_wal_size` | 1GB | 1GB–2GB | Prevents excessive checkpointing during sustained write bursts |
+ | `checkpoint_completion_target` | 0.9 | 0.9 | Spread checkpoint I/O over time — already optimal |
+ | `effective_cache_size` | 4GB | 50–75% of RAM | Query planner hint — tells Postgres how much OS cache to expect |
+ | `synchronous_commit` | on | off (dev/staging) | Trades durability for 2–5x write throughput. WAL is still written; only fsync is deferred. Acceptable for dev and staging. **Keep `on` in production** unless you understand the trade-off. |
+ | `max_connections` | 100 | 200 | HotMesh uses connection-per-worker; concurrent workflows can exhaust 100 connections |
+
+ ### Docker Compose Configuration
+
+ ```yaml
+ postgres:
+ image: postgres:16
+ command:
+ - postgres
+ - -c
+ - shared_buffers=256MB
+ - -c
+ - work_mem=16MB
+ - -c
+ - maintenance_work_mem=128MB
+ - -c
+ - wal_buffers=16MB
+ - -c
+ - max_wal_size=1GB
+ - -c
+ - checkpoint_completion_target=0.9
+ - -c
+ - effective_cache_size=512MB
+ - -c
+ - synchronous_commit=off
+ - -c
+ - max_connections=200
+ shm_size: 512m # Required: shared_buffers > 128MB needs larger /dev/shm
+ ```
+
+ The `shm_size` setting is critical — Docker defaults to 64MB for `/dev/shm`, but `shared_buffers=256MB` requires at least that much shared memory. Without it, Postgres will fail to start or silently fall back to smaller buffers.
+
+ ### Production (RDS / Cloud SQL)
+
+ For managed databases, apply the same parameters through parameter groups:
+
+ **AWS RDS:**
+ ```
+ # Custom parameter group
+ shared_buffers = {DBInstanceClassMemory/4}
+ work_mem = 16384 # 16MB in KB
+ maintenance_work_mem = 262144
+ wal_buffers = 16384
+ max_wal_size = 2048 # 2GB in MB
+ synchronous_commit = on # Keep on for production
+ max_connections = 200
+ ```
+
+ **GCP Cloud SQL:**
+ ```
+ # Database flags
+ shared_buffers: 25% of instance RAM (auto-tuned by Cloud SQL)
+ work_mem: 16MB
+ maintenance_work_mem: 256MB
+ max_wal_size: 2GB
+ synchronous_commit: on
+ max_connections: 200
+ ```
+
+ ### Maintenance
+
+ After burst workloads, dead tuples accumulate in `jobs_attributes`. Autovacuum handles this, but for large bursts (10K+ workflows), consider:
+
+ ```sql
+ -- Manual VACUUM after a load test or batch run
+ VACUUM ANALYZE durable.jobs_attributes;
+ VACUUM ANALYZE durable.engine_streams;
+ ```
+
+ Long Tail includes a built-in maintenance cron that prunes completed workflow data. Configure it via the dashboard or API to keep table sizes manageable.
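The connection-saturation query in "Determining Your Profile" is easy to script around a load test. A minimal sketch using node-postgres — assuming the `pg` client is available in your tooling, which is not something this diff establishes:

```typescript
import { Client } from 'pg';

// Runs the same saturation check as the SQL above; threshold and connection string are illustrative.
async function checkConnectionPressure(connectionString: string): Promise<void> {
  const client = new Client({ connectionString });
  await client.connect();
  try {
    const { rows } = await client.query(`
      SELECT count(*)::int AS active,
             (SELECT setting::int FROM pg_settings WHERE name = 'max_connections') AS max_conn
      FROM pg_stat_activity
      WHERE state = 'active'
    `);
    const { active, max_conn } = rows[0];
    if (active > 0.8 * max_conn) {
      console.warn(`connection pressure: ${active}/${max_conn} active — consider raising max_connections`);
    }
  } finally {
    await client.end();
  }
}
```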
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@hotmeshio/long-tail",
- "version": "0.1.11",
+ "version": "0.1.12",
  "description": "Long Tail Workflows — Durable AI workflows with human-in-the-loop escalation. Powered by PostgreSQL.",
  "main": "./build/index.js",
  "types": "./build/index.d.ts",
@@ -59,7 +59,7 @@
  "author": "luke.birdeau@gmail.com",
  "license": "SEE LICENSE IN LICENSE",
  "dependencies": {
- "@anthropic-ai/sdk": "^0.82.0",
+ "@anthropic-ai/sdk": "^0.92.0",
  "@aws-sdk/client-s3": "^3.1017.0",
  "@hotmeshio/hotmesh": "^0.14.4",
  "@modelcontextprotocol/sdk": "^1.27.1",