@exaudeus/workrail 3.25.0 → 3.26.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/dist/cli/commands/index.d.ts +5 -0
  2. package/dist/cli/commands/index.js +12 -1
  3. package/dist/cli/commands/worktrain-await.d.ts +35 -0
  4. package/dist/cli/commands/worktrain-await.js +207 -0
  5. package/dist/cli/commands/worktrain-inbox.d.ts +23 -0
  6. package/dist/cli/commands/worktrain-inbox.js +82 -0
  7. package/dist/cli/commands/worktrain-init.d.ts +23 -0
  8. package/dist/cli/commands/worktrain-init.js +338 -0
  9. package/dist/cli/commands/worktrain-spawn.d.ts +28 -0
  10. package/dist/cli/commands/worktrain-spawn.js +106 -0
  11. package/dist/cli/commands/worktrain-tell.d.ts +25 -0
  12. package/dist/cli/commands/worktrain-tell.js +32 -0
  13. package/dist/cli-worktrain.d.ts +2 -0
  14. package/dist/cli-worktrain.js +169 -0
  15. package/dist/cli.js +13 -3
  16. package/dist/config/config-file.d.ts +2 -0
  17. package/dist/config/config-file.js +55 -0
  18. package/dist/daemon/agent-loop.d.ts +90 -0
  19. package/dist/daemon/agent-loop.js +214 -0
  20. package/dist/daemon/pi-mono-loader.d.ts +0 -5
  21. package/dist/daemon/pi-mono-loader.js +0 -64
  22. package/dist/daemon/soul-template.d.ts +2 -0
  23. package/dist/daemon/soul-template.js +22 -0
  24. package/dist/daemon/workflow-runner.d.ts +24 -2
  25. package/dist/daemon/workflow-runner.js +244 -120
  26. package/dist/manifest.json +147 -51
  27. package/dist/mcp/output-schemas.d.ts +154 -154
  28. package/dist/mcp/transports/bridge-entry.js +20 -2
  29. package/dist/mcp/transports/bridge-events.d.ts +34 -0
  30. package/dist/mcp/transports/bridge-events.js +24 -0
  31. package/dist/mcp/transports/fatal-exit.d.ts +5 -0
  32. package/dist/mcp/transports/fatal-exit.js +82 -0
  33. package/dist/mcp/transports/http-entry.js +3 -0
  34. package/dist/mcp/transports/stdio-entry.js +3 -7
  35. package/dist/mcp/v2/tools.d.ts +7 -7
  36. package/dist/trigger/delivery-action.d.ts +37 -0
  37. package/dist/trigger/delivery-action.js +204 -0
  38. package/dist/trigger/delivery-client.d.ts +11 -0
  39. package/dist/trigger/delivery-client.js +27 -0
  40. package/dist/trigger/trigger-listener.d.ts +2 -0
  41. package/dist/trigger/trigger-listener.js +12 -2
  42. package/dist/trigger/trigger-router.d.ts +8 -2
  43. package/dist/trigger/trigger-router.js +164 -6
  44. package/dist/trigger/trigger-store.d.ts +11 -3
  45. package/dist/trigger/trigger-store.js +254 -13
  46. package/dist/trigger/types.d.ts +24 -0
  47. package/dist/trigger/types.js +4 -0
  48. package/dist/v2/durable-core/schemas/execution-snapshot/blocked-snapshot.d.ts +22 -22
  49. package/dist/v2/durable-core/schemas/execution-snapshot/execution-snapshot.v1.d.ts +114 -114
  50. package/dist/v2/durable-core/schemas/export-bundle/index.d.ts +454 -454
  51. package/dist/v2/durable-core/schemas/session/blockers.d.ts +14 -14
  52. package/dist/v2/durable-core/schemas/session/events.d.ts +93 -93
  53. package/dist/v2/durable-core/schemas/session/gaps.d.ts +2 -2
  54. package/dist/v2/durable-core/schemas/session/validation-event.d.ts +4 -4
  55. package/dist/v2/usecases/console-routes.js +33 -3
  56. package/package.json +6 -4
  57. package/spec/workflow-tags.json +1 -0
  58. package/workflows/classify-task-workflow.json +68 -0
  59. package/workflows/coding-task-workflow-agentic.lean.v2.json +43 -13
@@ -0,0 +1,22 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.DAEMON_SOUL_TEMPLATE = exports.DAEMON_SOUL_DEFAULT = void 0;
4
+ exports.DAEMON_SOUL_DEFAULT = `\
5
+ - Write code that follows the patterns already established in the codebase
6
+ - Never skip tests. Run existing tests before and after changes
7
+ - Prefer small, focused changes over large rewrites
8
+ - If a step asks you to write code, write actual code -- do not write pseudocode or placeholders
9
+ - Commit your work when you complete a logical unit`;
10
+ exports.DAEMON_SOUL_TEMPLATE = `\
11
+ # WorkRail Daemon Soul
12
+ #
13
+ # This file is injected into every WorkRail Auto daemon session system prompt under
14
+ # "## Agent Rules and Philosophy". Edit it to customize the agent's behavior for
15
+ # your environment: coding conventions, commit style, tool preferences, etc.
16
+ #
17
+ # Changes take effect on the next daemon session -- no restart required.
18
+ #
19
+ # The defaults below reflect general best practices. Override them freely.
20
+
21
+ ${exports.DAEMON_SOUL_DEFAULT}
22
+ `;
@@ -1,8 +1,10 @@
1
1
  import 'reflect-metadata';
2
+ import type { AgentTool } from "./agent-loop.js";
2
3
  import type { V2ToolContext } from '../mcp/types.js';
3
4
  import type { DaemonRegistry } from '../v2/infra/in-memory/daemon-registry/index.js';
5
+ import type { V2StartWorkflowOutputSchema } from '../mcp/output-schemas.js';
4
6
  export declare const DAEMON_SESSIONS_DIR: string;
5
- export declare const DAEMON_SOUL_DEFAULT = "- Write code that follows the patterns already established in the codebase\n- Never skip tests. Run existing tests before and after changes\n- Prefer small, focused changes over large rewrites\n- If a step asks you to write code, write actual code -- do not write pseudocode or placeholders\n- Commit your work when you complete a logical unit";
7
+ export { DAEMON_SOUL_DEFAULT, DAEMON_SOUL_TEMPLATE } from './soul-template.js';
6
8
  export interface WorkflowTrigger {
7
9
  readonly workflowId: string;
8
10
  readonly goal: string;
@@ -11,12 +13,17 @@ export interface WorkflowTrigger {
11
13
  readonly referenceUrls?: readonly string[];
12
14
  readonly agentConfig?: {
13
15
  readonly model?: string;
16
+ readonly maxSessionMinutes?: number;
17
+ readonly maxTurns?: number;
14
18
  };
19
+ readonly _preAllocatedStartResponse?: import('zod').infer<typeof V2StartWorkflowOutputSchema>;
20
+ readonly soulFile?: string;
15
21
  }
16
22
  export interface WorkflowRunSuccess {
17
23
  readonly _tag: 'success';
18
24
  readonly workflowId: string;
19
25
  readonly stopReason: string;
26
+ readonly lastStepNotes?: string;
20
27
  }
21
28
  export interface WorkflowRunError {
22
29
  readonly _tag: 'error';
@@ -24,7 +31,20 @@ export interface WorkflowRunError {
24
31
  readonly message: string;
25
32
  readonly stopReason: string;
26
33
  }
27
- export type WorkflowRunResult = WorkflowRunSuccess | WorkflowRunError;
34
+ export interface WorkflowRunTimeout {
35
+ readonly _tag: 'timeout';
36
+ readonly workflowId: string;
37
+ readonly reason: 'wall_clock' | 'max_turns';
38
+ readonly message: string;
39
+ readonly stopReason: string;
40
+ }
41
+ export interface WorkflowDeliveryFailed {
42
+ readonly _tag: 'delivery_failed';
43
+ readonly workflowId: string;
44
+ readonly stopReason: string;
45
+ readonly deliveryError: string;
46
+ }
47
+ export type WorkflowRunResult = WorkflowRunSuccess | WorkflowRunError | WorkflowRunTimeout | WorkflowDeliveryFailed;
28
48
  export interface OrphanedSession {
29
49
  readonly sessionId: string;
30
50
  readonly continueToken: string;
@@ -37,5 +57,7 @@ export declare function readDaemonSessionState(sessionId: string): Promise<{
37
57
  } | null>;
38
58
  export declare function readAllDaemonSessions(sessionsDir?: string): Promise<OrphanedSession[]>;
39
59
  export declare function runStartupRecovery(sessionsDir?: string): Promise<void>;
60
+ export declare function makeBashTool(workspacePath: string, schemas: Record<string, any>): AgentTool;
61
+ export declare function buildSessionRecap(notes: readonly string[]): string;
40
62
  export declare function buildSystemPrompt(trigger: WorkflowTrigger, sessionState: string, soulContent: string, workspaceContext: string | null): string;
41
63
  export declare function runWorkflow(trigger: WorkflowTrigger, ctx: V2ToolContext, apiKey: string, daemonRegistry?: DaemonRegistry): Promise<WorkflowRunResult>;
@@ -32,11 +32,16 @@ var __importStar = (this && this.__importStar) || (function () {
32
32
  return result;
33
33
  };
34
34
  })();
35
+ var __importDefault = (this && this.__importDefault) || function (mod) {
36
+ return (mod && mod.__esModule) ? mod : { "default": mod };
37
+ };
35
38
  Object.defineProperty(exports, "__esModule", { value: true });
36
- exports.DAEMON_SOUL_DEFAULT = exports.DAEMON_SESSIONS_DIR = void 0;
39
+ exports.DAEMON_SOUL_TEMPLATE = exports.DAEMON_SOUL_DEFAULT = exports.DAEMON_SESSIONS_DIR = void 0;
37
40
  exports.readDaemonSessionState = readDaemonSessionState;
38
41
  exports.readAllDaemonSessions = readAllDaemonSessions;
39
42
  exports.runStartupRecovery = runStartupRecovery;
43
+ exports.makeBashTool = makeBashTool;
44
+ exports.buildSessionRecap = buildSessionRecap;
40
45
  exports.buildSystemPrompt = buildSystemPrompt;
41
46
  exports.runWorkflow = runWorkflow;
42
47
  require("reflect-metadata");
@@ -46,12 +51,19 @@ const os = __importStar(require("node:os"));
46
51
  const node_child_process_1 = require("node:child_process");
47
52
  const node_util_1 = require("node:util");
48
53
  const node_crypto_1 = require("node:crypto");
49
- const pi_mono_loader_js_1 = require("./pi-mono-loader.js");
54
+ const sdk_1 = __importDefault(require("@anthropic-ai/sdk"));
55
+ const bedrock_sdk_1 = require("@anthropic-ai/bedrock-sdk");
56
+ const agent_loop_js_1 = require("./agent-loop.js");
50
57
  const start_js_1 = require("../mcp/handlers/v2-execution/start.js");
51
58
  const index_js_1 = require("../mcp/handlers/v2-execution/index.js");
59
+ const v2_token_ops_js_1 = require("../mcp/handlers/v2-token-ops.js");
60
+ const index_js_2 = require("../v2/durable-core/ids/index.js");
61
+ const node_outputs_js_1 = require("../v2/projections/node-outputs.js");
52
62
  const execAsync = (0, node_util_1.promisify)(node_child_process_1.exec);
53
63
  const BASH_TIMEOUT_MS = 5 * 60 * 1000;
54
- const WORKFLOW_TIMEOUT_MS = 30 * 60 * 1000;
64
+ const MAX_SESSION_RECAP_NOTES = 3;
65
+ const MAX_SESSION_NOTE_CHARS = 800;
66
+ const DEFAULT_SESSION_TIMEOUT_MINUTES = 30;
55
67
  exports.DAEMON_SESSIONS_DIR = path.join(os.homedir(), '.workrail', 'daemon-sessions');
56
68
  const MAX_ORPHAN_AGE_MS = 2 * 60 * 60 * 1000;
57
69
  const WORKRAIL_DIR = path.join(os.homedir(), '.workrail');
@@ -62,25 +74,10 @@ const WORKSPACE_CONTEXT_CANDIDATE_PATHS = [
62
74
  'AGENTS.md',
63
75
  '.github/AGENTS.md',
64
76
  ];
65
- exports.DAEMON_SOUL_DEFAULT = `\
66
- - Write code that follows the patterns already established in the codebase
67
- - Never skip tests. Run existing tests before and after changes
68
- - Prefer small, focused changes over large rewrites
69
- - If a step asks you to write code, write actual code -- do not write pseudocode or placeholders
70
- - Commit your work when you complete a logical unit`;
71
- const DAEMON_SOUL_TEMPLATE = `\
72
- # WorkRail Daemon Soul
73
- #
74
- # This file is injected into every WorkRail Auto daemon session system prompt under
75
- # "## Agent Rules and Philosophy". Edit it to customize the agent's behavior for
76
- # your environment: coding conventions, commit style, tool preferences, etc.
77
- #
78
- # Changes take effect on the next daemon session -- no restart required.
79
- #
80
- # The defaults below reflect general best practices. Override them freely.
81
-
82
- ${exports.DAEMON_SOUL_DEFAULT}
83
- `;
77
+ const soul_template_js_1 = require("./soul-template.js");
78
+ var soul_template_js_2 = require("./soul-template.js");
79
+ Object.defineProperty(exports, "DAEMON_SOUL_DEFAULT", { enumerable: true, get: function () { return soul_template_js_2.DAEMON_SOUL_DEFAULT; } });
80
+ Object.defineProperty(exports, "DAEMON_SOUL_TEMPLATE", { enumerable: true, get: function () { return soul_template_js_2.DAEMON_SOUL_TEMPLATE; } });
84
81
  async function persistTokens(sessionId, continueToken, checkpointToken) {
85
82
  await fs.mkdir(exports.DAEMON_SESSIONS_DIR, { recursive: true });
86
83
  const sessionPath = path.join(exports.DAEMON_SESSIONS_DIR, `${sessionId}.json`);
@@ -186,8 +183,8 @@ async function clearStrayTmpFiles(sessionsDir) {
186
183
  }
187
184
  }
188
185
  }
189
- async function loadDaemonSoul() {
190
- const soulPath = path.join(WORKRAIL_DIR, 'daemon-soul.md');
186
+ async function loadDaemonSoul(resolvedPath) {
187
+ const soulPath = resolvedPath ?? path.join(WORKRAIL_DIR, 'daemon-soul.md');
191
188
  try {
192
189
  return await fs.readFile(soulPath, 'utf8');
193
190
  }
@@ -195,8 +192,8 @@ async function loadDaemonSoul() {
195
192
  const isEnoent = err instanceof Error && 'code' in err && err.code === 'ENOENT';
196
193
  if (isEnoent) {
197
194
  try {
198
- await fs.mkdir(WORKRAIL_DIR, { recursive: true });
199
- await fs.writeFile(soulPath, DAEMON_SOUL_TEMPLATE, 'utf8');
195
+ await fs.mkdir(path.dirname(soulPath), { recursive: true });
196
+ await fs.writeFile(soulPath, soul_template_js_1.DAEMON_SOUL_TEMPLATE, 'utf8');
200
197
  console.log(`[WorkflowRunner] Created daemon-soul.md template at ${soulPath}`);
201
198
  }
202
199
  catch (writeErr) {
@@ -206,7 +203,7 @@ async function loadDaemonSoul() {
206
203
  else {
207
204
  console.warn(`[WorkflowRunner] Warning: could not read daemon-soul.md: ${err instanceof Error ? err.message : String(err)}`);
208
205
  }
209
- return exports.DAEMON_SOUL_DEFAULT;
206
+ return soul_template_js_1.DAEMON_SOUL_DEFAULT;
210
207
  }
211
208
  }
212
209
  async function loadWorkspaceContext(workspacePath) {
@@ -249,37 +246,94 @@ async function loadWorkspaceContext(workspacePath) {
249
246
  console.log(`[WorkflowRunner] Injecting workspace context from: ${WORKSPACE_CONTEXT_CANDIDATE_PATHS.filter((p) => parts.some((part) => part.startsWith(`### ${p}`))).join(', ')}`);
250
247
  return combined;
251
248
  }
249
+ async function loadSessionNotes(continueToken, ctx) {
250
+ try {
251
+ const resolvedResult = await (0, v2_token_ops_js_1.parseContinueTokenOrFail)(continueToken, ctx.v2.tokenCodecPorts, ctx.v2.tokenAliasStore);
252
+ if (resolvedResult.isErr()) {
253
+ console.warn(`[WorkflowRunner] Warning: could not decode continueToken for session recap: ${resolvedResult.error.message}`);
254
+ return [];
255
+ }
256
+ const sessionId = (0, index_js_2.asSessionId)(resolvedResult.value.sessionId);
257
+ const loadResult = await ctx.v2.sessionStore.load(sessionId);
258
+ if (loadResult.isErr()) {
259
+ console.warn(`[WorkflowRunner] Warning: could not load session store for recap: ${loadResult.error.code} -- ${loadResult.error.message}`);
260
+ return [];
261
+ }
262
+ const projectionResult = (0, node_outputs_js_1.projectNodeOutputsV2)(loadResult.value.events);
263
+ if (projectionResult.isErr()) {
264
+ console.warn(`[WorkflowRunner] Warning: could not project session outputs for recap: ${projectionResult.error.code} -- ${projectionResult.error.message}`);
265
+ return [];
266
+ }
267
+ const allNotes = [];
268
+ for (const nodeView of Object.values(projectionResult.value.nodesById)) {
269
+ for (const output of nodeView.currentByChannel.recap) {
270
+ if (output.payload.payloadKind === 'notes') {
271
+ const note = output.payload.notesMarkdown.length > MAX_SESSION_NOTE_CHARS
272
+ ? output.payload.notesMarkdown.slice(0, MAX_SESSION_NOTE_CHARS) + '\n[truncated]'
273
+ : output.payload.notesMarkdown;
274
+ allNotes.push(note);
275
+ }
276
+ }
277
+ }
278
+ return allNotes.slice(-MAX_SESSION_RECAP_NOTES);
279
+ }
280
+ catch (err) {
281
+ console.warn(`[WorkflowRunner] Warning: unexpected error loading session notes for recap: ${err instanceof Error ? err.message : String(err)}`);
282
+ return [];
283
+ }
284
+ }
252
285
  let _schemas = null;
253
- async function getSchemas() {
286
+ function getSchemas() {
254
287
  if (_schemas)
255
288
  return _schemas;
256
- const { Type } = await (0, pi_mono_loader_js_1.loadPiAi)();
257
289
  _schemas = {
258
- ContinueWorkflowParams: Type.Object({
259
- continueToken: Type.String({
260
- description: 'The continueToken from the previous start_workflow or continue_workflow call. Round-trip exactly as received.',
261
- }),
262
- intent: Type.Optional(Type.Union([Type.Literal('advance'), Type.Literal('rehydrate')], {
263
- description: 'advance: I completed this step. rehydrate: remind me what the current step is.',
264
- })),
265
- notesMarkdown: Type.Optional(Type.String({
266
- description: 'Notes on what you did in this step (10-30 lines, markdown).',
267
- })),
268
- context: Type.Optional(Type.Record(Type.String(), Type.Unknown(), {
269
- description: 'Updated context variables (only changed values).',
270
- })),
271
- }),
272
- BashParams: Type.Object({
273
- command: Type.String({ description: 'Shell command to execute' }),
274
- cwd: Type.Optional(Type.String({ description: 'Working directory for the command' })),
275
- }),
276
- ReadParams: Type.Object({
277
- filePath: Type.String({ description: 'Absolute path to the file to read' }),
278
- }),
279
- WriteParams: Type.Object({
280
- filePath: Type.String({ description: 'Absolute path to the file to write' }),
281
- content: Type.String({ description: 'Content to write to the file' }),
282
- }),
290
+ ContinueWorkflowParams: {
291
+ type: 'object',
292
+ properties: {
293
+ continueToken: {
294
+ type: 'string',
295
+ description: 'The continueToken from the previous start_workflow or continue_workflow call. Round-trip exactly as received.',
296
+ },
297
+ intent: {
298
+ type: 'string',
299
+ enum: ['advance', 'rehydrate'],
300
+ description: 'advance: I completed this step. rehydrate: remind me what the current step is.',
301
+ },
302
+ notesMarkdown: {
303
+ type: 'string',
304
+ description: 'Notes on what you did in this step (10-30 lines, markdown).',
305
+ },
306
+ context: {
307
+ type: 'object',
308
+ additionalProperties: true,
309
+ description: 'Updated context variables (only changed values).',
310
+ },
311
+ },
312
+ required: ['continueToken'],
313
+ },
314
+ BashParams: {
315
+ type: 'object',
316
+ properties: {
317
+ command: { type: 'string', description: 'Shell command to execute' },
318
+ cwd: { type: 'string', description: 'Working directory for the command' },
319
+ },
320
+ required: ['command'],
321
+ },
322
+ ReadParams: {
323
+ type: 'object',
324
+ properties: {
325
+ filePath: { type: 'string', description: 'Absolute path to the file to read' },
326
+ },
327
+ required: ['filePath'],
328
+ },
329
+ WriteParams: {
330
+ type: 'object',
331
+ properties: {
332
+ filePath: { type: 'string', description: 'Absolute path to the file to write' },
333
+ content: { type: 'string', description: 'Content to write to the file' },
334
+ },
335
+ required: ['filePath', 'content'],
336
+ },
283
337
  };
284
338
  return _schemas;
285
339
  }
@@ -288,7 +342,7 @@ function makeContinueWorkflowTool(sessionId, ctx, onAdvance, onComplete, schemas
288
342
  name: 'continue_workflow',
289
343
  description: 'Advance the WorkRail workflow to the next step. Call this after completing all work ' +
290
344
  'required by the current step. Include your notes in notesMarkdown.',
291
- parameters: schemas['ContinueWorkflowParams'],
345
+ inputSchema: schemas['ContinueWorkflowParams'],
292
346
  label: 'Continue Workflow',
293
347
  execute: async (_toolCallId, params) => {
294
348
  const result = await (0, index_js_1.executeContinueWorkflow)({
@@ -309,7 +363,7 @@ function makeContinueWorkflowTool(sessionId, ctx, onAdvance, onComplete, schemas
309
363
  await persistTokens(sessionId, continueToken, checkpointToken);
310
364
  }
311
365
  if (out.isComplete) {
312
- onComplete('Workflow session complete.');
366
+ onComplete(params.notesMarkdown);
313
367
  return {
314
368
  content: [{ type: 'text', text: 'Workflow complete. All steps have been executed.' }],
315
369
  details: out,
@@ -330,21 +384,44 @@ function makeContinueWorkflowTool(sessionId, ctx, onAdvance, onComplete, schemas
330
384
  function makeBashTool(workspacePath, schemas) {
331
385
  return {
332
386
  name: 'Bash',
333
- description: 'Execute a shell command. Throws on non-zero exit code. ' +
387
+ description: 'Execute a shell command. Throws on failure (non-zero exit with stderr, or exit code 2+). ' +
388
+ 'Exit code 1 with empty stderr is treated as "no match found" (standard grep semantics) and ' +
389
+ 'returns empty output without throwing. ' +
334
390
  `Maximum execution time: ${BASH_TIMEOUT_MS / 1000}s.`,
335
- parameters: schemas['BashParams'],
391
+ inputSchema: schemas['BashParams'],
336
392
  label: 'Bash',
337
393
  execute: async (_toolCallId, params) => {
338
394
  const cwd = params.cwd ?? workspacePath;
339
- const { stdout, stderr } = await execAsync(params.command, {
340
- cwd,
341
- timeout: BASH_TIMEOUT_MS,
342
- });
343
- const output = [stdout, stderr].filter(Boolean).join('\n');
344
- return {
345
- content: [{ type: 'text', text: output || '(no output)' }],
346
- details: { stdout, stderr },
347
- };
395
+ try {
396
+ const { stdout, stderr } = await execAsync(params.command, {
397
+ cwd,
398
+ timeout: BASH_TIMEOUT_MS,
399
+ });
400
+ const output = [stdout, stderr].filter(Boolean).join('\n');
401
+ return {
402
+ content: [{ type: 'text', text: output || '(no output)' }],
403
+ details: { stdout, stderr },
404
+ };
405
+ }
406
+ catch (err) {
407
+ const e = err;
408
+ const stdout = String(e.stdout ?? '');
409
+ const stderr = String(e.stderr ?? '');
410
+ const rawCode = e.code;
411
+ const signal = e.signal;
412
+ if (rawCode === 1 && !stderr.trim()) {
413
+ return {
414
+ content: [{ type: 'text', text: stdout || '(no output)' }],
415
+ details: { stdout, stderr },
416
+ };
417
+ }
418
+ const exitInfo = rawCode != null
419
+ ? `exit ${String(rawCode)}`
420
+ : signal
421
+ ? `signal ${String(signal)}`
422
+ : 'exit unknown';
423
+ throw new Error(`Command failed: ${params.command} (${exitInfo})\nSTDOUT:\n${stdout}\nSTDERR:\n${stderr}`);
424
+ }
348
425
  },
349
426
  };
350
427
  }
@@ -352,7 +429,7 @@ function makeReadTool(schemas) {
352
429
  return {
353
430
  name: 'Read',
354
431
  description: 'Read the contents of a file at the given absolute path.',
355
- parameters: schemas['ReadParams'],
432
+ inputSchema: schemas['ReadParams'],
356
433
  label: 'Read',
357
434
  execute: async (_toolCallId, params) => {
358
435
  const content = await fs.readFile(params.filePath, 'utf8');
@@ -367,7 +444,7 @@ function makeWriteTool(schemas) {
367
444
  return {
368
445
  name: 'Write',
369
446
  description: 'Write content to a file at the given absolute path. Creates parent directories if needed.',
370
- parameters: schemas['WriteParams'],
447
+ inputSchema: schemas['WriteParams'],
371
448
  label: 'Write',
372
449
  execute: async (_toolCallId, params) => {
373
450
  await fs.mkdir(path.dirname(params.filePath), { recursive: true });
@@ -379,6 +456,14 @@ function makeWriteTool(schemas) {
379
456
  },
380
457
  };
381
458
  }
459
+ function buildSessionRecap(notes) {
460
+ if (notes.length === 0)
461
+ return '';
462
+ const formattedNotes = notes
463
+ .map((note, i) => `### Prior step ${i + 1}\n${note}`)
464
+ .join('\n\n');
465
+ return `<workrail_session_state>\nThe following notes summarize prior steps from this session:\n\n${formattedNotes}\n</workrail_session_state>`;
466
+ }
382
467
  function buildSystemPrompt(trigger, sessionState, soulContent, workspaceContext) {
383
468
  const lines = [
384
469
  'You are WorkRail Auto, an autonomous agent that executes workflows step by step.',
@@ -428,57 +513,62 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry) {
428
513
  const sessionId = (0, node_crypto_1.randomUUID)();
429
514
  console.log(`[WorkflowRunner] Session started: sessionId=${sessionId} workflowId=${trigger.workflowId}`);
430
515
  daemonRegistry?.register(sessionId, trigger.workflowId);
431
- let model;
432
- try {
433
- const { getModel } = await (0, pi_mono_loader_js_1.loadPiAi)();
434
- if (trigger.agentConfig?.model) {
435
- const slashIdx = trigger.agentConfig.model.indexOf('/');
436
- if (slashIdx === -1) {
437
- throw new Error(`agentConfig.model must be in "provider/model-id" format, got: "${trigger.agentConfig.model}"`);
438
- }
439
- const provider = trigger.agentConfig.model.slice(0, slashIdx);
440
- const modelId = trigger.agentConfig.model.slice(slashIdx + 1);
441
- model = getModel(provider, modelId);
516
+ let agentClient;
517
+ let modelId;
518
+ if (trigger.agentConfig?.model) {
519
+ const slashIdx = trigger.agentConfig.model.indexOf('/');
520
+ if (slashIdx === -1) {
521
+ daemonRegistry?.unregister(sessionId, 'failed');
522
+ return {
523
+ _tag: 'error',
524
+ workflowId: trigger.workflowId,
525
+ message: `agentConfig.model must be in "provider/model-id" format, got: "${trigger.agentConfig.model}"`,
526
+ stopReason: 'error',
527
+ };
528
+ }
529
+ const provider = trigger.agentConfig.model.slice(0, slashIdx);
530
+ modelId = trigger.agentConfig.model.slice(slashIdx + 1);
531
+ agentClient = provider === 'amazon-bedrock' ? new bedrock_sdk_1.AnthropicBedrock() : new sdk_1.default({ apiKey });
532
+ }
533
+ else {
534
+ const usesBedrock = !!process.env['AWS_PROFILE'] || !!process.env['AWS_ACCESS_KEY_ID'];
535
+ if (usesBedrock) {
536
+ agentClient = new bedrock_sdk_1.AnthropicBedrock();
537
+ modelId = 'us.anthropic.claude-sonnet-4-6';
442
538
  }
443
539
  else {
444
- const usesBedrock = !!process.env['AWS_PROFILE'] || !!process.env['AWS_ACCESS_KEY_ID'];
445
- if (usesBedrock) {
446
- model = getModel('amazon-bedrock', 'us.anthropic.claude-sonnet-4-6');
447
- }
448
- else {
449
- model = getModel('anthropic', 'claude-sonnet-4-5');
450
- }
540
+ agentClient = new sdk_1.default({ apiKey });
541
+ modelId = 'claude-sonnet-4-5';
451
542
  }
452
543
  }
453
- catch (err) {
454
- daemonRegistry?.unregister(sessionId, 'failed');
455
- return {
456
- _tag: 'error',
457
- workflowId: trigger.workflowId,
458
- message: `Model not found: ${err instanceof Error ? err.message : String(err)}`,
459
- stopReason: 'error',
460
- };
461
- }
462
544
  let isComplete = false;
463
545
  let pendingSteerText = null;
546
+ let lastStepNotes;
464
547
  const onAdvance = (stepText, _continueToken) => {
465
548
  pendingSteerText = stepText;
466
549
  daemonRegistry?.heartbeat(sessionId);
467
550
  };
468
- const onComplete = (_notes) => {
551
+ const onComplete = (notes) => {
469
552
  isComplete = true;
553
+ lastStepNotes = notes;
470
554
  };
471
- const startResult = await (0, start_js_1.executeStartWorkflow)({ workflowId: trigger.workflowId, workspacePath: trigger.workspacePath, goal: trigger.goal }, ctx, { is_autonomous: 'true' });
472
- if (startResult.isErr()) {
473
- daemonRegistry?.unregister(sessionId, 'failed');
474
- return {
475
- _tag: 'error',
476
- workflowId: trigger.workflowId,
477
- message: `start_workflow failed: ${startResult.error.kind} -- ${JSON.stringify(startResult.error)}`,
478
- stopReason: 'error',
479
- };
555
+ let firstStep;
556
+ if (trigger._preAllocatedStartResponse !== undefined) {
557
+ firstStep = trigger._preAllocatedStartResponse;
558
+ }
559
+ else {
560
+ const startResult = await (0, start_js_1.executeStartWorkflow)({ workflowId: trigger.workflowId, workspacePath: trigger.workspacePath, goal: trigger.goal }, ctx, { is_autonomous: 'true' });
561
+ if (startResult.isErr()) {
562
+ daemonRegistry?.unregister(sessionId, 'failed');
563
+ return {
564
+ _tag: 'error',
565
+ workflowId: trigger.workflowId,
566
+ message: `start_workflow failed: ${startResult.error.kind} -- ${JSON.stringify(startResult.error)}`,
567
+ stopReason: 'error',
568
+ };
569
+ }
570
+ firstStep = startResult.value.response;
480
571
  }
481
- const firstStep = startResult.value.response;
482
572
  const startContinueToken = firstStep.continueToken ?? '';
483
573
  const startCheckpointToken = firstStep.checkpointToken ?? null;
484
574
  if (startContinueToken) {
@@ -489,36 +579,46 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry) {
489
579
  daemonRegistry?.unregister(sessionId, 'completed');
490
580
  return { _tag: 'success', workflowId: trigger.workflowId, stopReason: 'stop' };
491
581
  }
492
- const schemas = await getSchemas();
582
+ const schemas = getSchemas();
493
583
  const tools = [
494
584
  makeContinueWorkflowTool(sessionId, ctx, onAdvance, onComplete, schemas),
495
585
  makeBashTool(trigger.workspacePath, schemas),
496
586
  makeReadTool(schemas),
497
587
  makeWriteTool(schemas),
498
588
  ];
499
- const [soulContent, workspaceContext] = await Promise.all([
500
- loadDaemonSoul(),
589
+ const [soulContent, workspaceContext, sessionNotes] = await Promise.all([
590
+ loadDaemonSoul(trigger.soulFile),
501
591
  loadWorkspaceContext(trigger.workspacePath),
592
+ startContinueToken ? loadSessionNotes(startContinueToken, ctx) : Promise.resolve([]),
502
593
  ]);
594
+ const sessionState = buildSessionRecap(sessionNotes);
503
595
  const contextJson = trigger.context
504
596
  ? `\n\nTrigger context:\n\`\`\`json\n${JSON.stringify(trigger.context, null, 2)}\n\`\`\``
505
597
  : '';
506
598
  const initialPrompt = (firstStep.pending?.prompt ?? 'No step content available') +
507
599
  `\n\ncontinueToken: ${startContinueToken}` +
508
- contextJson;
509
- const { Agent } = await (0, pi_mono_loader_js_1.loadPiAgentCore)();
510
- const agent = new Agent({
511
- initialState: {
512
- systemPrompt: buildSystemPrompt(trigger, '', soulContent, workspaceContext),
513
- model,
514
- tools,
515
- },
516
- getApiKey: async (_provider) => apiKey ?? '',
600
+ contextJson +
601
+ '\n\nComplete all step work, then call continue_workflow with your notes to begin.';
602
+ const agent = new agent_loop_js_1.AgentLoop({
603
+ systemPrompt: buildSystemPrompt(trigger, sessionState, soulContent, workspaceContext),
604
+ modelId,
605
+ tools,
606
+ client: agentClient,
517
607
  toolExecution: 'sequential',
518
608
  });
609
+ const sessionTimeoutMs = (trigger.agentConfig?.maxSessionMinutes ?? DEFAULT_SESSION_TIMEOUT_MINUTES) * 60 * 1000;
610
+ const maxTurns = trigger.agentConfig?.maxTurns ?? 0;
611
+ let timeoutReason = null;
612
+ let turnCount = 0;
519
613
  const unsubscribe = agent.subscribe(async (event) => {
520
614
  if (event.type !== 'turn_end')
521
615
  return;
616
+ turnCount++;
617
+ if (maxTurns > 0 && turnCount >= maxTurns && timeoutReason === null) {
618
+ timeoutReason = 'max_turns';
619
+ agent.abort();
620
+ return;
621
+ }
522
622
  if (pendingSteerText !== null && !isComplete) {
523
623
  const text = pendingSteerText;
524
624
  pendingSteerText = null;
@@ -527,8 +627,16 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry) {
527
627
  });
528
628
  let stopReason = 'stop';
529
629
  let errorMessage;
630
+ let timeoutHandle;
530
631
  try {
531
- const timeoutPromise = new Promise((_, reject) => setTimeout(() => reject(new Error('Workflow timed out')), WORKFLOW_TIMEOUT_MS));
632
+ const timeoutPromise = new Promise((_, reject) => {
633
+ timeoutHandle = setTimeout(() => {
634
+ if (timeoutReason === null) {
635
+ timeoutReason = 'wall_clock';
636
+ }
637
+ reject(new Error('Workflow timed out'));
638
+ }, sessionTimeoutMs);
639
+ });
532
640
  await Promise.race([agent.prompt(buildUserMessage(initialPrompt)), timeoutPromise])
533
641
  .catch((err) => {
534
642
  agent.abort();
@@ -552,6 +660,21 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry) {
552
660
  }
553
661
  finally {
554
662
  unsubscribe();
663
+ if (timeoutHandle !== undefined)
664
+ clearTimeout(timeoutHandle);
665
+ }
666
+ if (timeoutReason !== null) {
667
+ daemonRegistry?.unregister(sessionId, 'failed');
668
+ const limitDescription = timeoutReason === 'wall_clock'
669
+ ? `${trigger.agentConfig?.maxSessionMinutes ?? DEFAULT_SESSION_TIMEOUT_MINUTES} minutes`
670
+ : `${trigger.agentConfig?.maxTurns} turns`;
671
+ return {
672
+ _tag: 'timeout',
673
+ workflowId: trigger.workflowId,
674
+ reason: timeoutReason,
675
+ message: `Workflow ${timeoutReason === 'wall_clock' ? 'timed out' : 'exceeded turn limit'} after ${limitDescription}`,
676
+ stopReason: 'aborted',
677
+ };
555
678
  }
556
679
  if (stopReason === 'error' || errorMessage) {
557
680
  daemonRegistry?.unregister(sessionId, 'failed');
@@ -569,5 +692,6 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry) {
569
692
  _tag: 'success',
570
693
  workflowId: trigger.workflowId,
571
694
  stopReason,
695
+ ...(lastStepNotes !== undefined ? { lastStepNotes } : {}),
572
696
  };
573
697
  }