@exaudeus/workrail 3.40.0 → 3.42.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105) hide show
  1. package/dist/cli/commands/init.js +0 -3
  2. package/dist/cli-worktrain.js +48 -11
  3. package/dist/cli.js +0 -18
  4. package/dist/config/app-config.d.ts +0 -16
  5. package/dist/config/app-config.js +0 -14
  6. package/dist/config/config-file.js +0 -3
  7. package/dist/console-ui/assets/index-DGj8EsFR.css +1 -0
  8. package/dist/console-ui/assets/index-DwfWMKvv.js +28 -0
  9. package/dist/console-ui/index.html +2 -2
  10. package/dist/context-assembly/deps.d.ts +8 -0
  11. package/dist/context-assembly/deps.js +2 -0
  12. package/dist/context-assembly/index.d.ts +6 -0
  13. package/dist/context-assembly/index.js +50 -0
  14. package/dist/context-assembly/infra.d.ts +3 -0
  15. package/dist/context-assembly/infra.js +154 -0
  16. package/dist/context-assembly/types.d.ts +30 -0
  17. package/dist/context-assembly/types.js +2 -0
  18. package/dist/coordinators/pr-review.d.ts +20 -1
  19. package/dist/coordinators/pr-review.js +189 -4
  20. package/dist/daemon/daemon-events.d.ts +9 -1
  21. package/dist/daemon/soul-template.d.ts +2 -2
  22. package/dist/daemon/soul-template.js +11 -1
  23. package/dist/daemon/workflow-runner.d.ts +14 -1
  24. package/dist/daemon/workflow-runner.js +406 -25
  25. package/dist/di/container.js +1 -25
  26. package/dist/di/tokens.d.ts +0 -3
  27. package/dist/di/tokens.js +0 -3
  28. package/dist/domain/execution/state.d.ts +6 -6
  29. package/dist/engine/engine-factory.js +0 -1
  30. package/dist/infrastructure/console-defaults.d.ts +1 -0
  31. package/dist/infrastructure/console-defaults.js +4 -0
  32. package/dist/infrastructure/session/index.d.ts +0 -1
  33. package/dist/infrastructure/session/index.js +1 -3
  34. package/dist/manifest.json +138 -122
  35. package/dist/mcp/handlers/session.d.ts +1 -0
  36. package/dist/mcp/handlers/session.js +61 -13
  37. package/dist/mcp/handlers/v2-workflow.d.ts +2 -2
  38. package/dist/mcp/output-schemas.d.ts +234 -234
  39. package/dist/mcp/server.js +1 -18
  40. package/dist/mcp/tools.d.ts +2 -2
  41. package/dist/mcp/transports/http-entry.js +0 -2
  42. package/dist/mcp/transports/stdio-entry.js +1 -2
  43. package/dist/mcp/types.d.ts +0 -2
  44. package/dist/mcp/v2/tools.d.ts +24 -24
  45. package/dist/trigger/daemon-console.d.ts +2 -0
  46. package/dist/trigger/daemon-console.js +1 -1
  47. package/dist/trigger/trigger-listener.d.ts +2 -0
  48. package/dist/trigger/trigger-listener.js +3 -1
  49. package/dist/trigger/trigger-router.d.ts +4 -3
  50. package/dist/trigger/trigger-router.js +4 -3
  51. package/dist/trigger/trigger-store.js +17 -4
  52. package/dist/v2/durable-core/schemas/artifacts/assessment.d.ts +2 -2
  53. package/dist/v2/durable-core/schemas/artifacts/coordinator-signal.d.ts +2 -2
  54. package/dist/v2/durable-core/schemas/artifacts/loop-control.d.ts +6 -6
  55. package/dist/v2/durable-core/schemas/artifacts/review-verdict.d.ts +6 -6
  56. package/dist/v2/durable-core/schemas/compiled-workflow/index.d.ts +56 -56
  57. package/dist/v2/durable-core/schemas/execution-snapshot/blocked-snapshot.d.ts +83 -83
  58. package/dist/v2/durable-core/schemas/execution-snapshot/execution-snapshot.v1.d.ts +1024 -1024
  59. package/dist/v2/durable-core/schemas/export-bundle/index.d.ts +2336 -2336
  60. package/dist/v2/durable-core/schemas/session/dag-topology.d.ts +6 -6
  61. package/dist/v2/durable-core/schemas/session/events.d.ts +339 -339
  62. package/dist/v2/durable-core/schemas/session/gaps.d.ts +30 -30
  63. package/dist/v2/durable-core/schemas/session/manifest.d.ts +6 -6
  64. package/dist/v2/durable-core/schemas/session/outputs.d.ts +8 -8
  65. package/dist/v2/durable-core/schemas/session/validation-event.d.ts +3 -3
  66. package/dist/v2/usecases/console-routes.d.ts +2 -1
  67. package/dist/v2/usecases/console-routes.js +29 -5
  68. package/dist/v2/usecases/console-service.js +14 -0
  69. package/dist/v2/usecases/console-types.d.ts +1 -0
  70. package/docs/authoring.md +16 -16
  71. package/docs/design/context-assembly-design-candidates.md +199 -0
  72. package/docs/design/context-assembly-implementation-plan.md +211 -0
  73. package/docs/design/context-assembly-review-findings.md +112 -0
  74. package/docs/design/coordinator-message-queue-drain-plan.md +241 -0
  75. package/docs/design/coordinator-message-queue-drain-review.md +120 -0
  76. package/docs/design/coordinator-message-queue-drain.md +289 -0
  77. package/docs/design/shaping-workflow-external-research.md +119 -0
  78. package/docs/discovery/late-bound-goals-impl-plan.md +147 -0
  79. package/docs/discovery/late-bound-goals-review.md +82 -0
  80. package/docs/discovery/late-bound-goals.md +118 -0
  81. package/docs/discovery/steer-endpoint-design-candidates.md +288 -0
  82. package/docs/discovery/steer-endpoint-design-review-findings.md +104 -0
  83. package/docs/discovery/steer-endpoint-implementation-plan.md +284 -0
  84. package/docs/ideas/backlog.md +356 -0
  85. package/docs/ideas/design-candidates-console-session-tree-impl.md +64 -0
  86. package/docs/ideas/design-candidates-session-tree-view.md +196 -0
  87. package/docs/ideas/design-review-findings-console-session-tree-impl.md +75 -0
  88. package/docs/ideas/design-review-findings-session-tree-view.md +88 -0
  89. package/docs/ideas/implementation_plan_session_tree_view.md +238 -0
  90. package/package.json +2 -1
  91. package/spec/authoring-spec.json +16 -16
  92. package/spec/shape.schema.json +178 -0
  93. package/spec/workflow-tags.json +232 -47
  94. package/workflows/coding-task-workflow-agentic.json +491 -480
  95. package/workflows/wr.shaping.json +182 -0
  96. package/dist/console-ui/assets/index-8dh0Psu-.css +0 -1
  97. package/dist/console-ui/assets/index-CXWCAonr.js +0 -28
  98. package/dist/infrastructure/session/DashboardHeartbeat.d.ts +0 -8
  99. package/dist/infrastructure/session/DashboardHeartbeat.js +0 -39
  100. package/dist/infrastructure/session/DashboardLockRelease.d.ts +0 -2
  101. package/dist/infrastructure/session/DashboardLockRelease.js +0 -29
  102. package/dist/infrastructure/session/HttpServer.d.ts +0 -60
  103. package/dist/infrastructure/session/HttpServer.js +0 -912
  104. package/workflows/coding-task-workflow-agentic.lean.v2.json +0 -648
  105. package/workflows/coding-task-workflow-agentic.v2.json +0 -324
@@ -7,6 +7,11 @@ import type { V2StartWorkflowOutputSchema } from '../mcp/output-schemas.js';
7
7
  import type { DaemonEventEmitter } from './daemon-events.js';
8
8
  export declare const DAEMON_SESSIONS_DIR: string;
9
9
  export { DAEMON_SOUL_DEFAULT, DAEMON_SOUL_TEMPLATE } from './soul-template.js';
10
+ export type ReadFileState = {
11
+ content: string;
12
+ timestamp: number;
13
+ isPartialView: boolean;
14
+ };
10
15
  export interface WorkflowTrigger {
11
16
  readonly workflowId: string;
12
17
  readonly goal: string;
@@ -53,6 +58,7 @@ export interface WorkflowDeliveryFailed {
53
58
  }
54
59
  export type WorkflowRunResult = WorkflowRunSuccess | WorkflowRunError | WorkflowRunTimeout | WorkflowDeliveryFailed;
55
60
  export type ChildWorkflowRunResult = WorkflowRunSuccess | WorkflowRunError | WorkflowRunTimeout;
61
+ export type SteerRegistry = Map<string, (text: string) => void>;
56
62
  export interface OrphanedSession {
57
63
  readonly sessionId: string;
58
64
  readonly continueToken: string;
@@ -68,8 +74,15 @@ export declare function runStartupRecovery(sessionsDir?: string): Promise<void>;
68
74
  export declare function makeContinueWorkflowTool(sessionId: string, ctx: V2ToolContext, onAdvance: (nextStepText: string, continueToken: string) => void, onComplete: (notes: string | undefined, artifacts?: readonly unknown[]) => void, schemas: Record<string, any>, _executeContinueWorkflowFn?: typeof executeContinueWorkflow, emitter?: DaemonEventEmitter, workrailSessionId?: string | null): AgentTool;
69
75
  export declare function makeCompleteStepTool(sessionId: string, ctx: V2ToolContext, getCurrentToken: () => string, onAdvance: (nextStepText: string, continueToken: string) => void, onComplete: (notes: string | undefined, artifacts?: readonly unknown[]) => void, onTokenUpdate: (t: string) => void, schemas: Record<string, any>, _executeContinueWorkflowFn?: typeof executeContinueWorkflow, emitter?: DaemonEventEmitter, workrailSessionId?: string | null): AgentTool;
70
76
  export declare function makeBashTool(workspacePath: string, schemas: Record<string, any>, sessionId?: string, emitter?: DaemonEventEmitter, workrailSessionId?: string | null): AgentTool;
77
+ export declare function makeReadTool(readFileState: Map<string, ReadFileState>, schemas: Record<string, any>, sessionId?: string, emitter?: DaemonEventEmitter, workrailSessionId?: string | null): AgentTool;
78
+ export declare function makeWriteTool(readFileState: Map<string, ReadFileState>, schemas: Record<string, any>, sessionId?: string, emitter?: DaemonEventEmitter, workrailSessionId?: string | null): AgentTool;
79
+ export declare function makeGlobTool(workspacePath: string, schemas: Record<string, any>, sessionId?: string, emitter?: DaemonEventEmitter, workrailSessionId?: string | null): AgentTool;
80
+ export declare function makeGrepTool(workspacePath: string, schemas: Record<string, any>, sessionId?: string, emitter?: DaemonEventEmitter, workrailSessionId?: string | null): AgentTool;
81
+ export declare function makeEditTool(workspacePath: string, readFileState: Map<string, ReadFileState>, schemas: Record<string, any>, sessionId?: string, emitter?: DaemonEventEmitter, workrailSessionId?: string | null): AgentTool;
71
82
  export declare function makeSpawnAgentTool(sessionId: string, ctx: V2ToolContext, apiKey: string, thisWorkrailSessionId: string, currentDepth: number, maxDepth: number, runWorkflowFn: typeof runWorkflow, schemas: Record<string, any>, emitter?: DaemonEventEmitter): AgentTool;
72
83
  export declare function makeReportIssueTool(sessionId: string, emitter?: DaemonEventEmitter, workrailSessionId?: string | null, issuesDirOverride?: string, onIssueSummary?: (summary: string) => void): AgentTool;
84
+ export declare const DAEMON_SIGNALS_DIR: string;
85
+ export declare function makeSignalCoordinatorTool(sessionId: string, emitter?: DaemonEventEmitter, workrailSessionId?: string | null, signalsDirOverride?: string): AgentTool;
73
86
  export declare function buildSessionRecap(notes: readonly string[]): string;
74
87
  export declare function buildSystemPrompt(trigger: WorkflowTrigger, sessionState: string, soulContent: string, workspaceContext: string | null): string;
75
- export declare function runWorkflow(trigger: WorkflowTrigger, ctx: V2ToolContext, apiKey: string, daemonRegistry?: DaemonRegistry, emitter?: DaemonEventEmitter): Promise<WorkflowRunResult>;
88
+ export declare function runWorkflow(trigger: WorkflowTrigger, ctx: V2ToolContext, apiKey: string, daemonRegistry?: DaemonRegistry, emitter?: DaemonEventEmitter, steerRegistry?: SteerRegistry): Promise<WorkflowRunResult>;
@@ -36,15 +36,21 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
36
36
  return (mod && mod.__esModule) ? mod : { "default": mod };
37
37
  };
38
38
  Object.defineProperty(exports, "__esModule", { value: true });
39
- exports.DAEMON_SOUL_TEMPLATE = exports.DAEMON_SOUL_DEFAULT = exports.DAEMON_SESSIONS_DIR = void 0;
39
+ exports.DAEMON_SIGNALS_DIR = exports.DAEMON_SOUL_TEMPLATE = exports.DAEMON_SOUL_DEFAULT = exports.DAEMON_SESSIONS_DIR = void 0;
40
40
  exports.readDaemonSessionState = readDaemonSessionState;
41
41
  exports.readAllDaemonSessions = readAllDaemonSessions;
42
42
  exports.runStartupRecovery = runStartupRecovery;
43
43
  exports.makeContinueWorkflowTool = makeContinueWorkflowTool;
44
44
  exports.makeCompleteStepTool = makeCompleteStepTool;
45
45
  exports.makeBashTool = makeBashTool;
46
+ exports.makeReadTool = makeReadTool;
47
+ exports.makeWriteTool = makeWriteTool;
48
+ exports.makeGlobTool = makeGlobTool;
49
+ exports.makeGrepTool = makeGrepTool;
50
+ exports.makeEditTool = makeEditTool;
46
51
  exports.makeSpawnAgentTool = makeSpawnAgentTool;
47
52
  exports.makeReportIssueTool = makeReportIssueTool;
53
+ exports.makeSignalCoordinatorTool = makeSignalCoordinatorTool;
48
54
  exports.buildSessionRecap = buildSessionRecap;
49
55
  exports.buildSystemPrompt = buildSystemPrompt;
50
56
  exports.runWorkflow = runWorkflow;
@@ -54,6 +60,7 @@ const path = __importStar(require("node:path"));
54
60
  const os = __importStar(require("node:os"));
55
61
  const node_child_process_1 = require("node:child_process");
56
62
  const node_util_1 = require("node:util");
63
+ const tinyglobby_1 = require("tinyglobby");
57
64
  const node_crypto_1 = require("node:crypto");
58
65
  const sdk_1 = __importDefault(require("@anthropic-ai/sdk"));
59
66
  const bedrock_sdk_1 = require("@anthropic-ai/bedrock-sdk");
@@ -65,6 +72,7 @@ const index_js_2 = require("../v2/durable-core/ids/index.js");
65
72
  const node_outputs_js_1 = require("../v2/projections/node-outputs.js");
66
73
  const assert_never_js_1 = require("../runtime/assert-never.js");
67
74
  const execAsync = (0, node_util_1.promisify)(node_child_process_1.exec);
75
+ const execFileAsync = (0, node_util_1.promisify)(node_child_process_1.execFile);
68
76
  const BASH_TIMEOUT_MS = 5 * 60 * 1000;
69
77
  const MAX_SESSION_RECAP_NOTES = 3;
70
78
  const MAX_SESSION_NOTE_CHARS = 800;
@@ -77,6 +85,7 @@ exports.DAEMON_SESSIONS_DIR = path.join(os.homedir(), '.workrail', 'daemon-sessi
77
85
  const MAX_ORPHAN_AGE_MS = 2 * 60 * 60 * 1000;
78
86
  const WORKRAIL_DIR = path.join(os.homedir(), '.workrail');
79
87
  const WORKSPACE_CONTEXT_MAX_BYTES = 32 * 1024;
88
+ const MAX_ASSEMBLED_CONTEXT_BYTES = 8192;
80
89
  const WORKSPACE_CONTEXT_CANDIDATE_PATHS = [
81
90
  '.claude/CLAUDE.md',
82
91
  'CLAUDE.md',
@@ -365,7 +374,9 @@ function getSchemas() {
365
374
  ReadParams: {
366
375
  type: 'object',
367
376
  properties: {
368
- filePath: { type: 'string', description: 'Absolute path to the file to read' },
377
+ filePath: { type: 'string', description: 'Absolute path to the file to read. Content is returned in cat -n format: each line prefixed with its 1-indexed line number and a tab character.' },
378
+ offset: { type: 'number', description: '0-indexed line number to start reading from (inclusive). Omit to read from the beginning.' },
379
+ limit: { type: 'number', description: 'Maximum number of lines to return. Omit to read to end of file.' },
369
380
  },
370
381
  required: ['filePath'],
371
382
  },
@@ -377,6 +388,39 @@ function getSchemas() {
377
388
  },
378
389
  required: ['filePath', 'content'],
379
390
  },
391
+ GlobParams: {
392
+ type: 'object',
393
+ properties: {
394
+ pattern: { type: 'string', description: 'Glob pattern to match (e.g. "**/*.ts"). Supports standard glob syntax.' },
395
+ path: { type: 'string', description: 'Absolute path to search root. Defaults to the workspace root.' },
396
+ },
397
+ required: ['pattern'],
398
+ },
399
+ GrepParams: {
400
+ type: 'object',
401
+ properties: {
402
+ pattern: { type: 'string', description: 'Regular expression pattern to search for in file contents.' },
403
+ path: { type: 'string', description: 'Absolute path to search in. Defaults to the workspace root.' },
404
+ glob: { type: 'string', description: 'Glob pattern to restrict which files are searched (e.g. "*.ts").' },
405
+ type: { type: 'string', description: 'File type filter for ripgrep (e.g. "ts", "js", "py").' },
406
+ output_mode: { type: 'string', enum: ['content', 'files_with_matches', 'count'], description: 'Output mode. "files_with_matches": only file paths (default). "content": matching lines with context. "count": match counts per file.' },
407
+ head_limit: { type: 'number', description: 'Maximum number of output lines to return. Default: 250.' },
408
+ context: { type: 'number', description: 'Number of lines of context to show before and after each match (output_mode=content only).' },
409
+ '-i': { type: 'boolean', description: 'Case-insensitive search.' },
410
+ },
411
+ required: ['pattern'],
412
+ },
413
+ EditParams: {
414
+ type: 'object',
415
+ properties: {
416
+ file_path: { type: 'string', description: 'Absolute path to the file to edit. The file must have been read in this session via the Read tool.' },
417
+ old_string: { type: 'string', description: 'Exact string to find and replace. Must appear exactly once in the file (or use replace_all=true for multiple occurrences). Do NOT include line-number prefixes from Read output.' },
418
+ new_string: { type: 'string', description: 'Replacement string. Must differ from old_string.' },
419
+ replace_all: { type: 'boolean', description: 'Replace all occurrences of old_string. Default: false (fails if more than one match).' },
420
+ },
421
+ required: ['file_path', 'old_string', 'new_string'],
422
+ additionalProperties: false,
423
+ },
380
424
  SpawnAgentParams: {
381
425
  type: 'object',
382
426
  properties: {
@@ -648,37 +692,282 @@ function makeBashTool(workspacePath, schemas, sessionId, emitter, workrailSessio
648
692
  },
649
693
  };
650
694
  }
651
- function makeReadTool(schemas, sessionId, emitter, workrailSessionId) {
695
+ function findActualString(fileContent, oldString) {
696
+ if (fileContent.includes(oldString))
697
+ return oldString;
698
+ const normalized = oldString
699
+ .replace(/[\u2018\u2019]/g, "'")
700
+ .replace(/[\u201C\u201D]/g, '"')
701
+ .replace(/\u2013/g, '-')
702
+ .replace(/\u2014/g, '--');
703
+ if (fileContent.includes(normalized))
704
+ return normalized;
705
+ return null;
706
+ }
707
+ const READ_SIZE_CAP_BYTES = 256 * 1024;
708
+ const GLOB_ALWAYS_EXCLUDE = ['**/node_modules/**', '**/.git/**', '**/dist/**', '**/build/**'];
709
+ function makeReadTool(readFileState, schemas, sessionId, emitter, workrailSessionId) {
652
710
  return {
653
711
  name: 'Read',
654
- description: 'Read the contents of a file at the given absolute path.',
712
+ description: 'Read the contents of a file at the given absolute path. ' +
713
+ 'Content is returned in cat -n format: each line is prefixed with its 1-indexed line number and a tab character (e.g. "1\\tline one\\n2\\tline two"). ' +
714
+ 'Use offset (0-indexed start line) and limit (max lines) to read a slice of a large file.',
655
715
  inputSchema: schemas['ReadParams'],
656
716
  label: 'Read',
657
717
  execute: async (_toolCallId, params) => {
718
+ const filePath = params.filePath;
658
719
  if (sessionId)
659
- emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'Read', summary: String(params.filePath).slice(0, 80), ...withWorkrailSession(workrailSessionId) });
660
- const content = await fs.readFile(params.filePath, 'utf8');
720
+ emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'Read', summary: filePath.slice(0, 80), ...withWorkrailSession(workrailSessionId) });
721
+ const devPaths = ['/dev/stdin', '/dev/tty', '/dev/zero', '/dev/random', '/dev/full', '/dev/urandom'];
722
+ if (devPaths.some(d => filePath === d)) {
723
+ throw new Error(`Refusing to read device path: ${filePath}`);
724
+ }
725
+ const stat = await fs.stat(filePath);
726
+ const offset = params.offset ?? 0;
727
+ const limit = params.limit;
728
+ const isPaginated = params.offset !== undefined || params.limit !== undefined;
729
+ if (!isPaginated && stat.size > READ_SIZE_CAP_BYTES) {
730
+ throw new Error(`File is too large to read at once (${stat.size} bytes, cap is ${READ_SIZE_CAP_BYTES} bytes). ` +
731
+ `Use offset and limit parameters to read a specific range of lines.`);
732
+ }
733
+ const rawContent = await fs.readFile(filePath, 'utf8');
734
+ const allLines = rawContent.split('\n');
735
+ const isPartialView = offset !== 0 || limit != null;
736
+ const slicedLines = limit != null ? allLines.slice(offset, offset + limit) : allLines.slice(offset);
737
+ const startLine = offset;
738
+ const formatted = slicedLines.map((l, i) => `${startLine + i + 1}\t${l}`).join('\n');
739
+ readFileState.set(filePath, { content: rawContent, timestamp: stat.mtimeMs, isPartialView });
661
740
  return {
662
- content: [{ type: 'text', text: content }],
663
- details: { filePath: params.filePath, length: content.length },
741
+ content: [{ type: 'text', text: formatted }],
742
+ details: { filePath, totalLines: allLines.length, returnedLines: slicedLines.length, offset, isPartialView },
664
743
  };
665
744
  },
666
745
  };
667
746
  }
668
- function makeWriteTool(schemas, sessionId, emitter, workrailSessionId) {
747
+ function makeWriteTool(readFileState, schemas, sessionId, emitter, workrailSessionId) {
669
748
  return {
670
749
  name: 'Write',
671
- description: 'Write content to a file at the given absolute path. Creates parent directories if needed.',
750
+ description: 'Write content to a file at the given absolute path. Creates parent directories if needed. ' +
751
+ 'For existing files: the file must have been read in this session and must not have changed on disk since then. ' +
752
+ 'For new files (path does not exist): no prior read is required.',
672
753
  inputSchema: schemas['WriteParams'],
673
754
  label: 'Write',
674
755
  execute: async (_toolCallId, params) => {
756
+ const filePath = params.filePath;
757
+ if (sessionId)
758
+ emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'Write', summary: filePath.slice(0, 80), ...withWorkrailSession(workrailSessionId) });
759
+ let existsOnDisk = false;
760
+ try {
761
+ await fs.access(filePath);
762
+ existsOnDisk = true;
763
+ }
764
+ catch {
765
+ }
766
+ if (existsOnDisk) {
767
+ const state = readFileState.get(filePath);
768
+ if (!state) {
769
+ throw new Error(`File has not been read in this session. Call Read first before writing to it: ${filePath}`);
770
+ }
771
+ const stat = await fs.stat(filePath);
772
+ if (stat.mtimeMs !== state.timestamp) {
773
+ throw new Error(`File has been modified since it was read. Re-read before writing: ${filePath}`);
774
+ }
775
+ }
776
+ await fs.mkdir(path.dirname(filePath), { recursive: true });
777
+ await fs.writeFile(filePath, params.content, 'utf8');
778
+ const newStat = await fs.stat(filePath);
779
+ readFileState.set(filePath, { content: params.content, timestamp: newStat.mtimeMs, isPartialView: false });
780
+ return {
781
+ content: [{ type: 'text', text: `Written ${params.content.length} bytes to ${filePath}` }],
782
+ details: { filePath, length: params.content.length },
783
+ };
784
+ },
785
+ };
786
+ }
787
+ function makeGlobTool(workspacePath, schemas, sessionId, emitter, workrailSessionId) {
788
+ return {
789
+ name: 'Glob',
790
+ description: 'Find files matching a glob pattern. Returns newline-separated relative file paths, sorted by modification time descending. ' +
791
+ 'node_modules, .git, dist, and build directories are always excluded. ' +
792
+ 'Results are capped at 100 files.',
793
+ inputSchema: schemas['GlobParams'],
794
+ label: 'Glob',
795
+ execute: async (_toolCallId, params) => {
796
+ const pattern = params.pattern;
797
+ const searchRoot = params.path ?? workspacePath;
675
798
  if (sessionId)
676
- emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'Write', summary: String(params.filePath).slice(0, 80), ...withWorkrailSession(workrailSessionId) });
677
- await fs.mkdir(path.dirname(params.filePath), { recursive: true });
678
- await fs.writeFile(params.filePath, params.content, 'utf8');
799
+ emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'Glob', summary: pattern.slice(0, 80), ...withWorkrailSession(workrailSessionId) });
800
+ const GLOB_LIMIT = 100;
801
+ let paths;
802
+ try {
803
+ paths = await (0, tinyglobby_1.glob)(pattern, {
804
+ cwd: searchRoot,
805
+ ignore: GLOB_ALWAYS_EXCLUDE,
806
+ absolute: false,
807
+ });
808
+ }
809
+ catch {
810
+ paths = [];
811
+ }
812
+ const withMtimes = await Promise.all(paths.map(async (p) => {
813
+ try {
814
+ const stat = await fs.stat(path.join(searchRoot, p));
815
+ return { p, mtime: stat.mtimeMs };
816
+ }
817
+ catch {
818
+ return { p, mtime: 0 };
819
+ }
820
+ }));
821
+ withMtimes.sort((a, b) => b.mtime - a.mtime);
822
+ const sorted = withMtimes.map(x => x.p);
823
+ const truncated = sorted.length > GLOB_LIMIT;
824
+ const result = sorted.slice(0, GLOB_LIMIT);
825
+ let text = result.join('\n');
826
+ if (truncated) {
827
+ text += '\n[Results truncated at 100 files]';
828
+ }
679
829
  return {
680
- content: [{ type: 'text', text: `Written ${params.content.length} bytes to ${params.filePath}` }],
681
- details: { filePath: params.filePath, length: params.content.length },
830
+ content: [{ type: 'text', text: text || '(no matches)' }],
831
+ details: { pattern, searchRoot, matchCount: sorted.length, truncated },
832
+ };
833
+ },
834
+ };
835
+ }
836
+ function makeGrepTool(workspacePath, schemas, sessionId, emitter, workrailSessionId) {
837
+ return {
838
+ name: 'Grep',
839
+ description: 'Search file contents using ripgrep (rg). Fast regex search with optional context lines, file-type filtering, and case-insensitive mode. ' +
840
+ 'output_mode: "files_with_matches" (default) returns only file paths; "content" returns matching lines; "count" returns match counts per file. ' +
841
+ 'node_modules and .git are always excluded.',
842
+ inputSchema: schemas['GrepParams'],
843
+ label: 'Grep',
844
+ execute: async (_toolCallId, params) => {
845
+ const pattern = params.pattern;
846
+ const searchPath = params.path ?? workspacePath;
847
+ const outputMode = params.output_mode ?? 'files_with_matches';
848
+ const headLimit = params.head_limit ?? 250;
849
+ if (sessionId)
850
+ emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'Grep', summary: pattern.slice(0, 80), ...withWorkrailSession(workrailSessionId) });
851
+ const args = [
852
+ '--hidden',
853
+ '--glob', '!node_modules',
854
+ '--glob', '!.git',
855
+ '--max-columns', '500',
856
+ ];
857
+ if (params['-i'])
858
+ args.push('-i');
859
+ if (params.glob) {
860
+ args.push('--glob', params.glob);
861
+ }
862
+ if (params.type) {
863
+ args.push('--type', params.type);
864
+ }
865
+ switch (outputMode) {
866
+ case 'files_with_matches':
867
+ args.push('--files-with-matches');
868
+ break;
869
+ case 'count':
870
+ args.push('--count');
871
+ break;
872
+ case 'content':
873
+ args.push('--vimgrep');
874
+ if (params.context != null) {
875
+ args.push('-C', String(params.context));
876
+ }
877
+ break;
878
+ }
879
+ args.push('--', pattern, searchPath);
880
+ let stdout;
881
+ try {
882
+ const result = await execFileAsync('rg', args, { cwd: workspacePath, maxBuffer: 10 * 1024 * 1024 });
883
+ stdout = result.stdout;
884
+ }
885
+ catch (err) {
886
+ const nodeErr = err;
887
+ if (nodeErr.code === 'ENOENT') {
888
+ throw new Error('ripgrep (rg) is not installed. Install it with: brew install ripgrep (macOS) or apt install ripgrep (Ubuntu/Debian).');
889
+ }
890
+ if (typeof nodeErr.code === 'number' && nodeErr.code === 1) {
891
+ return {
892
+ content: [{ type: 'text', text: '(no matches)' }],
893
+ details: { pattern, searchPath, outputMode },
894
+ };
895
+ }
896
+ throw new Error(`rg failed: ${nodeErr.message ?? String(err)}`);
897
+ }
898
+ const lines = stdout.split('\n').filter(l => l.length > 0);
899
+ const truncated = lines.length > headLimit;
900
+ let result = lines.slice(0, headLimit).join('\n');
901
+ if (truncated) {
902
+ result += `\n[Results truncated at ${headLimit} lines. Use a more specific pattern or increase head_limit.]`;
903
+ }
904
+ return {
905
+ content: [{ type: 'text', text: result || '(no matches)' }],
906
+ details: { pattern, searchPath, outputMode, lineCount: lines.length, truncated },
907
+ };
908
+ },
909
+ };
910
+ }
911
+ function makeEditTool(workspacePath, readFileState, schemas, sessionId, emitter, workrailSessionId) {
912
+ return {
913
+ name: 'Edit',
914
+ description: 'Perform an exact string replacement in a file. ' +
915
+ 'The file must have been read in this session via the Read tool. ' +
916
+ 'By default, old_string must appear exactly once; use replace_all=true to replace all occurrences. ' +
917
+ 'Do NOT include line-number prefixes (e.g. "1\\t") from Read output in old_string or new_string.',
918
+ inputSchema: schemas['EditParams'],
919
+ label: 'Edit',
920
+ execute: async (_toolCallId, params) => {
921
+ const rawFilePath = params.file_path;
922
+ const absoluteFilePath = path.isAbsolute(rawFilePath)
923
+ ? rawFilePath
924
+ : path.join(workspacePath, rawFilePath);
925
+ if (!absoluteFilePath.startsWith(workspacePath)) {
926
+ throw new Error(`Edit target is outside the workspace: ${rawFilePath}`);
927
+ }
928
+ const filePath = absoluteFilePath;
929
+ const oldString = params.old_string;
930
+ const newString = params.new_string;
931
+ const replaceAll = params.replace_all ?? false;
932
+ if (sessionId)
933
+ emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'Edit', summary: filePath.slice(0, 80), ...withWorkrailSession(workrailSessionId) });
934
+ if (oldString === newString) {
935
+ throw new Error('old_string and new_string are identical. No edit needed.');
936
+ }
937
+ const state = readFileState.get(filePath);
938
+ if (!state) {
939
+ throw new Error(`File has not been read in this session. Call Read first before editing: ${filePath}`);
940
+ }
941
+ let stat;
942
+ try {
943
+ stat = await fs.stat(filePath);
944
+ }
945
+ catch {
946
+ throw new Error(`File not found: ${filePath}. It may have been deleted after it was read.`);
947
+ }
948
+ if (stat.mtimeMs !== state.timestamp) {
949
+ throw new Error(`File has been modified since it was read. Re-read before editing: ${filePath}`);
950
+ }
951
+ const currentContent = await fs.readFile(filePath, 'utf8');
952
+ const actualString = findActualString(currentContent, oldString);
953
+ if (actualString === null) {
954
+ throw new Error(`String to replace not found in file. Make sure old_string exactly matches the file content ` +
955
+ `(do not include line-number prefixes from Read output): ${filePath}`);
956
+ }
957
+ const occurrences = currentContent.split(actualString).length - 1;
958
+ if (!replaceAll && occurrences > 1) {
959
+ throw new Error(`old_string appears ${occurrences} times in the file. ` +
960
+ `Provide a more specific string that matches exactly once, or set replace_all=true to replace all occurrences.`);
961
+ }
962
+ const updatedContent = replaceAll
963
+ ? currentContent.split(actualString).join(newString)
964
+ : currentContent.replace(actualString, newString);
965
+ await fs.writeFile(filePath, updatedContent, 'utf8');
966
+ const newStat = await fs.stat(filePath);
967
+ readFileState.set(filePath, { content: updatedContent, timestamp: newStat.mtimeMs, isPartialView: false });
968
+ return {
969
+ content: [{ type: 'text', text: `The file ${filePath} has been updated successfully.` }],
970
+ details: { filePath, occurrencesReplaced: occurrences },
682
971
  };
683
972
  },
684
973
  };
@@ -691,7 +980,8 @@ function makeSpawnAgentTool(sessionId, ctx, apiKey, thisWorkrailSessionId, curre
691
980
  'Use this when a step requires delegating a well-defined sub-task to a separate workflow. ' +
692
981
  'IMPORTANT: The parent session\'s time limit (maxSessionMinutes) keeps ticking while the child runs. ' +
693
982
  'Configure the parent with enough time to cover both its own work and the child\'s work. ' +
694
- 'Returns: { childSessionId, outcome: "success"|"error"|"timeout", notes: string }. ' +
983
+ 'Returns: { childSessionId, outcome: "success"|"error"|"timeout", notes: string, artifacts?: readonly unknown[] }. ' +
984
+ 'On success, artifacts contains the child session\'s final step artifacts if any were produced. ' +
695
985
  'Check outcome before using notes -- on error/timeout, notes contains the error message.',
696
986
  inputSchema: schemas['SpawnAgentParams'],
697
987
  label: 'Spawn Agent',
@@ -755,6 +1045,7 @@ function makeSpawnAgentTool(sessionId, ctx, apiKey, thisWorkrailSessionId, curre
755
1045
  childSessionId,
756
1046
  outcome: 'success',
757
1047
  notes: childResult.lastStepNotes ?? '(no notes from child session)',
1048
+ ...(childResult.lastStepArtifacts !== undefined ? { artifacts: childResult.lastStepArtifacts } : {}),
758
1049
  };
759
1050
  }
760
1051
  else if (childResult._tag === 'error') {
@@ -873,6 +1164,74 @@ function makeReportIssueTool(sessionId, emitter, workrailSessionId, issuesDirOve
873
1164
  },
874
1165
  };
875
1166
  }
1167
+ // Default on-disk location for per-session signal journals (~/.workrail/signals); overridable per tool instance.
+ exports.DAEMON_SIGNALS_DIR = path.join(os.homedir(), '.workrail', 'signals');
1168
/**
 * Append a single signal record to the session's JSONL journal file.
 *
 * Creates the signals directory if it does not yet exist, stamps the record
 * with the current epoch-milliseconds under `ts`, and appends it as one
 * newline-terminated JSON line to `<signalsDir>/<sessionId>.jsonl`.
 *
 * @param {string} signalsDir - Directory holding the per-session journal files.
 * @param {string} sessionId - Session identifier; used as the journal filename stem.
 * @param {object} record - Signal payload to persist (shallow-spread into the line).
 * @returns {Promise<void>} Resolves once the line has been appended.
 */
async function appendSignalAsync(signalsDir, sessionId, record) {
    await fs.mkdir(signalsDir, { recursive: true });
    const journalPath = path.join(signalsDir, `${sessionId}.jsonl`);
    const serializedLine = `${JSON.stringify({ ...record, ts: Date.now() })}\n`;
    await fs.appendFile(journalPath, serializedLine, 'utf8');
}
1174
/**
 * Build the `signal_coordinator` tool: lets a running agent emit a structured
 * mid-session signal (progress/finding/data_needed/approval_needed/blocked)
 * without advancing the workflow step.
 *
 * The signal is (a) appended best-effort to the per-session JSONL journal via
 * appendSignalAsync and (b) emitted on the daemon event emitter as a
 * `signal_emitted` event. Execution never blocks on persistence and never
 * throws for journal failures, so the session always continues.
 *
 * @param {string} sessionId - Runner session id the signal belongs to.
 * @param {{ emit: Function } | undefined} emitter - Optional daemon event emitter.
 * @param {string | null} workrailSessionId - WorkRail session id, included when known.
 * @param {string} [signalsDirOverride] - Journal directory override (tests); defaults to DAEMON_SIGNALS_DIR.
 * @returns {object} Tool definition consumed by the agent loop.
 */
function makeSignalCoordinatorTool(sessionId, emitter, workrailSessionId, signalsDirOverride) {
    const signalsDir = signalsDirOverride ?? exports.DAEMON_SIGNALS_DIR;
    return {
        name: 'signal_coordinator',
        description: 'Emit a structured mid-session signal to the coordinator WITHOUT advancing the workflow step. ' +
            'Use this to surface progress updates, intermediate findings, data requests, ' +
            'approval requests, or blocking conditions while the session continues. ' +
            'Always returns immediately -- fire-and-observe, never blocks. ' +
            'Signal kinds: "progress" (heartbeat, no data needed), "finding" (intermediate result), ' +
            '"data_needed" (request external data), "approval_needed" (request coordinator approval), ' +
            '"blocked" (cannot continue without coordinator intervention).',
        inputSchema: {
            type: 'object',
            properties: {
                signalKind: {
                    type: 'string',
                    enum: ['progress', 'finding', 'data_needed', 'approval_needed', 'blocked'],
                    description: 'The kind of signal to emit.',
                },
                payload: {
                    type: 'object',
                    additionalProperties: true,
                    description: 'Structured data accompanying the signal. Pass {} for progress signals.',
                },
            },
            required: ['signalKind', 'payload'],
            additionalProperties: false,
        },
        label: 'signal_coordinator',
        execute: async (_toolCallId, params) => {
            // Short random id so coordinator-side consumers can correlate journal lines and events.
            const signalId = 'sig_' + (0, node_crypto_1.randomUUID)().replace(/-/g, '').slice(0, 8);
            const signalKind = String(params.signalKind ?? 'progress');
            // Defensive normalization: only accept a plain object payload; anything else becomes {}.
            const payload = (typeof params.payload === 'object' && params.payload !== null && !Array.isArray(params.payload))
                ? params.payload
                : {};
            console.log(`[WorkflowRunner] Tool: signal_coordinator sessionId=${sessionId} signalKind=${signalKind} signalId=${signalId}`);
            const record = {
                signalId,
                sessionId,
                ...(workrailSessionId != null ? { workrailSessionId } : {}),
                signalKind,
                payload,
            };
            // Best-effort persistence: never block or crash the session on a journal write
            // failure, but don't hide it either -- a silent catch would mask disk problems.
            void appendSignalAsync(signalsDir, sessionId, record).catch((err) => {
                console.warn(`[WorkflowRunner] signal_coordinator: failed to persist signal ${signalId}: ${err instanceof Error ? err.message : String(err)}`);
            });
            emitter?.emit({
                kind: 'signal_emitted',
                sessionId,
                signalKind,
                signalId,
                payload,
                ...(workrailSessionId != null ? { workrailSessionId } : {}),
            });
            const result = { status: 'recorded', signalId };
            return {
                content: [{ type: 'text', text: JSON.stringify(result) }],
                details: result,
            };
        },
    };
}
876
1235
  const BASE_SYSTEM_PROMPT = `\
877
1236
  You are WorkRail Auto, an autonomous agent that executes workflows step by step. You are running unattended -- there is no user watching. Your entire job is to faithfully complete the current workflow.
878
1237
 
@@ -900,6 +1259,7 @@ Good pattern: "Question: Should I check the middleware? Answer: The workflow ste
900
1259
  - \`Write\`: Write files.
901
1260
  - \`report_issue\`: Record a structured issue, error, or unexpected behavior. Call this AND complete_step (unless fatal). Does not stop the session -- it creates a record for the auto-fix coordinator.
902
1261
  - \`spawn_agent\`: Delegate a sub-task to a child WorkRail session. BLOCKS until the child completes. Returns \`{ childSessionId, outcome: "success"|"error"|"timeout", notes: string }\`. Always check \`outcome\` before using \`notes\`. IMPORTANT: your session's time limit (maxSessionMinutes) keeps running while the child executes -- ensure your parent session has enough time for both your work AND the child's work. Maximum spawn depth is 3 by default (configurable). Use only when a step explicitly asks for delegation or when a clearly separable sub-task would benefit from its own WorkRail audit trail.
1262
+ - \`signal_coordinator\`: Emit a structured mid-session signal to the coordinator WITHOUT advancing the workflow step. Use when the step asks you to surface a finding, request data, request approval, or report a blocking condition. Always returns immediately -- fire-and-observe. Signal kinds: "progress", "finding", "data_needed", "approval_needed", "blocked".
903
1263
 
904
1264
  ## Execution contract
905
1265
  1. Read the step carefully. Do ALL the work the step asks for.
@@ -948,6 +1308,16 @@ function buildSystemPrompt(trigger, sessionState, soulContent, workspaceContext)
948
1308
  lines.push('## Workspace Context (from AGENTS.md / CLAUDE.md)');
949
1309
  lines.push(workspaceContext);
950
1310
  }
1311
+ const assembledContextSummary = trigger.context?.['assembledContextSummary'];
1312
+ if (typeof assembledContextSummary === 'string' && assembledContextSummary.trim().length > 0) {
1313
+ let ctxStr = assembledContextSummary;
1314
+ if (Buffer.byteLength(ctxStr, 'utf8') > MAX_ASSEMBLED_CONTEXT_BYTES) {
1315
+ ctxStr = ctxStr.slice(0, MAX_ASSEMBLED_CONTEXT_BYTES) + '\n[Prior context truncated at 8KB]';
1316
+ }
1317
+ lines.push('');
1318
+ lines.push('## Prior Context');
1319
+ lines.push(ctxStr.trim());
1320
+ }
951
1321
  if (trigger.referenceUrls && trigger.referenceUrls.length > 0) {
952
1322
  lines.push('');
953
1323
  lines.push('## Reference documents');
@@ -964,7 +1334,7 @@ function buildUserMessage(text) {
964
1334
  timestamp: Date.now(),
965
1335
  };
966
1336
  }
967
- async function runWorkflow(trigger, ctx, apiKey, daemonRegistry, emitter) {
1337
+ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry, emitter, steerRegistry) {
968
1338
  const sessionId = (0, node_crypto_1.randomUUID)();
969
1339
  console.log(`[WorkflowRunner] Session started: sessionId=${sessionId} workflowId=${trigger.workflowId}`);
970
1340
  emitter?.emit({
@@ -1002,7 +1372,7 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry, emitter) {
1002
1372
  }
1003
1373
  }
1004
1374
  let isComplete = false;
1005
- let pendingSteerText = null;
1375
+ const pendingSteerParts = [];
1006
1376
  let lastStepNotes;
1007
1377
  let lastStepArtifacts;
1008
1378
  let stepAdvanceCount = 0;
@@ -1011,7 +1381,7 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry, emitter) {
1011
1381
  const issueSummaries = [];
1012
1382
  const MAX_ISSUE_SUMMARIES = 10;
1013
1383
  const onAdvance = (stepText, continueToken) => {
1014
- pendingSteerText = stepText;
1384
+ pendingSteerParts.push(stepText);
1015
1385
  stepAdvanceCount++;
1016
1386
  currentContinueToken = continueToken;
1017
1387
  if (workrailSessionId !== null)
@@ -1054,6 +1424,9 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry, emitter) {
1054
1424
  if (workrailSessionId !== null) {
1055
1425
  daemonRegistry?.register(workrailSessionId, trigger.workflowId);
1056
1426
  }
1427
+ if (workrailSessionId !== null) {
1428
+ steerRegistry?.set(workrailSessionId, (text) => { pendingSteerParts.push(text); });
1429
+ }
1057
1430
  if (startContinueToken) {
1058
1431
  await persistTokens(sessionId, startContinueToken, startCheckpointToken);
1059
1432
  }
@@ -1067,18 +1440,23 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry, emitter) {
1067
1440
  const schemas = getSchemas();
1068
1441
  const spawnCurrentDepth = trigger.spawnDepth ?? 0;
1069
1442
  const spawnMaxDepth = trigger.agentConfig?.maxSubagentDepth ?? 3;
1443
+ const readFileState = new Map();
1070
1444
  const tools = [
1071
1445
  makeCompleteStepTool(sessionId, ctx, () => currentContinueToken, onAdvance, onComplete, (t) => { currentContinueToken = t; }, schemas, index_js_1.executeContinueWorkflow, emitter, workrailSessionId),
1072
1446
  makeContinueWorkflowTool(sessionId, ctx, onAdvance, onComplete, schemas, index_js_1.executeContinueWorkflow, emitter, workrailSessionId),
1073
1447
  makeBashTool(trigger.workspacePath, schemas, sessionId, emitter, workrailSessionId),
1074
- makeReadTool(schemas, sessionId, emitter, workrailSessionId),
1075
- makeWriteTool(schemas, sessionId, emitter, workrailSessionId),
1448
+ makeReadTool(readFileState, schemas, sessionId, emitter, workrailSessionId),
1449
+ makeWriteTool(readFileState, schemas, sessionId, emitter, workrailSessionId),
1450
+ makeGlobTool(trigger.workspacePath, schemas, sessionId, emitter, workrailSessionId),
1451
+ makeGrepTool(trigger.workspacePath, schemas, sessionId, emitter, workrailSessionId),
1452
+ makeEditTool(trigger.workspacePath, readFileState, schemas, sessionId, emitter, workrailSessionId),
1076
1453
  makeReportIssueTool(sessionId, emitter, workrailSessionId, undefined, (summary) => {
1077
1454
  if (issueSummaries.length < MAX_ISSUE_SUMMARIES) {
1078
1455
  issueSummaries.push(summary);
1079
1456
  }
1080
1457
  }),
1081
1458
  makeSpawnAgentTool(sessionId, ctx, apiKey, workrailSessionId ?? '', spawnCurrentDepth, spawnMaxDepth, runWorkflow, schemas, emitter),
1459
+ makeSignalCoordinatorTool(sessionId, emitter, workrailSessionId),
1082
1460
  ];
1083
1461
  const [soulContent, workspaceContext, sessionNotes] = await Promise.all([
1084
1462
  loadDaemonSoul(trigger.soulFile),
@@ -1193,10 +1571,10 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry, emitter) {
1193
1571
  ...withWorkrailSession(workrailSessionId),
1194
1572
  });
1195
1573
  }
1196
- if (pendingSteerText !== null && !isComplete) {
1197
- const text = pendingSteerText;
1198
- pendingSteerText = null;
1199
- agent.steer(buildUserMessage(text));
1574
+ if (pendingSteerParts.length > 0 && !isComplete) {
1575
+ const joined = pendingSteerParts.join('\n\n');
1576
+ pendingSteerParts.length = 0;
1577
+ agent.steer(buildUserMessage(joined));
1200
1578
  }
1201
1579
  });
1202
1580
  let stopReason = 'stop';
@@ -1237,6 +1615,9 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry, emitter) {
1237
1615
  unsubscribe();
1238
1616
  if (timeoutHandle !== undefined)
1239
1617
  clearTimeout(timeoutHandle);
1618
+ if (workrailSessionId !== null) {
1619
+ steerRegistry?.delete(workrailSessionId);
1620
+ }
1240
1621
  console.log(`[WorkflowRunner] Agent loop ended: sessionId=${sessionId} stopReason=${stopReason}${errorMessage ? ` error=${errorMessage.slice(0, 120)}` : ''}`);
1241
1622
  }
1242
1623
  if (timeoutReason !== null) {