@exaudeus/workrail 3.72.0 → 3.72.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli-worktrain.js +4 -6
- package/dist/console-ui/assets/{index-CTza1zb5.js → index-Yj9NHqbR.js} +1 -1
- package/dist/console-ui/index.html +1 -1
- package/dist/daemon/active-sessions.d.ts +17 -0
- package/dist/daemon/active-sessions.js +55 -0
- package/dist/daemon/context-loader.d.ts +32 -0
- package/dist/daemon/context-loader.js +34 -0
- package/dist/daemon/session-scope.d.ts +3 -2
- package/dist/daemon/tools/_shared.d.ts +38 -0
- package/dist/daemon/tools/_shared.js +101 -0
- package/dist/daemon/tools/bash.d.ts +3 -0
- package/dist/daemon/tools/bash.js +57 -0
- package/dist/daemon/tools/continue-workflow.d.ts +6 -0
- package/dist/daemon/tools/continue-workflow.js +208 -0
- package/dist/daemon/tools/file-tools.d.ts +6 -0
- package/dist/daemon/tools/file-tools.js +195 -0
- package/dist/daemon/tools/glob-grep.d.ts +4 -0
- package/dist/daemon/tools/glob-grep.js +172 -0
- package/dist/daemon/tools/report-issue.d.ts +3 -0
- package/dist/daemon/tools/report-issue.js +129 -0
- package/dist/daemon/tools/signal-coordinator.d.ts +4 -0
- package/dist/daemon/tools/signal-coordinator.js +105 -0
- package/dist/daemon/tools/spawn-agent.d.ts +6 -0
- package/dist/daemon/tools/spawn-agent.js +135 -0
- package/dist/daemon/workflow-runner.d.ts +54 -29
- package/dist/daemon/workflow-runner.js +156 -980
- package/dist/infrastructure/storage/workflow-resolution.js +5 -6
- package/dist/manifest.json +131 -27
- package/dist/mcp/handlers/shared/request-workflow-reader.js +14 -0
- package/dist/trigger/coordinator-deps.d.ts +15 -0
- package/dist/trigger/coordinator-deps.js +322 -0
- package/dist/trigger/delivery-pipeline.d.ts +18 -0
- package/dist/trigger/delivery-pipeline.js +148 -0
- package/dist/trigger/dispatch-deduplicator.d.ts +6 -0
- package/dist/trigger/dispatch-deduplicator.js +24 -0
- package/dist/trigger/trigger-listener.d.ts +2 -3
- package/dist/trigger/trigger-listener.js +9 -276
- package/dist/trigger/trigger-router.d.ts +8 -7
- package/dist/trigger/trigger-router.js +19 -97
- package/dist/v2/usecases/console-routes.js +10 -2
- package/docs/ideas/backlog.md +82 -48
- package/package.json +1 -1
|
@@ -36,7 +36,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
|
36
36
|
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
37
37
|
};
|
|
38
38
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
39
|
-
exports.
|
|
39
|
+
exports.DAEMON_SOUL_TEMPLATE = exports.DAEMON_SOUL_DEFAULT = exports.WORKTREES_DIR = exports.DEFAULT_MAX_TURNS = exports.DEFAULT_SESSION_TIMEOUT_MINUTES = exports.makeSignalCoordinatorTool = exports.makeReportIssueTool = exports.makeSpawnAgentTool = exports.makeGrepTool = exports.makeGlobTool = exports.makeEditTool = exports.makeWriteTool = exports.makeReadTool = exports.makeBashTool = exports.makeCompleteStepTool = exports.makeContinueWorkflowTool = exports.DAEMON_SIGNALS_DIR = exports.DAEMON_SESSIONS_DIR = void 0;
|
|
40
40
|
exports.readDaemonSessionState = readDaemonSessionState;
|
|
41
41
|
exports.readAllDaemonSessions = readAllDaemonSessions;
|
|
42
42
|
exports.runStartupRecovery = runStartupRecovery;
|
|
@@ -45,17 +45,6 @@ exports.clearQueueIssueSidecars = clearQueueIssueSidecars;
|
|
|
45
45
|
exports.stripFrontmatter = stripFrontmatter;
|
|
46
46
|
exports.loadWorkspaceContext = loadWorkspaceContext;
|
|
47
47
|
exports.loadSessionNotes = loadSessionNotes;
|
|
48
|
-
exports.makeContinueWorkflowTool = makeContinueWorkflowTool;
|
|
49
|
-
exports.makeCompleteStepTool = makeCompleteStepTool;
|
|
50
|
-
exports.makeBashTool = makeBashTool;
|
|
51
|
-
exports.makeReadTool = makeReadTool;
|
|
52
|
-
exports.makeWriteTool = makeWriteTool;
|
|
53
|
-
exports.makeGlobTool = makeGlobTool;
|
|
54
|
-
exports.makeGrepTool = makeGrepTool;
|
|
55
|
-
exports.makeEditTool = makeEditTool;
|
|
56
|
-
exports.makeSpawnAgentTool = makeSpawnAgentTool;
|
|
57
|
-
exports.makeReportIssueTool = makeReportIssueTool;
|
|
58
|
-
exports.makeSignalCoordinatorTool = makeSignalCoordinatorTool;
|
|
59
48
|
exports.buildSessionRecap = buildSessionRecap;
|
|
60
49
|
exports.buildSystemPrompt = buildSystemPrompt;
|
|
61
50
|
exports.tagToStatsOutcome = tagToStatsOutcome;
|
|
@@ -87,23 +76,40 @@ const v2_token_ops_js_1 = require("../mcp/handlers/v2-token-ops.js");
|
|
|
87
76
|
const index_js_2 = require("../v2/durable-core/ids/index.js");
|
|
88
77
|
const node_outputs_js_1 = require("../v2/projections/node-outputs.js");
|
|
89
78
|
const assert_never_js_1 = require("../runtime/assert-never.js");
|
|
90
|
-
const result_js_1 = require("../runtime/result.js");
|
|
91
79
|
const session_recovery_policy_js_1 = require("./session-recovery-policy.js");
|
|
92
80
|
const stats_summary_js_1 = require("./stats-summary.js");
|
|
93
81
|
const step_injector_js_1 = require("./turn-end/step-injector.js");
|
|
94
82
|
const conversation_flusher_js_1 = require("./turn-end/conversation-flusher.js");
|
|
95
83
|
const session_scope_js_1 = require("./session-scope.js");
|
|
96
|
-
const
|
|
84
|
+
const context_loader_js_1 = require("./context-loader.js");
|
|
85
|
+
const _shared_js_1 = require("./tools/_shared.js");
|
|
86
|
+
const continue_workflow_js_1 = require("./tools/continue-workflow.js");
|
|
87
|
+
Object.defineProperty(exports, "makeContinueWorkflowTool", { enumerable: true, get: function () { return continue_workflow_js_1.makeContinueWorkflowTool; } });
|
|
88
|
+
Object.defineProperty(exports, "makeCompleteStepTool", { enumerable: true, get: function () { return continue_workflow_js_1.makeCompleteStepTool; } });
|
|
89
|
+
const bash_js_1 = require("./tools/bash.js");
|
|
90
|
+
Object.defineProperty(exports, "makeBashTool", { enumerable: true, get: function () { return bash_js_1.makeBashTool; } });
|
|
91
|
+
const file_tools_js_1 = require("./tools/file-tools.js");
|
|
92
|
+
Object.defineProperty(exports, "makeReadTool", { enumerable: true, get: function () { return file_tools_js_1.makeReadTool; } });
|
|
93
|
+
Object.defineProperty(exports, "makeWriteTool", { enumerable: true, get: function () { return file_tools_js_1.makeWriteTool; } });
|
|
94
|
+
Object.defineProperty(exports, "makeEditTool", { enumerable: true, get: function () { return file_tools_js_1.makeEditTool; } });
|
|
95
|
+
const glob_grep_js_1 = require("./tools/glob-grep.js");
|
|
96
|
+
Object.defineProperty(exports, "makeGlobTool", { enumerable: true, get: function () { return glob_grep_js_1.makeGlobTool; } });
|
|
97
|
+
Object.defineProperty(exports, "makeGrepTool", { enumerable: true, get: function () { return glob_grep_js_1.makeGrepTool; } });
|
|
98
|
+
const spawn_agent_js_1 = require("./tools/spawn-agent.js");
|
|
99
|
+
Object.defineProperty(exports, "makeSpawnAgentTool", { enumerable: true, get: function () { return spawn_agent_js_1.makeSpawnAgentTool; } });
|
|
100
|
+
const report_issue_js_1 = require("./tools/report-issue.js");
|
|
101
|
+
Object.defineProperty(exports, "makeReportIssueTool", { enumerable: true, get: function () { return report_issue_js_1.makeReportIssueTool; } });
|
|
102
|
+
const signal_coordinator_js_1 = require("./tools/signal-coordinator.js");
|
|
103
|
+
Object.defineProperty(exports, "makeSignalCoordinatorTool", { enumerable: true, get: function () { return signal_coordinator_js_1.makeSignalCoordinatorTool; } });
|
|
104
|
+
var _shared_js_2 = require("./tools/_shared.js");
|
|
105
|
+
Object.defineProperty(exports, "DAEMON_SESSIONS_DIR", { enumerable: true, get: function () { return _shared_js_2.DAEMON_SESSIONS_DIR; } });
|
|
106
|
+
var signal_coordinator_js_2 = require("./tools/signal-coordinator.js");
|
|
107
|
+
Object.defineProperty(exports, "DAEMON_SIGNALS_DIR", { enumerable: true, get: function () { return signal_coordinator_js_2.DAEMON_SIGNALS_DIR; } });
|
|
97
108
|
const execFileAsync = (0, node_util_1.promisify)(node_child_process_1.execFile);
|
|
98
|
-
const BASH_TIMEOUT_MS = 5 * 60 * 1000;
|
|
99
109
|
const MAX_SESSION_RECAP_NOTES = 3;
|
|
100
110
|
const MAX_SESSION_NOTE_CHARS = 800;
|
|
101
111
|
exports.DEFAULT_SESSION_TIMEOUT_MINUTES = 30;
|
|
102
112
|
exports.DEFAULT_MAX_TURNS = 200;
|
|
103
|
-
function withWorkrailSession(sid) {
|
|
104
|
-
return sid != null ? { workrailSessionId: sid } : {};
|
|
105
|
-
}
|
|
106
|
-
exports.DAEMON_SESSIONS_DIR = path.join(os.homedir(), '.workrail', 'daemon-sessions');
|
|
107
113
|
const MAX_ORPHAN_AGE_MS = 2 * 60 * 60 * 1000;
|
|
108
114
|
const MAX_WORKTREE_ORPHAN_AGE_MS = 24 * 60 * 60 * 1000;
|
|
109
115
|
const WORKRAIL_DIR = path.join(os.homedir(), '.workrail');
|
|
@@ -130,40 +136,15 @@ const soul_template_js_1 = require("./soul-template.js");
|
|
|
130
136
|
var soul_template_js_2 = require("./soul-template.js");
|
|
131
137
|
Object.defineProperty(exports, "DAEMON_SOUL_DEFAULT", { enumerable: true, get: function () { return soul_template_js_2.DAEMON_SOUL_DEFAULT; } });
|
|
132
138
|
Object.defineProperty(exports, "DAEMON_SOUL_TEMPLATE", { enumerable: true, get: function () { return soul_template_js_2.DAEMON_SOUL_TEMPLATE; } });
|
|
133
|
-
async function persistTokens(sessionId, continueToken, checkpointToken, worktreePath, recoveryContext) {
|
|
134
|
-
try {
|
|
135
|
-
await fs.mkdir(exports.DAEMON_SESSIONS_DIR, { recursive: true });
|
|
136
|
-
const sessionPath = path.join(exports.DAEMON_SESSIONS_DIR, `${sessionId}.json`);
|
|
137
|
-
const state = JSON.stringify({
|
|
138
|
-
continueToken,
|
|
139
|
-
checkpointToken,
|
|
140
|
-
ts: Date.now(),
|
|
141
|
-
...(worktreePath !== undefined ? { worktreePath } : {}),
|
|
142
|
-
...(recoveryContext !== undefined ? {
|
|
143
|
-
workflowId: recoveryContext.workflowId,
|
|
144
|
-
goal: recoveryContext.goal,
|
|
145
|
-
workspacePath: recoveryContext.workspacePath,
|
|
146
|
-
} : {}),
|
|
147
|
-
}, null, 2);
|
|
148
|
-
const tmp = `${sessionPath}.tmp`;
|
|
149
|
-
await fs.writeFile(tmp, state, 'utf8');
|
|
150
|
-
await fs.rename(tmp, sessionPath);
|
|
151
|
-
return (0, result_js_1.ok)(undefined);
|
|
152
|
-
}
|
|
153
|
-
catch (e) {
|
|
154
|
-
const nodeErr = e;
|
|
155
|
-
return (0, result_js_1.err)({ code: nodeErr.code ?? 'UNKNOWN', message: nodeErr.message ?? String(e) });
|
|
156
|
-
}
|
|
157
|
-
}
|
|
158
139
|
async function appendConversationMessages(filePath, messages) {
|
|
159
140
|
if (messages.length === 0)
|
|
160
141
|
return;
|
|
161
142
|
const lines = messages.map((m) => JSON.stringify(m)).join('\n') + '\n';
|
|
162
|
-
await fs.mkdir(
|
|
143
|
+
await fs.mkdir(_shared_js_1.DAEMON_SESSIONS_DIR, { recursive: true });
|
|
163
144
|
await fs.appendFile(filePath, lines, 'utf8');
|
|
164
145
|
}
|
|
165
146
|
async function readDaemonSessionState(sessionId) {
|
|
166
|
-
const sessionPath = path.join(
|
|
147
|
+
const sessionPath = path.join(_shared_js_1.DAEMON_SESSIONS_DIR, `${sessionId}.json`);
|
|
167
148
|
try {
|
|
168
149
|
const raw = await fs.readFile(sessionPath, 'utf8');
|
|
169
150
|
const parsed = JSON.parse(raw);
|
|
@@ -173,7 +154,7 @@ async function readDaemonSessionState(sessionId) {
|
|
|
173
154
|
return null;
|
|
174
155
|
}
|
|
175
156
|
}
|
|
176
|
-
async function readAllDaemonSessions(sessionsDir =
|
|
157
|
+
async function readAllDaemonSessions(sessionsDir = _shared_js_1.DAEMON_SESSIONS_DIR) {
|
|
177
158
|
let entries;
|
|
178
159
|
try {
|
|
179
160
|
entries = await fs.readdir(sessionsDir);
|
|
@@ -215,7 +196,7 @@ async function readAllDaemonSessions(sessionsDir = exports.DAEMON_SESSIONS_DIR)
|
|
|
215
196
|
}
|
|
216
197
|
return sessions;
|
|
217
198
|
}
|
|
218
|
-
async function runStartupRecovery(sessionsDir =
|
|
199
|
+
async function runStartupRecovery(sessionsDir = _shared_js_1.DAEMON_SESSIONS_DIR, execFn = execFileAsync, ctx, _countStepAdvancesFn = countOrphanStepAdvances, _executeContinueWorkflowFn = index_js_1.executeContinueWorkflow, _runWorkflowFn = runWorkflow, apiKey = '') {
|
|
219
200
|
await clearQueueIssueSidecars(sessionsDir);
|
|
220
201
|
const sessions = await readAllDaemonSessions(sessionsDir);
|
|
221
202
|
if (sessions.length === 0) {
|
|
@@ -304,14 +285,12 @@ async function runStartupRecovery(sessionsDir = exports.DAEMON_SESSIONS_DIR, exe
|
|
|
304
285
|
`or has no pending step. Discarding.`);
|
|
305
286
|
break;
|
|
306
287
|
}
|
|
307
|
-
const
|
|
288
|
+
const recoveryAllocatedSession = {
|
|
308
289
|
continueToken: rehydrated.continueToken ?? '',
|
|
309
290
|
checkpointToken: rehydrated.checkpointToken,
|
|
291
|
+
firstStepPrompt: rehydrated.pending.prompt ?? '',
|
|
310
292
|
isComplete: rehydrated.isComplete,
|
|
311
|
-
|
|
312
|
-
preferences: rehydrated.preferences,
|
|
313
|
-
nextIntent: rehydrated.nextIntent,
|
|
314
|
-
nextCall: rehydrated.nextCall,
|
|
293
|
+
triggerSource: 'daemon',
|
|
315
294
|
};
|
|
316
295
|
const effectiveWorkspacePath = session.worktreePath ?? session.workspacePath;
|
|
317
296
|
const branchStrategy = 'none';
|
|
@@ -320,11 +299,15 @@ async function runStartupRecovery(sessionsDir = exports.DAEMON_SESSIONS_DIR, exe
|
|
|
320
299
|
goal: session.goal ?? 'Resumed session (crash recovery)',
|
|
321
300
|
workspacePath: effectiveWorkspacePath,
|
|
322
301
|
branchStrategy,
|
|
323
|
-
|
|
302
|
+
};
|
|
303
|
+
const recoverySource = {
|
|
304
|
+
kind: 'pre_allocated',
|
|
305
|
+
trigger: recoveredTrigger,
|
|
306
|
+
session: recoveryAllocatedSession,
|
|
324
307
|
};
|
|
325
308
|
console.log(`[WorkflowRunner] Startup recovery: resuming session ${session.sessionId} ` +
|
|
326
309
|
`workflowId=${session.workflowId} stepAdvances=${stepAdvances}`);
|
|
327
|
-
void _runWorkflowFn(recoveredTrigger, ctx, apiKey).then((result) => {
|
|
310
|
+
void _runWorkflowFn(recoveredTrigger, ctx, apiKey, undefined, undefined, undefined, undefined, undefined, recoverySource).then((result) => {
|
|
328
311
|
console.log(`[WorkflowRunner] Startup recovery: resumed session ${session.sessionId} completed: ${result._tag}`);
|
|
329
312
|
}).catch((err) => {
|
|
330
313
|
console.warn(`[WorkflowRunner] Startup recovery: resumed session ${session.sessionId} failed: ` +
|
|
@@ -713,673 +696,6 @@ function getSchemas() {
|
|
|
713
696
|
};
|
|
714
697
|
return _schemas;
|
|
715
698
|
}
|
|
716
|
-
function makeContinueWorkflowTool(sessionId, ctx, onAdvance, onComplete, schemas, _executeContinueWorkflowFn = index_js_1.executeContinueWorkflow, emitter, workrailSessionId) {
|
|
717
|
-
return {
|
|
718
|
-
name: 'continue_workflow',
|
|
719
|
-
description: '[DEPRECATED in daemon sessions -- use complete_step instead] ' +
|
|
720
|
-
'Advance the WorkRail workflow to the next step. Call this after completing all work ' +
|
|
721
|
-
'required by the current step. Include your notes in notesMarkdown. ' +
|
|
722
|
-
'When the step requires an assessment gate, include wr.assessment objects in artifacts.',
|
|
723
|
-
inputSchema: schemas['ContinueWorkflowParams'],
|
|
724
|
-
label: 'Continue Workflow',
|
|
725
|
-
execute: async (_toolCallId, params) => {
|
|
726
|
-
console.log(`[WorkflowRunner] Tool: continue_workflow sessionId=${sessionId}`);
|
|
727
|
-
emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'continue_workflow', summary: params.intent ?? 'advance', ...withWorkrailSession(workrailSessionId) });
|
|
728
|
-
const result = await _executeContinueWorkflowFn({
|
|
729
|
-
continueToken: params.continueToken,
|
|
730
|
-
intent: (params.intent ?? 'advance'),
|
|
731
|
-
output: (params.notesMarkdown || params.artifacts?.length)
|
|
732
|
-
? {
|
|
733
|
-
...(params.notesMarkdown ? { notesMarkdown: params.notesMarkdown } : {}),
|
|
734
|
-
...(params.artifacts ? { artifacts: params.artifacts } : {}),
|
|
735
|
-
}
|
|
736
|
-
: undefined,
|
|
737
|
-
context: params.context,
|
|
738
|
-
}, ctx);
|
|
739
|
-
if (result.isErr()) {
|
|
740
|
-
throw new Error(`continue_workflow failed: ${result.error.kind} -- ${JSON.stringify(result.error)}`);
|
|
741
|
-
}
|
|
742
|
-
const out = result.value.response;
|
|
743
|
-
const continueToken = out.continueToken ?? '';
|
|
744
|
-
const checkpointToken = out.checkpointToken ?? null;
|
|
745
|
-
const persistToken = (out.kind === 'blocked' ? out.nextCall?.params.continueToken : undefined) ?? continueToken;
|
|
746
|
-
if (persistToken) {
|
|
747
|
-
const persistResult = await persistTokens(sessionId, persistToken, checkpointToken);
|
|
748
|
-
if (persistResult.kind === 'err') {
|
|
749
|
-
console.warn(`[WorkflowRunner] persistTokens failed (continue_workflow): ${persistResult.error.code} -- ${persistResult.error.message}`);
|
|
750
|
-
}
|
|
751
|
-
}
|
|
752
|
-
if (out.kind === 'blocked') {
|
|
753
|
-
const retryToken = out.nextCall?.params.continueToken ?? continueToken;
|
|
754
|
-
const lines = ['## Step blocked -- action required\n'];
|
|
755
|
-
for (const blocker of out.blockers.blockers) {
|
|
756
|
-
lines.push(blocker.message);
|
|
757
|
-
if (blocker.suggestedFix) {
|
|
758
|
-
lines.push(`\nWhat to do: ${blocker.suggestedFix}`);
|
|
759
|
-
}
|
|
760
|
-
lines.push('');
|
|
761
|
-
}
|
|
762
|
-
if (out.validation) {
|
|
763
|
-
if (out.validation.issues.length > 0) {
|
|
764
|
-
lines.push('**Issues:**');
|
|
765
|
-
for (const issue of out.validation.issues)
|
|
766
|
-
lines.push(`- ${issue}`);
|
|
767
|
-
lines.push('');
|
|
768
|
-
}
|
|
769
|
-
if (out.validation.suggestions.length > 0) {
|
|
770
|
-
lines.push('**Suggestions:**');
|
|
771
|
-
for (const s of out.validation.suggestions)
|
|
772
|
-
lines.push(`- ${s}`);
|
|
773
|
-
lines.push('');
|
|
774
|
-
}
|
|
775
|
-
}
|
|
776
|
-
if (out.assessmentFollowup) {
|
|
777
|
-
lines.push(`**Follow-up required:** ${out.assessmentFollowup.title}`);
|
|
778
|
-
lines.push(out.assessmentFollowup.guidance);
|
|
779
|
-
lines.push('');
|
|
780
|
-
}
|
|
781
|
-
if (out.retryable) {
|
|
782
|
-
lines.push(`Retry the same step with corrected output.\n\ncontinueToken: ${retryToken}`);
|
|
783
|
-
}
|
|
784
|
-
else {
|
|
785
|
-
lines.push(`You cannot proceed without resolving this. Inform the user and wait for their response, then call continue_workflow.\n\ncontinueToken: ${retryToken}`);
|
|
786
|
-
}
|
|
787
|
-
const feedback = lines.join('\n');
|
|
788
|
-
return {
|
|
789
|
-
content: [{ type: 'text', text: feedback }],
|
|
790
|
-
details: out,
|
|
791
|
-
};
|
|
792
|
-
}
|
|
793
|
-
if (out.isComplete) {
|
|
794
|
-
onComplete(params.notesMarkdown, Array.isArray(params.artifacts) ? params.artifacts : undefined);
|
|
795
|
-
return {
|
|
796
|
-
content: [{ type: 'text', text: 'Workflow complete. All steps have been executed.' }],
|
|
797
|
-
details: out,
|
|
798
|
-
};
|
|
799
|
-
}
|
|
800
|
-
const pending = out.pending;
|
|
801
|
-
const stepText = pending
|
|
802
|
-
? `## Next step: ${pending.title}\n\n${pending.prompt}\n\ncontinueToken: ${continueToken}`
|
|
803
|
-
: `Step advanced. continueToken: ${continueToken}`;
|
|
804
|
-
onAdvance(stepText, continueToken);
|
|
805
|
-
return {
|
|
806
|
-
content: [{ type: 'text', text: stepText }],
|
|
807
|
-
details: out,
|
|
808
|
-
};
|
|
809
|
-
},
|
|
810
|
-
};
|
|
811
|
-
}
|
|
812
|
-
function makeCompleteStepTool(sessionId, ctx, getCurrentToken, onAdvance, onComplete, onTokenUpdate, schemas, _executeContinueWorkflowFn = index_js_1.executeContinueWorkflow, emitter, workrailSessionId) {
|
|
813
|
-
return {
|
|
814
|
-
name: 'complete_step',
|
|
815
|
-
description: 'Mark the current WorkRail workflow step as complete and advance to the next one. ' +
|
|
816
|
-
'Call this after completing all work required by the current step. ' +
|
|
817
|
-
'Include your substantive notes (min 50 characters) describing what you did. ' +
|
|
818
|
-
'The daemon manages the session token internally -- you do not need a continueToken. ' +
|
|
819
|
-
'When the step requires an assessment gate, include wr.assessment objects in artifacts.',
|
|
820
|
-
inputSchema: schemas['CompleteStepParams'],
|
|
821
|
-
label: 'Complete Step',
|
|
822
|
-
execute: async (_toolCallId, params) => {
|
|
823
|
-
console.log(`[WorkflowRunner] Tool: complete_step sessionId=${sessionId}`);
|
|
824
|
-
emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'complete_step', summary: 'advance', ...withWorkrailSession(workrailSessionId) });
|
|
825
|
-
const notes = params.notes;
|
|
826
|
-
if (!notes || notes.length < 50) {
|
|
827
|
-
throw new Error(`complete_step: notes is required and must be at least 50 characters. ` +
|
|
828
|
-
`Provide substantive notes describing what you did, what you produced, and any notable decisions. ` +
|
|
829
|
-
`Current length: ${notes?.length ?? 0} characters.`);
|
|
830
|
-
}
|
|
831
|
-
const continueToken = getCurrentToken();
|
|
832
|
-
const result = await _executeContinueWorkflowFn({
|
|
833
|
-
continueToken,
|
|
834
|
-
intent: 'advance',
|
|
835
|
-
output: (notes || params.artifacts?.length)
|
|
836
|
-
? {
|
|
837
|
-
notesMarkdown: notes,
|
|
838
|
-
...(params.artifacts?.length ? { artifacts: params.artifacts } : {}),
|
|
839
|
-
}
|
|
840
|
-
: undefined,
|
|
841
|
-
context: params.context,
|
|
842
|
-
}, ctx);
|
|
843
|
-
if (result.isErr()) {
|
|
844
|
-
throw new Error(`complete_step failed: ${result.error.kind} -- ${JSON.stringify(result.error)}`);
|
|
845
|
-
}
|
|
846
|
-
const out = result.value.response;
|
|
847
|
-
const newContinueToken = out.continueToken ?? '';
|
|
848
|
-
const checkpointToken = out.checkpointToken ?? null;
|
|
849
|
-
const persistToken = (out.kind === 'blocked' ? out.nextCall?.params.continueToken : undefined) ?? newContinueToken;
|
|
850
|
-
if (persistToken) {
|
|
851
|
-
const persistResult = await persistTokens(sessionId, persistToken, checkpointToken);
|
|
852
|
-
if (persistResult.kind === 'err') {
|
|
853
|
-
console.warn(`[WorkflowRunner] persistTokens failed (complete_step): ${persistResult.error.code} -- ${persistResult.error.message}`);
|
|
854
|
-
}
|
|
855
|
-
}
|
|
856
|
-
if (out.kind === 'blocked') {
|
|
857
|
-
const retryToken = out.nextCall?.params.continueToken ?? newContinueToken;
|
|
858
|
-
onTokenUpdate(retryToken);
|
|
859
|
-
const lines = ['## Step blocked -- action required\n'];
|
|
860
|
-
for (const blocker of out.blockers.blockers) {
|
|
861
|
-
lines.push(blocker.message);
|
|
862
|
-
if (blocker.suggestedFix) {
|
|
863
|
-
lines.push(`\nWhat to do: ${blocker.suggestedFix}`);
|
|
864
|
-
}
|
|
865
|
-
lines.push('');
|
|
866
|
-
}
|
|
867
|
-
if (out.validation) {
|
|
868
|
-
if (out.validation.issues.length > 0) {
|
|
869
|
-
lines.push('**Issues:**');
|
|
870
|
-
for (const issue of out.validation.issues)
|
|
871
|
-
lines.push(`- ${issue}`);
|
|
872
|
-
lines.push('');
|
|
873
|
-
}
|
|
874
|
-
if (out.validation.suggestions.length > 0) {
|
|
875
|
-
lines.push('**Suggestions:**');
|
|
876
|
-
for (const s of out.validation.suggestions)
|
|
877
|
-
lines.push(`- ${s}`);
|
|
878
|
-
lines.push('');
|
|
879
|
-
}
|
|
880
|
-
}
|
|
881
|
-
if (out.assessmentFollowup) {
|
|
882
|
-
lines.push(`**Follow-up required:** ${out.assessmentFollowup.title}`);
|
|
883
|
-
lines.push(out.assessmentFollowup.guidance);
|
|
884
|
-
lines.push('');
|
|
885
|
-
}
|
|
886
|
-
if (out.retryable) {
|
|
887
|
-
lines.push(`Retry the same step: call complete_step again with corrected notes.`);
|
|
888
|
-
}
|
|
889
|
-
else {
|
|
890
|
-
lines.push(`You cannot proceed without resolving this. Inform the user and wait for their response, then call complete_step.`);
|
|
891
|
-
}
|
|
892
|
-
const feedback = lines.join('\n');
|
|
893
|
-
return {
|
|
894
|
-
content: [{ type: 'text', text: feedback }],
|
|
895
|
-
details: out,
|
|
896
|
-
};
|
|
897
|
-
}
|
|
898
|
-
if (out.isComplete) {
|
|
899
|
-
onComplete(notes, Array.isArray(params.artifacts) ? params.artifacts : undefined);
|
|
900
|
-
return {
|
|
901
|
-
content: [{ type: 'text', text: JSON.stringify({ status: 'complete' }) }],
|
|
902
|
-
details: out,
|
|
903
|
-
};
|
|
904
|
-
}
|
|
905
|
-
const pending = out.pending;
|
|
906
|
-
const nextStepTitle = pending?.title ?? 'Next step';
|
|
907
|
-
const stepText = pending
|
|
908
|
-
? `${JSON.stringify({ status: 'advanced', nextStep: pending.title })}\n\n## ${pending.title}\n\n${pending.prompt}`
|
|
909
|
-
: JSON.stringify({ status: 'advanced', nextStep: nextStepTitle });
|
|
910
|
-
onAdvance(stepText, newContinueToken);
|
|
911
|
-
return {
|
|
912
|
-
content: [{ type: 'text', text: stepText }],
|
|
913
|
-
details: out,
|
|
914
|
-
};
|
|
915
|
-
},
|
|
916
|
-
};
|
|
917
|
-
}
|
|
918
|
-
function makeBashTool(workspacePath, schemas, sessionId, emitter, workrailSessionId) {
|
|
919
|
-
return {
|
|
920
|
-
name: 'Bash',
|
|
921
|
-
description: 'Execute a shell command. Throws on failure (non-zero exit with stderr, or exit code 2+). ' +
|
|
922
|
-
'Exit code 1 with empty stderr is treated as "no match found" (standard grep semantics) and ' +
|
|
923
|
-
'returns empty output without throwing. ' +
|
|
924
|
-
`Maximum execution time: ${BASH_TIMEOUT_MS / 1000}s.`,
|
|
925
|
-
inputSchema: schemas['BashParams'],
|
|
926
|
-
label: 'Bash',
|
|
927
|
-
execute: async (_toolCallId, params) => {
|
|
928
|
-
if (typeof params.command !== 'string' || !params.command)
|
|
929
|
-
throw new Error('Bash: command must be a non-empty string');
|
|
930
|
-
console.log(`[WorkflowRunner] Tool: bash "${String(params.command).slice(0, 80)}"`);
|
|
931
|
-
if (sessionId)
|
|
932
|
-
emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'Bash', summary: String(params.command).slice(0, 80), ...withWorkrailSession(workrailSessionId) });
|
|
933
|
-
const cwd = params.cwd ?? workspacePath;
|
|
934
|
-
try {
|
|
935
|
-
const { stdout, stderr } = await execAsync(params.command, {
|
|
936
|
-
cwd,
|
|
937
|
-
timeout: BASH_TIMEOUT_MS,
|
|
938
|
-
shell: '/bin/bash',
|
|
939
|
-
});
|
|
940
|
-
const output = [stdout, stderr].filter(Boolean).join('\n');
|
|
941
|
-
return {
|
|
942
|
-
content: [{ type: 'text', text: output || '(no output)' }],
|
|
943
|
-
details: { stdout, stderr },
|
|
944
|
-
};
|
|
945
|
-
}
|
|
946
|
-
catch (err) {
|
|
947
|
-
const e = err;
|
|
948
|
-
const stdout = String(e.stdout ?? '');
|
|
949
|
-
const stderr = String(e.stderr ?? '');
|
|
950
|
-
const rawCode = e.code;
|
|
951
|
-
const signal = e.signal;
|
|
952
|
-
if (rawCode === 1 && !stderr.trim()) {
|
|
953
|
-
return {
|
|
954
|
-
content: [{ type: 'text', text: stdout || '(no output)' }],
|
|
955
|
-
details: { stdout, stderr },
|
|
956
|
-
};
|
|
957
|
-
}
|
|
958
|
-
const exitInfo = rawCode != null
|
|
959
|
-
? `exit ${String(rawCode)}`
|
|
960
|
-
: signal
|
|
961
|
-
? `signal ${String(signal)}`
|
|
962
|
-
: 'exit unknown';
|
|
963
|
-
throw new Error(`Command failed: ${params.command} (${exitInfo})\nSTDOUT:\n${stdout}\nSTDERR:\n${stderr}`);
|
|
964
|
-
}
|
|
965
|
-
},
|
|
966
|
-
};
|
|
967
|
-
}
|
|
968
|
-
function findActualString(fileContent, oldString) {
|
|
969
|
-
if (fileContent.includes(oldString))
|
|
970
|
-
return oldString;
|
|
971
|
-
const normalized = oldString
|
|
972
|
-
.replace(/[\u2018\u2019]/g, "'")
|
|
973
|
-
.replace(/[\u201C\u201D]/g, '"')
|
|
974
|
-
.replace(/\u2013/g, '-')
|
|
975
|
-
.replace(/\u2014/g, '--');
|
|
976
|
-
if (fileContent.includes(normalized))
|
|
977
|
-
return normalized;
|
|
978
|
-
return null;
|
|
979
|
-
}
|
|
980
|
-
const READ_SIZE_CAP_BYTES = 256 * 1024;
|
|
981
|
-
const GLOB_ALWAYS_EXCLUDE = ['**/node_modules/**', '**/.git/**', '**/dist/**', '**/build/**'];
|
|
982
|
-
function makeReadTool(readFileState, schemas, sessionId, emitter, workrailSessionId) {
|
|
983
|
-
return {
|
|
984
|
-
name: 'Read',
|
|
985
|
-
description: 'Read the contents of a file at the given absolute path. ' +
|
|
986
|
-
'Content is returned in cat -n format: each line is prefixed with its 1-indexed line number and a tab character (e.g. "1\\tline one\\n2\\tline two"). ' +
|
|
987
|
-
'Use offset (0-indexed start line) and limit (max lines) to read a slice of a large file.',
|
|
988
|
-
inputSchema: schemas['ReadParams'],
|
|
989
|
-
label: 'Read',
|
|
990
|
-
execute: async (_toolCallId, params) => {
|
|
991
|
-
if (typeof params.filePath !== 'string' || !params.filePath)
|
|
992
|
-
throw new Error('Read: filePath must be a non-empty string');
|
|
993
|
-
const filePath = params.filePath;
|
|
994
|
-
if (sessionId)
|
|
995
|
-
emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'Read', summary: filePath.slice(0, 80), ...withWorkrailSession(workrailSessionId) });
|
|
996
|
-
const devPaths = ['/dev/stdin', '/dev/tty', '/dev/zero', '/dev/random', '/dev/full', '/dev/urandom'];
|
|
997
|
-
if (devPaths.some(d => filePath === d)) {
|
|
998
|
-
throw new Error(`Refusing to read device path: ${filePath}`);
|
|
999
|
-
}
|
|
1000
|
-
const stat = await fs.stat(filePath);
|
|
1001
|
-
const offset = params.offset ?? 0;
|
|
1002
|
-
const limit = params.limit;
|
|
1003
|
-
const isPaginated = params.offset !== undefined || params.limit !== undefined;
|
|
1004
|
-
if (!isPaginated && stat.size > READ_SIZE_CAP_BYTES) {
|
|
1005
|
-
throw new Error(`File is too large to read at once (${stat.size} bytes, cap is ${READ_SIZE_CAP_BYTES} bytes). ` +
|
|
1006
|
-
`Use offset and limit parameters to read a specific range of lines.`);
|
|
1007
|
-
}
|
|
1008
|
-
const rawContent = await fs.readFile(filePath, 'utf8');
|
|
1009
|
-
const allLines = rawContent.split('\n');
|
|
1010
|
-
const isPartialView = offset !== 0 || limit != null;
|
|
1011
|
-
const slicedLines = limit != null ? allLines.slice(offset, offset + limit) : allLines.slice(offset);
|
|
1012
|
-
const startLine = offset;
|
|
1013
|
-
const formatted = slicedLines.map((l, i) => `${startLine + i + 1}\t${l}`).join('\n');
|
|
1014
|
-
readFileState.set(filePath, { content: rawContent, timestamp: stat.mtimeMs, isPartialView });
|
|
1015
|
-
return {
|
|
1016
|
-
content: [{ type: 'text', text: formatted }],
|
|
1017
|
-
details: { filePath, totalLines: allLines.length, returnedLines: slicedLines.length, offset, isPartialView },
|
|
1018
|
-
};
|
|
1019
|
-
},
|
|
1020
|
-
};
|
|
1021
|
-
}
|
|
1022
|
-
/**
 * Factory for the Write tool: writes file content to an absolute path with a
 * read-before-write staleness guard for files that already exist on disk.
 * New paths are exempt from the prior-read requirement.
 */
function makeWriteTool(readFileState, schemas, sessionId, emitter, workrailSessionId) {
    // Guard: an existing file must have been read this session and be unchanged
    // on disk since that read; throws otherwise. New paths pass through.
    const ensureWritable = async (targetPath) => {
        let onDisk = true;
        try {
            await fs.access(targetPath);
        }
        catch {
            onDisk = false;
        }
        if (!onDisk)
            return;
        const prior = readFileState.get(targetPath);
        if (!prior) {
            throw new Error(`File has not been read in this session. Call Read first before writing to it: ${targetPath}`);
        }
        const onDiskStat = await fs.stat(targetPath);
        if (onDiskStat.mtimeMs !== prior.timestamp) {
            throw new Error(`File has been modified since it was read. Re-read before writing: ${targetPath}`);
        }
    };
    return {
        name: 'Write',
        description: 'Write content to a file at the given absolute path. Creates parent directories if needed. ' +
            'For existing files: the file must have been read in this session and must not have changed on disk since then. ' +
            'For new files (path does not exist): no prior read is required.',
        inputSchema: schemas['WriteParams'],
        label: 'Write',
        execute: async (_toolCallId, params) => {
            if (typeof params.filePath !== 'string' || !params.filePath)
                throw new Error('Write: filePath must be a non-empty string');
            if (typeof params.content !== 'string')
                throw new Error('Write: content must be a string');
            const targetPath = params.filePath;
            if (sessionId)
                emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'Write', summary: targetPath.slice(0, 80), ...withWorkrailSession(workrailSessionId) });
            await ensureWritable(targetPath);
            await fs.mkdir(path.dirname(targetPath), { recursive: true });
            await fs.writeFile(targetPath, params.content, 'utf8');
            // Record the written content as the latest known state so a follow-up
            // Edit/Write in this session passes the staleness check.
            const writtenStat = await fs.stat(targetPath);
            readFileState.set(targetPath, { content: params.content, timestamp: writtenStat.mtimeMs, isPartialView: false });
            return {
                content: [{ type: 'text', text: `Written ${params.content.length} bytes to ${targetPath}` }],
                details: { filePath: targetPath, length: params.content.length },
            };
        },
    };
}
|
|
1066
|
-
/**
 * Factory for the Glob tool: finds files matching a glob pattern under the
 * workspace (or an explicit params.path), newest-modified first, capped at 100.
 */
function makeGlobTool(workspacePath, schemas, sessionId, emitter, workrailSessionId) {
    return {
        name: 'Glob',
        description: 'Find files matching a glob pattern. Returns newline-separated relative file paths, sorted by modification time descending. ' +
            'node_modules, .git, dist, and build directories are always excluded. ' +
            'Results are capped at 100 files.',
        inputSchema: schemas['GlobParams'],
        label: 'Glob',
        execute: async (_toolCallId, params) => {
            if (typeof params.pattern !== 'string' || !params.pattern)
                throw new Error('Glob: pattern must be a non-empty string');
            const pattern = params.pattern;
            const searchRoot = params.path ?? workspacePath;
            if (sessionId)
                emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'Glob', summary: pattern.slice(0, 80), ...withWorkrailSession(workrailSessionId) });
            const GLOB_LIMIT = 100;
            // Best-effort: any glob failure is reported as "no matches".
            let matches = [];
            try {
                matches = await (0, tinyglobby_1.glob)(pattern, {
                    cwd: searchRoot,
                    ignore: GLOB_ALWAYS_EXCLUDE,
                    absolute: false,
                });
            }
            catch {
                matches = [];
            }
            // Pair each path with its mtime (0 when stat fails) for sorting.
            const stamped = await Promise.all(matches.map(async (relPath) => {
                try {
                    const st = await fs.stat(path.join(searchRoot, relPath));
                    return { relPath, mtime: st.mtimeMs };
                }
                catch {
                    return { relPath, mtime: 0 };
                }
            }));
            stamped.sort((left, right) => right.mtime - left.mtime);
            const sortedPaths = stamped.map((entry) => entry.relPath);
            const truncated = sortedPaths.length > GLOB_LIMIT;
            let text = sortedPaths.slice(0, GLOB_LIMIT).join('\n');
            if (truncated) {
                text += '\n[Results truncated at 100 files]';
            }
            return {
                content: [{ type: 'text', text: text || '(no matches)' }],
                details: { pattern, searchRoot, matchCount: sortedPaths.length, truncated },
            };
        },
    };
}
|
|
1117
|
-
/**
 * Factory for the Grep tool: shells out to ripgrep (rg) with the requested
 * output mode and filters, returning at most head_limit result lines.
 */
function makeGrepTool(workspacePath, schemas, sessionId, emitter, workrailSessionId) {
    return {
        name: 'Grep',
        description: 'Search file contents using ripgrep (rg). Fast regex search with optional context lines, file-type filtering, and case-insensitive mode. ' +
            'output_mode: "files_with_matches" (default) returns only file paths; "content" returns matching lines; "count" returns match counts per file. ' +
            'node_modules and .git are always excluded.',
        inputSchema: schemas['GrepParams'],
        label: 'Grep',
        execute: async (_toolCallId, params) => {
            if (typeof params.pattern !== 'string' || !params.pattern)
                throw new Error('Grep: pattern must be a non-empty string');
            const pattern = params.pattern;
            const searchPath = params.path ?? workspacePath;
            const outputMode = params.output_mode ?? 'files_with_matches';
            const headLimit = params.head_limit ?? 250;
            if (sessionId)
                emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'Grep', summary: pattern.slice(0, 80), ...withWorkrailSession(workrailSessionId) });
            // Base invocation: include hidden files, always exclude node_modules/.git,
            // and cap line width so minified files cannot blow up the output.
            const rgArgs = [
                '--hidden',
                '--glob', '!node_modules',
                '--glob', '!.git',
                '--max-columns', '500',
            ];
            if (params['-i'])
                rgArgs.push('-i');
            if (params.glob) {
                rgArgs.push('--glob', params.glob);
            }
            if (params.type) {
                rgArgs.push('--type', params.type);
            }
            if (outputMode === 'files_with_matches') {
                rgArgs.push('--files-with-matches');
            }
            else if (outputMode === 'count') {
                rgArgs.push('--count');
            }
            else if (outputMode === 'content') {
                rgArgs.push('--vimgrep');
                // NOTE(review): rg's --vimgrep prints one line per match; whether -C
                // context is honored in this mode should be confirmed against rg docs.
                if (params.context != null) {
                    rgArgs.push('-C', String(params.context));
                }
            }
            rgArgs.push('--', pattern, searchPath);
            let stdout;
            try {
                const result = await execFileAsync('rg', rgArgs, { cwd: workspacePath, maxBuffer: 10 * 1024 * 1024 });
                stdout = result.stdout;
            }
            catch (err) {
                const nodeErr = err;
                if (nodeErr.code === 'ENOENT') {
                    throw new Error('ripgrep (rg) is not installed. Install it with: brew install ripgrep (macOS) or apt install ripgrep (Ubuntu/Debian).');
                }
                // rg exits with code 1 when nothing matched -- not an error.
                if (typeof nodeErr.code === 'number' && nodeErr.code === 1) {
                    return {
                        content: [{ type: 'text', text: '(no matches)' }],
                        details: { pattern, searchPath, outputMode },
                    };
                }
                throw new Error(`rg failed: ${nodeErr.message ?? String(err)}`);
            }
            const matchLines = stdout.split('\n').filter((line) => line.length > 0);
            const truncated = matchLines.length > headLimit;
            let output = matchLines.slice(0, headLimit).join('\n');
            if (truncated) {
                output += `\n[Results truncated at ${headLimit} lines. Use a more specific pattern or increase head_limit.]`;
            }
            return {
                content: [{ type: 'text', text: output || '(no matches)' }],
                details: { pattern, searchPath, outputMode, lineCount: matchLines.length, truncated },
            };
        },
    };
}
|
|
1194
|
-
/**
 * Factory for the Edit tool: exact-string replacement in a file that was
 * previously read in this session (read-before-edit + mtime staleness guard).
 *
 * Fix: workspace containment previously used a raw string prefix test
 * (`absoluteFilePath.startsWith(workspacePath)`), which wrongly accepted
 * sibling directories such as `${workspacePath}-evil/...` and, because the
 * path was joined without normalization, also accepted `..` traversal like
 * `${workspacePath}/../escape`. Containment is now checked with
 * path.resolve + path.relative, which handles both cases.
 */
function makeEditTool(workspacePath, readFileState, schemas, sessionId, emitter, workrailSessionId) {
    return {
        name: 'Edit',
        description: 'Perform an exact string replacement in a file. ' +
            'The file must have been read in this session via the Read tool. ' +
            'By default, old_string must appear exactly once; use replace_all=true to replace all occurrences. ' +
            'Do NOT include line-number prefixes (e.g. "1\\t") from Read output in old_string or new_string.',
        inputSchema: schemas['EditParams'],
        label: 'Edit',
        execute: async (_toolCallId, params) => {
            if (typeof params.file_path !== 'string' || !params.file_path)
                throw new Error('Edit: file_path must be a non-empty string');
            if (typeof params.old_string !== 'string')
                throw new Error('Edit: old_string must be a string');
            if (typeof params.new_string !== 'string')
                throw new Error('Edit: new_string must be a string');
            const rawFilePath = params.file_path;
            // resolve() both absolutizes relative paths and normalizes '..' segments
            // before the containment check below.
            const absoluteFilePath = path.resolve(workspacePath, rawFilePath);
            // Boundary-safe containment: relative() yields a '..'-prefixed (or
            // absolute, cross-drive on Windows) path for anything outside the root.
            const relativeToWorkspace = path.relative(workspacePath, absoluteFilePath);
            if (relativeToWorkspace.startsWith('..') || path.isAbsolute(relativeToWorkspace)) {
                throw new Error(`Edit target is outside the workspace: ${rawFilePath}`);
            }
            const filePath = absoluteFilePath;
            const oldString = params.old_string;
            const newString = params.new_string;
            const replaceAll = params.replace_all ?? false;
            if (sessionId)
                emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'Edit', summary: filePath.slice(0, 80), ...withWorkrailSession(workrailSessionId) });
            if (oldString === newString) {
                throw new Error('old_string and new_string are identical. No edit needed.');
            }
            // Read-before-edit guard: the session must hold read state for this path.
            const state = readFileState.get(filePath);
            if (!state) {
                throw new Error(`File has not been read in this session. Call Read first before editing: ${filePath}`);
            }
            let stat;
            try {
                stat = await fs.stat(filePath);
            }
            catch {
                throw new Error(`File not found: ${filePath}. It may have been deleted after it was read.`);
            }
            // Staleness guard: refuse to edit if the file changed since it was read.
            if (stat.mtimeMs !== state.timestamp) {
                throw new Error(`File has been modified since it was read. Re-read before editing: ${filePath}`);
            }
            const currentContent = await fs.readFile(filePath, 'utf8');
            // findActualString tolerates minor mismatches (e.g. stripped Read prefixes).
            const actualString = findActualString(currentContent, oldString);
            if (actualString === null) {
                throw new Error(`String to replace not found in file. Make sure old_string exactly matches the file content ` +
                    `(do not include line-number prefixes from Read output): ${filePath}`);
            }
            const occurrences = currentContent.split(actualString).length - 1;
            if (!replaceAll && occurrences > 1) {
                throw new Error(`old_string appears ${occurrences} times in the file. ` +
                    `Provide a more specific string that matches exactly once, or set replace_all=true to replace all occurrences.`);
            }
            const updatedContent = replaceAll
                ? currentContent.split(actualString).join(newString)
                : currentContent.replace(actualString, newString);
            await fs.writeFile(filePath, updatedContent, 'utf8');
            // Refresh read state so chained edits pass the staleness guard.
            const newStat = await fs.stat(filePath);
            readFileState.set(filePath, { content: updatedContent, timestamp: newStat.mtimeMs, isPartialView: false });
            return {
                content: [{ type: 'text', text: `The file ${filePath} has been updated successfully.` }],
                details: { filePath, occurrencesReplaced: occurrences },
            };
        },
    };
}
|
|
1264
|
-
/**
 * Factory for the spawn_agent tool: starts a child WorkRail session for a
 * delegated sub-task, blocks until it finishes, and reports its outcome.
 * Depth is capped at maxDepth to prevent runaway recursive delegation.
 */
function makeSpawnAgentTool(sessionId, ctx, apiKey, thisWorkrailSessionId, currentDepth, maxDepth, runWorkflowFn, schemas, emitter, abortRegistry) {
    // Wrap a result object in the standard tool-result envelope.
    const asToolResult = (obj) => ({
        content: [{ type: 'text', text: JSON.stringify(obj) }],
        details: obj,
    });
    return {
        name: 'spawn_agent',
        description: 'Spawn a child WorkRail session to handle a delegated sub-task. ' +
            'Blocks until the child session completes, then returns the child\'s outcome and notes. ' +
            'Use this when a step requires delegating a well-defined sub-task to a separate workflow. ' +
            'IMPORTANT: The parent session\'s time limit (maxSessionMinutes) keeps ticking while the child runs. ' +
            'Configure the parent with enough time to cover both its own work and the child\'s work. ' +
            'Per-trigger limits (maxOutputTokens, maxTurns, maxSessionMinutes) are NOT inherited by child sessions spawned via spawn_agent -- each child uses its own trigger\'s agentConfig. ' +
            'Returns: { childSessionId, outcome: "success"|"error"|"timeout", notes: string, artifacts?: readonly unknown[] }. ' +
            'On success, artifacts contains the child session\'s final step artifacts if any were produced. ' +
            'Check outcome before using notes -- on error/timeout, notes contains the error message.',
        inputSchema: schemas['SpawnAgentParams'],
        label: 'Spawn Agent',
        execute: async (_toolCallId, params) => {
            if (typeof params.workflowId !== 'string' || !params.workflowId)
                throw new Error('spawn_agent: workflowId must be a non-empty string');
            if (typeof params.goal !== 'string' || !params.goal)
                throw new Error('spawn_agent: goal must be a non-empty string');
            if (typeof params.workspacePath !== 'string' || !params.workspacePath)
                throw new Error('spawn_agent: workspacePath must be a non-empty string');
            console.log(`[WorkflowRunner] Tool: spawn_agent sessionId=${sessionId} workflowId=${String(params.workflowId)} depth=${currentDepth}/${maxDepth}`);
            emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'spawn_agent', summary: `${String(params.workflowId)} depth=${currentDepth}`, ...withWorkrailSession(thisWorkrailSessionId) });
            // Depth cap: refuse to spawn (as an error result, not a throw).
            if (currentDepth >= maxDepth) {
                return asToolResult({
                    childSessionId: null,
                    outcome: 'error',
                    notes: `Max spawn depth exceeded (currentDepth=${currentDepth}, maxDepth=${maxDepth}). ` +
                        `Cannot spawn a child session from this depth. ` +
                        `Increase agentConfig.maxSubagentDepth if deeper delegation is intentional.`,
                });
            }
            const startResult = await (0, start_js_1.executeStartWorkflow)({
                workflowId: String(params.workflowId),
                workspacePath: String(params.workspacePath),
                goal: String(params.goal),
            }, ctx, { is_autonomous: 'true', workspacePath: String(params.workspacePath), parentSessionId: thisWorkrailSessionId });
            if (startResult.isErr()) {
                return asToolResult({
                    childSessionId: null,
                    outcome: 'error',
                    notes: `Failed to start child workflow: ${startResult.error.kind} -- ${JSON.stringify(startResult.error)}`,
                });
            }
            // Best-effort: decode the child's session id from its continue token so
            // the caller can correlate results; null if decoding fails.
            let childSessionId = null;
            const childContinueToken = startResult.value.response.continueToken ?? '';
            if (childContinueToken) {
                const decoded = await (0, v2_token_ops_js_1.parseContinueTokenOrFail)(childContinueToken, ctx.v2.tokenCodecPorts, ctx.v2.tokenAliasStore);
                if (decoded.isOk()) {
                    childSessionId = decoded.value.sessionId;
                }
                else {
                    console.warn(`[WorkflowRunner] spawn_agent: could not decode childSessionId from continueToken -- ` +
                        `childSessionId will be null in result. Reason: ${decoded.error.message}`);
                }
            }
            // Run the child to completion; this call blocks the parent turn.
            const childResult = await runWorkflowFn({
                workflowId: String(params.workflowId),
                goal: String(params.goal),
                workspacePath: String(params.workspacePath),
                context: params.context,
                spawnDepth: currentDepth + 1,
                parentSessionId: thisWorkrailSessionId,
                _preAllocatedStartResponse: startResult.value.response,
            }, ctx, apiKey, undefined, emitter, undefined, abortRegistry);
            let resultObj;
            switch (childResult._tag) {
                case 'success':
                    resultObj = {
                        childSessionId,
                        outcome: 'success',
                        notes: childResult.lastStepNotes ?? '(no notes from child session)',
                        ...(childResult.lastStepArtifacts !== undefined ? { artifacts: childResult.lastStepArtifacts } : {}),
                    };
                    break;
                case 'error':
                    resultObj = { childSessionId, outcome: 'error', notes: childResult.message };
                    break;
                case 'timeout':
                    resultObj = { childSessionId, outcome: 'timeout', notes: childResult.message };
                    break;
                case 'stuck':
                    resultObj = {
                        childSessionId,
                        outcome: 'stuck',
                        notes: childResult.message,
                        ...(childResult.issueSummaries !== undefined
                            ? { issueSummaries: childResult.issueSummaries }
                            : {}),
                    };
                    break;
                default:
                    (0, assert_never_js_1.assertNever)(childResult);
            }
            console.log(`[WorkflowRunner] spawn_agent completed: sessionId=${sessionId} childSessionId=${childSessionId ?? 'null'} outcome=${resultObj.outcome}`);
            emitter?.emit({ kind: 'tool_called', sessionId, toolName: 'spawn_agent_complete', summary: `outcome=${resultObj.outcome} child=${childSessionId ?? 'null'}`, ...withWorkrailSession(thisWorkrailSessionId) });
            return asToolResult(resultObj);
        },
    };
}
|
|
1383
699
|
async function writeStuckOutboxEntry(opts) {
|
|
1384
700
|
try {
|
|
1385
701
|
const outboxPath = path.join(os.homedir(), '.workrail', 'outbox.jsonl');
|
|
@@ -1405,172 +721,6 @@ async function writeStuckOutboxEntry(opts) {
|
|
|
1405
721
|
`${err instanceof Error ? err.message : String(err)}`);
|
|
1406
722
|
}
|
|
1407
723
|
}
|
|
1408
|
-
// Append one timestamped JSONL record to the session's issue log,
// creating the issues directory on first use.
async function appendIssueAsync(issuesDir, sessionId, record) {
    await fs.mkdir(issuesDir, { recursive: true });
    const entry = `${JSON.stringify({ ...record, ts: Date.now() })}\n`;
    await fs.appendFile(path.join(issuesDir, `${sessionId}.jsonl`), entry, 'utf8');
}
|
|
1414
|
-
/**
 * Factory for the report_issue tool: records a structured issue for the
 * auto-fix coordinator without stopping the session. Persistence is
 * fire-and-forget; the tool result only tells the agent how to proceed.
 */
function makeReportIssueTool(sessionId, emitter, workrailSessionId, issuesDirOverride, onIssueSummary) {
    const issuesDir = issuesDirOverride ?? path.join(os.homedir(), '.workrail', 'issues');
    return {
        name: 'report_issue',
        description: "Record a structured issue, error, or unexpected behavior. Call this AND continue_workflow (unless fatal). " +
            "Does not stop the session -- it creates a record for the auto-fix coordinator.",
        inputSchema: {
            type: 'object',
            properties: {
                kind: {
                    type: 'string',
                    enum: ['tool_failure', 'blocked', 'unexpected_behavior', 'needs_human', 'self_correction'],
                    description: 'Category of issue being reported.',
                },
                severity: {
                    type: 'string',
                    enum: ['info', 'warn', 'error', 'fatal'],
                    description: 'Severity level. Fatal means the session cannot continue productively.',
                },
                summary: {
                    type: 'string',
                    description: 'One-line summary of the issue. Max 200 chars.',
                    maxLength: 200,
                },
                context: {
                    type: 'string',
                    description: 'What you were trying to do when this issue occurred.',
                },
                toolName: {
                    type: 'string',
                    description: 'Name of the tool that failed or behaved unexpectedly, if applicable.',
                },
                command: {
                    type: 'string',
                    description: 'The shell command or expression that caused the issue, if applicable.',
                },
                suggestedFix: {
                    type: 'string',
                    description: 'A suggested fix or recovery action for the auto-fix coordinator.',
                },
                continueToken: {
                    type: 'string',
                    description: 'The current continueToken, so the coordinator can resume this session.',
                },
            },
            required: ['kind', 'severity', 'summary'],
            additionalProperties: false,
        },
        label: 'report_issue',
        execute: async (_toolCallId, params) => {
            if (typeof params.kind !== 'string' || !params.kind)
                throw new Error('report_issue: kind must be a non-empty string');
            if (typeof params.severity !== 'string' || !params.severity)
                throw new Error('report_issue: severity must be a non-empty string');
            if (typeof params.summary !== 'string' || !params.summary)
                throw new Error('report_issue: summary must be a non-empty string');
            // Only defined optional fields are copied onto the record.
            const issueRecord = {
                sessionId,
                kind: params.kind,
                severity: params.severity,
                summary: String(params.summary ?? '').slice(0, 200),
                ...(params.context !== undefined && { context: String(params.context) }),
                ...(params.toolName !== undefined && { toolName: String(params.toolName) }),
                ...(params.command !== undefined && { command: String(params.command) }),
                ...(params.suggestedFix !== undefined && { suggestedFix: String(params.suggestedFix) }),
                ...(params.continueToken !== undefined && { continueToken: String(params.continueToken) }),
            };
            // Fire-and-forget persistence: a disk failure must never break the session.
            void appendIssueAsync(issuesDir, sessionId, issueRecord).catch(() => {
            });
            emitter?.emit({
                kind: 'issue_reported',
                sessionId,
                issueKind: issueRecord.kind,
                severity: issueRecord.severity,
                summary: issueRecord.summary,
                ...(issueRecord.continueToken !== undefined && { continueToken: issueRecord.continueToken }),
                ...(workrailSessionId != null ? { workrailSessionId } : {}),
            });
            onIssueSummary?.(issueRecord.summary);
            let message;
            if (issueRecord.severity === 'fatal') {
                message = `FATAL issue recorded. Call continue_workflow with notes explaining the blocker, then the session will end.`;
            }
            else {
                message = `Issue recorded (severity=${issueRecord.severity}). Continue with your work unless this is fatal.`;
            }
            return {
                content: [{ type: 'text', text: message }],
                details: { sessionId, kind: issueRecord.kind, severity: issueRecord.severity },
            };
        },
    };
}
|
|
1504
|
-
// Default directory where mid-session coordinator signals are appended as JSONL files.
exports.DAEMON_SIGNALS_DIR = path.join(os.homedir(), '.workrail', 'signals');
|
|
1505
|
-
// Append one timestamped JSONL signal record to the session's signal log,
// creating the signals directory on first use.
async function appendSignalAsync(signalsDir, sessionId, record) {
    await fs.mkdir(signalsDir, { recursive: true });
    const entry = `${JSON.stringify({ ...record, ts: Date.now() })}\n`;
    await fs.appendFile(path.join(signalsDir, `${sessionId}.jsonl`), entry, 'utf8');
}
|
|
1511
|
-
/**
 * Factory for the signal_coordinator tool: emits a structured mid-session
 * signal (persisted as JSONL and broadcast via the emitter) without advancing
 * the workflow step. Always returns immediately.
 */
function makeSignalCoordinatorTool(sessionId, emitter, workrailSessionId, signalsDirOverride) {
    const signalsDir = signalsDirOverride ?? exports.DAEMON_SIGNALS_DIR;
    return {
        name: 'signal_coordinator',
        description: 'Emit a structured mid-session signal to the coordinator WITHOUT advancing the workflow step. ' +
            'Use this to surface progress updates, intermediate findings, data requests, ' +
            'approval requests, or blocking conditions while the session continues. ' +
            'Always returns immediately -- fire-and-observe, never blocks. ' +
            'Signal kinds: "progress" (heartbeat, no data needed), "finding" (intermediate result), ' +
            '"data_needed" (request external data), "approval_needed" (request coordinator approval), ' +
            '"blocked" (cannot continue without coordinator intervention).',
        inputSchema: {
            type: 'object',
            properties: {
                signalKind: {
                    type: 'string',
                    enum: ['progress', 'finding', 'data_needed', 'approval_needed', 'blocked'],
                    description: 'The kind of signal to emit.',
                },
                payload: {
                    type: 'object',
                    additionalProperties: true,
                    description: 'Structured data accompanying the signal. Pass {} for progress signals.',
                },
            },
            required: ['signalKind', 'payload'],
            additionalProperties: false,
        },
        label: 'signal_coordinator',
        execute: async (_toolCallId, params) => {
            if (typeof params.signalKind !== 'string' || !params.signalKind)
                throw new Error('signal_coordinator: signalKind must be a non-empty string');
            const signalId = 'sig_' + (0, node_crypto_1.randomUUID)().replace(/-/g, '').slice(0, 8);
            const signalKind = String(params.signalKind ?? 'progress');
            // Only accept a plain-object payload; anything else collapses to {}.
            let payload = {};
            if (typeof params.payload === 'object' && params.payload !== null && !Array.isArray(params.payload)) {
                payload = params.payload;
            }
            console.log(`[WorkflowRunner] Tool: signal_coordinator sessionId=${sessionId} signalKind=${signalKind} signalId=${signalId}`);
            const signalRecord = {
                signalId,
                sessionId,
                ...(workrailSessionId != null ? { workrailSessionId } : {}),
                signalKind,
                payload,
            };
            // Fire-and-forget persistence: disk failures never block the session.
            void appendSignalAsync(signalsDir, sessionId, signalRecord).catch(() => {
            });
            emitter?.emit({
                kind: 'signal_emitted',
                sessionId,
                signalKind,
                signalId,
                payload,
                ...(workrailSessionId != null ? { workrailSessionId } : {}),
            });
            const result = { status: 'recorded', signalId };
            return {
                content: [{ type: 'text', text: JSON.stringify(result) }],
                details: result,
            };
        },
    };
}
|
|
1574
724
|
const BASE_SYSTEM_PROMPT = `\
|
|
1575
725
|
You are WorkRail Auto, an autonomous agent that executes workflows step by step. You are running unattended -- there is no user watching. Your entire job is to faithfully complete the current workflow.
|
|
1576
726
|
|
|
@@ -1790,7 +940,7 @@ async function finalizeSession(result, ctx) {
|
|
|
1790
940
|
workflowId: ctx.workflowId,
|
|
1791
941
|
outcome,
|
|
1792
942
|
detail,
|
|
1793
|
-
...withWorkrailSession(ctx.workrailSessionId),
|
|
943
|
+
...(0, _shared_js_1.withWorkrailSession)(ctx.workrailSessionId),
|
|
1794
944
|
});
|
|
1795
945
|
if (ctx.workrailSessionId !== null) {
|
|
1796
946
|
ctx.daemonRegistry?.unregister(ctx.workrailSessionId, result._tag === 'success' || result._tag === 'delivery_failed' ? 'completed' : 'failed');
|
|
@@ -1810,20 +960,22 @@ async function finalizeSession(result, ctx) {
|
|
|
1810
960
|
await fs.unlink(ctx.conversationPath).catch(() => { });
|
|
1811
961
|
}
|
|
1812
962
|
}
|
|
1813
|
-
function buildSessionContext(trigger,
|
|
1814
|
-
const
|
|
1815
|
-
const
|
|
963
|
+
// Assemble the system prompt, initial user prompt, and per-session execution
// limits (timeout, max turns) for a new agent session from the trigger and
// its loaded context.
function buildSessionContext(trigger, context, firstStepPrompt) {
    const workspaceContext = context.workspaceRules[0]?.content ?? null;
    const recap = buildSessionRecap(context.sessionHistory.map((note) => note.content));
    const systemPrompt = buildSystemPrompt(trigger, recap, context.soulContent, workspaceContext);
    // Embed the trigger's structured context as a fenced JSON block, if present.
    let contextJson = '';
    if (trigger.context) {
        contextJson = `\n\nTrigger context:\n\`\`\`json\n${JSON.stringify(trigger.context, null, 2)}\n\`\`\``;
    }
    const initialPrompt = firstStepPrompt +
        contextJson +
        '\n\nComplete all step work, then call complete_step with your notes to advance.';
    const sessionTimeoutMs = (trigger.agentConfig?.maxSessionMinutes ?? exports.DEFAULT_SESSION_TIMEOUT_MINUTES) * 60 * 1000;
    const maxTurns = trigger.agentConfig?.maxTurns ?? exports.DEFAULT_MAX_TURNS;
    return { systemPrompt, initialPrompt, sessionTimeoutMs, maxTurns };
}
|
|
1826
|
-
async function buildPreAgentSession(trigger, ctx, apiKey, sessionId, startMs, statsDir, sessionsDir, emitter, daemonRegistry,
|
|
978
|
+
async function buildPreAgentSession(trigger, ctx, apiKey, sessionId, startMs, statsDir, sessionsDir, emitter, daemonRegistry, activeSessionSet, source) {
|
|
1827
979
|
let agentClient;
|
|
1828
980
|
let modelId;
|
|
1829
981
|
try {
|
|
@@ -1847,9 +999,17 @@ async function buildPreAgentSession(trigger, ctx, apiKey, sessionId, startMs, st
|
|
|
1847
999
|
return { kind: 'complete', result: { _tag: 'error', workflowId: trigger.workflowId, message, stopReason: 'error' } };
|
|
1848
1000
|
}
|
|
1849
1001
|
const state = createSessionState('');
|
|
1850
|
-
let
|
|
1851
|
-
|
|
1852
|
-
|
|
1002
|
+
let continueToken;
|
|
1003
|
+
let checkpointToken;
|
|
1004
|
+
let firstStepPrompt;
|
|
1005
|
+
let isComplete;
|
|
1006
|
+
const effectiveSource = source ?? { kind: 'allocate', trigger };
|
|
1007
|
+
if (effectiveSource.kind === 'pre_allocated') {
|
|
1008
|
+
const s = effectiveSource.session;
|
|
1009
|
+
continueToken = s.continueToken;
|
|
1010
|
+
checkpointToken = s.checkpointToken ?? null;
|
|
1011
|
+
firstStepPrompt = s.firstStepPrompt;
|
|
1012
|
+
isComplete = s.isComplete;
|
|
1853
1013
|
}
|
|
1854
1014
|
else {
|
|
1855
1015
|
const startResult = await (0, start_js_1.executeStartWorkflow)({ workflowId: trigger.workflowId, workspacePath: trigger.workspacePath, goal: trigger.goal }, ctx, { is_autonomous: 'true', workspacePath: trigger.workspacePath });
|
|
@@ -1865,10 +1025,12 @@ async function buildPreAgentSession(trigger, ctx, apiKey, sessionId, startMs, st
|
|
|
1865
1025
|
},
|
|
1866
1026
|
};
|
|
1867
1027
|
}
|
|
1868
|
-
|
|
1028
|
+
const r = startResult.value.response;
|
|
1029
|
+
continueToken = r.continueToken ?? '';
|
|
1030
|
+
checkpointToken = r.checkpointToken ?? null;
|
|
1031
|
+
firstStepPrompt = r.pending?.prompt ?? '';
|
|
1032
|
+
isComplete = r.isComplete;
|
|
1869
1033
|
}
|
|
1870
|
-
const continueToken = firstStep.continueToken ?? '';
|
|
1871
|
-
const checkpointToken = firstStep.checkpointToken ?? null;
|
|
1872
1034
|
state.currentContinueToken = continueToken;
|
|
1873
1035
|
if (continueToken) {
|
|
1874
1036
|
const decoded = await (0, v2_token_ops_js_1.parseContinueTokenOrFail)(continueToken, ctx.v2.tokenCodecPorts, ctx.v2.tokenAliasStore);
|
|
@@ -1880,7 +1042,7 @@ async function buildPreAgentSession(trigger, ctx, apiKey, sessionId, startMs, st
|
|
|
1880
1042
|
}
|
|
1881
1043
|
}
|
|
1882
1044
|
if (continueToken) {
|
|
1883
|
-
const persistResult = await persistTokens(sessionId, continueToken, checkpointToken, undefined, {
|
|
1045
|
+
const persistResult = await (0, _shared_js_1.persistTokens)(sessionId, continueToken, checkpointToken, undefined, {
|
|
1884
1046
|
workflowId: trigger.workflowId,
|
|
1885
1047
|
goal: trigger.goal,
|
|
1886
1048
|
workspacePath: trigger.workspacePath,
|
|
@@ -1915,7 +1077,7 @@ async function buildPreAgentSession(trigger, ctx, apiKey, sessionId, startMs, st
|
|
|
1915
1077
|
'-b', `${branchPrefix}${sessionId}`,
|
|
1916
1078
|
`origin/${baseBranch}`,
|
|
1917
1079
|
]);
|
|
1918
|
-
const worktreePersistResult = await persistTokens(sessionId, continueToken ?? state.currentContinueToken, checkpointToken, sessionWorktreePath, { workflowId: trigger.workflowId, goal: trigger.goal, workspacePath: trigger.workspacePath });
|
|
1080
|
+
const worktreePersistResult = await (0, _shared_js_1.persistTokens)(sessionId, continueToken ?? state.currentContinueToken, checkpointToken, sessionWorktreePath, { workflowId: trigger.workflowId, goal: trigger.goal, workspacePath: trigger.workspacePath });
|
|
1919
1081
|
if (worktreePersistResult.kind === 'err') {
|
|
1920
1082
|
console.error(`[WorkflowRunner] Worktree sidecar persist failed: ${worktreePersistResult.error.code} -- ${worktreePersistResult.error.message}`);
|
|
1921
1083
|
try {
|
|
@@ -1945,19 +1107,20 @@ async function buildPreAgentSession(trigger, ctx, apiKey, sessionId, startMs, st
|
|
|
1945
1107
|
};
|
|
1946
1108
|
}
|
|
1947
1109
|
}
|
|
1110
|
+
let handle;
|
|
1948
1111
|
if (state.workrailSessionId !== null) {
|
|
1949
1112
|
daemonRegistry?.register(state.workrailSessionId, trigger.workflowId);
|
|
1950
|
-
|
|
1113
|
+
handle = activeSessionSet?.register(state.workrailSessionId, (text) => { state.pendingSteerParts.push(text); });
|
|
1951
1114
|
}
|
|
1952
|
-
if (
|
|
1115
|
+
if (isComplete) {
|
|
1953
1116
|
const lifecycle = sidecardLifecycleFor('success', trigger.branchStrategy);
|
|
1954
1117
|
if (lifecycle.kind === 'delete_now') {
|
|
1955
1118
|
await fs.unlink(path.join(sessionsDir, `${sessionId}.json`)).catch(() => { });
|
|
1956
1119
|
}
|
|
1957
|
-
emitter?.emit({ kind: 'session_completed', sessionId, workflowId: trigger.workflowId, outcome: 'success', detail: 'stop', ...withWorkrailSession(state.workrailSessionId) });
|
|
1120
|
+
emitter?.emit({ kind: 'session_completed', sessionId, workflowId: trigger.workflowId, outcome: 'success', detail: 'stop', ...(0, _shared_js_1.withWorkrailSession)(state.workrailSessionId) });
|
|
1958
1121
|
if (state.workrailSessionId !== null) {
|
|
1959
1122
|
daemonRegistry?.unregister(state.workrailSessionId, 'completed');
|
|
1960
|
-
|
|
1123
|
+
handle?.dispose();
|
|
1961
1124
|
}
|
|
1962
1125
|
writeExecutionStats(statsDir, sessionId, trigger.workflowId, startMs, 'success', 0);
|
|
1963
1126
|
return {
|
|
@@ -1981,7 +1144,7 @@ async function buildPreAgentSession(trigger, ctx, apiKey, sessionId, startMs, st
|
|
|
1981
1144
|
checkpointToken,
|
|
1982
1145
|
sessionWorkspacePath,
|
|
1983
1146
|
sessionWorktreePath,
|
|
1984
|
-
|
|
1147
|
+
firstStepPrompt,
|
|
1985
1148
|
state,
|
|
1986
1149
|
spawnCurrentDepth: trigger.spawnDepth ?? 0,
|
|
1987
1150
|
spawnMaxDepth: trigger.agentConfig?.maxSubagentDepth ?? 3,
|
|
@@ -1989,31 +1152,32 @@ async function buildPreAgentSession(trigger, ctx, apiKey, sessionId, startMs, st
|
|
|
1989
1152
|
agentClient,
|
|
1990
1153
|
modelId,
|
|
1991
1154
|
startMs,
|
|
1155
|
+
...(handle !== undefined ? { handle } : {}),
|
|
1992
1156
|
},
|
|
1993
1157
|
};
|
|
1994
1158
|
}
|
|
1995
1159
|
function constructTools(session, ctx, apiKey, schemas, scope) {
|
|
1996
1160
|
const { state, sessionWorkspacePath, spawnCurrentDepth, spawnMaxDepth } = session;
|
|
1997
|
-
const { fileTracker, onAdvance, onComplete, emitter,
|
|
1161
|
+
const { fileTracker, onAdvance, onComplete, emitter, activeSessionSet, maxIssueSummaries } = scope;
|
|
1998
1162
|
const sid = scope.sessionId;
|
|
1999
1163
|
const workrailSid = scope.workrailSessionId;
|
|
2000
1164
|
const readFileStateMap = fileTracker.toMap();
|
|
2001
1165
|
return [
|
|
2002
|
-
makeCompleteStepTool(sid, ctx, () => state.currentContinueToken, onAdvance, onComplete, (t) => { state.currentContinueToken = t; }, schemas, index_js_1.executeContinueWorkflow, emitter, workrailSid),
|
|
2003
|
-
makeContinueWorkflowTool(sid, ctx, onAdvance, onComplete, schemas, index_js_1.executeContinueWorkflow, emitter, workrailSid),
|
|
2004
|
-
makeBashTool(sessionWorkspacePath, schemas, sid, emitter, workrailSid),
|
|
2005
|
-
makeReadTool(readFileStateMap, schemas, sid, emitter, workrailSid),
|
|
2006
|
-
makeWriteTool(readFileStateMap, schemas, sid, emitter, workrailSid),
|
|
2007
|
-
makeGlobTool(sessionWorkspacePath, schemas, sid, emitter, workrailSid),
|
|
2008
|
-
makeGrepTool(sessionWorkspacePath, schemas, sid, emitter, workrailSid),
|
|
2009
|
-
makeEditTool(sessionWorkspacePath, readFileStateMap, schemas, sid, emitter, workrailSid),
|
|
2010
|
-
makeReportIssueTool(sid, emitter, workrailSid, undefined, (summary) => {
|
|
1166
|
+
(0, continue_workflow_js_1.makeCompleteStepTool)(sid, ctx, () => state.currentContinueToken, onAdvance, onComplete, (t) => { state.currentContinueToken = t; }, schemas, index_js_1.executeContinueWorkflow, emitter, workrailSid),
|
|
1167
|
+
(0, continue_workflow_js_1.makeContinueWorkflowTool)(sid, ctx, onAdvance, onComplete, schemas, index_js_1.executeContinueWorkflow, emitter, workrailSid),
|
|
1168
|
+
(0, bash_js_1.makeBashTool)(sessionWorkspacePath, schemas, sid, emitter, workrailSid),
|
|
1169
|
+
(0, file_tools_js_1.makeReadTool)(readFileStateMap, schemas, sid, emitter, workrailSid),
|
|
1170
|
+
(0, file_tools_js_1.makeWriteTool)(readFileStateMap, schemas, sid, emitter, workrailSid),
|
|
1171
|
+
(0, glob_grep_js_1.makeGlobTool)(sessionWorkspacePath, schemas, sid, emitter, workrailSid),
|
|
1172
|
+
(0, glob_grep_js_1.makeGrepTool)(sessionWorkspacePath, schemas, sid, emitter, workrailSid),
|
|
1173
|
+
(0, file_tools_js_1.makeEditTool)(sessionWorkspacePath, readFileStateMap, schemas, sid, emitter, workrailSid),
|
|
1174
|
+
(0, report_issue_js_1.makeReportIssueTool)(sid, emitter, workrailSid, undefined, (summary) => {
|
|
2011
1175
|
if (state.issueSummaries.length < maxIssueSummaries) {
|
|
2012
1176
|
state.issueSummaries.push(summary);
|
|
2013
1177
|
}
|
|
2014
1178
|
}),
|
|
2015
|
-
makeSpawnAgentTool(sid, ctx, apiKey, workrailSid ?? '', spawnCurrentDepth, spawnMaxDepth, runWorkflow, schemas, emitter,
|
|
2016
|
-
makeSignalCoordinatorTool(sid, emitter, workrailSid),
|
|
1179
|
+
(0, spawn_agent_js_1.makeSpawnAgentTool)(sid, ctx, apiKey, workrailSid ?? '', spawnCurrentDepth, spawnMaxDepth, runWorkflow, schemas, emitter, activeSessionSet),
|
|
1180
|
+
(0, signal_coordinator_js_1.makeSignalCoordinatorTool)(sid, emitter, workrailSid),
|
|
2017
1181
|
];
|
|
2018
1182
|
}
|
|
2019
1183
|
function buildTurnEndSubscriber(ctx) {
|
|
@@ -2023,7 +1187,7 @@ function buildTurnEndSubscriber(ctx) {
|
|
|
2023
1187
|
for (const toolResult of event.toolResults) {
|
|
2024
1188
|
if (toolResult.isError) {
|
|
2025
1189
|
const errorText = toolResult.result?.content[0]?.text ?? 'tool error';
|
|
2026
|
-
ctx.emitter?.emit({ kind: 'tool_error', sessionId: ctx.sessionId, toolName: toolResult.toolName, error: errorText.slice(0, 200), ...withWorkrailSession(ctx.state.workrailSessionId) });
|
|
1190
|
+
ctx.emitter?.emit({ kind: 'tool_error', sessionId: ctx.sessionId, toolName: toolResult.toolName, error: errorText.slice(0, 200), ...(0, _shared_js_1.withWorkrailSession)(ctx.state.workrailSessionId) });
|
|
2027
1191
|
}
|
|
2028
1192
|
}
|
|
2029
1193
|
ctx.state.turnCount++;
|
|
@@ -2031,12 +1195,12 @@ function buildTurnEndSubscriber(ctx) {
|
|
|
2031
1195
|
if (signal !== null) {
|
|
2032
1196
|
if (signal.kind === 'max_turns_exceeded') {
|
|
2033
1197
|
ctx.state.timeoutReason = 'max_turns';
|
|
2034
|
-
ctx.emitter?.emit({ kind: 'agent_stuck', sessionId: ctx.sessionId, reason: 'timeout_imminent', detail: 'Max-turn limit reached', ...withWorkrailSession(ctx.state.workrailSessionId) });
|
|
1198
|
+
ctx.emitter?.emit({ kind: 'agent_stuck', sessionId: ctx.sessionId, reason: 'timeout_imminent', detail: 'Max-turn limit reached', ...(0, _shared_js_1.withWorkrailSession)(ctx.state.workrailSessionId) });
|
|
2035
1199
|
ctx.agent.abort();
|
|
2036
1200
|
return;
|
|
2037
1201
|
}
|
|
2038
1202
|
else if (signal.kind === 'repeated_tool_call') {
|
|
2039
|
-
ctx.emitter?.emit({ kind: 'agent_stuck', sessionId: ctx.sessionId, reason: 'repeated_tool_call', detail: `Same tool+args called ${ctx.stuckRepeatThreshold} times: ${signal.toolName}`, toolName: signal.toolName, argsSummary: signal.argsSummary, ...withWorkrailSession(ctx.state.workrailSessionId) });
|
|
1203
|
+
ctx.emitter?.emit({ kind: 'agent_stuck', sessionId: ctx.sessionId, reason: 'repeated_tool_call', detail: `Same tool+args called ${ctx.stuckRepeatThreshold} times: ${signal.toolName}`, toolName: signal.toolName, argsSummary: signal.argsSummary, ...(0, _shared_js_1.withWorkrailSession)(ctx.state.workrailSessionId) });
|
|
2040
1204
|
void writeStuckOutboxEntry({ workflowId: ctx.workflowId, reason: 'repeated_tool_call', ...(ctx.state.issueSummaries.length > 0 ? { issueSummaries: [...ctx.state.issueSummaries] } : {}) });
|
|
2041
1205
|
if (ctx.stuckConfig.stuckAbortPolicy !== 'notify_only' && ctx.state.stuckReason === null && ctx.state.timeoutReason === null) {
|
|
2042
1206
|
ctx.state.stuckReason = 'repeated_tool_call';
|
|
@@ -2045,7 +1209,7 @@ function buildTurnEndSubscriber(ctx) {
|
|
|
2045
1209
|
}
|
|
2046
1210
|
}
|
|
2047
1211
|
else if (signal.kind === 'no_progress') {
|
|
2048
|
-
ctx.emitter?.emit({ kind: 'agent_stuck', sessionId: ctx.sessionId, reason: 'no_progress', detail: `${signal.turnCount} turns used, 0 step advances (${signal.maxTurns} turn limit)`, ...withWorkrailSession(ctx.state.workrailSessionId) });
|
|
1212
|
+
ctx.emitter?.emit({ kind: 'agent_stuck', sessionId: ctx.sessionId, reason: 'no_progress', detail: `${signal.turnCount} turns used, 0 step advances (${signal.maxTurns} turn limit)`, ...(0, _shared_js_1.withWorkrailSession)(ctx.state.workrailSessionId) });
|
|
2049
1213
|
if (ctx.stuckConfig.noProgressAbortEnabled) {
|
|
2050
1214
|
void writeStuckOutboxEntry({ workflowId: ctx.workflowId, reason: 'no_progress', ...(ctx.state.issueSummaries.length > 0 ? { issueSummaries: [...ctx.state.issueSummaries] } : {}) });
|
|
2051
1215
|
if (ctx.stuckConfig.stuckAbortPolicy !== 'notify_only' && ctx.state.stuckReason === null && ctx.state.timeoutReason === null) {
|
|
@@ -2056,7 +1220,7 @@ function buildTurnEndSubscriber(ctx) {
|
|
|
2056
1220
|
}
|
|
2057
1221
|
}
|
|
2058
1222
|
else if (signal.kind === 'timeout_imminent') {
|
|
2059
|
-
ctx.emitter?.emit({ kind: 'agent_stuck', sessionId: ctx.sessionId, reason: 'timeout_imminent', detail: `${signal.timeoutReason === 'wall_clock' ? 'Wall-clock timeout' : 'Max-turn limit'} reached`, ...withWorkrailSession(ctx.state.workrailSessionId) });
|
|
1223
|
+
ctx.emitter?.emit({ kind: 'agent_stuck', sessionId: ctx.sessionId, reason: 'timeout_imminent', detail: `${signal.timeoutReason === 'wall_clock' ? 'Wall-clock timeout' : 'Max-turn limit'} reached`, ...(0, _shared_js_1.withWorkrailSession)(ctx.state.workrailSessionId) });
|
|
2060
1224
|
}
|
|
2061
1225
|
else {
|
|
2062
1226
|
(0, assert_never_js_1.assertNever)(signal);
|
|
@@ -2069,22 +1233,22 @@ function buildTurnEndSubscriber(ctx) {
|
|
|
2069
1233
|
function buildAgentCallbacks(sessionId, state, modelId, emitter, stuckRepeatThreshold) {
|
|
2070
1234
|
return {
|
|
2071
1235
|
onLlmTurnStarted: ({ messageCount }) => {
|
|
2072
|
-
emitter?.emit({ kind: 'llm_turn_started', sessionId, messageCount, modelId, ...withWorkrailSession(state.workrailSessionId) });
|
|
1236
|
+
emitter?.emit({ kind: 'llm_turn_started', sessionId, messageCount, modelId, ...(0, _shared_js_1.withWorkrailSession)(state.workrailSessionId) });
|
|
2073
1237
|
},
|
|
2074
1238
|
onLlmTurnCompleted: ({ stopReason, outputTokens, inputTokens, toolNamesRequested }) => {
|
|
2075
|
-
emitter?.emit({ kind: 'llm_turn_completed', sessionId, stopReason, outputTokens, inputTokens, toolNamesRequested, ...withWorkrailSession(state.workrailSessionId) });
|
|
1239
|
+
emitter?.emit({ kind: 'llm_turn_completed', sessionId, stopReason, outputTokens, inputTokens, toolNamesRequested, ...(0, _shared_js_1.withWorkrailSession)(state.workrailSessionId) });
|
|
2076
1240
|
},
|
|
2077
1241
|
onToolCallStarted: ({ toolName, argsSummary }) => {
|
|
2078
|
-
emitter?.emit({ kind: 'tool_call_started', sessionId, toolName, argsSummary, ...withWorkrailSession(state.workrailSessionId) });
|
|
1242
|
+
emitter?.emit({ kind: 'tool_call_started', sessionId, toolName, argsSummary, ...(0, _shared_js_1.withWorkrailSession)(state.workrailSessionId) });
|
|
2079
1243
|
state.lastNToolCalls.push({ toolName, argsSummary });
|
|
2080
1244
|
if (state.lastNToolCalls.length > stuckRepeatThreshold)
|
|
2081
1245
|
state.lastNToolCalls.shift();
|
|
2082
1246
|
},
|
|
2083
1247
|
onToolCallCompleted: ({ toolName, durationMs, resultSummary }) => {
|
|
2084
|
-
emitter?.emit({ kind: 'tool_call_completed', sessionId, toolName, durationMs, resultSummary, ...withWorkrailSession(state.workrailSessionId) });
|
|
1248
|
+
emitter?.emit({ kind: 'tool_call_completed', sessionId, toolName, durationMs, resultSummary, ...(0, _shared_js_1.withWorkrailSession)(state.workrailSessionId) });
|
|
2085
1249
|
},
|
|
2086
1250
|
onToolCallFailed: ({ toolName, durationMs, errorMessage }) => {
|
|
2087
|
-
emitter?.emit({ kind: 'tool_call_failed', sessionId, toolName, durationMs, errorMessage, ...withWorkrailSession(state.workrailSessionId) });
|
|
1251
|
+
emitter?.emit({ kind: 'tool_call_failed', sessionId, toolName, durationMs, errorMessage, ...(0, _shared_js_1.withWorkrailSession)(state.workrailSessionId) });
|
|
2088
1252
|
},
|
|
2089
1253
|
};
|
|
2090
1254
|
}
|
|
@@ -2143,25 +1307,10 @@ function buildSessionResult(state, stopReason, errorMessage, trigger, sessionId,
|
|
|
2143
1307
|
...(trigger.botIdentity !== undefined ? { botIdentity: trigger.botIdentity } : {}),
|
|
2144
1308
|
};
|
|
2145
1309
|
}
|
|
2146
|
-
async function
|
|
2147
|
-
const
|
|
2148
|
-
const
|
|
2149
|
-
const
|
|
2150
|
-
const sessionId = (0, node_crypto_1.randomUUID)();
|
|
2151
|
-
console.log(`[WorkflowRunner] Session started: sessionId=${sessionId} workflowId=${trigger.workflowId}`);
|
|
2152
|
-
emitter?.emit({
|
|
2153
|
-
kind: 'session_started',
|
|
2154
|
-
sessionId,
|
|
2155
|
-
workflowId: trigger.workflowId,
|
|
2156
|
-
workspacePath: trigger.workspacePath,
|
|
2157
|
-
});
|
|
2158
|
-
const preResult = await buildPreAgentSession(trigger, ctx, apiKey, sessionId, startMs, statsDir, sessionsDir, emitter, daemonRegistry, steerRegistry);
|
|
2159
|
-
if (preResult.kind === 'complete') {
|
|
2160
|
-
return preResult.result;
|
|
2161
|
-
}
|
|
2162
|
-
const session = preResult.session;
|
|
2163
|
-
const { state, firstStep, sessionWorkspacePath, sessionWorktreePath, agentClient, modelId } = session;
|
|
2164
|
-
const startContinueToken = session.continueToken;
|
|
1310
|
+
async function buildAgentReadySession(preAgentSession, trigger, ctx, apiKey, sessionId, emitter, daemonRegistry, activeSessionSet) {
|
|
1311
|
+
const { state, firstStepPrompt, sessionWorkspacePath, sessionWorktreePath, agentClient, modelId } = preAgentSession;
|
|
1312
|
+
const startContinueToken = preAgentSession.continueToken;
|
|
1313
|
+
const handle = preAgentSession.handle;
|
|
2165
1314
|
const MAX_ISSUE_SUMMARIES = 10;
|
|
2166
1315
|
const STUCK_REPEAT_THRESHOLD = 3;
|
|
2167
1316
|
const onAdvance = (stepText, continueToken) => {
|
|
@@ -2170,41 +1319,30 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry, emitter, steerR
|
|
|
2170
1319
|
state.currentContinueToken = continueToken;
|
|
2171
1320
|
if (state.workrailSessionId !== null)
|
|
2172
1321
|
daemonRegistry?.heartbeat(state.workrailSessionId);
|
|
2173
|
-
emitter?.emit({ kind: 'step_advanced', sessionId, ...withWorkrailSession(state.workrailSessionId) });
|
|
1322
|
+
emitter?.emit({ kind: 'step_advanced', sessionId, ...(0, _shared_js_1.withWorkrailSession)(state.workrailSessionId) });
|
|
2174
1323
|
};
|
|
2175
1324
|
const onComplete = (notes, artifacts) => {
|
|
2176
1325
|
state.isComplete = true;
|
|
2177
1326
|
state.lastStepNotes = notes;
|
|
2178
1327
|
state.lastStepArtifacts = artifacts;
|
|
2179
|
-
state.stepAdvanceCount++;
|
|
2180
|
-
if (state.workrailSessionId !== null)
|
|
2181
|
-
daemonRegistry?.heartbeat(state.workrailSessionId);
|
|
2182
|
-
emitter?.emit({ kind: 'step_advanced', sessionId, ...withWorkrailSession(state.workrailSessionId) });
|
|
2183
1328
|
};
|
|
2184
1329
|
const schemas = getSchemas();
|
|
2185
1330
|
const scope = {
|
|
2186
|
-
fileTracker: new session_scope_js_1.DefaultFileStateTracker(
|
|
1331
|
+
fileTracker: new session_scope_js_1.DefaultFileStateTracker(preAgentSession.readFileState),
|
|
2187
1332
|
onAdvance,
|
|
2188
1333
|
onComplete,
|
|
2189
1334
|
workrailSessionId: state.workrailSessionId,
|
|
2190
1335
|
emitter,
|
|
2191
1336
|
sessionId,
|
|
2192
1337
|
workflowId: trigger.workflowId,
|
|
2193
|
-
|
|
1338
|
+
activeSessionSet,
|
|
2194
1339
|
maxIssueSummaries: MAX_ISSUE_SUMMARIES,
|
|
2195
1340
|
};
|
|
2196
|
-
const tools = constructTools(
|
|
2197
|
-
const
|
|
2198
|
-
|
|
2199
|
-
|
|
2200
|
-
|
|
2201
|
-
]);
|
|
2202
|
-
const sessionCtx = buildSessionContext(trigger, {
|
|
2203
|
-
soulContent,
|
|
2204
|
-
workspaceContext,
|
|
2205
|
-
sessionNotes,
|
|
2206
|
-
firstStepPrompt: firstStep.pending?.prompt ?? 'No step content available',
|
|
2207
|
-
});
|
|
1341
|
+
const tools = constructTools(preAgentSession, ctx, apiKey, schemas, scope);
|
|
1342
|
+
const contextLoader = new context_loader_js_1.DefaultContextLoader(loadDaemonSoul, loadWorkspaceContext, loadSessionNotes, ctx);
|
|
1343
|
+
const baseCtx = await contextLoader.loadBase(trigger);
|
|
1344
|
+
const contextBundle = await contextLoader.loadSession(startContinueToken, baseCtx);
|
|
1345
|
+
const sessionCtx = buildSessionContext(trigger, contextBundle, firstStepPrompt || 'No step content available');
|
|
2208
1346
|
const agentCallbacks = buildAgentCallbacks(sessionId, state, modelId, emitter, STUCK_REPEAT_THRESHOLD);
|
|
2209
1347
|
const agent = new agent_loop_js_1.AgentLoop({
|
|
2210
1348
|
systemPrompt: sessionCtx.systemPrompt,
|
|
@@ -2217,17 +1355,33 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry, emitter, steerR
|
|
|
2217
1355
|
? { maxTokens: trigger.agentConfig.maxOutputTokens }
|
|
2218
1356
|
: {}),
|
|
2219
1357
|
});
|
|
2220
|
-
|
|
2221
|
-
|
|
2222
|
-
|
|
1358
|
+
handle?.setAgent(agent);
|
|
1359
|
+
return {
|
|
1360
|
+
preAgentSession,
|
|
1361
|
+
contextBundle,
|
|
1362
|
+
scope,
|
|
1363
|
+
tools,
|
|
1364
|
+
sessionCtx,
|
|
1365
|
+
handle,
|
|
1366
|
+
sessionId,
|
|
1367
|
+
workflowId: trigger.workflowId,
|
|
1368
|
+
worktreePath: sessionWorktreePath,
|
|
1369
|
+
agent,
|
|
1370
|
+
stuckRepeatThreshold: STUCK_REPEAT_THRESHOLD,
|
|
1371
|
+
};
|
|
1372
|
+
}
|
|
1373
|
+
async function runAgentLoop(session, trigger, conversationPath) {
|
|
1374
|
+
const { agent, preAgentSession, sessionCtx, sessionId, handle } = session;
|
|
1375
|
+
const { state } = preAgentSession;
|
|
1376
|
+
const { emitter } = session.scope;
|
|
1377
|
+
const { stuckRepeatThreshold } = session;
|
|
2223
1378
|
const { sessionTimeoutMs, maxTurns } = sessionCtx;
|
|
2224
1379
|
const stuckConfig = {
|
|
2225
1380
|
maxTurns,
|
|
2226
1381
|
stuckAbortPolicy: trigger.agentConfig?.stuckAbortPolicy ?? 'abort',
|
|
2227
1382
|
noProgressAbortEnabled: trigger.agentConfig?.noProgressAbortEnabled ?? false,
|
|
2228
|
-
stuckRepeatThreshold
|
|
1383
|
+
stuckRepeatThreshold,
|
|
2229
1384
|
};
|
|
2230
|
-
const conversationPath = path.join(sessionsDir, `${sessionId}-conversation.jsonl`);
|
|
2231
1385
|
const lastFlushedRef = { count: 0 };
|
|
2232
1386
|
const unsubscribe = agent.subscribe(buildTurnEndSubscriber({
|
|
2233
1387
|
agent,
|
|
@@ -2238,7 +1392,7 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry, emitter, steerR
|
|
|
2238
1392
|
emitter,
|
|
2239
1393
|
conversationPath,
|
|
2240
1394
|
lastFlushedRef,
|
|
2241
|
-
stuckRepeatThreshold
|
|
1395
|
+
stuckRepeatThreshold,
|
|
2242
1396
|
}));
|
|
2243
1397
|
let stopReason = 'stop';
|
|
2244
1398
|
let errorMessage;
|
|
@@ -2252,7 +1406,7 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry, emitter, steerR
|
|
|
2252
1406
|
reject(new Error('Workflow timed out'));
|
|
2253
1407
|
}, sessionTimeoutMs);
|
|
2254
1408
|
});
|
|
2255
|
-
console.log(`[WorkflowRunner] Agent loop started: sessionId=${sessionId} workflowId=${trigger.workflowId} modelId=${modelId}`);
|
|
1409
|
+
console.log(`[WorkflowRunner] Agent loop started: sessionId=${sessionId} workflowId=${trigger.workflowId} modelId=${preAgentSession.modelId}`);
|
|
2256
1410
|
await Promise.race([agent.prompt(buildUserMessage(sessionCtx.initialPrompt)), timeoutPromise])
|
|
2257
1411
|
.catch((err) => {
|
|
2258
1412
|
agent.abort();
|
|
@@ -2280,14 +1434,36 @@ async function runWorkflow(trigger, ctx, apiKey, daemonRegistry, emitter, steerR
|
|
|
2280
1434
|
void appendConversationMessages(conversationPath, remainingMessages).catch(() => { });
|
|
2281
1435
|
if (timeoutHandle !== undefined)
|
|
2282
1436
|
clearTimeout(timeoutHandle);
|
|
2283
|
-
|
|
2284
|
-
steerRegistry?.delete(state.workrailSessionId);
|
|
2285
|
-
}
|
|
2286
|
-
if (state.workrailSessionId !== null) {
|
|
2287
|
-
abortRegistry?.delete(state.workrailSessionId);
|
|
2288
|
-
}
|
|
1437
|
+
handle?.dispose();
|
|
2289
1438
|
console.log(`[WorkflowRunner] Agent loop ended: sessionId=${sessionId} stopReason=${stopReason}${errorMessage ? ` error=${errorMessage.slice(0, 120)}` : ''}`);
|
|
2290
1439
|
}
|
|
1440
|
+
if (stopReason === 'error') {
|
|
1441
|
+
return { kind: 'aborted', errorMessage };
|
|
1442
|
+
}
|
|
1443
|
+
return { kind: 'completed', stopReason, errorMessage };
|
|
1444
|
+
}
|
|
1445
|
+
async function runWorkflow(trigger, ctx, apiKey, daemonRegistry, emitter, activeSessionSet, _statsDir, _sessionsDir, source) {
|
|
1446
|
+
const statsDir = _statsDir ?? DAEMON_STATS_DIR;
|
|
1447
|
+
const sessionsDir = _sessionsDir ?? _shared_js_1.DAEMON_SESSIONS_DIR;
|
|
1448
|
+
const startMs = Date.now();
|
|
1449
|
+
const sessionId = (0, node_crypto_1.randomUUID)();
|
|
1450
|
+
console.log(`[WorkflowRunner] Session started: sessionId=${sessionId} workflowId=${trigger.workflowId}`);
|
|
1451
|
+
emitter?.emit({
|
|
1452
|
+
kind: 'session_started',
|
|
1453
|
+
sessionId,
|
|
1454
|
+
workflowId: trigger.workflowId,
|
|
1455
|
+
workspacePath: trigger.workspacePath,
|
|
1456
|
+
});
|
|
1457
|
+
const preResult = await buildPreAgentSession(trigger, ctx, apiKey, sessionId, startMs, statsDir, sessionsDir, emitter, daemonRegistry, activeSessionSet, source);
|
|
1458
|
+
if (preResult.kind === 'complete') {
|
|
1459
|
+
return preResult.result;
|
|
1460
|
+
}
|
|
1461
|
+
const readySession = await buildAgentReadySession(preResult.session, trigger, ctx, apiKey, sessionId, emitter, daemonRegistry, activeSessionSet);
|
|
1462
|
+
const conversationPath = path.join(sessionsDir, `${sessionId}-conversation.jsonl`);
|
|
1463
|
+
const outcome = await runAgentLoop(readySession, trigger, conversationPath);
|
|
1464
|
+
const stopReason = outcome.kind === 'aborted' ? 'error' : outcome.stopReason;
|
|
1465
|
+
const errorMessage = outcome.errorMessage;
|
|
1466
|
+
const { state, sessionWorktreePath } = readySession.preAgentSession;
|
|
2291
1467
|
const finalizationCtx = {
|
|
2292
1468
|
sessionId,
|
|
2293
1469
|
workrailSessionId: state.workrailSessionId,
|