@exaudeus/workrail 3.24.4 → 3.26.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli/commands/index.d.ts +6 -0
- package/dist/cli/commands/index.js +14 -1
- package/dist/cli/commands/version.d.ts +6 -0
- package/dist/cli/commands/version.js +14 -0
- package/dist/cli/commands/worktrain-await.d.ts +35 -0
- package/dist/cli/commands/worktrain-await.js +207 -0
- package/dist/cli/commands/worktrain-inbox.d.ts +23 -0
- package/dist/cli/commands/worktrain-inbox.js +82 -0
- package/dist/cli/commands/worktrain-init.d.ts +23 -0
- package/dist/cli/commands/worktrain-init.js +338 -0
- package/dist/cli/commands/worktrain-spawn.d.ts +28 -0
- package/dist/cli/commands/worktrain-spawn.js +106 -0
- package/dist/cli/commands/worktrain-tell.d.ts +25 -0
- package/dist/cli/commands/worktrain-tell.js +32 -0
- package/dist/cli-worktrain.d.ts +2 -0
- package/dist/cli-worktrain.js +169 -0
- package/dist/cli.js +100 -0
- package/dist/config/config-file.d.ts +2 -0
- package/dist/config/config-file.js +55 -0
- package/dist/console/assets/index-8dh0Psu-.css +1 -0
- package/dist/console/assets/{index-TMfptYpQ.js → index-HhtarvD5.js} +10 -10
- package/dist/console/index.html +2 -2
- package/dist/daemon/agent-loop.d.ts +90 -0
- package/dist/daemon/agent-loop.js +214 -0
- package/dist/daemon/pi-mono-loader.d.ts +0 -0
- package/dist/daemon/pi-mono-loader.js +1 -0
- package/dist/daemon/soul-template.d.ts +2 -0
- package/dist/daemon/soul-template.js +22 -0
- package/dist/daemon/workflow-runner.d.ts +63 -0
- package/dist/daemon/workflow-runner.js +689 -0
- package/dist/infrastructure/session/HttpServer.js +2 -2
- package/dist/manifest.json +226 -50
- package/dist/mcp/handlers/v2-execution/start.d.ts +2 -1
- package/dist/mcp/handlers/v2-execution/start.js +4 -3
- package/dist/mcp/output-schemas.d.ts +154 -154
- package/dist/mcp/server.js +1 -1
- package/dist/mcp/transports/bridge-entry.js +20 -2
- package/dist/mcp/transports/bridge-events.d.ts +34 -0
- package/dist/mcp/transports/bridge-events.js +24 -0
- package/dist/mcp/transports/fatal-exit.d.ts +5 -0
- package/dist/mcp/transports/fatal-exit.js +82 -0
- package/dist/mcp/transports/http-entry.js +3 -0
- package/dist/mcp/transports/stdio-entry.js +3 -7
- package/dist/mcp/v2/tools.d.ts +7 -7
- package/dist/trigger/delivery-action.d.ts +37 -0
- package/dist/trigger/delivery-action.js +204 -0
- package/dist/trigger/delivery-client.d.ts +11 -0
- package/dist/trigger/delivery-client.js +27 -0
- package/dist/trigger/index.d.ts +5 -0
- package/dist/trigger/index.js +8 -0
- package/dist/trigger/trigger-listener.d.ts +32 -0
- package/dist/trigger/trigger-listener.js +176 -0
- package/dist/trigger/trigger-router.d.ts +38 -0
- package/dist/trigger/trigger-router.js +343 -0
- package/dist/trigger/trigger-store.d.ts +39 -0
- package/dist/trigger/trigger-store.js +698 -0
- package/dist/trigger/types.d.ts +70 -0
- package/dist/trigger/types.js +10 -0
- package/dist/v2/durable-core/schemas/execution-snapshot/blocked-snapshot.d.ts +22 -22
- package/dist/v2/durable-core/schemas/execution-snapshot/execution-snapshot.v1.d.ts +114 -114
- package/dist/v2/durable-core/schemas/export-bundle/index.d.ts +454 -454
- package/dist/v2/durable-core/schemas/session/blockers.d.ts +14 -14
- package/dist/v2/durable-core/schemas/session/events.d.ts +93 -93
- package/dist/v2/durable-core/schemas/session/gaps.d.ts +2 -2
- package/dist/v2/durable-core/schemas/session/validation-event.d.ts +4 -4
- package/dist/v2/infra/in-memory/daemon-registry/index.d.ts +14 -0
- package/dist/v2/infra/in-memory/daemon-registry/index.js +32 -0
- package/dist/v2/infra/in-memory/keyed-async-queue/index.d.ts +5 -0
- package/dist/v2/infra/in-memory/keyed-async-queue/index.js +32 -0
- package/dist/v2/usecases/console-routes.d.ts +3 -1
- package/dist/v2/usecases/console-routes.js +132 -1
- package/dist/v2/usecases/console-service.d.ts +2 -0
- package/dist/v2/usecases/console-service.js +18 -2
- package/dist/v2/usecases/console-types.d.ts +2 -0
- package/package.json +6 -2
- package/spec/workflow-tags.json +1 -0
- package/workflows/classify-task-workflow.json +68 -0
- package/workflows/coding-task-workflow-agentic.lean.v2.json +43 -13
- package/workflows/workflow-for-workflows.json +4 -2
- package/workflows/workflow-for-workflows.v2.json +4 -2
- package/dist/console/assets/index-BXRk3te_.css +0 -1
- package/workflows/rich-object-contribution.json +0 -258
|
@@ -0,0 +1,689 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
14
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
15
|
+
}) : function(o, v) {
|
|
16
|
+
o["default"] = v;
|
|
17
|
+
});
|
|
18
|
+
var __importStar = (this && this.__importStar) || (function () {
|
|
19
|
+
var ownKeys = function(o) {
|
|
20
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
21
|
+
var ar = [];
|
|
22
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
23
|
+
return ar;
|
|
24
|
+
};
|
|
25
|
+
return ownKeys(o);
|
|
26
|
+
};
|
|
27
|
+
return function (mod) {
|
|
28
|
+
if (mod && mod.__esModule) return mod;
|
|
29
|
+
var result = {};
|
|
30
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
31
|
+
__setModuleDefault(result, mod);
|
|
32
|
+
return result;
|
|
33
|
+
};
|
|
34
|
+
})();
|
|
35
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
36
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
37
|
+
};
|
|
38
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
39
|
+
exports.DAEMON_SOUL_TEMPLATE = exports.DAEMON_SOUL_DEFAULT = exports.DAEMON_SESSIONS_DIR = void 0;
|
|
40
|
+
exports.readDaemonSessionState = readDaemonSessionState;
|
|
41
|
+
exports.readAllDaemonSessions = readAllDaemonSessions;
|
|
42
|
+
exports.runStartupRecovery = runStartupRecovery;
|
|
43
|
+
exports.makeBashTool = makeBashTool;
|
|
44
|
+
exports.buildSessionRecap = buildSessionRecap;
|
|
45
|
+
exports.buildSystemPrompt = buildSystemPrompt;
|
|
46
|
+
exports.runWorkflow = runWorkflow;
|
|
47
|
+
require("reflect-metadata");
|
|
48
|
+
const fs = __importStar(require("node:fs/promises"));
|
|
49
|
+
const path = __importStar(require("node:path"));
|
|
50
|
+
const os = __importStar(require("node:os"));
|
|
51
|
+
const node_child_process_1 = require("node:child_process");
|
|
52
|
+
const node_util_1 = require("node:util");
|
|
53
|
+
const node_crypto_1 = require("node:crypto");
|
|
54
|
+
const sdk_1 = __importDefault(require("@anthropic-ai/sdk"));
|
|
55
|
+
const bedrock_sdk_1 = require("@anthropic-ai/bedrock-sdk");
|
|
56
|
+
const agent_loop_js_1 = require("./agent-loop.js");
|
|
57
|
+
const start_js_1 = require("../mcp/handlers/v2-execution/start.js");
|
|
58
|
+
const index_js_1 = require("../mcp/handlers/v2-execution/index.js");
|
|
59
|
+
const v2_token_ops_js_1 = require("../mcp/handlers/v2-token-ops.js");
|
|
60
|
+
const index_js_2 = require("../v2/durable-core/ids/index.js");
|
|
61
|
+
const node_outputs_js_1 = require("../v2/projections/node-outputs.js");
|
|
62
|
+
const execAsync = (0, node_util_1.promisify)(node_child_process_1.exec);
|
|
63
|
+
const BASH_TIMEOUT_MS = 5 * 60 * 1000;
|
|
64
|
+
const MAX_SESSION_RECAP_NOTES = 3;
|
|
65
|
+
const MAX_SESSION_NOTE_CHARS = 800;
|
|
66
|
+
const DEFAULT_SESSION_TIMEOUT_MINUTES = 30;
|
|
67
|
+
exports.DAEMON_SESSIONS_DIR = path.join(os.homedir(), '.workrail', 'daemon-sessions');
|
|
68
|
+
const MAX_ORPHAN_AGE_MS = 2 * 60 * 60 * 1000;
|
|
69
|
+
const WORKRAIL_DIR = path.join(os.homedir(), '.workrail');
|
|
70
|
+
const WORKSPACE_CONTEXT_MAX_BYTES = 32 * 1024;
|
|
71
|
+
const WORKSPACE_CONTEXT_CANDIDATE_PATHS = [
|
|
72
|
+
'.claude/CLAUDE.md',
|
|
73
|
+
'CLAUDE.md',
|
|
74
|
+
'AGENTS.md',
|
|
75
|
+
'.github/AGENTS.md',
|
|
76
|
+
];
|
|
77
|
+
const soul_template_js_1 = require("./soul-template.js");
|
|
78
|
+
var soul_template_js_2 = require("./soul-template.js");
|
|
79
|
+
Object.defineProperty(exports, "DAEMON_SOUL_DEFAULT", { enumerable: true, get: function () { return soul_template_js_2.DAEMON_SOUL_DEFAULT; } });
|
|
80
|
+
Object.defineProperty(exports, "DAEMON_SOUL_TEMPLATE", { enumerable: true, get: function () { return soul_template_js_2.DAEMON_SOUL_TEMPLATE; } });
|
|
81
|
+
/**
 * Persist the workflow tokens for a daemon session to disk so the session can
 * be recovered after a restart.
 *
 * Writes `{ continueToken, checkpointToken, ts }` as pretty-printed JSON to
 * `DAEMON_SESSIONS_DIR/<sessionId>.json`, going through a `.tmp` file plus
 * rename so readers never observe a half-written file.
 *
 * @param {string} sessionId - Daemon session identifier (used as the file name).
 * @param {string} continueToken - Token required to resume the workflow.
 * @param {string|null} checkpointToken - Optional checkpoint token, or null.
 */
async function persistTokens(sessionId, continueToken, checkpointToken) {
    const targetPath = path.join(exports.DAEMON_SESSIONS_DIR, `${sessionId}.json`);
    const tempPath = `${targetPath}.tmp`;
    const payload = JSON.stringify({ continueToken, checkpointToken, ts: Date.now() }, null, 2);
    await fs.mkdir(exports.DAEMON_SESSIONS_DIR, { recursive: true });
    // Write-then-rename keeps the update atomic on POSIX filesystems.
    await fs.writeFile(tempPath, payload, 'utf8');
    await fs.rename(tempPath, targetPath);
}
|
|
89
|
+
/**
 * Read the persisted token state for one daemon session.
 *
 * @param {string} sessionId - Session whose state file should be read.
 * @returns {Promise<{continueToken: *, checkpointToken: *}|null>} The stored
 *   tokens, or null when the file is missing or unparseable (any error is
 *   treated as "no state").
 */
async function readDaemonSessionState(sessionId) {
    const sessionPath = path.join(exports.DAEMON_SESSIONS_DIR, `${sessionId}.json`);
    try {
        const { continueToken, checkpointToken } = JSON.parse(await fs.readFile(sessionPath, 'utf8'));
        return { continueToken, checkpointToken };
    }
    catch {
        // Missing or corrupt file: callers treat null as "nothing persisted".
        return null;
    }
}
|
|
100
|
+
/**
 * Enumerate every persisted daemon session under `sessionsDir`.
 *
 * Only `*.json` entries are considered; files that cannot be read, parsed, or
 * that lack the required `{ continueToken: string, ts: number }` shape are
 * skipped with a warning. A missing directory yields an empty list.
 *
 * @param {string} [sessionsDir] - Directory to scan (defaults to DAEMON_SESSIONS_DIR).
 * @returns {Promise<Array<{sessionId: string, continueToken: string, checkpointToken: string|null, ts: number}>>}
 */
async function readAllDaemonSessions(sessionsDir = exports.DAEMON_SESSIONS_DIR) {
    let fileNames;
    try {
        fileNames = await fs.readdir(sessionsDir);
    }
    catch (err) {
        // ENOENT simply means no sessions have ever been persisted.
        const missing = err instanceof Error && 'code' in err && err.code === 'ENOENT';
        if (!missing) {
            console.warn(`[WorkflowRunner] Could not read sessions directory ${sessionsDir}: ${err instanceof Error ? err.message : String(err)}`);
        }
        return [];
    }
    const result = [];
    for (const fileName of fileNames.filter((name) => name.endsWith('.json'))) {
        const filePath = path.join(sessionsDir, fileName);
        try {
            const parsed = JSON.parse(await fs.readFile(filePath, 'utf8'));
            const hasRequiredShape = typeof parsed.continueToken === 'string' && typeof parsed.ts === 'number';
            if (!hasRequiredShape) {
                console.warn(`[WorkflowRunner] Skipping malformed session file: ${filePath}`);
                continue;
            }
            result.push({
                // The session id is the file name minus its ".json" suffix.
                sessionId: fileName.slice(0, -5),
                continueToken: parsed.continueToken,
                checkpointToken: typeof parsed.checkpointToken === 'string' ? parsed.checkpointToken : null,
                ts: parsed.ts,
            });
        }
        catch (err) {
            console.warn(`[WorkflowRunner] Skipping unreadable session file ${filePath}: ${err instanceof Error ? err.message : String(err)}`);
        }
    }
    return result;
}
|
|
138
|
+
/**
 * Startup recovery: delete session files left behind by a previous daemon run.
 *
 * Any persisted session found at startup is by definition orphaned (its
 * in-process agent died with the old daemon), so each one is unlinked and
 * logged; sessions older than MAX_ORPHAN_AGE_MS get the "stale" label.
 * Stray `.tmp` files are always swept, even when no sessions exist.
 *
 * @param {string} [sessionsDir] - Directory to recover (defaults to DAEMON_SESSIONS_DIR).
 */
async function runStartupRecovery(sessionsDir = exports.DAEMON_SESSIONS_DIR) {
    const orphans = await readAllDaemonSessions(sessionsDir);
    if (orphans.length === 0) {
        await clearStrayTmpFiles(sessionsDir);
        return;
    }
    console.log(`[WorkflowRunner] Startup recovery: found ${orphans.length} orphaned session(s).`);
    const startedAt = Date.now();
    let removedCount = 0;
    for (const orphan of orphans) {
        const ageMs = startedAt - orphan.ts;
        const label = ageMs > MAX_ORPHAN_AGE_MS ? 'stale orphaned session' : 'orphaned session';
        console.log(`[WorkflowRunner] Clearing ${label}: sessionId=${orphan.sessionId} age=${Math.round(ageMs / 1000)}s`);
        try {
            await fs.unlink(path.join(sessionsDir, `${orphan.sessionId}.json`));
            removedCount += 1;
        }
        catch (err) {
            // Already-deleted files are fine; anything else is worth a warning.
            if (!(err instanceof Error && 'code' in err && err.code === 'ENOENT')) {
                console.warn(`[WorkflowRunner] Could not clear session file ${orphan.sessionId}: ${err instanceof Error ? err.message : String(err)}`);
            }
        }
    }
    await clearStrayTmpFiles(sessionsDir);
    console.log(`[WorkflowRunner] Startup recovery complete: cleared ${removedCount}/${orphans.length} orphaned session(s).`);
}
|
|
167
|
+
/**
 * Best-effort sweep of leftover `*.tmp` files in the sessions directory
 * (residue of atomic writes interrupted mid-flight).
 *
 * Never throws: an unreadable directory or an unlink failure is silently
 * ignored, since this is purely housekeeping.
 *
 * @param {string} sessionsDir - Directory to sweep.
 */
async function clearStrayTmpFiles(sessionsDir) {
    let names;
    try {
        names = await fs.readdir(sessionsDir);
    }
    catch {
        // Directory missing or unreadable: nothing to sweep.
        return;
    }
    for (const name of names.filter((entry) => entry.endsWith('.tmp'))) {
        try {
            await fs.unlink(path.join(sessionsDir, name));
            console.log(`[WorkflowRunner] Cleared stray temp file: ${name}`);
        }
        catch {
            // Best-effort cleanup; ignore races and permission issues.
        }
    }
}
|
|
186
|
+
/**
 * Load the daemon "soul" (agent rules/philosophy markdown).
 *
 * Reads `resolvedPath` when given, otherwise `~/.workrail/daemon-soul.md`.
 * On first run (file missing) it seeds the file with DAEMON_SOUL_TEMPLATE and
 * returns DAEMON_SOUL_DEFAULT; on any other read error it warns and falls
 * back to DAEMON_SOUL_DEFAULT as well.
 *
 * @param {string|undefined} resolvedPath - Explicit soul-file path, if any.
 * @returns {Promise<string>} The soul markdown content.
 */
async function loadDaemonSoul(resolvedPath) {
    const soulPath = resolvedPath ?? path.join(WORKRAIL_DIR, 'daemon-soul.md');
    try {
        return await fs.readFile(soulPath, 'utf8');
    }
    catch (err) {
        const missing = err instanceof Error && 'code' in err && err.code === 'ENOENT';
        if (!missing) {
            console.warn(`[WorkflowRunner] Warning: could not read daemon-soul.md: ${err instanceof Error ? err.message : String(err)}`);
            return soul_template_js_1.DAEMON_SOUL_DEFAULT;
        }
        // First run: seed the template so the operator can customize it later.
        try {
            await fs.mkdir(path.dirname(soulPath), { recursive: true });
            await fs.writeFile(soulPath, soul_template_js_1.DAEMON_SOUL_TEMPLATE, 'utf8');
            console.log(`[WorkflowRunner] Created daemon-soul.md template at ${soulPath}`);
        }
        catch (writeErr) {
            console.warn(`[WorkflowRunner] Warning: could not write daemon-soul.md template: ${writeErr instanceof Error ? writeErr.message : String(writeErr)}`);
        }
        return soul_template_js_1.DAEMON_SOUL_DEFAULT;
    }
}
|
|
209
|
+
/**
 * Collect workspace context files (CLAUDE.md / AGENTS.md variants) for prompt
 * injection, honoring a combined 32 KB budget.
 *
 * Candidates are read in WORKSPACE_CONTEXT_CANDIDATE_PATHS order; each found
 * file is appended as a "### <relativePath>" section. When adding a file
 * would exceed WORKSPACE_CONTEXT_MAX_BYTES, only the remaining budget's worth
 * is kept, a truncation marker is appended, and later candidates are skipped.
 *
 * @param workspacePath - Absolute workspace root to resolve candidates against.
 * @returns The combined context string, or null when no candidate file exists.
 */
async function loadWorkspaceContext(workspacePath) {
    const parts = [];
    let combinedBytes = 0;
    let truncated = false;
    for (const relativePath of WORKSPACE_CONTEXT_CANDIDATE_PATHS) {
        // Once the budget is exhausted no further candidates are considered.
        if (truncated)
            break;
        const fullPath = path.join(workspacePath, relativePath);
        let content;
        try {
            content = await fs.readFile(fullPath, 'utf8');
        }
        catch (err) {
            // Missing candidates are expected; other errors are logged and skipped.
            const isEnoent = err instanceof Error && 'code' in err && err.code === 'ENOENT';
            if (!isEnoent) {
                console.warn(`[WorkflowRunner] Skipping ${fullPath}: ${err instanceof Error ? err.message : String(err)}`);
            }
            continue;
        }
        const contentBytes = Buffer.byteLength(content, 'utf8');
        if (combinedBytes + contentBytes > WORKSPACE_CONTEXT_MAX_BYTES) {
            const remaining = WORKSPACE_CONTEXT_MAX_BYTES - combinedBytes;
            // NOTE(review): `remaining` is a byte count but slice() counts UTF-16
            // code units, so for multibyte content the kept chunk can exceed the
            // byte budget (and can split a surrogate pair) — confirm acceptable.
            const truncatedContent = content.slice(0, remaining);
            parts.push(`### ${relativePath}\n${truncatedContent}`);
            truncated = true;
        }
        else {
            parts.push(`### ${relativePath}\n${content}`);
            combinedBytes += contentBytes;
        }
    }
    if (parts.length === 0)
        return null;
    let combined = parts.join('\n\n');
    if (truncated) {
        combined += '\n\n[Workspace context truncated: combined size exceeded 32 KB limit. Some files may be missing.]';
    }
    // Log which candidates actually contributed a section.
    console.log(`[WorkflowRunner] Injecting workspace context from: ${WORKSPACE_CONTEXT_CANDIDATE_PATHS.filter((p) => parts.some((part) => part.startsWith(`### ${p}`))).join(', ')}`);
    return combined;
}
|
|
249
|
+
/**
 * Recover prior-step notes for a session so they can be replayed as a recap.
 *
 * Pipeline: decode the continueToken -> load the session's event log from the
 * v2 session store -> project node outputs -> collect all 'notes' payloads on
 * the 'recap' channel, clipping each to MAX_SESSION_NOTE_CHARS.
 *
 * Every failure mode (bad token, store error, projection error, unexpected
 * throw) is logged and degrades to an empty list — a missing recap must never
 * block the daemon from running the workflow.
 *
 * @param continueToken - Token identifying the session to recap.
 * @param ctx - Handler context exposing v2 ports (tokenCodecPorts,
 *   tokenAliasStore, sessionStore).
 * @returns Up to MAX_SESSION_RECAP_NOTES most recent notes (possibly empty).
 */
async function loadSessionNotes(continueToken, ctx) {
    try {
        const resolvedResult = await (0, v2_token_ops_js_1.parseContinueTokenOrFail)(continueToken, ctx.v2.tokenCodecPorts, ctx.v2.tokenAliasStore);
        if (resolvedResult.isErr()) {
            console.warn(`[WorkflowRunner] Warning: could not decode continueToken for session recap: ${resolvedResult.error.message}`);
            return [];
        }
        const sessionId = (0, index_js_2.asSessionId)(resolvedResult.value.sessionId);
        const loadResult = await ctx.v2.sessionStore.load(sessionId);
        if (loadResult.isErr()) {
            console.warn(`[WorkflowRunner] Warning: could not load session store for recap: ${loadResult.error.code} -- ${loadResult.error.message}`);
            return [];
        }
        const projectionResult = (0, node_outputs_js_1.projectNodeOutputsV2)(loadResult.value.events);
        if (projectionResult.isErr()) {
            console.warn(`[WorkflowRunner] Warning: could not project session outputs for recap: ${projectionResult.error.code} -- ${projectionResult.error.message}`);
            return [];
        }
        const allNotes = [];
        // Walk every node's current 'recap' channel outputs, keeping notes payloads.
        for (const nodeView of Object.values(projectionResult.value.nodesById)) {
            for (const output of nodeView.currentByChannel.recap) {
                if (output.payload.payloadKind === 'notes') {
                    // Clip oversized notes so the recap stays prompt-sized.
                    const note = output.payload.notesMarkdown.length > MAX_SESSION_NOTE_CHARS
                        ? output.payload.notesMarkdown.slice(0, MAX_SESSION_NOTE_CHARS) + '\n[truncated]'
                        : output.payload.notesMarkdown;
                    allNotes.push(note);
                }
            }
        }
        // Keep only the most recent notes (insertion order of the projection).
        return allNotes.slice(-MAX_SESSION_RECAP_NOTES);
    }
    catch (err) {
        console.warn(`[WorkflowRunner] Warning: unexpected error loading session notes for recap: ${err instanceof Error ? err.message : String(err)}`);
        return [];
    }
}
|
|
285
|
+
// Memoized JSON-schema bundle for the daemon's tool parameters; built on first use.
let _schemas = null;
/**
 * Return the JSON schemas describing each tool's parameters.
 * The bundle is constructed once and cached for all later calls.
 *
 * @returns {object} Map with ContinueWorkflowParams, BashParams, ReadParams, WriteParams.
 */
function getSchemas() {
    if (_schemas !== null) {
        return _schemas;
    }
    // Parameters accepted by the continue_workflow tool.
    const continueWorkflowParams = {
        type: 'object',
        properties: {
            continueToken: {
                type: 'string',
                description: 'The continueToken from the previous start_workflow or continue_workflow call. Round-trip exactly as received.',
            },
            intent: {
                type: 'string',
                enum: ['advance', 'rehydrate'],
                description: 'advance: I completed this step. rehydrate: remind me what the current step is.',
            },
            notesMarkdown: {
                type: 'string',
                description: 'Notes on what you did in this step (10-30 lines, markdown).',
            },
            context: {
                type: 'object',
                additionalProperties: true,
                description: 'Updated context variables (only changed values).',
            },
        },
        required: ['continueToken'],
    };
    // Parameters accepted by the Bash tool.
    const bashParams = {
        type: 'object',
        properties: {
            command: { type: 'string', description: 'Shell command to execute' },
            cwd: { type: 'string', description: 'Working directory for the command' },
        },
        required: ['command'],
    };
    // Parameters accepted by the Read tool.
    const readParams = {
        type: 'object',
        properties: {
            filePath: { type: 'string', description: 'Absolute path to the file to read' },
        },
        required: ['filePath'],
    };
    // Parameters accepted by the Write tool.
    const writeParams = {
        type: 'object',
        properties: {
            filePath: { type: 'string', description: 'Absolute path to the file to write' },
            content: { type: 'string', description: 'Content to write to the file' },
        },
        required: ['filePath', 'content'],
    };
    _schemas = {
        ContinueWorkflowParams: continueWorkflowParams,
        BashParams: bashParams,
        ReadParams: readParams,
        WriteParams: writeParams,
    };
    return _schemas;
}
|
|
340
|
+
/**
 * Build the continue_workflow tool handed to the agent loop.
 *
 * On each execution it advances (or rehydrates) the workflow via
 * executeContinueWorkflow, persists the new tokens to disk, and signals the
 * caller through the provided callbacks:
 *  - onComplete(notes)   when the workflow reports isComplete
 *  - onAdvance(text,tok) when a new step (or token) is available
 *
 * @param sessionId - Daemon session id used for token persistence.
 * @param ctx - Handler context passed through to executeContinueWorkflow.
 * @param onAdvance - Callback invoked with (stepText, continueToken) after a step advance.
 * @param onComplete - Callback invoked with the final notesMarkdown when the workflow completes.
 * @param schemas - Schema bundle from getSchemas().
 */
function makeContinueWorkflowTool(sessionId, ctx, onAdvance, onComplete, schemas) {
    return {
        name: 'continue_workflow',
        description: 'Advance the WorkRail workflow to the next step. Call this after completing all work ' +
            'required by the current step. Include your notes in notesMarkdown.',
        inputSchema: schemas['ContinueWorkflowParams'],
        label: 'Continue Workflow',
        execute: async (_toolCallId, params) => {
            const result = await (0, index_js_1.executeContinueWorkflow)({
                continueToken: params.continueToken,
                // Missing intent defaults to a normal step advance.
                intent: (params.intent ?? 'advance'),
                output: params.notesMarkdown
                    ? { notesMarkdown: params.notesMarkdown }
                    : undefined,
                context: params.context,
            }, ctx);
            if (result.isErr()) {
                // Surface the failure to the agent loop as a tool error.
                throw new Error(`continue_workflow failed: ${result.error.kind} -- ${JSON.stringify(result.error)}`);
            }
            const out = result.value.response;
            const continueToken = out.continueToken ?? '';
            const checkpointToken = out.checkpointToken ?? null;
            if (continueToken) {
                // Persist tokens so the session survives a daemon restart.
                await persistTokens(sessionId, continueToken, checkpointToken);
            }
            if (out.isComplete) {
                onComplete(params.notesMarkdown);
                return {
                    content: [{ type: 'text', text: 'Workflow complete. All steps have been executed.' }],
                    details: out,
                };
            }
            const pending = out.pending;
            // With a pending step, echo its title/prompt; otherwise just the token.
            const stepText = pending
                ? `## Next step: ${pending.title}\n\n${pending.prompt}\n\ncontinueToken: ${continueToken}`
                : `Step advanced. continueToken: ${continueToken}`;
            onAdvance(stepText, continueToken);
            return {
                content: [{ type: 'text', text: stepText }],
                details: out,
            };
        },
    };
}
|
|
384
|
+
/**
 * Build the Bash tool: runs a shell command via promisified child_process.exec.
 *
 * Commands run in `params.cwd` when given, else in `workspacePath`, with a
 * BASH_TIMEOUT_MS ceiling. A non-zero exit (or signal/timeout) is surfaced as
 * a thrown Error carrying the captured stdout/stderr.
 *
 * @param {string} workspacePath - Default working directory for commands.
 * @param {object} schemas - Schema bundle from getSchemas().
 */
function makeBashTool(workspacePath, schemas) {
    return {
        name: 'Bash',
        description: 'Execute a shell command. Throws on non-zero exit code. ' +
            `Maximum execution time: ${BASH_TIMEOUT_MS / 1000}s.`,
        inputSchema: schemas['BashParams'],
        label: 'Bash',
        execute: async (_toolCallId, params) => {
            const workingDir = params.cwd ?? workspacePath;
            try {
                const { stdout, stderr } = await execAsync(params.command, {
                    cwd: workingDir,
                    timeout: BASH_TIMEOUT_MS,
                });
                const combined = [stdout, stderr].filter(Boolean).join('\n');
                return {
                    content: [{ type: 'text', text: combined || '(no output)' }],
                    details: { stdout, stderr },
                };
            }
            catch (err) {
                const failure = err;
                const stdout = String(failure.stdout ?? '');
                const stderr = String(failure.stderr ?? '');
                // Describe how the command ended: exit code, signal, or unknown.
                let exitInfo = 'exit unknown';
                if (failure.code != null) {
                    exitInfo = `exit ${String(failure.code)}`;
                }
                else if (failure.signal) {
                    exitInfo = `signal ${String(failure.signal)}`;
                }
                throw new Error(`Command failed: ${params.command} (${exitInfo})\nSTDOUT:\n${stdout}\nSTDERR:\n${stderr}`);
            }
        },
    };
}
|
|
420
|
+
/**
 * Build the Read tool: returns a file's UTF-8 contents.
 * A missing or unreadable file throws, which the agent loop reports as a tool error.
 *
 * @param {object} schemas - Schema bundle from getSchemas().
 */
function makeReadTool(schemas) {
    return {
        name: 'Read',
        description: 'Read the contents of a file at the given absolute path.',
        inputSchema: schemas['ReadParams'],
        label: 'Read',
        execute: async (_toolCallId, { filePath }) => {
            const fileText = await fs.readFile(filePath, 'utf8');
            return {
                content: [{ type: 'text', text: fileText }],
                details: { filePath, length: fileText.length },
            };
        },
    };
}
|
|
435
|
+
/**
 * Build the Write tool: writes UTF-8 content to a file, creating any missing
 * parent directories first.
 *
 * NOTE(review): the success message reports `content.length` as "bytes" but
 * that is a UTF-16 code-unit count — confirm whether byte accuracy matters.
 *
 * @param {object} schemas - Schema bundle from getSchemas().
 */
function makeWriteTool(schemas) {
    return {
        name: 'Write',
        description: 'Write content to a file at the given absolute path. Creates parent directories if needed.',
        inputSchema: schemas['WriteParams'],
        label: 'Write',
        execute: async (_toolCallId, { filePath, content }) => {
            await fs.mkdir(path.dirname(filePath), { recursive: true });
            await fs.writeFile(filePath, content, 'utf8');
            return {
                content: [{ type: 'text', text: `Written ${content.length} bytes to ${filePath}` }],
                details: { filePath, length: content.length },
            };
        },
    };
}
|
|
451
|
+
/**
 * Format prior-step notes into a recap block for prompt injection.
 *
 * @param {string[]} notes - Markdown notes from earlier steps, oldest first.
 * @returns {string} A `<workrail_session_state>` block, or '' when there are no notes.
 */
function buildSessionRecap(notes) {
    if (notes.length === 0) {
        return '';
    }
    const sections = notes.map((note, index) => `### Prior step ${index + 1}\n${note}`);
    return `<workrail_session_state>\nThe following notes summarize prior steps from this session:\n\n${sections.join('\n\n')}\n</workrail_session_state>`;
}
|
|
459
|
+
/**
 * Assemble the agent's system prompt: fixed tool/contract instructions, the
 * session recap state, the soul (rules/philosophy) content, the workspace
 * path, plus optional workspace-context and reference-URL sections.
 *
 * NOTE(review): `sessionState` produced by buildSessionRecap already carries
 * its own <workrail_session_state> tags, and this function wraps it in the
 * same tags again — confirm the double wrapping is intentional.
 *
 * @param {object} trigger - Trigger carrying workspacePath and optional referenceUrls.
 * @param {string} sessionState - Recap text injected into the state tag.
 * @param {string} soulContent - Agent rules/philosophy markdown.
 * @param {string|null} workspaceContext - Combined CLAUDE.md/AGENTS.md content, or null.
 * @returns {string} The full newline-joined system prompt.
 */
function buildSystemPrompt(trigger, sessionState, soulContent, workspaceContext) {
    const sections = [
        'You are WorkRail Auto, an autonomous agent that executes workflows step by step.',
        '',
        '## Your tools',
        '- `continue_workflow`: Advance to the next step. Call this after completing each step\'s work.',
        ' Always include your notes in notesMarkdown and round-trip the continueToken exactly.',
        '- `Bash`: Run shell commands. Use for building, testing, running scripts.',
        '- `Read`: Read files.',
        '- `Write`: Write files.',
        '',
        '## Execution contract',
        '1. Read the step carefully. Do ALL the work the step asks for.',
        '2. Call `continue_workflow` with your notes. Include the continueToken exactly.',
        '3. Repeat until the workflow reports it is complete.',
        '4. Do NOT skip steps. Do NOT call `continue_workflow` without completing the step\'s work.',
        '',
        `<workrail_session_state>${sessionState}</workrail_session_state>`,
        '',
        '## Agent Rules and Philosophy',
        soulContent,
        '',
        `## Workspace: ${trigger.workspacePath}`,
    ];
    if (workspaceContext !== null) {
        sections.push('', '## Workspace Context (from AGENTS.md / CLAUDE.md)', workspaceContext);
    }
    const urls = trigger.referenceUrls;
    if (urls && urls.length > 0) {
        sections.push('', '## Reference documents', 'Before starting, fetch and read these reference documents: ' + urls.join(' '), 'If you cannot fetch any of these documents, note their unavailability and proceed.');
    }
    return sections.join('\n');
}
|
|
497
|
+
/**
 * Wrap plain text in a user-role chat message, stamped with the current time.
 *
 * @param {string} text - Message body.
 * @returns {{role: 'user', content: string, timestamp: number}}
 */
function buildUserMessage(text) {
    const timestamp = Date.now();
    return { role: 'user', content: text, timestamp };
}
|
|
504
|
+
async function runWorkflow(trigger, ctx, apiKey, daemonRegistry) {
|
|
505
|
+
const sessionId = (0, node_crypto_1.randomUUID)();
|
|
506
|
+
console.log(`[WorkflowRunner] Session started: sessionId=${sessionId} workflowId=${trigger.workflowId}`);
|
|
507
|
+
daemonRegistry?.register(sessionId, trigger.workflowId);
|
|
508
|
+
let agentClient;
|
|
509
|
+
let modelId;
|
|
510
|
+
if (trigger.agentConfig?.model) {
|
|
511
|
+
const slashIdx = trigger.agentConfig.model.indexOf('/');
|
|
512
|
+
if (slashIdx === -1) {
|
|
513
|
+
daemonRegistry?.unregister(sessionId, 'failed');
|
|
514
|
+
return {
|
|
515
|
+
_tag: 'error',
|
|
516
|
+
workflowId: trigger.workflowId,
|
|
517
|
+
message: `agentConfig.model must be in "provider/model-id" format, got: "${trigger.agentConfig.model}"`,
|
|
518
|
+
stopReason: 'error',
|
|
519
|
+
};
|
|
520
|
+
}
|
|
521
|
+
const provider = trigger.agentConfig.model.slice(0, slashIdx);
|
|
522
|
+
modelId = trigger.agentConfig.model.slice(slashIdx + 1);
|
|
523
|
+
agentClient = provider === 'amazon-bedrock' ? new bedrock_sdk_1.AnthropicBedrock() : new sdk_1.default({ apiKey });
|
|
524
|
+
}
|
|
525
|
+
else {
|
|
526
|
+
const usesBedrock = !!process.env['AWS_PROFILE'] || !!process.env['AWS_ACCESS_KEY_ID'];
|
|
527
|
+
if (usesBedrock) {
|
|
528
|
+
agentClient = new bedrock_sdk_1.AnthropicBedrock();
|
|
529
|
+
modelId = 'us.anthropic.claude-sonnet-4-6';
|
|
530
|
+
}
|
|
531
|
+
else {
|
|
532
|
+
agentClient = new sdk_1.default({ apiKey });
|
|
533
|
+
modelId = 'claude-sonnet-4-5';
|
|
534
|
+
}
|
|
535
|
+
}
|
|
536
|
+
let isComplete = false;
|
|
537
|
+
let pendingSteerText = null;
|
|
538
|
+
let lastStepNotes;
|
|
539
|
+
const onAdvance = (stepText, _continueToken) => {
|
|
540
|
+
pendingSteerText = stepText;
|
|
541
|
+
daemonRegistry?.heartbeat(sessionId);
|
|
542
|
+
};
|
|
543
|
+
const onComplete = (notes) => {
|
|
544
|
+
isComplete = true;
|
|
545
|
+
lastStepNotes = notes;
|
|
546
|
+
};
|
|
547
|
+
let firstStep;
|
|
548
|
+
if (trigger._preAllocatedStartResponse !== undefined) {
|
|
549
|
+
firstStep = trigger._preAllocatedStartResponse;
|
|
550
|
+
}
|
|
551
|
+
else {
|
|
552
|
+
const startResult = await (0, start_js_1.executeStartWorkflow)({ workflowId: trigger.workflowId, workspacePath: trigger.workspacePath, goal: trigger.goal }, ctx, { is_autonomous: 'true' });
|
|
553
|
+
if (startResult.isErr()) {
|
|
554
|
+
daemonRegistry?.unregister(sessionId, 'failed');
|
|
555
|
+
return {
|
|
556
|
+
_tag: 'error',
|
|
557
|
+
workflowId: trigger.workflowId,
|
|
558
|
+
message: `start_workflow failed: ${startResult.error.kind} -- ${JSON.stringify(startResult.error)}`,
|
|
559
|
+
stopReason: 'error',
|
|
560
|
+
};
|
|
561
|
+
}
|
|
562
|
+
firstStep = startResult.value.response;
|
|
563
|
+
}
|
|
564
|
+
const startContinueToken = firstStep.continueToken ?? '';
|
|
565
|
+
const startCheckpointToken = firstStep.checkpointToken ?? null;
|
|
566
|
+
if (startContinueToken) {
|
|
567
|
+
await persistTokens(sessionId, startContinueToken, startCheckpointToken);
|
|
568
|
+
}
|
|
569
|
+
if (firstStep.isComplete) {
|
|
570
|
+
await fs.unlink(path.join(exports.DAEMON_SESSIONS_DIR, `${sessionId}.json`)).catch(() => { });
|
|
571
|
+
daemonRegistry?.unregister(sessionId, 'completed');
|
|
572
|
+
return { _tag: 'success', workflowId: trigger.workflowId, stopReason: 'stop' };
|
|
573
|
+
}
|
|
574
|
+
const schemas = getSchemas();
|
|
575
|
+
const tools = [
|
|
576
|
+
makeContinueWorkflowTool(sessionId, ctx, onAdvance, onComplete, schemas),
|
|
577
|
+
makeBashTool(trigger.workspacePath, schemas),
|
|
578
|
+
makeReadTool(schemas),
|
|
579
|
+
makeWriteTool(schemas),
|
|
580
|
+
];
|
|
581
|
+
const [soulContent, workspaceContext, sessionNotes] = await Promise.all([
|
|
582
|
+
loadDaemonSoul(trigger.soulFile),
|
|
583
|
+
loadWorkspaceContext(trigger.workspacePath),
|
|
584
|
+
startContinueToken ? loadSessionNotes(startContinueToken, ctx) : Promise.resolve([]),
|
|
585
|
+
]);
|
|
586
|
+
const sessionState = buildSessionRecap(sessionNotes);
|
|
587
|
+
const contextJson = trigger.context
|
|
588
|
+
? `\n\nTrigger context:\n\`\`\`json\n${JSON.stringify(trigger.context, null, 2)}\n\`\`\``
|
|
589
|
+
: '';
|
|
590
|
+
const initialPrompt = (firstStep.pending?.prompt ?? 'No step content available') +
|
|
591
|
+
`\n\ncontinueToken: ${startContinueToken}` +
|
|
592
|
+
contextJson +
|
|
593
|
+
'\n\nComplete all step work, then call continue_workflow with your notes to begin.';
|
|
594
|
+
const agent = new agent_loop_js_1.AgentLoop({
|
|
595
|
+
systemPrompt: buildSystemPrompt(trigger, sessionState, soulContent, workspaceContext),
|
|
596
|
+
modelId,
|
|
597
|
+
tools,
|
|
598
|
+
client: agentClient,
|
|
599
|
+
toolExecution: 'sequential',
|
|
600
|
+
});
|
|
601
|
+
const sessionTimeoutMs = (trigger.agentConfig?.maxSessionMinutes ?? DEFAULT_SESSION_TIMEOUT_MINUTES) * 60 * 1000;
|
|
602
|
+
const maxTurns = trigger.agentConfig?.maxTurns ?? 0;
|
|
603
|
+
let timeoutReason = null;
|
|
604
|
+
let turnCount = 0;
|
|
605
|
+
const unsubscribe = agent.subscribe(async (event) => {
|
|
606
|
+
if (event.type !== 'turn_end')
|
|
607
|
+
return;
|
|
608
|
+
turnCount++;
|
|
609
|
+
if (maxTurns > 0 && turnCount >= maxTurns && timeoutReason === null) {
|
|
610
|
+
timeoutReason = 'max_turns';
|
|
611
|
+
agent.abort();
|
|
612
|
+
return;
|
|
613
|
+
}
|
|
614
|
+
if (pendingSteerText !== null && !isComplete) {
|
|
615
|
+
const text = pendingSteerText;
|
|
616
|
+
pendingSteerText = null;
|
|
617
|
+
agent.steer(buildUserMessage(text));
|
|
618
|
+
}
|
|
619
|
+
});
|
|
620
|
+
let stopReason = 'stop';
|
|
621
|
+
let errorMessage;
|
|
622
|
+
let timeoutHandle;
|
|
623
|
+
try {
|
|
624
|
+
const timeoutPromise = new Promise((_, reject) => {
|
|
625
|
+
timeoutHandle = setTimeout(() => {
|
|
626
|
+
if (timeoutReason === null) {
|
|
627
|
+
timeoutReason = 'wall_clock';
|
|
628
|
+
}
|
|
629
|
+
reject(new Error('Workflow timed out'));
|
|
630
|
+
}, sessionTimeoutMs);
|
|
631
|
+
});
|
|
632
|
+
await Promise.race([agent.prompt(buildUserMessage(initialPrompt)), timeoutPromise])
|
|
633
|
+
.catch((err) => {
|
|
634
|
+
agent.abort();
|
|
635
|
+
throw err;
|
|
636
|
+
});
|
|
637
|
+
const messages = agent.state.messages;
|
|
638
|
+
let lastAssistant;
|
|
639
|
+
for (let i = messages.length - 1; i >= 0; i--) {
|
|
640
|
+
const m = messages[i];
|
|
641
|
+
if ('role' in m && m.role === 'assistant') {
|
|
642
|
+
lastAssistant = m;
|
|
643
|
+
break;
|
|
644
|
+
}
|
|
645
|
+
}
|
|
646
|
+
stopReason = lastAssistant?.stopReason ?? 'stop';
|
|
647
|
+
errorMessage = lastAssistant?.errorMessage;
|
|
648
|
+
}
|
|
649
|
+
catch (err) {
|
|
650
|
+
errorMessage = err instanceof Error ? err.message : String(err);
|
|
651
|
+
stopReason = 'error';
|
|
652
|
+
}
|
|
653
|
+
finally {
|
|
654
|
+
unsubscribe();
|
|
655
|
+
if (timeoutHandle !== undefined)
|
|
656
|
+
clearTimeout(timeoutHandle);
|
|
657
|
+
}
|
|
658
|
+
if (timeoutReason !== null) {
|
|
659
|
+
daemonRegistry?.unregister(sessionId, 'failed');
|
|
660
|
+
const limitDescription = timeoutReason === 'wall_clock'
|
|
661
|
+
? `${trigger.agentConfig?.maxSessionMinutes ?? DEFAULT_SESSION_TIMEOUT_MINUTES} minutes`
|
|
662
|
+
: `${trigger.agentConfig?.maxTurns} turns`;
|
|
663
|
+
return {
|
|
664
|
+
_tag: 'timeout',
|
|
665
|
+
workflowId: trigger.workflowId,
|
|
666
|
+
reason: timeoutReason,
|
|
667
|
+
message: `Workflow ${timeoutReason === 'wall_clock' ? 'timed out' : 'exceeded turn limit'} after ${limitDescription}`,
|
|
668
|
+
stopReason: 'aborted',
|
|
669
|
+
};
|
|
670
|
+
}
|
|
671
|
+
if (stopReason === 'error' || errorMessage) {
|
|
672
|
+
daemonRegistry?.unregister(sessionId, 'failed');
|
|
673
|
+
return {
|
|
674
|
+
_tag: 'error',
|
|
675
|
+
workflowId: trigger.workflowId,
|
|
676
|
+
message: errorMessage ?? 'Agent stopped with error reason',
|
|
677
|
+
stopReason,
|
|
678
|
+
};
|
|
679
|
+
}
|
|
680
|
+
await fs.unlink(path.join(exports.DAEMON_SESSIONS_DIR, `${sessionId}.json`)).catch(() => {
|
|
681
|
+
});
|
|
682
|
+
daemonRegistry?.unregister(sessionId, 'completed');
|
|
683
|
+
return {
|
|
684
|
+
_tag: 'success',
|
|
685
|
+
workflowId: trigger.workflowId,
|
|
686
|
+
stopReason,
|
|
687
|
+
...(lastStepNotes !== undefined ? { lastStepNotes } : {}),
|
|
688
|
+
};
|
|
689
|
+
}
|