@exaudeus/workrail 3.24.3 → 3.25.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli/commands/index.d.ts +1 -0
- package/dist/cli/commands/index.js +3 -1
- package/dist/cli/commands/version.d.ts +6 -0
- package/dist/cli/commands/version.js +14 -0
- package/dist/cli.js +90 -0
- package/dist/console/assets/index-8dh0Psu-.css +1 -0
- package/dist/console/assets/{index-TMfptYpQ.js → index-HhtarvD5.js} +10 -10
- package/dist/console/index.html +2 -2
- package/dist/daemon/pi-mono-loader.d.ts +5 -0
- package/dist/daemon/pi-mono-loader.js +65 -0
- package/dist/daemon/workflow-runner.d.ts +41 -0
- package/dist/daemon/workflow-runner.js +573 -0
- package/dist/infrastructure/session/HttpServer.js +2 -2
- package/dist/manifest.json +112 -32
- package/dist/mcp/handlers/v2-execution/start.d.ts +2 -1
- package/dist/mcp/handlers/v2-execution/start.js +4 -3
- package/dist/mcp/server.js +1 -1
- package/dist/mcp/transports/http-entry.js +1 -1
- package/dist/trigger/index.d.ts +5 -0
- package/dist/trigger/index.js +8 -0
- package/dist/trigger/trigger-listener.d.ts +30 -0
- package/dist/trigger/trigger-listener.js +166 -0
- package/dist/trigger/trigger-router.d.ts +32 -0
- package/dist/trigger/trigger-router.js +185 -0
- package/dist/trigger/trigger-store.d.ts +31 -0
- package/dist/trigger/trigger-store.js +457 -0
- package/dist/trigger/types.d.ts +46 -0
- package/dist/trigger/types.js +6 -0
- package/dist/v2/infra/in-memory/daemon-registry/index.d.ts +14 -0
- package/dist/v2/infra/in-memory/daemon-registry/index.js +32 -0
- package/dist/v2/infra/in-memory/keyed-async-queue/index.d.ts +5 -0
- package/dist/v2/infra/in-memory/keyed-async-queue/index.js +32 -0
- package/dist/v2/usecases/console-routes.d.ts +3 -1
- package/dist/v2/usecases/console-routes.js +102 -1
- package/dist/v2/usecases/console-service.d.ts +2 -0
- package/dist/v2/usecases/console-service.js +18 -2
- package/dist/v2/usecases/console-types.d.ts +2 -0
- package/package.json +5 -3
- package/workflows/coding-task-workflow-agentic.lean.v2.json +1 -1
- package/workflows/workflow-for-workflows.json +4 -2
- package/workflows/workflow-for-workflows.v2.json +4 -2
- package/dist/console/assets/index-BXRk3te_.css +0 -1
- package/workflows/rich-object-contribution.json +0 -258
|
@@ -0,0 +1,573 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
14
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
15
|
+
}) : function(o, v) {
|
|
16
|
+
o["default"] = v;
|
|
17
|
+
});
|
|
18
|
+
var __importStar = (this && this.__importStar) || (function () {
|
|
19
|
+
var ownKeys = function(o) {
|
|
20
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
21
|
+
var ar = [];
|
|
22
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
23
|
+
return ar;
|
|
24
|
+
};
|
|
25
|
+
return ownKeys(o);
|
|
26
|
+
};
|
|
27
|
+
return function (mod) {
|
|
28
|
+
if (mod && mod.__esModule) return mod;
|
|
29
|
+
var result = {};
|
|
30
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
31
|
+
__setModuleDefault(result, mod);
|
|
32
|
+
return result;
|
|
33
|
+
};
|
|
34
|
+
})();
|
|
35
|
+
Object.defineProperty(exports, "__esModule", { value: true });
exports.DAEMON_SOUL_DEFAULT = exports.DAEMON_SESSIONS_DIR = void 0;
// Public API of the daemon workflow runner.
exports.readDaemonSessionState = readDaemonSessionState;
exports.readAllDaemonSessions = readAllDaemonSessions;
exports.runStartupRecovery = runStartupRecovery;
exports.buildSystemPrompt = buildSystemPrompt;
exports.runWorkflow = runWorkflow;
require("reflect-metadata");
const fs = __importStar(require("node:fs/promises"));
const path = __importStar(require("node:path"));
const os = __importStar(require("node:os"));
const node_child_process_1 = require("node:child_process");
const node_util_1 = require("node:util");
const node_crypto_1 = require("node:crypto");
const pi_mono_loader_js_1 = require("./pi-mono-loader.js");
const start_js_1 = require("../mcp/handlers/v2-execution/start.js");
const index_js_1 = require("../mcp/handlers/v2-execution/index.js");
// Promisified child_process.exec, used by the Bash tool.
const execAsync = (0, node_util_1.promisify)(node_child_process_1.exec);
// Hard ceiling for a single Bash tool invocation: 5 minutes.
const BASH_TIMEOUT_MS = 5 * 60 * 1000;
// Hard ceiling for a complete workflow run: 30 minutes.
const WORKFLOW_TIMEOUT_MS = 30 * 60 * 1000;
// Directory holding one JSON file per in-flight daemon session;
// files surviving a restart are treated as orphans (see runStartupRecovery).
exports.DAEMON_SESSIONS_DIR = path.join(os.homedir(), '.workrail', 'daemon-sessions');
// Orphaned sessions older than this are labelled "stale" in recovery logs.
const MAX_ORPHAN_AGE_MS = 2 * 60 * 60 * 1000;
const WORKRAIL_DIR = path.join(os.homedir(), '.workrail');
// Combined byte budget for injected workspace context files (32 KB).
const WORKSPACE_CONTEXT_MAX_BYTES = 32 * 1024;
// Workspace guidance files probed in priority order by loadWorkspaceContext.
const WORKSPACE_CONTEXT_CANDIDATE_PATHS = [
    '.claude/CLAUDE.md',
    'CLAUDE.md',
    'AGENTS.md',
    '.github/AGENTS.md',
];
// Built-in agent rules used when ~/.workrail/daemon-soul.md is unavailable.
exports.DAEMON_SOUL_DEFAULT = `\
- Write code that follows the patterns already established in the codebase
- Never skip tests. Run existing tests before and after changes
- Prefer small, focused changes over large rewrites
- If a step asks you to write code, write actual code -- do not write pseudocode or placeholders
- Commit your work when you complete a logical unit`;
// Seed content written to ~/.workrail/daemon-soul.md on first run.
const DAEMON_SOUL_TEMPLATE = `\
# WorkRail Daemon Soul
#
# This file is injected into every WorkRail Auto daemon session system prompt under
# "## Agent Rules and Philosophy". Edit it to customize the agent's behavior for
# your environment: coding conventions, commit style, tool preferences, etc.
#
# Changes take effect on the next daemon session -- no restart required.
#
# The defaults below reflect general best practices. Override them freely.

${exports.DAEMON_SOUL_DEFAULT}
`;
|
|
84
|
+
/**
 * Atomically persist the workflow tokens for one daemon session.
 *
 * Writes `<sessionId>.json.tmp` first and then renames it over the final
 * path, so concurrent readers never observe a half-written file.
 */
async function persistTokens(sessionId, continueToken, checkpointToken) {
    const target = path.join(exports.DAEMON_SESSIONS_DIR, `${sessionId}.json`);
    const scratch = `${target}.tmp`;
    const payload = JSON.stringify({ continueToken, checkpointToken, ts: Date.now() }, null, 2);
    await fs.mkdir(exports.DAEMON_SESSIONS_DIR, { recursive: true });
    await fs.writeFile(scratch, payload, 'utf8');
    await fs.rename(scratch, target);
}
|
|
92
|
+
/**
 * Load the persisted tokens for a single daemon session.
 *
 * Returns `{ continueToken, checkpointToken }`, or null when the session
 * file is missing, unreadable, or not valid JSON.
 */
async function readDaemonSessionState(sessionId) {
    const file = path.join(exports.DAEMON_SESSIONS_DIR, `${sessionId}.json`);
    try {
        const state = JSON.parse(await fs.readFile(file, 'utf8'));
        return { continueToken: state.continueToken, checkpointToken: state.checkpointToken };
    }
    catch {
        // Missing or corrupt session state is an expected condition.
        return null;
    }
}
|
|
103
|
+
/**
 * Scan the daemon sessions directory and return every well-formed session
 * record ({ sessionId, continueToken, checkpointToken, ts }).
 *
 * Malformed or unreadable files are skipped with a warning. A missing
 * directory is a normal cold-start state and yields an empty list.
 */
async function readAllDaemonSessions(sessionsDir = exports.DAEMON_SESSIONS_DIR) {
    let names;
    try {
        names = await fs.readdir(sessionsDir);
    }
    catch (err) {
        const enoent = err instanceof Error && 'code' in err && err.code === 'ENOENT';
        if (!enoent) {
            console.warn(`[WorkflowRunner] Could not read sessions directory ${sessionsDir}: ${err instanceof Error ? err.message : String(err)}`);
        }
        return [];
    }
    const result = [];
    for (const name of names.filter((n) => n.endsWith('.json'))) {
        const filePath = path.join(sessionsDir, name);
        try {
            const record = JSON.parse(await fs.readFile(filePath, 'utf8'));
            // A usable record needs at least a string token and a numeric timestamp.
            if (typeof record.continueToken !== 'string' || typeof record.ts !== 'number') {
                console.warn(`[WorkflowRunner] Skipping malformed session file: ${filePath}`);
                continue;
            }
            result.push({
                sessionId: name.slice(0, -5),
                continueToken: record.continueToken,
                checkpointToken: typeof record.checkpointToken === 'string' ? record.checkpointToken : null,
                ts: record.ts,
            });
        }
        catch (err) {
            console.warn(`[WorkflowRunner] Skipping unreadable session file ${filePath}: ${err instanceof Error ? err.message : String(err)}`);
        }
    }
    return result;
}
|
|
141
|
+
/**
 * Startup recovery: any session file present at boot belongs to a run that
 * died mid-flight. Clear every orphan (logging its age and whether it has
 * gone stale), then sweep any leftover atomic-write temp files.
 */
async function runStartupRecovery(sessionsDir = exports.DAEMON_SESSIONS_DIR) {
    const orphans = await readAllDaemonSessions(sessionsDir);
    if (orphans.length === 0) {
        await clearStrayTmpFiles(sessionsDir);
        return;
    }
    console.log(`[WorkflowRunner] Startup recovery: found ${orphans.length} orphaned session(s).`);
    const now = Date.now();
    let cleared = 0;
    for (const orphan of orphans) {
        const ageMs = now - orphan.ts;
        const label = ageMs > MAX_ORPHAN_AGE_MS ? 'stale orphaned session' : 'orphaned session';
        console.log(`[WorkflowRunner] Clearing ${label}: sessionId=${orphan.sessionId} age=${Math.round(ageMs / 1000)}s`);
        try {
            await fs.unlink(path.join(sessionsDir, `${orphan.sessionId}.json`));
            cleared += 1;
        }
        catch (err) {
            // A concurrent deletion (ENOENT) is fine; anything else is logged.
            const enoent = err instanceof Error && 'code' in err && err.code === 'ENOENT';
            if (!enoent) {
                console.warn(`[WorkflowRunner] Could not clear session file ${orphan.sessionId}: ${err instanceof Error ? err.message : String(err)}`);
            }
        }
    }
    await clearStrayTmpFiles(sessionsDir);
    console.log(`[WorkflowRunner] Startup recovery complete: cleared ${cleared}/${orphans.length} orphaned session(s).`);
}
|
|
170
|
+
/**
 * Remove leftover `*.tmp` files from interrupted atomic writes in the
 * sessions directory. Entirely best-effort: a missing directory or a
 * failed unlink is silently ignored.
 */
async function clearStrayTmpFiles(sessionsDir) {
    let names;
    try {
        names = await fs.readdir(sessionsDir);
    }
    catch {
        return;
    }
    const strays = names.filter((name) => name.endsWith('.tmp'));
    for (const name of strays) {
        try {
            await fs.unlink(path.join(sessionsDir, name));
            console.log(`[WorkflowRunner] Cleared stray temp file: ${name}`);
        }
        catch {
            // Ignore races with concurrent deletion.
        }
    }
}
|
|
189
|
+
/**
 * Read the user's agent rules from ~/.workrail/daemon-soul.md.
 *
 * First run (file missing): seed the file with the commented template and
 * fall back to the built-in defaults. Any other read error: warn and fall
 * back as well. The template write itself is best-effort.
 */
async function loadDaemonSoul() {
    const soulPath = path.join(WORKRAIL_DIR, 'daemon-soul.md');
    try {
        return await fs.readFile(soulPath, 'utf8');
    }
    catch (err) {
        if (err instanceof Error && 'code' in err && err.code === 'ENOENT') {
            try {
                await fs.mkdir(WORKRAIL_DIR, { recursive: true });
                await fs.writeFile(soulPath, DAEMON_SOUL_TEMPLATE, 'utf8');
                console.log(`[WorkflowRunner] Created daemon-soul.md template at ${soulPath}`);
            }
            catch (writeErr) {
                console.warn(`[WorkflowRunner] Warning: could not write daemon-soul.md template: ${writeErr instanceof Error ? writeErr.message : String(writeErr)}`);
            }
        }
        else {
            console.warn(`[WorkflowRunner] Warning: could not read daemon-soul.md: ${err instanceof Error ? err.message : String(err)}`);
        }
        return exports.DAEMON_SOUL_DEFAULT;
    }
}
|
|
212
|
+
/**
 * Collect workspace guidance files (CLAUDE.md / AGENTS.md variants) into a
 * single markdown string, capped at WORKSPACE_CONTEXT_MAX_BYTES combined.
 *
 * Candidates are read in priority order; once the byte budget is exhausted
 * the current file is truncated, a truncation notice is appended, and the
 * remaining candidates are skipped. Returns null when no candidate exists.
 *
 * Fix vs. previous version: truncation now cuts encoded BYTES rather than
 * UTF-16 code units — `String#slice(0, remaining)` counts code units, so
 * multi-byte content could exceed the 32 KB byte budget.
 */
async function loadWorkspaceContext(workspacePath) {
    const parts = [];
    let combinedBytes = 0;
    let truncated = false;
    for (const relativePath of WORKSPACE_CONTEXT_CANDIDATE_PATHS) {
        if (truncated)
            break;
        const fullPath = path.join(workspacePath, relativePath);
        let content;
        try {
            content = await fs.readFile(fullPath, 'utf8');
        }
        catch (err) {
            // Missing candidates are expected; anything else is worth a warning.
            const isEnoent = err instanceof Error && 'code' in err && err.code === 'ENOENT';
            if (!isEnoent) {
                console.warn(`[WorkflowRunner] Skipping ${fullPath}: ${err instanceof Error ? err.message : String(err)}`);
            }
            continue;
        }
        const contentBytes = Buffer.byteLength(content, 'utf8');
        if (combinedBytes + contentBytes > WORKSPACE_CONTEXT_MAX_BYTES) {
            const remaining = WORKSPACE_CONTEXT_MAX_BYTES - combinedBytes;
            // Cut the UTF-8 byte stream at the budget, then drop any trailing
            // replacement character produced by splitting a code point.
            const truncatedContent = Buffer.from(content, 'utf8')
                .subarray(0, remaining)
                .toString('utf8')
                .replace(/\uFFFD+$/u, '');
            parts.push(`### ${relativePath}\n${truncatedContent}`);
            truncated = true;
        }
        else {
            parts.push(`### ${relativePath}\n${content}`);
            combinedBytes += contentBytes;
        }
    }
    if (parts.length === 0)
        return null;
    let combined = parts.join('\n\n');
    if (truncated) {
        combined += '\n\n[Workspace context truncated: combined size exceeded 32 KB limit. Some files may be missing.]';
    }
    console.log(`[WorkflowRunner] Injecting workspace context from: ${WORKSPACE_CONTEXT_CANDIDATE_PATHS.filter((p) => parts.some((part) => part.startsWith(`### ${p}`))).join(', ')}`);
    return combined;
}
|
|
252
|
+
// Memoized tool-parameter schemas; built lazily because the TypeBox-style
// `Type` builder comes from the dynamically loaded pi-ai module.
let _schemas = null;
/**
 * Build (once) and return the parameter schemas for the four daemon tools:
 * continue_workflow, Bash, Read, Write. The dynamic pi-ai import and schema
 * construction happen at most once per process.
 */
async function getSchemas() {
    if (_schemas)
        return _schemas;
    const { Type } = await (0, pi_mono_loader_js_1.loadPiAi)();
    _schemas = {
        ContinueWorkflowParams: Type.Object({
            continueToken: Type.String({
                description: 'The continueToken from the previous start_workflow or continue_workflow call. Round-trip exactly as received.',
            }),
            intent: Type.Optional(Type.Union([Type.Literal('advance'), Type.Literal('rehydrate')], {
                description: 'advance: I completed this step. rehydrate: remind me what the current step is.',
            })),
            notesMarkdown: Type.Optional(Type.String({
                description: 'Notes on what you did in this step (10-30 lines, markdown).',
            })),
            context: Type.Optional(Type.Record(Type.String(), Type.Unknown(), {
                description: 'Updated context variables (only changed values).',
            })),
        }),
        BashParams: Type.Object({
            command: Type.String({ description: 'Shell command to execute' }),
            cwd: Type.Optional(Type.String({ description: 'Working directory for the command' })),
        }),
        ReadParams: Type.Object({
            filePath: Type.String({ description: 'Absolute path to the file to read' }),
        }),
        WriteParams: Type.Object({
            filePath: Type.String({ description: 'Absolute path to the file to write' }),
            content: Type.String({ description: 'Content to write to the file' }),
        }),
    };
    return _schemas;
}
|
|
286
|
+
/**
 * Build the `continue_workflow` tool definition.
 *
 * On each call it advances the workflow via executeContinueWorkflow,
 * persists the fresh tokens to disk, and signals the caller through the
 * two callbacks: `onComplete(notes)` when the workflow reports completion,
 * `onAdvance(stepText, continueToken)` otherwise (used to steer the agent
 * onto the next step).
 */
function makeContinueWorkflowTool(sessionId, ctx, onAdvance, onComplete, schemas) {
    return {
        name: 'continue_workflow',
        description: 'Advance the WorkRail workflow to the next step. Call this after completing all work ' +
            'required by the current step. Include your notes in notesMarkdown.',
        parameters: schemas['ContinueWorkflowParams'],
        label: 'Continue Workflow',
        execute: async (_toolCallId, params) => {
            const result = await (0, index_js_1.executeContinueWorkflow)({
                continueToken: params.continueToken,
                // Default to advancing; 'rehydrate' only re-describes the current step.
                intent: (params.intent ?? 'advance'),
                output: params.notesMarkdown
                    ? { notesMarkdown: params.notesMarkdown }
                    : undefined,
                context: params.context,
            }, ctx);
            if (result.isErr()) {
                // Surface handler failures to the agent as a tool error.
                throw new Error(`continue_workflow failed: ${result.error.kind} -- ${JSON.stringify(result.error)}`);
            }
            const out = result.value.response;
            const continueToken = out.continueToken ?? '';
            const checkpointToken = out.checkpointToken ?? null;
            if (continueToken) {
                // Persist so a crashed daemon can be recovered/cleaned at startup.
                await persistTokens(sessionId, continueToken, checkpointToken);
            }
            if (out.isComplete) {
                onComplete('Workflow session complete.');
                return {
                    content: [{ type: 'text', text: 'Workflow complete. All steps have been executed.' }],
                    details: out,
                };
            }
            const pending = out.pending;
            // Next-step prompt for the agent; always round-trips the token.
            const stepText = pending
                ? `## Next step: ${pending.title}\n\n${pending.prompt}\n\ncontinueToken: ${continueToken}`
                : `Step advanced. continueToken: ${continueToken}`;
            onAdvance(stepText, continueToken);
            return {
                content: [{ type: 'text', text: stepText }],
                details: out,
            };
        },
    };
}
|
|
330
|
+
/**
 * Build the `Bash` tool definition: runs a shell command via exec with a
 * hard timeout. A non-zero exit code makes execAsync reject, which
 * surfaces to the agent as a tool error.
 */
function makeBashTool(workspacePath, schemas) {
    const run = async (_toolCallId, params) => {
        const { stdout, stderr } = await execAsync(params.command, {
            // Fall back to the workspace root when no cwd was requested.
            cwd: params.cwd ?? workspacePath,
            timeout: BASH_TIMEOUT_MS,
        });
        const combined = [stdout, stderr].filter(Boolean).join('\n');
        return {
            content: [{ type: 'text', text: combined || '(no output)' }],
            details: { stdout, stderr },
        };
    };
    return {
        name: 'Bash',
        description: 'Execute a shell command. Throws on non-zero exit code. ' +
            `Maximum execution time: ${BASH_TIMEOUT_MS / 1000}s.`,
        parameters: schemas['BashParams'],
        label: 'Bash',
        execute: run,
    };
}
|
|
351
|
+
/**
 * Build the `Read` tool definition: returns a file's UTF-8 contents
 * along with its path and character length.
 */
function makeReadTool(schemas) {
    const readFile = async (_toolCallId, params) => {
        const content = await fs.readFile(params.filePath, 'utf8');
        return {
            content: [{ type: 'text', text: content }],
            details: { filePath: params.filePath, length: content.length },
        };
    };
    return {
        name: 'Read',
        description: 'Read the contents of a file at the given absolute path.',
        parameters: schemas['ReadParams'],
        label: 'Read',
        execute: readFile,
    };
}
|
|
366
|
+
/**
 * Build the `Write` tool definition: writes UTF-8 content to a file,
 * creating any missing parent directories first.
 */
function makeWriteTool(schemas) {
    const writeFile = async (_toolCallId, params) => {
        const { filePath, content } = params;
        await fs.mkdir(path.dirname(filePath), { recursive: true });
        await fs.writeFile(filePath, content, 'utf8');
        return {
            content: [{ type: 'text', text: `Written ${content.length} bytes to ${filePath}` }],
            details: { filePath, length: content.length },
        };
    };
    return {
        name: 'Write',
        description: 'Write content to a file at the given absolute path. Creates parent directories if needed.',
        parameters: schemas['WriteParams'],
        label: 'Write',
        execute: writeFile,
    };
}
|
|
382
|
+
/**
 * Assemble the daemon agent's system prompt.
 *
 * Fixed sections (identity, tools, execution contract) are followed by the
 * session state marker, the soul rules, the workspace path, and — when
 * available — the workspace context and reference URLs from the trigger.
 */
function buildSystemPrompt(trigger, sessionState, soulContent, workspaceContext) {
    const sections = [
        'You are WorkRail Auto, an autonomous agent that executes workflows step by step.',
        '',
        '## Your tools',
        "- `continue_workflow`: Advance to the next step. Call this after completing each step's work.",
        " Always include your notes in notesMarkdown and round-trip the continueToken exactly.",
        '- `Bash`: Run shell commands. Use for building, testing, running scripts.',
        '- `Read`: Read files.',
        '- `Write`: Write files.',
        '',
        '## Execution contract',
        '1. Read the step carefully. Do ALL the work the step asks for.',
        '2. Call `continue_workflow` with your notes. Include the continueToken exactly.',
        '3. Repeat until the workflow reports it is complete.',
        "4. Do NOT skip steps. Do NOT call `continue_workflow` without completing the step's work.",
        '',
        `<workrail_session_state>${sessionState}</workrail_session_state>`,
        '',
        '## Agent Rules and Philosophy',
        soulContent,
        '',
        `## Workspace: ${trigger.workspacePath}`,
    ];
    if (workspaceContext !== null) {
        sections.push('', '## Workspace Context (from AGENTS.md / CLAUDE.md)', workspaceContext);
    }
    if (trigger.referenceUrls && trigger.referenceUrls.length > 0) {
        sections.push('', '## Reference documents', 'Before starting, fetch and read these reference documents: ' +
            trigger.referenceUrls.join(' '), 'If you cannot fetch any of these documents, note their unavailability and proceed.');
    }
    return sections.join('\n');
}
|
|
420
|
+
/**
 * Wrap plain text as a user-role chat message stamped with the current time.
 */
function buildUserMessage(text) {
    return { role: 'user', content: text, timestamp: Date.now() };
}
|
|
427
|
+
/**
 * Drive one workflow end-to-end with an autonomous agent.
 *
 * Flow: resolve a model → start the workflow → build tools and system
 * prompt → run the agent (with a 30-minute timeout race) → classify the
 * outcome. The session file on disk and the daemon registry entry are
 * cleaned up on every exit path.
 *
 * Returns a tagged result object:
 *   { _tag: 'success', workflowId, stopReason } or
 *   { _tag: 'error',   workflowId, message, stopReason }.
 */
async function runWorkflow(trigger, ctx, apiKey, daemonRegistry) {
    const sessionId = (0, node_crypto_1.randomUUID)();
    console.log(`[WorkflowRunner] Session started: sessionId=${sessionId} workflowId=${trigger.workflowId}`);
    daemonRegistry?.register(sessionId, trigger.workflowId);
    // --- Model resolution -------------------------------------------------
    // Explicit "provider/model-id" from the trigger wins; otherwise pick a
    // default based on whether AWS credentials suggest a Bedrock setup.
    let model;
    try {
        const { getModel } = await (0, pi_mono_loader_js_1.loadPiAi)();
        if (trigger.agentConfig?.model) {
            const slashIdx = trigger.agentConfig.model.indexOf('/');
            if (slashIdx === -1) {
                throw new Error(`agentConfig.model must be in "provider/model-id" format, got: "${trigger.agentConfig.model}"`);
            }
            const provider = trigger.agentConfig.model.slice(0, slashIdx);
            const modelId = trigger.agentConfig.model.slice(slashIdx + 1);
            model = getModel(provider, modelId);
        }
        else {
            const usesBedrock = !!process.env['AWS_PROFILE'] || !!process.env['AWS_ACCESS_KEY_ID'];
            if (usesBedrock) {
                model = getModel('amazon-bedrock', 'us.anthropic.claude-sonnet-4-6');
            }
            else {
                model = getModel('anthropic', 'claude-sonnet-4-5');
            }
        }
    }
    catch (err) {
        daemonRegistry?.unregister(sessionId, 'failed');
        return {
            _tag: 'error',
            workflowId: trigger.workflowId,
            message: `Model not found: ${err instanceof Error ? err.message : String(err)}`,
            stopReason: 'error',
        };
    }
    // --- Tool callbacks ---------------------------------------------------
    // onAdvance stashes the next-step prompt; it is delivered to the agent
    // on the following turn_end via agent.steer (see subscription below).
    let isComplete = false;
    let pendingSteerText = null;
    const onAdvance = (stepText, _continueToken) => {
        pendingSteerText = stepText;
        daemonRegistry?.heartbeat(sessionId);
    };
    const onComplete = (_notes) => {
        isComplete = true;
    };
    // --- Start the workflow ----------------------------------------------
    const startResult = await (0, start_js_1.executeStartWorkflow)({ workflowId: trigger.workflowId, workspacePath: trigger.workspacePath, goal: trigger.goal }, ctx, { is_autonomous: 'true' });
    if (startResult.isErr()) {
        daemonRegistry?.unregister(sessionId, 'failed');
        return {
            _tag: 'error',
            workflowId: trigger.workflowId,
            message: `start_workflow failed: ${startResult.error.kind} -- ${JSON.stringify(startResult.error)}`,
            stopReason: 'error',
        };
    }
    const firstStep = startResult.value.response;
    const startContinueToken = firstStep.continueToken ?? '';
    const startCheckpointToken = firstStep.checkpointToken ?? null;
    if (startContinueToken) {
        // Persist immediately so a crash before the first advance is recoverable.
        await persistTokens(sessionId, startContinueToken, startCheckpointToken);
    }
    if (firstStep.isComplete) {
        // Zero-step workflow: nothing for the agent to do.
        await fs.unlink(path.join(exports.DAEMON_SESSIONS_DIR, `${sessionId}.json`)).catch(() => { });
        daemonRegistry?.unregister(sessionId, 'completed');
        return { _tag: 'success', workflowId: trigger.workflowId, stopReason: 'stop' };
    }
    // --- Agent assembly ---------------------------------------------------
    const schemas = await getSchemas();
    const tools = [
        makeContinueWorkflowTool(sessionId, ctx, onAdvance, onComplete, schemas),
        makeBashTool(trigger.workspacePath, schemas),
        makeReadTool(schemas),
        makeWriteTool(schemas),
    ];
    const [soulContent, workspaceContext] = await Promise.all([
        loadDaemonSoul(),
        loadWorkspaceContext(trigger.workspacePath),
    ]);
    const contextJson = trigger.context
        ? `\n\nTrigger context:\n\`\`\`json\n${JSON.stringify(trigger.context, null, 2)}\n\`\`\``
        : '';
    const initialPrompt = (firstStep.pending?.prompt ?? 'No step content available') +
        `\n\ncontinueToken: ${startContinueToken}` +
        contextJson;
    const { Agent } = await (0, pi_mono_loader_js_1.loadPiAgentCore)();
    const agent = new Agent({
        initialState: {
            systemPrompt: buildSystemPrompt(trigger, '', soulContent, workspaceContext),
            model,
            tools,
        },
        getApiKey: async (_provider) => apiKey ?? '',
        toolExecution: 'sequential',
    });
    // After each agent turn, deliver the queued next-step prompt (if any)
    // unless the workflow already completed.
    const unsubscribe = agent.subscribe(async (event) => {
        if (event.type !== 'turn_end')
            return;
        if (pendingSteerText !== null && !isComplete) {
            const text = pendingSteerText;
            pendingSteerText = null;
            agent.steer(buildUserMessage(text));
        }
    });
    // --- Run, with a hard timeout ----------------------------------------
    let stopReason = 'stop';
    let errorMessage;
    try {
        const timeoutPromise = new Promise((_, reject) => setTimeout(() => reject(new Error('Workflow timed out')), WORKFLOW_TIMEOUT_MS));
        await Promise.race([agent.prompt(buildUserMessage(initialPrompt)), timeoutPromise])
            .catch((err) => {
            // Abort the agent on timeout or prompt failure, then re-throw so
            // the outer catch records the error outcome.
            agent.abort();
            throw err;
        });
        // Derive the outcome from the last assistant message, if any.
        const messages = agent.state.messages;
        let lastAssistant;
        for (let i = messages.length - 1; i >= 0; i--) {
            const m = messages[i];
            if ('role' in m && m.role === 'assistant') {
                lastAssistant = m;
                break;
            }
        }
        stopReason = lastAssistant?.stopReason ?? 'stop';
        errorMessage = lastAssistant?.errorMessage;
    }
    catch (err) {
        errorMessage = err instanceof Error ? err.message : String(err);
        stopReason = 'error';
    }
    finally {
        unsubscribe();
    }
    if (stopReason === 'error' || errorMessage) {
        daemonRegistry?.unregister(sessionId, 'failed');
        return {
            _tag: 'error',
            workflowId: trigger.workflowId,
            message: errorMessage ?? 'Agent stopped with error reason',
            stopReason,
        };
    }
    // Success: remove the session file (best-effort) and release the registry slot.
    await fs.unlink(path.join(exports.DAEMON_SESSIONS_DIR, `${sessionId}.json`)).catch(() => {
    });
    daemonRegistry?.unregister(sessionId, 'completed');
    return {
        _tag: 'success',
        workflowId: trigger.workflowId,
        stopReason,
    };
}
|
|
@@ -531,7 +531,7 @@ let HttpServer = class HttpServer {
|
|
|
531
531
|
reject(error);
|
|
532
532
|
});
|
|
533
533
|
const listenPort = this.port;
|
|
534
|
-
this.server.listen(listenPort, () => {
|
|
534
|
+
this.server.listen(listenPort, '127.0.0.1', () => {
|
|
535
535
|
this.baseUrl = `http://localhost:${listenPort}`;
|
|
536
536
|
this.printBanner();
|
|
537
537
|
resolve();
|
|
@@ -552,7 +552,7 @@ let HttpServer = class HttpServer {
|
|
|
552
552
|
reject(error);
|
|
553
553
|
}
|
|
554
554
|
});
|
|
555
|
-
this.server.listen(this.port, () => {
|
|
555
|
+
this.server.listen(this.port, '127.0.0.1', () => {
|
|
556
556
|
resolve();
|
|
557
557
|
});
|
|
558
558
|
});
|