chainlesschain 0.45.81 → 0.47.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -0
- package/bin/chainlesschain.js +0 -0
- package/package.json +1 -1
- package/src/assets/web-panel/.build-hash +1 -1
- package/src/assets/web-panel/assets/{Analytics-C1AnPdMx.js → Analytics-DgypYeUB.js} +2 -2
- package/src/assets/web-panel/assets/AppLayout-Bzf3mSZI.js +1 -0
- package/src/assets/web-panel/assets/AppLayout-DQyDwGut.css +1 -0
- package/src/assets/web-panel/assets/{Backup-D31iZX3l.js → Backup-Ba9UybpT.js} +1 -1
- package/src/assets/web-panel/assets/{Chat-DiXJ3TuK.js → Chat-BwXskT21.js} +1 -1
- package/src/assets/web-panel/assets/Cowork-CXuhlHew.css +1 -0
- package/src/assets/web-panel/assets/Cowork-UmOe7qvE.js +7 -0
- package/src/assets/web-panel/assets/{Cron-DBt1ueXh.js → Cron-JHS-rc-4.js} +2 -2
- package/src/assets/web-panel/assets/{Dashboard-HPh9FcPt.js → Dashboard-B95cMCO7.js} +2 -2
- package/src/assets/web-panel/assets/Dashboard-CKeMmCoT.css +1 -0
- package/src/assets/web-panel/assets/{Git-hwQ1oZHj.js → Git-CSYO0_zk.js} +2 -2
- package/src/assets/web-panel/assets/{Logs-4D9p6PRM.js → Logs-Hxw_K0km.js} +2 -2
- package/src/assets/web-panel/assets/{McpTools-CyAUjbbs.js → McpTools-DIE75TrB.js} +2 -2
- package/src/assets/web-panel/assets/{Memory-BMqOR7S-.js → Memory-C4KVnLlp.js} +2 -2
- package/src/assets/web-panel/assets/{Notes-Cmas8i4E.js → Notes-DuzrHMAk.js} +2 -2
- package/src/assets/web-panel/assets/{Organization-DnSa58Tl.js → Organization-DTq6uF82.js} +4 -4
- package/src/assets/web-panel/assets/{P2P-BxksIBWs.js → P2P-C0hjlhsR.js} +2 -2
- package/src/assets/web-panel/assets/{Permissions-Bq5Qn2s3.js → Permissions-Ec0NH-xC.js} +4 -4
- package/src/assets/web-panel/assets/{Projects-B7EM0uPg.js → Projects-U8D0asCS.js} +2 -2
- package/src/assets/web-panel/assets/{Providers-DAwgG5KV.js → Providers-BngtTLvJ.js} +2 -2
- package/src/assets/web-panel/assets/{RssFeed-HSZoRXvS.js → RssFeed-B9NbwCKM.js} +3 -3
- package/src/assets/web-panel/assets/{Security-Cz17qBny.js → Security-BL5Rkr1T.js} +3 -3
- package/src/assets/web-panel/assets/{Services-D2EsLq-v.js → Services-D4MJzLld.js} +2 -2
- package/src/assets/web-panel/assets/{Skills-C9v-f3vZ.js → Skills-CQTOMDwF.js} +1 -1
- package/src/assets/web-panel/assets/{Tasks-yMEcU0n7.js → Tasks-DepbJMnL.js} +1 -1
- package/src/assets/web-panel/assets/{Templates-l7SvlKuB.js → Templates-C24PVZPu.js} +1 -1
- package/src/assets/web-panel/assets/{Wallet-BHWhLWn9.js → Wallet-PQoSpN_P.js} +3 -3
- package/src/assets/web-panel/assets/{WebAuthn-kWhFYaUK.js → WebAuthn-BcuyQ4Lr.js} +4 -4
- package/src/assets/web-panel/assets/WorkflowEditor-C-SvXbHW.js +1 -0
- package/src/assets/web-panel/assets/WorkflowEditor-D5bX6woe.css +1 -0
- package/src/assets/web-panel/assets/{antd-D6h4fDFf.js → antd-DEjZPGMj.js} +82 -82
- package/src/assets/web-panel/assets/index-CwvzTTw_.js +2 -0
- package/src/assets/web-panel/assets/{markdown-BZsB-Dsv.js → markdown-CusdXFxb.js} +1 -1
- package/src/assets/web-panel/index.html +2 -2
- package/src/commands/cowork.js +867 -0
- package/src/gateways/ws/action-protocol.js +182 -2
- package/src/gateways/ws/message-dispatcher.js +5 -0
- package/src/gateways/ws/ws-server.js +21 -0
- package/src/lib/cowork-cron.js +474 -0
- package/src/lib/cowork-evomap-adapter.js +121 -0
- package/src/lib/cowork-learning.js +438 -0
- package/src/lib/cowork-mcp-tools.js +182 -0
- package/src/lib/cowork-observe-html.js +108 -0
- package/src/lib/cowork-observe.js +160 -0
- package/src/lib/cowork-share.js +322 -0
- package/src/lib/cowork-task-runner.js +317 -3
- package/src/lib/cowork-task-templates.js +101 -13
- package/src/lib/cowork-template-marketplace.js +205 -0
- package/src/lib/cowork-workflow.js +571 -0
- package/src/lib/provider-options.js +133 -0
- package/src/lib/skill-loader.js +65 -0
- package/src/lib/sub-agent-context.js +54 -2
- package/src/lib/sub-agent-profiles.js +164 -0
- package/src/lib/todo-manager.js +108 -0
- package/src/lib/turn-context.js +95 -0
- package/src/lib/web-fetch.js +224 -0
- package/src/lib/workflow-expr.js +318 -0
- package/src/repl/agent-repl.js +4 -0
- package/src/runtime/agent-core.js +135 -3
- package/src/runtime/coding-agent-contract-shared.cjs +131 -0
- package/src/runtime/coding-agent-policy.cjs +30 -0
- package/src/assets/web-panel/assets/AppLayout-YdvJBMHH.js +0 -1
- package/src/assets/web-panel/assets/AppLayout-cxfKLu-m.css +0 -1
- package/src/assets/web-panel/assets/Cowork-BnrHWwZw.js +0 -7
- package/src/assets/web-panel/assets/Cowork-CcSoS3eX.css +0 -1
- package/src/assets/web-panel/assets/Dashboard-BS-tzGNj.css +0 -1
- package/src/assets/web-panel/assets/index-ByUk2Wmr.js +0 -2
|
@@ -0,0 +1,571 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Cowork Workflow — chain multiple Cowork tasks into a DAG.
|
|
3
|
+
*
|
|
4
|
+
* A workflow is a declarative set of steps with optional dependencies. Each
|
|
5
|
+
* step invokes a Cowork template (or free mode) with a user message that can
|
|
6
|
+
* reference earlier steps' results via `${step.<id>.summary}` placeholders.
|
|
7
|
+
*
|
|
8
|
+
* The executor:
|
|
9
|
+
* 1. topologically sorts steps by `dependsOn`
|
|
10
|
+
* 2. runs independent steps in parallel (bounded by `maxParallel`)
|
|
11
|
+
* 3. substitutes placeholders in `message` from completed step outputs
|
|
12
|
+
* 4. halts on first failure unless `continueOnError` is set
|
|
13
|
+
*
|
|
14
|
+
* Persistence mirrors the cron scheduler: one JSON file per workflow under
|
|
15
|
+
* `.chainlesschain/cowork/workflows/<id>.json`, plus a `run-history.jsonl`
|
|
16
|
+
* capturing each execution.
|
|
17
|
+
*
|
|
18
|
+
* The runner itself is injected via `_deps.runTask` to avoid a circular import
|
|
19
|
+
* with `cowork-task-runner.js`.
|
|
20
|
+
*
|
|
21
|
+
* @module cowork-workflow
|
|
22
|
+
*/
|
|
23
|
+
|
|
24
|
+
import {
|
|
25
|
+
existsSync,
|
|
26
|
+
mkdirSync,
|
|
27
|
+
readFileSync,
|
|
28
|
+
writeFileSync,
|
|
29
|
+
readdirSync,
|
|
30
|
+
unlinkSync,
|
|
31
|
+
appendFileSync,
|
|
32
|
+
} from "node:fs";
|
|
33
|
+
import { join } from "node:path";
|
|
34
|
+
import { evaluate as evalExpr, resolveReference } from "./workflow-expr.js";
|
|
35
|
+
|
|
36
|
+
/** Maximum number of items a single forEach step can expand into. */
export const MAX_FAN_OUT = 500;

/**
 * Dependency-injection seam. Tests and the CLI can swap the filesystem
 * primitives, the clock, and the task runner without patching node:fs.
 * All persistence and execution helpers in this module go through `_deps`
 * rather than calling the imported fs functions directly.
 */
export const _deps = {
  existsSync,
  mkdirSync,
  readFileSync,
  writeFileSync,
  readdirSync,
  unlinkSync,
  appendFileSync,
  // Clock indirection so tests can freeze or advance time.
  now: () => Date.now(),
  // Must be set to a runCoworkTask-compatible function before
  // executeWorkflow() is called; executeWorkflow throws if it is missing.
  runTask: null, // injected by CLI
};
|
|
50
|
+
|
|
51
|
+
// ─── Paths ───────────────────────────────────────────────────────────────────
|
|
52
|
+
|
|
53
|
+
/** Directory holding one JSON file per saved workflow: `<cwd>/.chainlesschain/cowork/workflows`. */
function workflowsDir(cwd) {
  return join(cwd, ".chainlesschain", "cowork", "workflows");
}

/**
 * Path of the JSON file that persists workflow `id`.
 * NOTE(review): `id` is interpolated into a filename without sanitization —
 * an id containing `/` or `..` would escape the workflows directory; confirm
 * ids are constrained upstream (validateWorkflow only checks it is a string).
 */
function workflowFile(cwd, id) {
  return join(workflowsDir(cwd), `${id}.json`);
}

/** Append-only JSONL file where each workflow execution record is logged. */
function historyFile(cwd) {
  return join(cwd, ".chainlesschain", "cowork", "workflow-history.jsonl");
}
|
|
64
|
+
|
|
65
|
+
// ─── Validation ──────────────────────────────────────────────────────────────
|
|
66
|
+
|
|
67
|
+
/**
 * Validate a workflow definition.
 *
 * Checks: required `id`/`name` strings, a non-empty `steps` array, unique
 * step ids, a required `message` per step, well-typed `dependsOn` / `when` /
 * `forEach`, that every `dependsOn` target exists, and — only once all of the
 * above passes — that the dependency graph is acyclic (via topoSort).
 *
 * Never throws: every problem is collected into the `errors` array.
 *
 * @param {object} wf - Candidate workflow definition.
 * @returns {{ valid: boolean, errors: string[] }}
 */
export function validateWorkflow(wf) {
  const errors = [];
  if (!wf || typeof wf !== "object") {
    return { valid: false, errors: ["workflow must be an object"] };
  }
  if (!wf.id || typeof wf.id !== "string") errors.push("id required");
  if (!wf.name || typeof wf.name !== "string") errors.push("name required");
  if (!Array.isArray(wf.steps) || wf.steps.length === 0) {
    errors.push("steps must be a non-empty array");
  } else {
    const ids = new Set();
    for (const [i, s] of wf.steps.entries()) {
      if (!s.id || typeof s.id !== "string") {
        errors.push(`steps[${i}].id required`);
        continue;
      }
      if (ids.has(s.id)) errors.push(`duplicate step id '${s.id}'`);
      ids.add(s.id);
      if (!s.message || typeof s.message !== "string") {
        errors.push(`steps[${i}].message required`);
      }
      if (s.dependsOn && !Array.isArray(s.dependsOn)) {
        errors.push(`steps[${i}].dependsOn must be an array`);
      }
      if (s.when !== undefined && typeof s.when !== "string") {
        errors.push(`steps[${i}].when must be a string expression`);
      }
      if (s.forEach !== undefined) {
        const f = s.forEach;
        const ok =
          Array.isArray(f) || (typeof f === "string" && f.trim().length > 0);
        if (!ok) {
          errors.push(
            `steps[${i}].forEach must be an array or reference string`,
          );
        }
      }
    }
    // Check dependsOn references exist.
    for (const s of wf.steps) {
      // BUGFIX: only iterate real arrays. The previous
      // `for (const dep of s.dependsOn || [])` threw a TypeError on
      // non-iterable values (e.g. `dependsOn: 42`) and walked strings
      // character-by-character, instead of returning the
      // "must be an array" error already collected above.
      if (!Array.isArray(s.dependsOn)) continue;
      for (const dep of s.dependsOn) {
        if (!ids.has(dep)) {
          errors.push(`step '${s.id}' dependsOn unknown step '${dep}'`);
        }
      }
    }
    // Detect cycles via topo-sort (only meaningful on otherwise-clean input).
    if (errors.length === 0) {
      try {
        topoSort(wf.steps);
      } catch (e) {
        errors.push(e.message);
      }
    }
  }
  return { valid: errors.length === 0, errors };
}
|
|
127
|
+
|
|
128
|
+
// ─── Topological sort ────────────────────────────────────────────────────────
|
|
129
|
+
|
|
130
|
+
/**
 * Return steps in execution order using Kahn's algorithm (FIFO). Throws on a
 * cycle. The result is flat; planBatches separately groups independent steps
 * into parallel batches. Dependencies on unknown step ids are never satisfied,
 * so they also surface as the cycle error.
 */
export function topoSort(steps) {
  const byId = new Map(steps.map((s) => [s.id, s]));
  // Remaining unsatisfied dependencies per step id.
  const pending = new Map(steps.map((s) => [s.id, new Set(s.dependsOn || [])]));
  // Reverse edges: step id → ids of steps that wait on it.
  const dependents = new Map(steps.map((s) => [s.id, []]));
  for (const s of steps) {
    for (const dep of s.dependsOn || []) {
      dependents.get(dep)?.push(s.id);
    }
  }

  // FIFO queue seeded with every step that has no dependencies; a moving
  // head index replaces repeated shift() calls.
  const queue = [...pending.keys()].filter((id) => pending.get(id).size === 0);
  const order = [];
  for (let head = 0; head < queue.length; head++) {
    const id = queue[head];
    order.push(byId.get(id));
    for (const child of dependents.get(id)) {
      const waits = pending.get(child);
      waits.delete(id);
      if (waits.size === 0) queue.push(child);
    }
  }

  if (order.length !== steps.length) {
    throw new Error("workflow contains a cycle");
  }
  return order;
}
|
|
170
|
+
|
|
171
|
+
/**
 * Group steps into parallel batches based on dependencies. Every step in a
 * batch depends only on steps from earlier batches, so the batch can run
 * concurrently. Throws when no step becomes runnable (i.e. a cycle).
 */
export function planBatches(steps) {
  const lookup = new Map(steps.map((s) => [s.id, s]));
  const finished = new Set();
  const batches = [];
  let pendingIds = [...lookup.keys()];

  while (pendingIds.length > 0) {
    // A step is ready once every declared dependency has finished.
    const readyIds = pendingIds.filter((id) =>
      (lookup.get(id).dependsOn || []).every((d) => finished.has(d)),
    );
    if (readyIds.length === 0) throw new Error("workflow contains a cycle");
    batches.push(readyIds.map((id) => lookup.get(id)));
    for (const id of readyIds) finished.add(id);
    pendingIds = pendingIds.filter((id) => !finished.has(id));
  }
  return batches;
}
|
|
197
|
+
|
|
198
|
+
// ─── forEach expansion ───────────────────────────────────────────────────────
|
|
199
|
+
|
|
200
|
+
/**
 * Resolve the array source for a `forEach` step. Accepts either:
 *  - an array literal (returned verbatim)
 *  - a `${...}` reference string resolving to an array on a prior step result
 *
 * Throws if the resolved value isn't an array or exceeds MAX_FAN_OUT.
 */
export function resolveForEachItems(forEach, resultsById) {
  if (Array.isArray(forEach)) {
    if (forEach.length > MAX_FAN_OUT) {
      throw new Error(
        `forEach array exceeds MAX_FAN_OUT=${MAX_FAN_OUT} (got ${forEach.length})`,
      );
    }
    return forEach;
  }

  if (typeof forEach !== "string") {
    throw new Error("forEach must be an array or reference string");
  }

  const trimmed = forEach.trim();
  // Only a bare `${...}` wrapper is accepted; resolve the inner reference.
  const wrapper = /^\$\{(.+)\}$/.exec(trimmed);
  if (wrapper === null) {
    throw new Error(`forEach ref must be wrapped in \${...}: ${trimmed}`);
  }

  const value = resolveReference(wrapper[1].trim(), { step: resultsById });
  if (!Array.isArray(value)) {
    throw new Error(
      `forEach ref did not resolve to an array: ${trimmed} (got ${typeof value})`,
    );
  }
  if (value.length > MAX_FAN_OUT) {
    throw new Error(
      `forEach expansion exceeds MAX_FAN_OUT=${MAX_FAN_OUT} (got ${value.length})`,
    );
  }
  return value;
}
|
|
238
|
+
|
|
239
|
+
/**
 * Substitute `${item}` tokens in a template string. Non-string items are
 * JSON-stringified; a non-string template is returned untouched.
 *
 * @param {string} template - Text possibly containing `${item}` tokens.
 * @param {*} item - Current forEach item.
 * @returns {*} The substituted string, or `template` unchanged if not a string.
 */
export function substituteItem(template, item) {
  if (typeof template !== "string") return template;
  const repl = typeof item === "string" ? item : JSON.stringify(item);
  // BUGFIX: use a replacer function so `$&`, `$'`, `$1`, `$$` etc. inside the
  // item value are inserted literally. Passing `repl` directly made
  // String.replace interpret those as special replacement patterns.
  return template.replace(/\$\{item\}/g, () => repl);
}
|
|
245
|
+
|
|
246
|
+
/**
 * Evaluate a step's `when` gate expression against completed step results.
 * A missing/empty expression means the step always runs. Evaluation errors
 * are re-thrown with the step id attached for easier diagnosis.
 */
export function shouldRunStep(step, resultsById) {
  const expression = step.when;
  if (!expression) return true; // no gate → run unconditionally
  let verdict;
  try {
    verdict = evalExpr(expression, { step: resultsById });
  } catch (err) {
    throw new Error(`invalid when on step '${step.id}': ${err.message}`);
  }
  return verdict;
}
|
|
255
|
+
|
|
256
|
+
// ─── Placeholder substitution ────────────────────────────────────────────────
|
|
257
|
+
|
|
258
|
+
/**
 * Replace `${step.<id>.<field>}` tokens in `template` using the map of
 * completed step results. Missing steps or unknown fields resolve to an
 * empty string; non-string templates pass through untouched.
 *
 * Supported fields: `summary`, `status`, `taskId`, `tokenCount`,
 * `iterationCount`.
 */
export function substitutePlaceholders(template, resultsById) {
  if (typeof template !== "string") return template;

  const lookup = (stepId, field) => {
    const entry = resultsById.get(stepId);
    if (!entry) return "";
    switch (field) {
      case "summary":
        return entry.result?.summary ?? "";
      case "status":
        return entry.status ?? "";
      case "taskId":
        return entry.taskId ?? "";
      case "tokenCount":
        return String(entry.result?.tokenCount ?? 0);
      case "iterationCount":
        return String(entry.result?.iterationCount ?? 0);
      default:
        return ""; // unknown field → empty, never the raw token
    }
  };

  return template.replace(
    /\$\{step\.([\w-]+)\.([\w-]+)\}/g,
    (_match, stepId, field) => lookup(stepId, field),
  );
}
|
|
282
|
+
|
|
283
|
+
// ─── Persistence ─────────────────────────────────────────────────────────────
|
|
284
|
+
|
|
285
|
+
/**
 * Load every persisted workflow definition under the workflows directory.
 * Non-JSON files and files with malformed JSON are silently skipped; a
 * missing directory yields an empty list.
 */
export function listWorkflows(cwd) {
  const dir = workflowsDir(cwd);
  if (!_deps.existsSync(dir)) return [];

  const names = _deps.readdirSync(dir) || [];
  const workflows = [];
  for (const name of names.filter((n) => n.endsWith(".json"))) {
    try {
      const body = _deps.readFileSync(join(dir, name), "utf-8");
      workflows.push(JSON.parse(body));
    } catch (_e) {
      // skip malformed files
    }
  }
  return workflows;
}
|
|
301
|
+
|
|
302
|
+
/**
 * Load a single workflow by id. Returns null when the file is missing or
 * contains malformed JSON (a corrupt definition is treated as absent).
 */
export function getWorkflow(cwd, id) {
  const path = workflowFile(cwd, id);
  if (_deps.existsSync(path)) {
    try {
      const body = _deps.readFileSync(path, "utf-8");
      return JSON.parse(body);
    } catch (_e) {
      // fall through: unreadable/malformed file is treated as absent
    }
  }
  return null;
}
|
|
311
|
+
|
|
312
|
+
/**
 * Validate and persist a workflow as pretty-printed JSON under the workflows
 * directory (created on demand). Throws on an invalid definition; returns the
 * workflow unchanged on success.
 */
export function saveWorkflow(cwd, wf) {
  const check = validateWorkflow(wf);
  if (!check.valid) {
    throw new Error(`Invalid workflow: ${check.errors.join("; ")}`);
  }
  _deps.mkdirSync(workflowsDir(cwd), { recursive: true });
  const body = JSON.stringify(wf, null, 2);
  _deps.writeFileSync(workflowFile(cwd, wf.id), body, "utf-8");
  return wf;
}
|
|
324
|
+
|
|
325
|
+
/**
 * Delete a persisted workflow file. Returns true when a file was removed,
 * false when no file existed for that id.
 */
export function removeWorkflow(cwd, id) {
  const path = workflowFile(cwd, id);
  const existed = _deps.existsSync(path);
  if (existed) {
    _deps.unlinkSync(path);
  }
  return existed;
}
|
|
331
|
+
|
|
332
|
+
// ─── Execution ───────────────────────────────────────────────────────────────
|
|
333
|
+
|
|
334
|
+
/**
 * Execute a workflow. The runner for individual tasks must be injected via
 * `_deps.runTask` (signature matches `runCoworkTask`).
 *
 * Execution model: planBatches() yields dependency-ordered batches; each
 * batch is sliced into chunks of at most `maxParallel` steps, and the steps
 * of a chunk run concurrently via Promise.all. Before launching, each step
 * is gated by its `when` expression and, if present, expanded by `forEach`
 * into child tasks (`<id>[k]`). After each chunk, forEach children are
 * aggregated back into a synthetic parent entry so later placeholders like
 * `${step.<parent>.summary}` keep working.
 *
 * @param {object} options
 * @param {object} options.workflow - Workflow definition
 * @param {string} [options.cwd] - Working directory for history
 * @param {number} [options.maxParallel] - Max parallel steps per batch
 * @param {boolean} [options.continueOnError] - Keep running after a failure
 * @param {object} [options.llmOptions] - Forwarded to each task
 * @param {function} [options.onStepStart]
 * @param {function} [options.onStepComplete]
 * @returns {Promise<{
 *   workflowId: string,
 *   status: "completed"|"failed"|"partial",
 *   steps: Array<{ id, status, taskId, result }>,
 *   startedAt: string,
 *   finishedAt: string,
 * }>}
 * @throws {Error} on an invalid workflow definition or missing `_deps.runTask`.
 */
export async function executeWorkflow(options = {}) {
  const {
    workflow,
    cwd = process.cwd(),
    maxParallel = 4,
    continueOnError = false,
    llmOptions = {},
    onStepStart,
    onStepComplete,
  } = options;

  // Fail fast: a malformed definition or an unwired runner is a caller error.
  const { valid, errors } = validateWorkflow(workflow);
  if (!valid) throw new Error(`Invalid workflow: ${errors.join("; ")}`);
  if (typeof _deps.runTask !== "function") {
    throw new Error(
      "cowork-workflow: _deps.runTask is not injected (wire runCoworkTask in CLI before executing)",
    );
  }

  const batches = planBatches(workflow.steps);
  // Keyed by step id (and by `<id>[k]` for forEach children); read by
  // `when` gates, forEach refs, and placeholder substitution.
  const resultsById = new Map();
  const stepOutcomes = [];
  const startedAt = new Date(_deps.now()).toISOString();
  // Shared failure flag, also mutated inside the parallel callbacks below;
  // safe because Node runs them on a single thread.
  let anyFailure = false;

  for (const batch of batches) {
    // Respect maxParallel by slicing batch into chunks
    const chunks = [];
    for (let i = 0; i < batch.length; i += maxParallel) {
      chunks.push(batch.slice(i, i + maxParallel));
    }

    for (const chunk of chunks) {
      // Expand forEach / when into concrete tasks for this chunk
      const runnable = []; // { step, message, recordId, parentId }
      const preOutcomes = []; // outcomes produced synchronously (skipped)
      for (const step of chunk) {
        // A failure earlier in THIS chunk/batch skips the remaining steps of
        // the chunk explicitly; later batches are cut off by the break below.
        if (anyFailure && !continueOnError) {
          const outcome = {
            id: step.id,
            status: "skipped",
            taskId: null,
            result: { summary: "skipped due to earlier failure" },
          };
          resultsById.set(step.id, outcome);
          preOutcomes.push(outcome);
          continue;
        }
        // when-gate
        let runThis = true;
        try {
          runThis = shouldRunStep(step, resultsById);
        } catch (err) {
          // An invalid `when` expression counts as a step failure.
          anyFailure = true;
          const outcome = {
            id: step.id,
            status: "failed",
            taskId: null,
            result: { summary: err.message },
          };
          resultsById.set(step.id, outcome);
          preOutcomes.push(outcome);
          continue;
        }
        if (!runThis) {
          const outcome = {
            id: step.id,
            status: "skipped",
            taskId: null,
            result: { summary: "when-condition false" },
          };
          resultsById.set(step.id, outcome);
          preOutcomes.push(outcome);
          continue;
        }
        // forEach-expansion
        if (step.forEach !== undefined) {
          let items;
          try {
            items = resolveForEachItems(step.forEach, resultsById);
          } catch (err) {
            // Bad ref / non-array / fan-out overflow fails the parent step.
            anyFailure = true;
            const outcome = {
              id: step.id,
              status: "failed",
              taskId: null,
              result: { summary: err.message },
            };
            resultsById.set(step.id, outcome);
            preOutcomes.push(outcome);
            continue;
          }
          if (items.length === 0) {
            const outcome = {
              id: step.id,
              status: "skipped",
              taskId: null,
              result: { summary: "forEach items empty" },
            };
            resultsById.set(step.id, outcome);
            preOutcomes.push(outcome);
            continue;
          }
          for (let k = 0; k < items.length; k++) {
            const childId = `${step.id}[${k}]`;
            // ${item} is expanded first; the placeholder pass then runs on
            // the result, so `${step.*}` tokens inside item values are also
            // expanded.
            const withItem = substituteItem(step.message, items[k]);
            const msg = substitutePlaceholders(withItem, resultsById);
            runnable.push({
              step,
              message: msg,
              recordId: childId,
              parentId: step.id,
            });
          }
          continue;
        }
        const message = substitutePlaceholders(step.message, resultsById);
        runnable.push({ step, message, recordId: step.id, parentId: null });
      }

      const promises = runnable.map(async ({ step, message, recordId }) => {
        if (onStepStart) onStepStart({ stepId: recordId, message });
        try {
          const entry = await _deps.runTask({
            templateId: step.templateId || null,
            userMessage: message,
            files: step.files || [],
            cwd,
            llmOptions,
          });
          const outcome = {
            id: recordId,
            status: entry.status,
            taskId: entry.taskId,
            result: entry.result,
          };
          resultsById.set(recordId, outcome);
          if (entry.status !== "completed") anyFailure = true;
          if (onStepComplete) onStepComplete(outcome);
          return outcome;
        } catch (err) {
          // A throwing runner is recorded as a failed step, never propagated.
          anyFailure = true;
          const outcome = {
            id: recordId,
            status: "failed",
            taskId: null,
            result: { summary: `Step threw: ${err.message}` },
          };
          resultsById.set(recordId, outcome);
          if (onStepComplete) onStepComplete(outcome);
          return outcome;
        }
      });

      const results = await Promise.all(promises);
      stepOutcomes.push(...preOutcomes, ...results);

      // Aggregate forEach children into a parent entry so downstream
      // `${step.<parent>.summary}` references still work.
      const byParent = new Map();
      for (let k = 0; k < runnable.length; k++) {
        const r = runnable[k];
        if (!r.parentId) continue;
        if (!byParent.has(r.parentId)) byParent.set(r.parentId, []);
        byParent.get(r.parentId).push(results[k]);
      }
      for (const [parentId, children] of byParent) {
        const allOk = children.every((c) => c.status === "completed");
        const anyOk = children.some((c) => c.status === "completed");
        const status = allOk ? "completed" : anyOk ? "partial" : "failed";
        // The aggregate lives only in resultsById (for later references);
        // stepOutcomes keeps the individual `<id>[k]` child records.
        resultsById.set(parentId, {
          id: parentId,
          status,
          taskId: null,
          result: {
            summary: children.map((c) => c.result?.summary ?? "").join("\n"),
            children: children.length,
          },
        });
      }
    }

    // NOTE(review): on an early halt, steps in batches never reached are
    // omitted from the record entirely (no "skipped" outcome) — confirm
    // that consumers of `steps` expect this.
    if (anyFailure && !continueOnError) break;
  }

  const finishedAt = new Date(_deps.now()).toISOString();
  // Overall status is derived only from recorded outcomes; "skipped"
  // entries count against "completed" here.
  const allCompleted = stepOutcomes.every((s) => s.status === "completed");
  const status = allCompleted
    ? "completed"
    : stepOutcomes.some((s) => s.status === "completed")
      ? "partial"
      : "failed";

  const record = {
    workflowId: workflow.id,
    workflowName: workflow.name,
    status,
    steps: stepOutcomes,
    startedAt,
    finishedAt,
  };
  _appendHistory(cwd, record);
  return record;
}
|
|
558
|
+
|
|
559
|
+
/**
 * Best-effort append of one execution record (as a single JSONL line) to the
 * workflow history file, creating the parent directory on demand. Any I/O
 * error is swallowed: history must never fail a workflow run.
 */
function _appendHistory(cwd, record) {
  try {
    _deps.mkdirSync(join(cwd, ".chainlesschain", "cowork"), {
      recursive: true,
    });
    const line = `${JSON.stringify(record)}\n`;
    _deps.appendFileSync(historyFile(cwd), line, "utf-8");
  } catch (_e) {
    // best-effort
  }
}
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Provider-options three-layer deep merge — inspired by open-agents'
|
|
3
|
+
* getAnthropicSettings + mergeProviderOptions pattern.
|
|
4
|
+
*
|
|
5
|
+
* Resolves per-call LLM provider options as a deep merge of:
|
|
6
|
+
* 1. PROVIDER_DEFAULTS[provider] — hand-curated baseline per provider
|
|
7
|
+
* 2. MODEL_INFERENCE(modelId) — model-specific overrides (e.g. o1
|
|
8
|
+
* disables temperature, claude-opus
|
|
9
|
+
* enables extended thinking)
|
|
10
|
+
* 3. callOverrides — whatever the caller passes
|
|
11
|
+
*
|
|
12
|
+
* Later layers win at leaf keys; objects are merged recursively, arrays are
|
|
13
|
+
* replaced (not concatenated) to keep behavior predictable.
|
|
14
|
+
*
|
|
15
|
+
* @module provider-options
|
|
16
|
+
*/
|
|
17
|
+
|
|
18
|
+
// ─── Layer 1: per-provider defaults ────────────────────────────────────────
|
|
19
|
+
|
|
20
|
+
/**
 * Merge layer 1 of 3: hand-curated baseline options per provider.
 * `inferModelOverrides` (layer 2) and per-call overrides (layer 3) are
 * merged on top by `mergeProviderOptions`.
 *
 * NOTE: `Object.freeze` is shallow — nested objects such as
 * `anthropic.anthropic.thinking` are not frozen. Within this module that is
 * safe because `deepMerge` builds a fresh output and never mutates inputs.
 */
export const PROVIDER_DEFAULTS = Object.freeze({
  anthropic: {
    maxTokens: 8192,
    temperature: 1.0,
    // Vendor-namespaced options; extended thinking is off unless a model
    // override (e.g. Opus) or the caller enables it.
    anthropic: { thinking: { type: "disabled" } },
  },
  openai: {
    maxTokens: 4096,
    temperature: 0.7,
  },
  ollama: {
    // No maxTokens default: left to the local model/server configuration.
    temperature: 0.7,
  },
  deepseek: {
    maxTokens: 4096,
    temperature: 0.7,
  },
  gemini: {
    maxTokens: 8192,
    temperature: 0.7,
  },
  custom: {
    maxTokens: 4096,
    temperature: 0.7,
  },
});
|
|
46
|
+
|
|
47
|
+
// ─── Layer 2: model-id inference ───────────────────────────────────────────
|
|
48
|
+
|
|
49
|
+
/**
 * Derive per-model overrides from the model id string (merge layer 2).
 * Pure function, no I/O; unknown or non-string ids yield no overrides.
 * Rules are checked in priority order and the first match wins.
 *
 * @param {string} modelId
 * @returns {object} partial options to merge on top of provider defaults.
 */
export function inferModelOverrides(modelId) {
  if (typeof modelId !== "string" || modelId.length === 0) return {};
  const id = modelId.toLowerCase();

  // OpenAI o1/o3 reasoning models — temperature is unsupported. The explicit
  // `temperature: undefined` key makes deepMerge ERASE the provider default.
  const isOpenAiReasoner =
    id.startsWith("o1") ||
    id.startsWith("o3") ||
    id.includes("-o1-") ||
    id.includes("-o3-");
  if (isOpenAiReasoner) {
    return { temperature: undefined, reasoning: { effort: "medium" } };
  }

  // Claude Opus — enable extended thinking by default (users can turn off).
  if (id.includes("opus-4") || id.includes("opus-3")) {
    return {
      maxTokens: 16384,
      anthropic: { thinking: { type: "enabled", budgetTokens: 8000 } },
    };
  }

  // Claude Haiku — cheaper, smaller output by default.
  if (id.includes("haiku")) {
    return { maxTokens: 4096 };
  }

  // DeepSeek reasoner — reasoning tokens need headroom.
  if (id.includes("deepseek-reasoner")) {
    return { maxTokens: 8192, reasoning: { enabled: true } };
  }

  return {};
}
|
|
89
|
+
|
|
90
|
+
// ─── Deep merge primitive ──────────────────────────────────────────────────
|
|
91
|
+
|
|
92
|
+
/**
 * True only for ordinary `{}`-style objects (prototype === Object.prototype).
 * Arrays, null, class instances, and null-prototype objects all return false,
 * so deepMerge treats them as atomic values that replace rather than merge.
 */
function _isPlainObject(v) {
  if (v === null || typeof v !== "object" || Array.isArray(v)) {
    return false;
  }
  return Object.getPrototypeOf(v) === Object.prototype;
}
|
|
100
|
+
|
|
101
|
+
/**
 * Deep-merge any number of plain-object layers, left to right; later layers
 * win at leaf keys. Plain objects merge recursively; arrays and all other
 * values replace wholesale. A key explicitly set to `undefined` in a later
 * layer ERASES the accumulated key (used to disable fields such as
 * `temperature` for models that reject it). Non-plain-object layers are
 * skipped. Inputs are never mutated; a fresh object is returned.
 *
 * Security: `__proto__`, `constructor`, and `prototype` keys are ignored so a
 * layer deserialized from untrusted JSON (where `"__proto__"` becomes an own
 * key) cannot pollute Object.prototype via the `out[key] = value` assignment.
 *
 * @param {...object} layers
 * @returns {object}
 */
export function deepMerge(...layers) {
  const out = {};
  for (const layer of layers) {
    if (!_isPlainObject(layer)) continue;
    for (const [key, value] of Object.entries(layer)) {
      // Guard against prototype pollution from untrusted layers.
      if (key === "__proto__" || key === "constructor" || key === "prototype") {
        continue;
      }
      if (value === undefined) {
        // explicit undefined → erase from accumulator (used to disable fields)
        delete out[key];
      } else if (_isPlainObject(value) && _isPlainObject(out[key])) {
        out[key] = deepMerge(out[key], value);
      } else {
        out[key] = value;
      }
    }
  }
  return out;
}
|
|
118
|
+
|
|
119
|
+
// ─── Public API ────────────────────────────────────────────────────────────
|
|
120
|
+
|
|
121
|
+
/**
 * Merge three layers into a single options object for a given LLM call:
 * provider defaults → model-inferred overrides → per-call overrides
 * (later layers win; `deepMerge` handles nesting and key erasure).
 *
 * @param {string} provider
 * @param {string} modelId
 * @param {object} [callOverrides]
 * @returns {object}
 */
export function mergeProviderOptions(provider, modelId, callOverrides = {}) {
  const layers = [
    PROVIDER_DEFAULTS[provider] || {},
    inferModelOverrides(modelId),
    callOverrides || {}, // tolerate an explicit null from callers
  ];
  return deepMerge(...layers);
}
|