chainlesschain 0.45.4 → 0.45.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,397 @@
1
+ /**
2
+ * AgentRouter — multi-path agent dispatch.
3
+ *
4
+ * Routes coding subtasks across multiple agent backends in parallel:
5
+ * - claude (Claude Code CLI — best for complex reasoning)
6
+ * - codex (OpenAI Codex CLI — good for repo context)
7
+ * - gemini (Google Gemini via ChainlessChain LLM provider)
8
+ * - openai (GPT-4o via ChainlessChain LLM provider)
9
+ * - ollama (Local LLM — offline / private)
10
+ *
11
+ * Routing strategies:
12
+ * round-robin — distribute evenly across available backends
13
+ * by-type — route based on task type keywords
14
+ * parallel-all — every subtask runs on ALL backends; pick best result
15
+ * primary — use first available backend, others as fallback
16
+ *
17
+ * Usage:
18
+ * const router = new AgentRouter({
19
+ * backends: [
20
+ * { type: "claude", weight: 2 },
21
+ * { type: "gemini", apiKey: "...", model: "gemini-1.5-pro", weight: 1 },
22
+ * ],
23
+ * strategy: "round-robin",
24
+ * });
25
+ * const results = await router.dispatch(subtasks, { cwd: "/my/project" });
26
+ */
27
+
28
+ import { EventEmitter } from "events";
29
+ import {
30
+ ClaudeCodePool,
31
+ detectClaudeCode,
32
+ detectCodex,
33
+ } from "./claude-code-bridge.js";
34
+ import { createChatFn } from "./cowork-adapter.js";
35
+
36
// ─── Backend type constants ───────────────────────────────────────

/**
 * Canonical backend type identifiers accepted in backend configs and
 * used as keys throughout the router. Frozen so a typo'd assignment
 * elsewhere cannot silently corrupt routing.
 */
export const BACKEND_TYPE = Object.freeze({
  CLAUDE: "claude",
  CODEX: "codex",
  GEMINI: "gemini",
  OPENAI: "openai",
  ANTHROPIC: "anthropic",
  OLLAMA: "ollama",
});
45
+
46
// ─── Task type → preferred backend mapping ────────────────────────

/**
 * Preferred backend per detected task type, used by the "by-type"
 * strategy. Task types with no entry fall back to weighted
 * round-robin selection. Frozen: this is static routing policy.
 */
const TASK_TYPE_ROUTING = Object.freeze({
  "code-generation": BACKEND_TYPE.CLAUDE,
  "code-review": BACKEND_TYPE.CLAUDE,
  testing: BACKEND_TYPE.CLAUDE,
  documentation: BACKEND_TYPE.OPENAI,
  "data-analysis": BACKEND_TYPE.GEMINI,
  research: BACKEND_TYPE.GEMINI,
});
55
+
56
// Keywords for auto-detecting task type. NOTE: entries are checked in
// insertion order, so an ambiguous word like "analyze" resolves to the
// earlier category ("code-review", not "data-analysis").
const TYPE_KEYWORDS = {
  "code-generation": [
    "implement",
    "create",
    "build",
    "add feature",
    "fix",
    "refactor",
  ],
  "code-review": ["review", "audit", "check", "inspect", "analyze"],
  testing: ["test", "spec", "unit test", "e2e", "coverage"],
  documentation: ["document", "readme", "comment", "docstring", "explain"],
  "data-analysis": ["data", "analyze", "statistics", "report", "chart"],
  research: ["research", "investigate", "explore", "compare"],
};

/**
 * Infer a task type from its free-text description by case-insensitive
 * keyword matching against TYPE_KEYWORDS.
 *
 * @param {string|undefined} description - Free-text task description.
 * @returns {string} A TYPE_KEYWORDS key; "code-generation" when no
 *   keyword matches or when `description` is missing/empty.
 */
function detectTaskType(description) {
  // Guard: subtasks may omit `description` (callers pass it through
  // unchecked); fall back to the default type instead of crashing on
  // `undefined.toLowerCase()`.
  if (typeof description !== "string" || description.length === 0) {
    return "code-generation";
  }
  const lower = description.toLowerCase();
  for (const [type, keywords] of Object.entries(TYPE_KEYWORDS)) {
    if (keywords.some((kw) => lower.includes(kw))) return type;
  }
  return "code-generation";
}
80
+
81
// ─── API-based agent executor ─────────────────────────────────────

/**
 * Execute a task using an LLM API backend (Gemini, GPT, Ollama, etc.)
 * through ChainlessChain's existing llm-providers infrastructure.
 *
 * Never rejects: failures (including timeout) are reported via the
 * returned result object (`success: false`, `error`).
 *
 * @param {{description: string, context?: string}} task
 * @param {object} options
 * @param {string} options.provider - llm-providers provider id.
 * @param {string} [options.model]
 * @param {string} [options.apiKey]
 * @param {string} [options.baseUrl]
 * @param {string} options.cwd - Working directory mentioned in the prompt.
 * @param {number} [options.timeout=120000] - Milliseconds before aborting.
 * @returns {Promise<{success: boolean, output: string, exitCode: number,
 *   duration: number, agentId: string, error?: string}>}
 */
async function executeViaAPI(task, options) {
  const { provider, model, apiKey, baseUrl, cwd, timeout = 120_000 } = options;

  const chat = createChatFn({ provider, model, apiKey, baseUrl });

  const systemPrompt =
    "You are an expert software engineer. Implement the requested changes precisely. " +
    "Respond with the complete implementation, file paths, and explanations. " +
    `Working directory: ${cwd}`;

  const startTime = Date.now();
  // Track the timeout timer so it can be cleared: previously it was never
  // cancelled, keeping the event loop alive for up to `timeout` ms after
  // every successful call.
  let timer = null;
  try {
    const timeoutPromise = new Promise((_, reject) => {
      timer = setTimeout(() => reject(new Error("LLM API timeout")), timeout);
    });
    const output = await Promise.race([
      chat(
        [
          { role: "system", content: systemPrompt },
          {
            role: "user",
            content: task.context
              ? `Context:\n${task.context}\n\nTask:\n${task.description}`
              : task.description,
          },
        ],
        { maxTokens: 4096 },
      ),
      timeoutPromise,
    ]);

    return {
      success: true,
      output,
      exitCode: 0,
      duration: Date.now() - startTime,
      agentId: `api-${provider}`,
    };
  } catch (err) {
    return {
      success: false,
      output: "",
      exitCode: -1,
      duration: Date.now() - startTime,
      agentId: `api-${provider}`,
      error: err.message,
    };
  } finally {
    // Always cancel the pending timer (whether the chat won or lost the
    // race) so the process is free to exit immediately.
    if (timer !== null) clearTimeout(timer);
  }
}
136
+
137
// ─── AgentRouter ──────────────────────────────────────────────────

/**
 * Routes coding subtasks across a configurable set of agent backends —
 * CLI tools (driven through ClaudeCodePool) and LLM APIs (driven through
 * executeViaAPI) — using one of four strategies.
 *
 * Events emitted:
 *   "agent:start"    — { taskId, backend } just before a task runs
 *   "agent:complete" — the full task result after it finishes
 *   "agent:output"   — forwarded live output from CLI backend pools
 */
export class AgentRouter extends EventEmitter {
  /**
   * @param {object} options
   * @param {Array} options.backends - Backend configs (see examples above)
   * @param {string} options.strategy - "round-robin"|"by-type"|"parallel-all"|"primary"
   * @param {number} options.maxParallel - Max concurrent agent tasks (default 3)
   */
  constructor(options = {}) {
    super();
    this.strategy = options.strategy || "round-robin";
    this.maxParallel = options.maxParallel || 3;
    this._backends = this._resolveBackends(options.backends || []);
    this._rrIndex = 0; // round-robin cursor
    // Pre-expand the weighted rotation once: each backend appears
    // `weight` times. Backends never change after construction, so
    // rebuilding this list on every pick would be wasted work.
    this._weightedList = [];
    for (const b of this._backends) {
      for (let i = 0; i < (b.weight || 1); i++) this._weightedList.push(b);
    }
  }

  /**
   * Auto-detect available backends from the environment.
   * Includes CLI tools (claude/codex, if installed) and API providers
   * (based on *_API_KEY env vars); Ollama is always added as a local
   * fallback with the lowest weight.
   *
   * @param {object} [options] - Forwarded to the AgentRouter constructor.
   * @returns {AgentRouter}
   */
  static autoDetect(options = {}) {
    const backends = [];

    if (detectClaudeCode().found) {
      backends.push({ type: BACKEND_TYPE.CLAUDE, weight: 3 });
    }
    if (detectCodex().found) {
      backends.push({ type: BACKEND_TYPE.CODEX, weight: 2 });
    }
    if (process.env.GEMINI_API_KEY) {
      backends.push({ type: BACKEND_TYPE.GEMINI, weight: 2 });
    }
    if (process.env.OPENAI_API_KEY) {
      backends.push({ type: BACKEND_TYPE.OPENAI, weight: 2 });
    }
    if (process.env.ANTHROPIC_API_KEY) {
      backends.push({ type: BACKEND_TYPE.ANTHROPIC, weight: 2 });
    }
    // Always include Ollama as local fallback
    backends.push({ type: BACKEND_TYPE.OLLAMA, weight: 1 });

    return new AgentRouter({ ...options, backends });
  }

  /**
   * Dispatch subtasks to agent backends according to the routing strategy.
   *
   * @param {Array<{id, description, context?, type?}>} subtasks
   * @param {object} options
   * @param {string} options.cwd
   * @returns {Promise<Array<{taskId, agentId, backendType, success, output, duration}>>}
   * @throws {Error} When no backends are configured.
   */
  async dispatch(subtasks, options = {}) {
    const { cwd = process.cwd() } = options;

    if (this._backends.length === 0) {
      throw new Error(
        "No agent backends available. Install Claude Code: npm i -g @anthropic-ai/claude-code",
      );
    }

    switch (this.strategy) {
      case "parallel-all":
        return this._dispatchParallelAll(subtasks, { cwd });
      case "by-type":
        return this._dispatchByType(subtasks, { cwd });
      case "primary":
        return this._dispatchPrimary(subtasks, { cwd });
      default: // round-robin
        return this._dispatchRoundRobin(subtasks, { cwd });
    }
  }

  // ─── Strategies ────────────────────────────────────────────────

  /** Round-robin: distribute tasks evenly across all backends. */
  async _dispatchRoundRobin(subtasks, { cwd }) {
    // Assign each subtask a backend via weighted rotation.
    const assignments = subtasks.map((task) => {
      const backend = this._weightedNext();
      return { task, backend };
    });

    return this._runAssignments(assignments, { cwd });
  }

  /** By-type: route task to the best backend for its type. */
  async _dispatchByType(subtasks, { cwd }) {
    const assignments = subtasks.map((task) => {
      const taskType = task.type || detectTaskType(task.description);
      const preferredType = TASK_TYPE_ROUTING[taskType];
      // Fall back to weighted rotation when the preferred backend
      // is not configured.
      const backend = this._findBackend(preferredType) || this._weightedNext();
      return { task, backend };
    });

    return this._runAssignments(assignments, { cwd });
  }

  /** Primary: all tasks go to first backend; fallback on failure. */
  async _dispatchPrimary(subtasks, { cwd }) {
    const primary = this._backends[0];
    const assignments = subtasks.map((task) => ({ task, backend: primary }));
    const results = await this._runAssignments(assignments, { cwd });

    // Retry failed tasks on the next available backend, overwriting the
    // failed entry in-place so result order matches subtask order.
    const retries = [];
    for (let i = 0; i < results.length; i++) {
      if (!results[i].success && this._backends.length > 1) {
        const fallback = this._backends[1];
        retries.push(
          this._runSingleTask(subtasks[i], fallback, { cwd }).then((r) => {
            results[i] = r;
          }),
        );
      }
    }
    await Promise.all(retries);
    return results;
  }

  /** Parallel-all: run every task on ALL backends; return best result per task. */
  async _dispatchParallelAll(subtasks, { cwd }) {
    const results = [];
    // Tasks run sequentially; within each task, all backends run in parallel.
    for (const task of subtasks) {
      const allResults = await Promise.all(
        this._backends.map((backend) =>
          this._runSingleTask(task, backend, { cwd }),
        ),
      );
      // Pick the first successful result; if all fail, pick the first
      const best = allResults.find((r) => r.success) || allResults[0];
      best.allResults = allResults; // attach all results for inspection
      results.push(best);
    }
    return results;
  }

  // ─── Execution ─────────────────────────────────────────────────

  /**
   * Run task/backend assignments in parallel batches of `maxParallel`,
   * preserving assignment order in the returned results array.
   */
  async _runAssignments(assignments, { cwd }) {
    const results = new Array(assignments.length);

    // Process in parallel batches
    for (let i = 0; i < assignments.length; i += this.maxParallel) {
      const batch = assignments.slice(i, i + this.maxParallel);
      const batchResults = await Promise.all(
        batch.map(({ task, backend }) =>
          this._runSingleTask(task, backend, { cwd }),
        ),
      );
      for (let j = 0; j < batchResults.length; j++) {
        results[i + j] = batchResults[j];
      }
    }

    return results;
  }

  /**
   * Execute one task on one backend (CLI pool or LLM API), tagging the
   * result with taskId/backendType and emitting start/complete events.
   */
  async _runSingleTask(task, backend, { cwd }) {
    this.emit("agent:start", { taskId: task.id, backend: backend.type });

    let result;
    if (backend.isCLI) {
      // Use ClaudeCodePool for CLI-based backends
      const pool = backend._pool;
      const [r] = await pool.dispatch([task], { cwd });
      result = r;
    } else {
      // Use LLM API for API-based backends
      result = await executeViaAPI(task, {
        provider: backend.provider,
        model: backend.model,
        apiKey: backend.apiKey,
        baseUrl: backend.baseUrl,
        cwd,
        timeout: backend.timeout,
      });
    }

    result.taskId = task.id;
    result.backendType = backend.type;

    this.emit("agent:complete", result);
    return result;
  }

  // ─── Backend resolution ─────────────────────────────────────────

  /**
   * Normalize user-supplied backend configs into internal backend
   * records. CLI types get a dedicated ClaudeCodePool (with output
   * events forwarded); all other types become API backend records.
   */
  _resolveBackends(configs) {
    return configs.map((cfg) => {
      const type = cfg.type || BACKEND_TYPE.CLAUDE;

      if (type === BACKEND_TYPE.CLAUDE || type === BACKEND_TYPE.CODEX) {
        const pool = new ClaudeCodePool({
          maxParallel: 1,
          cliCommand: type === BACKEND_TYPE.CODEX ? "codex" : "claude",
          model: cfg.model || null,
        });
        pool.on("agent:output", (ev) => this.emit("agent:output", ev));
        return {
          type,
          isCLI: true,
          weight: cfg.weight || 1,
          _pool: pool,
          timeout: cfg.timeout || 300_000,
        };
      }

      // API-based backend
      const providerMap = {
        [BACKEND_TYPE.GEMINI]: "gemini",
        [BACKEND_TYPE.OPENAI]: "openai",
        [BACKEND_TYPE.ANTHROPIC]: "anthropic",
        [BACKEND_TYPE.OLLAMA]: "ollama",
      };

      return {
        type,
        isCLI: false,
        weight: cfg.weight || 1,
        provider: providerMap[type] || type,
        model: cfg.model || null,
        apiKey: cfg.apiKey || null,
        baseUrl: cfg.baseUrl || null,
        timeout: cfg.timeout || 120_000,
      };
    });
  }

  /** Find the first backend matching a given type. */
  _findBackend(type) {
    return this._backends.find((b) => b.type === type) || null;
  }

  /**
   * Pick the next backend using weighted round-robin over the list
   * pre-expanded in the constructor. Single-backend routers short-circuit
   * without advancing the cursor.
   */
  _weightedNext() {
    if (this._backends.length === 0) throw new Error("No backends");
    if (this._backends.length === 1) return this._backends[0];

    const backend = this._weightedList[this._rrIndex % this._weightedList.length];
    this._rrIndex++;
    return backend;
  }

  /** Summary of all configured backends. */
  summary() {
    return this._backends.map((b) => ({
      type: b.type,
      kind: b.isCLI ? "cli" : "api",
      provider: b.provider || b.type,
      weight: b.weight,
    }));
  }
}