@jackchen_me/open-multi-agent 0.1.0 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/ISSUE_TEMPLATE/bug_report.md +40 -0
- package/.github/ISSUE_TEMPLATE/feature_request.md +23 -0
- package/.github/pull_request_template.md +14 -0
- package/.github/workflows/ci.yml +23 -0
- package/CLAUDE.md +72 -0
- package/CODE_OF_CONDUCT.md +48 -0
- package/CONTRIBUTING.md +72 -0
- package/DECISIONS.md +43 -0
- package/README.md +73 -140
- package/README_zh.md +217 -0
- package/SECURITY.md +17 -0
- package/dist/agent/agent.d.ts +5 -0
- package/dist/agent/agent.d.ts.map +1 -1
- package/dist/agent/agent.js +90 -3
- package/dist/agent/agent.js.map +1 -1
- package/dist/agent/structured-output.d.ts +33 -0
- package/dist/agent/structured-output.d.ts.map +1 -0
- package/dist/agent/structured-output.js +116 -0
- package/dist/agent/structured-output.js.map +1 -0
- package/dist/index.d.ts +2 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +2 -1
- package/dist/index.js.map +1 -1
- package/dist/llm/adapter.d.ts +9 -4
- package/dist/llm/adapter.d.ts.map +1 -1
- package/dist/llm/adapter.js +17 -5
- package/dist/llm/adapter.js.map +1 -1
- package/dist/llm/anthropic.d.ts +1 -1
- package/dist/llm/anthropic.d.ts.map +1 -1
- package/dist/llm/anthropic.js +2 -1
- package/dist/llm/anthropic.js.map +1 -1
- package/dist/llm/copilot.d.ts +92 -0
- package/dist/llm/copilot.d.ts.map +1 -0
- package/dist/llm/copilot.js +426 -0
- package/dist/llm/copilot.js.map +1 -0
- package/dist/llm/openai-common.d.ts +47 -0
- package/dist/llm/openai-common.d.ts.map +1 -0
- package/dist/llm/openai-common.js +209 -0
- package/dist/llm/openai-common.js.map +1 -0
- package/dist/llm/openai.d.ts +1 -1
- package/dist/llm/openai.d.ts.map +1 -1
- package/dist/llm/openai.js +3 -224
- package/dist/llm/openai.js.map +1 -1
- package/dist/orchestrator/orchestrator.d.ts +25 -1
- package/dist/orchestrator/orchestrator.d.ts.map +1 -1
- package/dist/orchestrator/orchestrator.js +130 -37
- package/dist/orchestrator/orchestrator.js.map +1 -1
- package/dist/task/queue.js +1 -1
- package/dist/task/queue.js.map +1 -1
- package/dist/task/task.d.ts +3 -0
- package/dist/task/task.d.ts.map +1 -1
- package/dist/task/task.js +5 -1
- package/dist/task/task.js.map +1 -1
- package/dist/team/messaging.d.ts.map +1 -1
- package/dist/team/messaging.js +2 -1
- package/dist/team/messaging.js.map +1 -1
- package/dist/types.d.ts +31 -3
- package/dist/types.d.ts.map +1 -1
- package/examples/05-copilot-test.ts +49 -0
- package/examples/06-local-model.ts +199 -0
- package/examples/07-fan-out-aggregate.ts +209 -0
- package/examples/08-gemma4-local.ts +203 -0
- package/examples/09-gemma4-auto-orchestration.ts +162 -0
- package/package.json +4 -3
- package/src/agent/agent.ts +115 -6
- package/src/agent/structured-output.ts +126 -0
- package/src/index.ts +2 -1
- package/src/llm/adapter.ts +18 -5
- package/src/llm/anthropic.ts +2 -1
- package/src/llm/copilot.ts +551 -0
- package/src/llm/openai-common.ts +255 -0
- package/src/llm/openai.ts +8 -258
- package/src/orchestrator/orchestrator.ts +164 -38
- package/src/task/queue.ts +1 -1
- package/src/task/task.ts +8 -1
- package/src/team/messaging.ts +3 -1
- package/src/types.ts +31 -2
- package/tests/semaphore.test.ts +57 -0
- package/tests/shared-memory.test.ts +122 -0
- package/tests/structured-output.test.ts +331 -0
- package/tests/task-queue.test.ts +244 -0
- package/tests/task-retry.test.ts +368 -0
- package/tests/task-utils.test.ts +155 -0
- package/tests/tool-executor.test.ts +193 -0
|
@@ -0,0 +1,162 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Example 09 — Gemma 4 Auto-Orchestration (runTeam, 100% Local)
|
|
3
|
+
*
|
|
4
|
+
* Demonstrates the framework's key feature — automatic task decomposition —
|
|
5
|
+
* powered entirely by a local Gemma 4 model. No cloud API needed.
|
|
6
|
+
*
|
|
7
|
+
* What happens:
|
|
8
|
+
* 1. A Gemma 4 "coordinator" receives the goal + agent roster
|
|
9
|
+
* 2. It outputs a structured JSON task array (title, description, assignee, dependsOn)
|
|
10
|
+
* 3. The framework resolves dependencies, schedules tasks, and runs agents
|
|
11
|
+
* 4. The coordinator synthesises all task results into a final answer
|
|
12
|
+
*
|
|
13
|
+
* This is the hardest test for a local model — it must produce valid JSON
|
|
14
|
+
* for task decomposition AND do tool-calling for actual task execution.
|
|
15
|
+
* Gemma 4 e2b (5.1B params) handles both reliably.
|
|
16
|
+
*
|
|
17
|
+
* Run:
|
|
18
|
+
* no_proxy=localhost npx tsx examples/09-gemma4-auto-orchestration.ts
|
|
19
|
+
*
|
|
20
|
+
* Prerequisites:
|
|
21
|
+
* 1. Ollama >= 0.20.0 installed and running: https://ollama.com
|
|
22
|
+
* 2. Pull the model: ollama pull gemma4:e2b
|
|
23
|
+
* 3. No API keys needed!
|
|
24
|
+
*
|
|
25
|
+
* Note: The no_proxy=localhost prefix is needed if you have an HTTP proxy
|
|
26
|
+
* configured, since the OpenAI SDK would otherwise route Ollama requests
|
|
27
|
+
* through the proxy.
|
|
28
|
+
*/
|
|
29
|
+
|
|
30
|
+
import { OpenMultiAgent } from '../src/index.js'
|
|
31
|
+
import type { AgentConfig, OrchestratorEvent, Task } from '../src/types.js'
|
|
32
|
+
|
|
33
|
+
// ---------------------------------------------------------------------------
// Configuration
// ---------------------------------------------------------------------------

// See available tags at https://ollama.com/library/gemma4
const OLLAMA_MODEL = 'gemma4:e2b' // or 'gemma4:e4b', 'gemma4:26b'
// Local Ollama endpoint (presumably the OpenAI-compatible /v1 surface — verify against Ollama docs)
const OLLAMA_BASE_URL = 'http://localhost:11434/v1'

// ---------------------------------------------------------------------------
// Agents — the coordinator is created automatically by runTeam()
// ---------------------------------------------------------------------------

// Gathers system facts via read-only shell commands (bash tool only).
const researcher: AgentConfig = {
  name: 'researcher',
  model: OLLAMA_MODEL,
  provider: 'openai', // reuses the OpenAI adapter, pointed at Ollama via baseURL
  baseURL: OLLAMA_BASE_URL,
  apiKey: 'ollama', // placeholder — Ollama presumably ignores the key; the SDK wants a non-empty value (TODO confirm)
  systemPrompt: `You are a system researcher. Use bash to run non-destructive,
read-only commands and report the results concisely.`,
  tools: ['bash'],
  maxTurns: 4, // keep local-model tool loops short
}

// Turns gathered information into a Markdown report on disk (file_write tool only).
const writer: AgentConfig = {
  name: 'writer',
  model: OLLAMA_MODEL,
  provider: 'openai',
  baseURL: OLLAMA_BASE_URL,
  apiKey: 'ollama',
  systemPrompt: `You are a technical writer. Use file_write to create clear,
structured Markdown reports based on the information provided.`,
  tools: ['file_write'],
  maxTurns: 4,
}
|
|
68
|
+
|
|
69
|
+
// ---------------------------------------------------------------------------
|
|
70
|
+
// Progress handler
|
|
71
|
+
// ---------------------------------------------------------------------------
|
|
72
|
+
|
|
73
|
+
function handleProgress(event: OrchestratorEvent): void {
|
|
74
|
+
const ts = new Date().toISOString().slice(11, 23)
|
|
75
|
+
switch (event.type) {
|
|
76
|
+
case 'task_start': {
|
|
77
|
+
const task = event.data as Task | undefined
|
|
78
|
+
console.log(`[${ts}] TASK START "${task?.title ?? event.task}" → ${task?.assignee ?? '?'}`)
|
|
79
|
+
break
|
|
80
|
+
}
|
|
81
|
+
case 'task_complete':
|
|
82
|
+
console.log(`[${ts}] TASK DONE "${event.task}"`)
|
|
83
|
+
break
|
|
84
|
+
case 'agent_start':
|
|
85
|
+
console.log(`[${ts}] AGENT START ${event.agent}`)
|
|
86
|
+
break
|
|
87
|
+
case 'agent_complete':
|
|
88
|
+
console.log(`[${ts}] AGENT DONE ${event.agent}`)
|
|
89
|
+
break
|
|
90
|
+
case 'error':
|
|
91
|
+
console.error(`[${ts}] ERROR ${event.agent ?? ''} task=${event.task ?? '?'}`)
|
|
92
|
+
break
|
|
93
|
+
}
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
// ---------------------------------------------------------------------------
// Orchestrator — defaultModel is used for the coordinator agent
// ---------------------------------------------------------------------------

const orchestrator = new OpenMultiAgent({
  defaultModel: OLLAMA_MODEL,
  defaultProvider: 'openai',
  defaultBaseURL: OLLAMA_BASE_URL,
  defaultApiKey: 'ollama',
  maxConcurrency: 1, // local model serves one request at a time
  onProgress: handleProgress, // stream lifecycle events to the console
})

// NOTE(review): sharedMemory presumably lets agents read each other's stored
// results between tasks — confirm semantics against the framework docs.
const team = orchestrator.createTeam('gemma4-auto', {
  name: 'gemma4-auto',
  agents: [researcher, writer],
  sharedMemory: true,
})
|
|
114
|
+
|
|
115
|
+
// ---------------------------------------------------------------------------
// Give a goal — the framework handles the rest
// ---------------------------------------------------------------------------

// Natural-language goal; the auto-created coordinator decomposes it into tasks.
const goal = `Check this machine's Node.js version, npm version, and OS info,
then write a short Markdown summary report to /tmp/gemma4-auto/report.md`

console.log('Gemma 4 Auto-Orchestration — Zero API Cost')
console.log('='.repeat(60))
console.log(` model → ${OLLAMA_MODEL} via Ollama (all agents + coordinator)`)
console.log(` researcher → bash`)
console.log(` writer → file_write`)
console.log(` coordinator → auto-created by runTeam()`)
console.log()
// Collapse the multi-line goal onto one line for the banner.
console.log(`Goal: ${goal.replace(/\n/g, ' ').trim()}`)
console.log('='.repeat(60))

// Time the whole pipeline: decomposition + task execution + synthesis.
const start = Date.now()
const result = await orchestrator.runTeam(team, goal)
const totalTime = Date.now() - start
+
// ---------------------------------------------------------------------------
// Results
// ---------------------------------------------------------------------------

console.log('\n' + '='.repeat(60))
console.log('Pipeline complete.\n')
console.log(`Overall success: ${result.success}`)
console.log(`Total time: ${(totalTime / 1000).toFixed(1)}s`)
console.log(`Tokens — input: ${result.totalTokenUsage.input_tokens}, output: ${result.totalTokenUsage.output_tokens}`)

// Per-agent status line: success flag plus the tools each agent actually invoked.
console.log('\nPer-agent results:')
for (const [name, r] of result.agentResults) {
  const icon = r.success ? 'OK ' : 'FAIL'
  const tools = r.toolCalls.length > 0 ? r.toolCalls.map(c => c.toolName).join(', ') : '(none)'
  console.log(` [${icon}] ${name.padEnd(24)} tools: ${tools}`)
}

// Print the coordinator's final synthesis
// (assumes runTeam() registers its auto-created coordinator under the key 'coordinator' — TODO confirm)
const coordResult = result.agentResults.get('coordinator')
if (coordResult?.success) {
  console.log('\nFinal synthesis (from local Gemma 4 coordinator):')
  console.log('-'.repeat(60))
  console.log(coordResult.output)
  console.log('-'.repeat(60))
}

console.log('\nAll processing done locally. $0 API cost.')
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@jackchen_me/open-multi-agent",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.2.0",
|
|
4
4
|
"description": "Production-grade multi-agent orchestration framework. Model-agnostic, supports team collaboration, task scheduling, and inter-agent communication.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "dist/index.js",
|
|
@@ -42,8 +42,9 @@
|
|
|
42
42
|
"zod": "^3.23.0"
|
|
43
43
|
},
|
|
44
44
|
"devDependencies": {
|
|
45
|
+
"@types/node": "^22.0.0",
|
|
46
|
+
"tsx": "^4.21.0",
|
|
45
47
|
"typescript": "^5.6.0",
|
|
46
|
-
"vitest": "^2.1.0"
|
|
47
|
-
"@types/node": "^22.0.0"
|
|
48
|
+
"vitest": "^2.1.0"
|
|
48
49
|
}
|
|
49
50
|
}
|
package/src/agent/agent.ts
CHANGED
|
@@ -35,7 +35,12 @@ import type {
|
|
|
35
35
|
import type { ToolDefinition as FrameworkToolDefinition, ToolRegistry } from '../tool/framework.js'
|
|
36
36
|
import type { ToolExecutor } from '../tool/executor.js'
|
|
37
37
|
import { createAdapter } from '../llm/adapter.js'
|
|
38
|
-
import { AgentRunner, type RunnerOptions, type RunOptions } from './runner.js'
|
|
38
|
+
import { AgentRunner, type RunnerOptions, type RunOptions, type RunResult } from './runner.js'
|
|
39
|
+
import {
|
|
40
|
+
buildStructuredOutputInstruction,
|
|
41
|
+
extractJSON,
|
|
42
|
+
validateOutput,
|
|
43
|
+
} from './structured-output.js'
|
|
39
44
|
|
|
40
45
|
// ---------------------------------------------------------------------------
|
|
41
46
|
// Internal helpers
|
|
@@ -109,11 +114,20 @@ export class Agent {
|
|
|
109
114
|
}
|
|
110
115
|
|
|
111
116
|
const provider = this.config.provider ?? 'anthropic'
|
|
112
|
-
const adapter = await createAdapter(provider)
|
|
117
|
+
const adapter = await createAdapter(provider, this.config.apiKey, this.config.baseURL)
|
|
118
|
+
|
|
119
|
+
// Append structured-output instructions when an outputSchema is configured.
|
|
120
|
+
let effectiveSystemPrompt = this.config.systemPrompt
|
|
121
|
+
if (this.config.outputSchema) {
|
|
122
|
+
const instruction = buildStructuredOutputInstruction(this.config.outputSchema)
|
|
123
|
+
effectiveSystemPrompt = effectiveSystemPrompt
|
|
124
|
+
? effectiveSystemPrompt + '\n' + instruction
|
|
125
|
+
: instruction
|
|
126
|
+
}
|
|
113
127
|
|
|
114
128
|
const runnerOptions: RunnerOptions = {
|
|
115
129
|
model: this.config.model,
|
|
116
|
-
systemPrompt:
|
|
130
|
+
systemPrompt: effectiveSystemPrompt,
|
|
117
131
|
maxTurns: this.config.maxTurns,
|
|
118
132
|
maxTokens: this.config.maxTokens,
|
|
119
133
|
temperature: this.config.temperature,
|
|
@@ -264,10 +278,19 @@ export class Agent {
|
|
|
264
278
|
}
|
|
265
279
|
|
|
266
280
|
const result = await runner.run(messages, runOptions)
|
|
267
|
-
|
|
268
281
|
this.state.tokenUsage = addUsage(this.state.tokenUsage, result.tokenUsage)
|
|
269
|
-
this.transitionTo('completed')
|
|
270
282
|
|
|
283
|
+
// --- Structured output validation ---
|
|
284
|
+
if (this.config.outputSchema) {
|
|
285
|
+
return this.validateStructuredOutput(
|
|
286
|
+
messages,
|
|
287
|
+
result,
|
|
288
|
+
runner,
|
|
289
|
+
runOptions,
|
|
290
|
+
)
|
|
291
|
+
}
|
|
292
|
+
|
|
293
|
+
this.transitionTo('completed')
|
|
271
294
|
return this.toAgentRunResult(result, true)
|
|
272
295
|
} catch (err) {
|
|
273
296
|
const error = err instanceof Error ? err : new Error(String(err))
|
|
@@ -279,6 +302,90 @@ export class Agent {
|
|
|
279
302
|
messages: [],
|
|
280
303
|
tokenUsage: ZERO_USAGE,
|
|
281
304
|
toolCalls: [],
|
|
305
|
+
structured: undefined,
|
|
306
|
+
}
|
|
307
|
+
}
|
|
308
|
+
}
|
|
309
|
+
|
|
310
|
+
/**
|
|
311
|
+
* Validate agent output against the configured `outputSchema`.
|
|
312
|
+
* On first validation failure, retry once with error feedback.
|
|
313
|
+
*/
|
|
314
|
+
private async validateStructuredOutput(
|
|
315
|
+
originalMessages: LLMMessage[],
|
|
316
|
+
result: RunResult,
|
|
317
|
+
runner: AgentRunner,
|
|
318
|
+
runOptions: RunOptions,
|
|
319
|
+
): Promise<AgentRunResult> {
|
|
320
|
+
const schema = this.config.outputSchema!
|
|
321
|
+
|
|
322
|
+
// First attempt
|
|
323
|
+
let firstAttemptError: unknown
|
|
324
|
+
try {
|
|
325
|
+
const parsed = extractJSON(result.output)
|
|
326
|
+
const validated = validateOutput(schema, parsed)
|
|
327
|
+
this.transitionTo('completed')
|
|
328
|
+
return this.toAgentRunResult(result, true, validated)
|
|
329
|
+
} catch (e) {
|
|
330
|
+
firstAttemptError = e
|
|
331
|
+
}
|
|
332
|
+
|
|
333
|
+
// Retry: send full context + error feedback
|
|
334
|
+
const errorMsg = firstAttemptError instanceof Error
|
|
335
|
+
? firstAttemptError.message
|
|
336
|
+
: String(firstAttemptError)
|
|
337
|
+
|
|
338
|
+
const errorFeedbackMessage: LLMMessage = {
|
|
339
|
+
role: 'user' as const,
|
|
340
|
+
content: [{
|
|
341
|
+
type: 'text' as const,
|
|
342
|
+
text: [
|
|
343
|
+
'Your previous response did not produce valid JSON matching the required schema.',
|
|
344
|
+
'',
|
|
345
|
+
`Error: ${errorMsg}`,
|
|
346
|
+
'',
|
|
347
|
+
'Please try again. Respond with ONLY valid JSON, no other text.',
|
|
348
|
+
].join('\n'),
|
|
349
|
+
}],
|
|
350
|
+
}
|
|
351
|
+
|
|
352
|
+
const retryMessages: LLMMessage[] = [
|
|
353
|
+
...originalMessages,
|
|
354
|
+
...result.messages,
|
|
355
|
+
errorFeedbackMessage,
|
|
356
|
+
]
|
|
357
|
+
|
|
358
|
+
const retryResult = await runner.run(retryMessages, runOptions)
|
|
359
|
+
this.state.tokenUsage = addUsage(this.state.tokenUsage, retryResult.tokenUsage)
|
|
360
|
+
|
|
361
|
+
const mergedTokenUsage = addUsage(result.tokenUsage, retryResult.tokenUsage)
|
|
362
|
+
// Include the error feedback turn to maintain alternating user/assistant roles,
|
|
363
|
+
// which is required by Anthropic's API for subsequent prompt() calls.
|
|
364
|
+
const mergedMessages = [...result.messages, errorFeedbackMessage, ...retryResult.messages]
|
|
365
|
+
const mergedToolCalls = [...result.toolCalls, ...retryResult.toolCalls]
|
|
366
|
+
|
|
367
|
+
try {
|
|
368
|
+
const parsed = extractJSON(retryResult.output)
|
|
369
|
+
const validated = validateOutput(schema, parsed)
|
|
370
|
+
this.transitionTo('completed')
|
|
371
|
+
return {
|
|
372
|
+
success: true,
|
|
373
|
+
output: retryResult.output,
|
|
374
|
+
messages: mergedMessages,
|
|
375
|
+
tokenUsage: mergedTokenUsage,
|
|
376
|
+
toolCalls: mergedToolCalls,
|
|
377
|
+
structured: validated,
|
|
378
|
+
}
|
|
379
|
+
} catch {
|
|
380
|
+
// Retry also failed
|
|
381
|
+
this.transitionTo('completed')
|
|
382
|
+
return {
|
|
383
|
+
success: false,
|
|
384
|
+
output: retryResult.output,
|
|
385
|
+
messages: mergedMessages,
|
|
386
|
+
tokenUsage: mergedTokenUsage,
|
|
387
|
+
toolCalls: mergedToolCalls,
|
|
388
|
+
structured: undefined,
|
|
282
389
|
}
|
|
283
390
|
}
|
|
284
391
|
}
|
|
@@ -331,8 +438,9 @@ export class Agent {
|
|
|
331
438
|
// -------------------------------------------------------------------------
|
|
332
439
|
|
|
333
440
|
private toAgentRunResult(
|
|
334
|
-
result:
|
|
441
|
+
result: RunResult,
|
|
335
442
|
success: boolean,
|
|
443
|
+
structured?: unknown,
|
|
336
444
|
): AgentRunResult {
|
|
337
445
|
return {
|
|
338
446
|
success,
|
|
@@ -340,6 +448,7 @@ export class Agent {
|
|
|
340
448
|
messages: result.messages,
|
|
341
449
|
tokenUsage: result.tokenUsage,
|
|
342
450
|
toolCalls: result.toolCalls,
|
|
451
|
+
structured,
|
|
343
452
|
}
|
|
344
453
|
}
|
|
345
454
|
|
|
@@ -0,0 +1,126 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @fileoverview Structured output utilities for agent responses.
|
|
3
|
+
*
|
|
4
|
+
* Provides JSON extraction, Zod validation, and system-prompt injection so
|
|
5
|
+
* that agents can return typed, schema-validated output.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { type ZodSchema } from 'zod'
|
|
9
|
+
import { zodToJsonSchema } from '../tool/framework.js'
|
|
10
|
+
|
|
11
|
+
// ---------------------------------------------------------------------------
|
|
12
|
+
// System-prompt instruction builder
|
|
13
|
+
// ---------------------------------------------------------------------------
|
|
14
|
+
|
|
15
|
+
/**
|
|
16
|
+
* Build a JSON-mode instruction block to append to the agent's system prompt.
|
|
17
|
+
*
|
|
18
|
+
* Converts the Zod schema to JSON Schema and formats it as a clear directive
|
|
19
|
+
* for the LLM to respond with valid JSON matching the schema.
|
|
20
|
+
*/
|
|
21
|
+
export function buildStructuredOutputInstruction(schema: ZodSchema): string {
|
|
22
|
+
const jsonSchema = zodToJsonSchema(schema)
|
|
23
|
+
return [
|
|
24
|
+
'',
|
|
25
|
+
'## Output Format (REQUIRED)',
|
|
26
|
+
'You MUST respond with ONLY valid JSON that conforms to the following JSON Schema.',
|
|
27
|
+
'Do NOT include any text, markdown fences, or explanation outside the JSON object.',
|
|
28
|
+
'Do NOT wrap the JSON in ```json code fences.',
|
|
29
|
+
'',
|
|
30
|
+
'```',
|
|
31
|
+
JSON.stringify(jsonSchema, null, 2),
|
|
32
|
+
'```',
|
|
33
|
+
].join('\n')
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
// ---------------------------------------------------------------------------
|
|
37
|
+
// JSON extraction
|
|
38
|
+
// ---------------------------------------------------------------------------
|
|
39
|
+
|
|
40
|
+
/**
|
|
41
|
+
* Attempt to extract and parse JSON from the agent's raw text output.
|
|
42
|
+
*
|
|
43
|
+
* Handles three cases in order:
|
|
44
|
+
* 1. The output is already valid JSON (ideal case)
|
|
45
|
+
* 2. The output contains a ` ```json ` fenced block
|
|
46
|
+
* 3. The output contains a bare JSON object/array (first `{`/`[` to last `}`/`]`)
|
|
47
|
+
*
|
|
48
|
+
* @throws {Error} when no valid JSON can be extracted
|
|
49
|
+
*/
|
|
50
|
+
export function extractJSON(raw: string): unknown {
|
|
51
|
+
const trimmed = raw.trim()
|
|
52
|
+
|
|
53
|
+
// Case 1: Direct parse
|
|
54
|
+
try {
|
|
55
|
+
return JSON.parse(trimmed)
|
|
56
|
+
} catch {
|
|
57
|
+
// Continue to fallback strategies
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
// Case 2a: Prefer ```json tagged fence
|
|
61
|
+
const jsonFenceMatch = trimmed.match(/```json\s*([\s\S]*?)```/)
|
|
62
|
+
if (jsonFenceMatch?.[1]) {
|
|
63
|
+
try {
|
|
64
|
+
return JSON.parse(jsonFenceMatch[1].trim())
|
|
65
|
+
} catch {
|
|
66
|
+
// Continue
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
// Case 2b: Fall back to bare ``` fence
|
|
71
|
+
const bareFenceMatch = trimmed.match(/```\s*([\s\S]*?)```/)
|
|
72
|
+
if (bareFenceMatch?.[1]) {
|
|
73
|
+
try {
|
|
74
|
+
return JSON.parse(bareFenceMatch[1].trim())
|
|
75
|
+
} catch {
|
|
76
|
+
// Continue
|
|
77
|
+
}
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
// Case 3: Find first { to last } (object)
|
|
81
|
+
const objStart = trimmed.indexOf('{')
|
|
82
|
+
const objEnd = trimmed.lastIndexOf('}')
|
|
83
|
+
if (objStart !== -1 && objEnd > objStart) {
|
|
84
|
+
try {
|
|
85
|
+
return JSON.parse(trimmed.slice(objStart, objEnd + 1))
|
|
86
|
+
} catch {
|
|
87
|
+
// Fall through
|
|
88
|
+
}
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
// Case 3b: Find first [ to last ] (array)
|
|
92
|
+
const arrStart = trimmed.indexOf('[')
|
|
93
|
+
const arrEnd = trimmed.lastIndexOf(']')
|
|
94
|
+
if (arrStart !== -1 && arrEnd > arrStart) {
|
|
95
|
+
try {
|
|
96
|
+
return JSON.parse(trimmed.slice(arrStart, arrEnd + 1))
|
|
97
|
+
} catch {
|
|
98
|
+
// Fall through
|
|
99
|
+
}
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
throw new Error(
|
|
103
|
+
`Failed to extract JSON from output. Raw output begins with: "${trimmed.slice(0, 100)}"`,
|
|
104
|
+
)
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
// ---------------------------------------------------------------------------
|
|
108
|
+
// Zod validation
|
|
109
|
+
// ---------------------------------------------------------------------------
|
|
110
|
+
|
|
111
|
+
/**
|
|
112
|
+
* Validate a parsed JSON value against a Zod schema.
|
|
113
|
+
*
|
|
114
|
+
* @returns The validated (and potentially transformed) value on success.
|
|
115
|
+
* @throws {Error} with a human-readable Zod error message on failure.
|
|
116
|
+
*/
|
|
117
|
+
export function validateOutput(schema: ZodSchema, data: unknown): unknown {
|
|
118
|
+
const result = schema.safeParse(data)
|
|
119
|
+
if (result.success) {
|
|
120
|
+
return result.data
|
|
121
|
+
}
|
|
122
|
+
const issues = result.error.issues
|
|
123
|
+
.map(issue => ` - ${issue.path.length > 0 ? issue.path.join('.') : '(root)'}: ${issue.message}`)
|
|
124
|
+
.join('\n')
|
|
125
|
+
throw new Error(`Output validation failed:\n${issues}`)
|
|
126
|
+
}
|
package/src/index.ts
CHANGED
|
@@ -54,7 +54,7 @@
|
|
|
54
54
|
// Orchestrator (primary entry point)
|
|
55
55
|
// ---------------------------------------------------------------------------
|
|
56
56
|
|
|
57
|
-
export { OpenMultiAgent } from './orchestrator/orchestrator.js'
|
|
57
|
+
export { OpenMultiAgent, executeWithRetry, computeRetryDelay } from './orchestrator/orchestrator.js'
|
|
58
58
|
export { Scheduler } from './orchestrator/scheduler.js'
|
|
59
59
|
export type { SchedulingStrategy } from './orchestrator/scheduler.js'
|
|
60
60
|
|
|
@@ -63,6 +63,7 @@ export type { SchedulingStrategy } from './orchestrator/scheduler.js'
|
|
|
63
63
|
// ---------------------------------------------------------------------------
|
|
64
64
|
|
|
65
65
|
export { Agent } from './agent/agent.js'
|
|
66
|
+
export { buildStructuredOutputInstruction, extractJSON, validateOutput } from './agent/structured-output.js'
|
|
66
67
|
export { AgentPool, Semaphore } from './agent/pool.js'
|
|
67
68
|
export type { PoolStatus } from './agent/pool.js'
|
|
68
69
|
|
package/src/llm/adapter.ts
CHANGED
|
@@ -37,33 +37,46 @@ import type { LLMAdapter } from '../types.js'
|
|
|
37
37
|
* Additional providers can be integrated by implementing {@link LLMAdapter}
|
|
38
38
|
* directly and bypassing this factory.
|
|
39
39
|
*/
|
|
40
|
-
export type SupportedProvider = 'anthropic' | 'openai'
|
|
40
|
+
export type SupportedProvider = 'anthropic' | 'copilot' | 'openai'
|
|
41
41
|
|
|
42
42
|
/**
|
|
43
43
|
* Instantiate the appropriate {@link LLMAdapter} for the given provider.
|
|
44
44
|
*
|
|
45
|
-
* API keys fall back to the standard environment variables
|
|
46
|
-
*
|
|
45
|
+
* API keys fall back to the standard environment variables when not supplied
|
|
46
|
+
* explicitly:
|
|
47
|
+
* - `anthropic` → `ANTHROPIC_API_KEY`
|
|
48
|
+
* - `openai` → `OPENAI_API_KEY`
|
|
49
|
+
* - `copilot` → `GITHUB_COPILOT_TOKEN` / `GITHUB_TOKEN`, or interactive
|
|
50
|
+
* OAuth2 device flow if neither is set
|
|
47
51
|
*
|
|
48
52
|
* Adapters are imported lazily so that projects using only one provider
|
|
49
53
|
* are not forced to install the SDK for the other.
|
|
50
54
|
*
|
|
51
55
|
* @param provider - Which LLM provider to target.
|
|
52
56
|
* @param apiKey - Optional API key override; falls back to env var.
|
|
57
|
+
* @param baseURL - Optional base URL for OpenAI-compatible APIs (Ollama, vLLM, etc.).
|
|
53
58
|
* @throws {Error} When the provider string is not recognised.
|
|
54
59
|
*/
|
|
55
60
|
export async function createAdapter(
|
|
56
61
|
provider: SupportedProvider,
|
|
57
62
|
apiKey?: string,
|
|
63
|
+
baseURL?: string,
|
|
58
64
|
): Promise<LLMAdapter> {
|
|
59
65
|
switch (provider) {
|
|
60
66
|
case 'anthropic': {
|
|
61
67
|
const { AnthropicAdapter } = await import('./anthropic.js')
|
|
62
|
-
return new AnthropicAdapter(apiKey)
|
|
68
|
+
return new AnthropicAdapter(apiKey, baseURL)
|
|
69
|
+
}
|
|
70
|
+
case 'copilot': {
|
|
71
|
+
if (baseURL) {
|
|
72
|
+
console.warn('[open-multi-agent] baseURL is not supported for the copilot provider and will be ignored.')
|
|
73
|
+
}
|
|
74
|
+
const { CopilotAdapter } = await import('./copilot.js')
|
|
75
|
+
return new CopilotAdapter(apiKey)
|
|
63
76
|
}
|
|
64
77
|
case 'openai': {
|
|
65
78
|
const { OpenAIAdapter } = await import('./openai.js')
|
|
66
|
-
return new OpenAIAdapter(apiKey)
|
|
79
|
+
return new OpenAIAdapter(apiKey, baseURL)
|
|
67
80
|
}
|
|
68
81
|
default: {
|
|
69
82
|
// The `never` cast here makes TypeScript enforce exhaustiveness.
|
package/src/llm/anthropic.ts
CHANGED
|
@@ -189,9 +189,10 @@ export class AnthropicAdapter implements LLMAdapter {
|
|
|
189
189
|
|
|
190
190
|
readonly #client: Anthropic
|
|
191
191
|
|
|
192
|
-
constructor(apiKey?: string) {
|
|
192
|
+
constructor(apiKey?: string, baseURL?: string) {
|
|
193
193
|
this.#client = new Anthropic({
|
|
194
194
|
apiKey: apiKey ?? process.env['ANTHROPIC_API_KEY'],
|
|
195
|
+
baseURL,
|
|
195
196
|
})
|
|
196
197
|
}
|
|
197
198
|
|