@jackchen_me/open-multi-agent 1.0.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/package.json +8 -2
  2. package/.github/ISSUE_TEMPLATE/bug_report.md +0 -40
  3. package/.github/ISSUE_TEMPLATE/feature_request.md +0 -23
  4. package/.github/pull_request_template.md +0 -14
  5. package/.github/workflows/ci.yml +0 -23
  6. package/CLAUDE.md +0 -80
  7. package/CODE_OF_CONDUCT.md +0 -48
  8. package/CONTRIBUTING.md +0 -72
  9. package/DECISIONS.md +0 -43
  10. package/README_zh.md +0 -277
  11. package/SECURITY.md +0 -17
  12. package/examples/01-single-agent.ts +0 -131
  13. package/examples/02-team-collaboration.ts +0 -167
  14. package/examples/03-task-pipeline.ts +0 -201
  15. package/examples/04-multi-model-team.ts +0 -261
  16. package/examples/05-copilot-test.ts +0 -49
  17. package/examples/06-local-model.ts +0 -200
  18. package/examples/07-fan-out-aggregate.ts +0 -209
  19. package/examples/08-gemma4-local.ts +0 -192
  20. package/examples/09-structured-output.ts +0 -73
  21. package/examples/10-task-retry.ts +0 -132
  22. package/examples/11-trace-observability.ts +0 -133
  23. package/examples/12-grok.ts +0 -154
  24. package/examples/13-gemini.ts +0 -48
  25. package/src/agent/agent.ts +0 -622
  26. package/src/agent/loop-detector.ts +0 -137
  27. package/src/agent/pool.ts +0 -285
  28. package/src/agent/runner.ts +0 -542
  29. package/src/agent/structured-output.ts +0 -126
  30. package/src/index.ts +0 -182
  31. package/src/llm/adapter.ts +0 -98
  32. package/src/llm/anthropic.ts +0 -389
  33. package/src/llm/copilot.ts +0 -552
  34. package/src/llm/gemini.ts +0 -378
  35. package/src/llm/grok.ts +0 -29
  36. package/src/llm/openai-common.ts +0 -294
  37. package/src/llm/openai.ts +0 -292
  38. package/src/memory/shared.ts +0 -181
  39. package/src/memory/store.ts +0 -124
  40. package/src/orchestrator/orchestrator.ts +0 -1071
  41. package/src/orchestrator/scheduler.ts +0 -352
  42. package/src/task/queue.ts +0 -464
  43. package/src/task/task.ts +0 -239
  44. package/src/team/messaging.ts +0 -232
  45. package/src/team/team.ts +0 -334
  46. package/src/tool/built-in/bash.ts +0 -187
  47. package/src/tool/built-in/file-edit.ts +0 -154
  48. package/src/tool/built-in/file-read.ts +0 -105
  49. package/src/tool/built-in/file-write.ts +0 -81
  50. package/src/tool/built-in/grep.ts +0 -362
  51. package/src/tool/built-in/index.ts +0 -50
  52. package/src/tool/executor.ts +0 -178
  53. package/src/tool/framework.ts +0 -557
  54. package/src/tool/text-tool-extractor.ts +0 -219
  55. package/src/types.ts +0 -542
  56. package/src/utils/semaphore.ts +0 -89
  57. package/src/utils/trace.ts +0 -34
  58. package/tests/agent-hooks.test.ts +0 -473
  59. package/tests/agent-pool.test.ts +0 -212
  60. package/tests/approval.test.ts +0 -464
  61. package/tests/built-in-tools.test.ts +0 -393
  62. package/tests/gemini-adapter.test.ts +0 -97
  63. package/tests/grok-adapter.test.ts +0 -74
  64. package/tests/llm-adapters.test.ts +0 -357
  65. package/tests/loop-detection.test.ts +0 -456
  66. package/tests/openai-fallback.test.ts +0 -159
  67. package/tests/orchestrator.test.ts +0 -281
  68. package/tests/scheduler.test.ts +0 -221
  69. package/tests/semaphore.test.ts +0 -57
  70. package/tests/shared-memory.test.ts +0 -122
  71. package/tests/structured-output.test.ts +0 -331
  72. package/tests/task-queue.test.ts +0 -244
  73. package/tests/task-retry.test.ts +0 -368
  74. package/tests/task-utils.test.ts +0 -155
  75. package/tests/team-messaging.test.ts +0 -329
  76. package/tests/text-tool-extractor.test.ts +0 -170
  77. package/tests/tool-executor.test.ts +0 -193
  78. package/tests/trace.test.ts +0 -453
  79. package/tsconfig.json +0 -25
  80. package/vitest.config.ts +0 -9
@@ -1,622 +0,0 @@
1
- /**
2
- * @fileoverview High-level Agent class for open-multi-agent.
3
- *
4
- * {@link Agent} is the primary interface most consumers interact with.
5
- * It wraps {@link AgentRunner} with:
6
- * - Persistent conversation history (`prompt()`)
7
- * - Fresh-conversation semantics (`run()`)
8
- * - Streaming support (`stream()`)
9
- * - Dynamic tool registration at runtime
10
- * - Full lifecycle state tracking (`idle → running → completed | error`)
11
- *
12
- * @example
13
- * ```ts
14
- * const agent = new Agent({
15
- * name: 'researcher',
16
- * model: 'claude-opus-4-6',
17
- * systemPrompt: 'You are a rigorous research assistant.',
18
- * tools: ['web_search', 'read_file'],
19
- * })
20
- *
21
- * const result = await agent.run('Summarise the last 3 IPCC reports.')
22
- * console.log(result.output)
23
- * ```
24
- */
25
-
26
- import type {
27
- AgentConfig,
28
- AgentState,
29
- AgentRunResult,
30
- BeforeRunHookContext,
31
- LLMMessage,
32
- StreamEvent,
33
- TokenUsage,
34
- ToolUseContext,
35
- } from '../types.js'
36
- import { emitTrace, generateRunId } from '../utils/trace.js'
37
- import type { ToolDefinition as FrameworkToolDefinition, ToolRegistry } from '../tool/framework.js'
38
- import type { ToolExecutor } from '../tool/executor.js'
39
- import { createAdapter } from '../llm/adapter.js'
40
- import { AgentRunner, type RunnerOptions, type RunOptions, type RunResult } from './runner.js'
41
- import {
42
- buildStructuredOutputInstruction,
43
- extractJSON,
44
- validateOutput,
45
- } from './structured-output.js'
46
-
47
- // ---------------------------------------------------------------------------
48
- // Internal helpers
49
- // ---------------------------------------------------------------------------
50
-
51
- const ZERO_USAGE: TokenUsage = { input_tokens: 0, output_tokens: 0 }
52
-
53
- /**
54
- * Combine two {@link AbortSignal}s so that aborting either one cancels the
55
- * returned signal. Works on Node 18+ (no `AbortSignal.any` required).
56
- */
57
- function mergeAbortSignals(a: AbortSignal, b: AbortSignal): AbortSignal {
58
- const controller = new AbortController()
59
- if (a.aborted || b.aborted) { controller.abort(); return controller.signal }
60
- const abort = () => controller.abort()
61
- a.addEventListener('abort', abort, { once: true })
62
- b.addEventListener('abort', abort, { once: true })
63
- return controller.signal
64
- }
65
-
66
- function addUsage(a: TokenUsage, b: TokenUsage): TokenUsage {
67
- return {
68
- input_tokens: a.input_tokens + b.input_tokens,
69
- output_tokens: a.output_tokens + b.output_tokens,
70
- }
71
- }
72
-
73
- // ---------------------------------------------------------------------------
74
- // Agent
75
- // ---------------------------------------------------------------------------
76
-
77
- /**
78
- * High-level wrapper around {@link AgentRunner} that manages conversation
79
- * history, state transitions, and tool lifecycle.
80
- */
81
- export class Agent {
82
- readonly name: string
83
- readonly config: AgentConfig
84
-
85
- private runner: AgentRunner | null = null
86
- private state: AgentState
87
- private readonly _toolRegistry: ToolRegistry
88
- private readonly _toolExecutor: ToolExecutor
89
- private messageHistory: LLMMessage[] = []
90
-
91
- /**
92
- * @param config - Static configuration for this agent.
93
- * @param toolRegistry - Registry used to resolve and manage tools.
94
- * @param toolExecutor - Executor that dispatches tool calls.
95
- *
96
- * `toolRegistry` and `toolExecutor` are injected rather than instantiated
97
- * internally so that teams of agents can share a single registry.
98
- */
99
- constructor(
100
- config: AgentConfig,
101
- toolRegistry: ToolRegistry,
102
- toolExecutor: ToolExecutor,
103
- ) {
104
- this.name = config.name
105
- this.config = config
106
- this._toolRegistry = toolRegistry
107
- this._toolExecutor = toolExecutor
108
-
109
- this.state = {
110
- status: 'idle',
111
- messages: [],
112
- tokenUsage: ZERO_USAGE,
113
- }
114
- }
115
-
116
- // -------------------------------------------------------------------------
117
- // Initialisation (async, called lazily)
118
- // -------------------------------------------------------------------------
119
-
120
- /**
121
- * Lazily create the {@link AgentRunner}.
122
- *
123
- * The adapter is created asynchronously (it may lazy-import provider SDKs),
124
- * so we defer construction until the first `run` / `prompt` / `stream` call.
125
- */
126
- private async getRunner(): Promise<AgentRunner> {
127
- if (this.runner !== null) {
128
- return this.runner
129
- }
130
-
131
- const provider = this.config.provider ?? 'anthropic'
132
- const adapter = await createAdapter(provider, this.config.apiKey, this.config.baseURL)
133
-
134
- // Append structured-output instructions when an outputSchema is configured.
135
- let effectiveSystemPrompt = this.config.systemPrompt
136
- if (this.config.outputSchema) {
137
- const instruction = buildStructuredOutputInstruction(this.config.outputSchema)
138
- effectiveSystemPrompt = effectiveSystemPrompt
139
- ? effectiveSystemPrompt + '\n' + instruction
140
- : instruction
141
- }
142
-
143
- const runnerOptions: RunnerOptions = {
144
- model: this.config.model,
145
- systemPrompt: effectiveSystemPrompt,
146
- maxTurns: this.config.maxTurns,
147
- maxTokens: this.config.maxTokens,
148
- temperature: this.config.temperature,
149
- allowedTools: this.config.tools,
150
- agentName: this.name,
151
- agentRole: this.config.systemPrompt?.slice(0, 50) ?? 'assistant',
152
- loopDetection: this.config.loopDetection,
153
- }
154
-
155
- this.runner = new AgentRunner(
156
- adapter,
157
- this._toolRegistry,
158
- this._toolExecutor,
159
- runnerOptions,
160
- )
161
-
162
- return this.runner
163
- }
164
-
165
- // -------------------------------------------------------------------------
166
- // Primary execution methods
167
- // -------------------------------------------------------------------------
168
-
169
- /**
170
- * Run `prompt` in a fresh conversation (history is NOT used).
171
- *
172
- * Equivalent to constructing a brand-new messages array `[{ role:'user', … }]`
173
- * and calling the runner once. The agent's persistent history is not modified.
174
- *
175
- * Use this for one-shot queries where past context is irrelevant.
176
- */
177
- async run(prompt: string, runOptions?: Partial<RunOptions>): Promise<AgentRunResult> {
178
- const messages: LLMMessage[] = [
179
- { role: 'user', content: [{ type: 'text', text: prompt }] },
180
- ]
181
-
182
- return this.executeRun(messages, runOptions)
183
- }
184
-
185
- /**
186
- * Run `prompt` as part of the ongoing conversation.
187
- *
188
- * Appends the user message to the persistent history, runs the agent, then
189
- * appends the resulting messages to the history for the next call.
190
- *
191
- * Use this for multi-turn interactions.
192
- */
193
- // TODO(#18): accept optional RunOptions to forward trace context
194
- async prompt(message: string): Promise<AgentRunResult> {
195
- const userMessage: LLMMessage = {
196
- role: 'user',
197
- content: [{ type: 'text', text: message }],
198
- }
199
-
200
- this.messageHistory.push(userMessage)
201
-
202
- const result = await this.executeRun([...this.messageHistory])
203
-
204
- // Persist the new messages into history so the next `prompt` sees them.
205
- for (const msg of result.messages) {
206
- this.messageHistory.push(msg)
207
- }
208
-
209
- return result
210
- }
211
-
212
- /**
213
- * Stream a fresh-conversation response, yielding {@link StreamEvent}s.
214
- *
215
- * Like {@link run}, this does not use or update the persistent history.
216
- */
217
- // TODO(#18): accept optional RunOptions to forward trace context
218
- async *stream(prompt: string): AsyncGenerator<StreamEvent> {
219
- const messages: LLMMessage[] = [
220
- { role: 'user', content: [{ type: 'text', text: prompt }] },
221
- ]
222
-
223
- yield* this.executeStream(messages)
224
- }
225
-
226
- // -------------------------------------------------------------------------
227
- // State management
228
- // -------------------------------------------------------------------------
229
-
230
- /** Return a snapshot of the current agent state (does not clone nested objects). */
231
- getState(): AgentState {
232
- return { ...this.state, messages: [...this.state.messages] }
233
- }
234
-
235
- /** Return a copy of the persistent message history. */
236
- getHistory(): LLMMessage[] {
237
- return [...this.messageHistory]
238
- }
239
-
240
- /**
241
- * Clear the persistent conversation history and reset state to `idle`.
242
- * Does NOT discard the runner instance — the adapter connection is reused.
243
- */
244
- reset(): void {
245
- this.messageHistory = []
246
- this.state = {
247
- status: 'idle',
248
- messages: [],
249
- tokenUsage: ZERO_USAGE,
250
- }
251
- }
252
-
253
- // -------------------------------------------------------------------------
254
- // Dynamic tool management
255
- // -------------------------------------------------------------------------
256
-
257
- /**
258
- * Register a new tool with this agent's tool registry at runtime.
259
- *
260
- * The tool becomes available to the next LLM call — no restart required.
261
- */
262
- addTool(tool: FrameworkToolDefinition): void {
263
- this._toolRegistry.register(tool)
264
- }
265
-
266
- /**
267
- * Deregister a tool by name.
268
- * If the tool is not registered this is a no-op (no error is thrown).
269
- */
270
- removeTool(name: string): void {
271
- this._toolRegistry.deregister(name)
272
- }
273
-
274
- /** Return the names of all currently registered tools. */
275
- getTools(): string[] {
276
- return this._toolRegistry.list().map((t) => t.name)
277
- }
278
-
279
- // -------------------------------------------------------------------------
280
- // Private execution core
281
- // -------------------------------------------------------------------------
282
-
283
- /**
284
- * Shared execution path used by both `run` and `prompt`.
285
- * Handles state transitions and error wrapping.
286
- */
287
- private async executeRun(
288
- messages: LLMMessage[],
289
- callerOptions?: Partial<RunOptions>,
290
- ): Promise<AgentRunResult> {
291
- this.transitionTo('running')
292
-
293
- const agentStartMs = Date.now()
294
-
295
- try {
296
- // --- beforeRun hook ---
297
- if (this.config.beforeRun) {
298
- const hookCtx = this.buildBeforeRunHookContext(messages)
299
- const modified = await this.config.beforeRun(hookCtx)
300
- this.applyHookContext(messages, modified, hookCtx.prompt)
301
- }
302
-
303
- const runner = await this.getRunner()
304
- const internalOnMessage = (msg: LLMMessage) => {
305
- this.state.messages.push(msg)
306
- callerOptions?.onMessage?.(msg)
307
- }
308
- // Auto-generate runId when onTrace is provided but runId is missing
309
- const needsRunId = callerOptions?.onTrace && !callerOptions.runId
310
- // Create a fresh timeout signal per run (not per runner) so that
311
- // each run() / prompt() call gets its own timeout window.
312
- const timeoutSignal = this.config.timeoutMs !== undefined && this.config.timeoutMs > 0
313
- ? AbortSignal.timeout(this.config.timeoutMs)
314
- : undefined
315
- // Merge caller-provided abortSignal with the timeout signal so that
316
- // either cancellation source is respected.
317
- const callerAbort = callerOptions?.abortSignal
318
- const effectiveAbort = timeoutSignal && callerAbort
319
- ? mergeAbortSignals(timeoutSignal, callerAbort)
320
- : timeoutSignal ?? callerAbort
321
- const runOptions: RunOptions = {
322
- ...callerOptions,
323
- onMessage: internalOnMessage,
324
- ...(needsRunId ? { runId: generateRunId() } : undefined),
325
- ...(effectiveAbort ? { abortSignal: effectiveAbort } : undefined),
326
- }
327
-
328
- const result = await runner.run(messages, runOptions)
329
- this.state.tokenUsage = addUsage(this.state.tokenUsage, result.tokenUsage)
330
-
331
- // --- Structured output validation ---
332
- if (this.config.outputSchema) {
333
- let validated = await this.validateStructuredOutput(
334
- messages,
335
- result,
336
- runner,
337
- runOptions,
338
- )
339
- // --- afterRun hook ---
340
- if (this.config.afterRun) {
341
- validated = await this.config.afterRun(validated)
342
- }
343
- this.emitAgentTrace(callerOptions, agentStartMs, validated)
344
- return validated
345
- }
346
-
347
- let agentResult = this.toAgentRunResult(result, true)
348
-
349
- // --- afterRun hook ---
350
- if (this.config.afterRun) {
351
- agentResult = await this.config.afterRun(agentResult)
352
- }
353
-
354
- this.transitionTo('completed')
355
- this.emitAgentTrace(callerOptions, agentStartMs, agentResult)
356
- return agentResult
357
- } catch (err) {
358
- const error = err instanceof Error ? err : new Error(String(err))
359
- this.transitionToError(error)
360
-
361
- const errorResult: AgentRunResult = {
362
- success: false,
363
- output: error.message,
364
- messages: [],
365
- tokenUsage: ZERO_USAGE,
366
- toolCalls: [],
367
- structured: undefined,
368
- }
369
- this.emitAgentTrace(callerOptions, agentStartMs, errorResult)
370
- return errorResult
371
- }
372
- }
373
-
374
- /** Emit an `agent` trace event if `onTrace` is provided. */
375
- private emitAgentTrace(
376
- options: Partial<RunOptions> | undefined,
377
- startMs: number,
378
- result: AgentRunResult,
379
- ): void {
380
- if (!options?.onTrace) return
381
- const endMs = Date.now()
382
- emitTrace(options.onTrace, {
383
- type: 'agent',
384
- runId: options.runId ?? '',
385
- taskId: options.taskId,
386
- agent: options.traceAgent ?? this.name,
387
- turns: result.messages.filter(m => m.role === 'assistant').length,
388
- tokens: result.tokenUsage,
389
- toolCalls: result.toolCalls.length,
390
- startMs,
391
- endMs,
392
- durationMs: endMs - startMs,
393
- })
394
- }
395
-
396
- /**
397
- * Validate agent output against the configured `outputSchema`.
398
- * On first validation failure, retry once with error feedback.
399
- */
400
- private async validateStructuredOutput(
401
- originalMessages: LLMMessage[],
402
- result: RunResult,
403
- runner: AgentRunner,
404
- runOptions: RunOptions,
405
- ): Promise<AgentRunResult> {
406
- const schema = this.config.outputSchema!
407
-
408
- // First attempt
409
- let firstAttemptError: unknown
410
- try {
411
- const parsed = extractJSON(result.output)
412
- const validated = validateOutput(schema, parsed)
413
- this.transitionTo('completed')
414
- return this.toAgentRunResult(result, true, validated)
415
- } catch (e) {
416
- firstAttemptError = e
417
- }
418
-
419
- // Retry: send full context + error feedback
420
- const errorMsg = firstAttemptError instanceof Error
421
- ? firstAttemptError.message
422
- : String(firstAttemptError)
423
-
424
- const errorFeedbackMessage: LLMMessage = {
425
- role: 'user' as const,
426
- content: [{
427
- type: 'text' as const,
428
- text: [
429
- 'Your previous response did not produce valid JSON matching the required schema.',
430
- '',
431
- `Error: ${errorMsg}`,
432
- '',
433
- 'Please try again. Respond with ONLY valid JSON, no other text.',
434
- ].join('\n'),
435
- }],
436
- }
437
-
438
- const retryMessages: LLMMessage[] = [
439
- ...originalMessages,
440
- ...result.messages,
441
- errorFeedbackMessage,
442
- ]
443
-
444
- const retryResult = await runner.run(retryMessages, runOptions)
445
- this.state.tokenUsage = addUsage(this.state.tokenUsage, retryResult.tokenUsage)
446
-
447
- const mergedTokenUsage = addUsage(result.tokenUsage, retryResult.tokenUsage)
448
- // Include the error feedback turn to maintain alternating user/assistant roles,
449
- // which is required by Anthropic's API for subsequent prompt() calls.
450
- const mergedMessages = [...result.messages, errorFeedbackMessage, ...retryResult.messages]
451
- const mergedToolCalls = [...result.toolCalls, ...retryResult.toolCalls]
452
-
453
- try {
454
- const parsed = extractJSON(retryResult.output)
455
- const validated = validateOutput(schema, parsed)
456
- this.transitionTo('completed')
457
- return {
458
- success: true,
459
- output: retryResult.output,
460
- messages: mergedMessages,
461
- tokenUsage: mergedTokenUsage,
462
- toolCalls: mergedToolCalls,
463
- structured: validated,
464
- }
465
- } catch {
466
- // Retry also failed
467
- this.transitionTo('completed')
468
- return {
469
- success: false,
470
- output: retryResult.output,
471
- messages: mergedMessages,
472
- tokenUsage: mergedTokenUsage,
473
- toolCalls: mergedToolCalls,
474
- structured: undefined,
475
- }
476
- }
477
- }
478
-
479
- /**
480
- * Shared streaming path used by `stream`.
481
- * Handles state transitions and error wrapping.
482
- */
483
- private async *executeStream(messages: LLMMessage[]): AsyncGenerator<StreamEvent> {
484
- this.transitionTo('running')
485
-
486
- try {
487
- // --- beforeRun hook ---
488
- if (this.config.beforeRun) {
489
- const hookCtx = this.buildBeforeRunHookContext(messages)
490
- const modified = await this.config.beforeRun(hookCtx)
491
- this.applyHookContext(messages, modified, hookCtx.prompt)
492
- }
493
-
494
- const runner = await this.getRunner()
495
- // Fresh timeout per stream call, same as executeRun.
496
- const timeoutSignal = this.config.timeoutMs !== undefined && this.config.timeoutMs > 0
497
- ? AbortSignal.timeout(this.config.timeoutMs)
498
- : undefined
499
-
500
- for await (const event of runner.stream(messages, timeoutSignal ? { abortSignal: timeoutSignal } : {})) {
501
- if (event.type === 'done') {
502
- const result = event.data as import('./runner.js').RunResult
503
- this.state.tokenUsage = addUsage(this.state.tokenUsage, result.tokenUsage)
504
-
505
- let agentResult = this.toAgentRunResult(result, true)
506
- if (this.config.afterRun) {
507
- agentResult = await this.config.afterRun(agentResult)
508
- }
509
- this.transitionTo('completed')
510
- yield { type: 'done', data: agentResult } satisfies StreamEvent
511
- continue
512
- } else if (event.type === 'error') {
513
- const error = event.data instanceof Error
514
- ? event.data
515
- : new Error(String(event.data))
516
- this.transitionToError(error)
517
- }
518
-
519
- yield event
520
- }
521
- } catch (err) {
522
- const error = err instanceof Error ? err : new Error(String(err))
523
- this.transitionToError(error)
524
- yield { type: 'error', data: error } satisfies StreamEvent
525
- }
526
- }
527
-
528
- // -------------------------------------------------------------------------
529
- // Hook helpers
530
- // -------------------------------------------------------------------------
531
-
532
- /** Extract the prompt text from the last user message to build hook context. */
533
- private buildBeforeRunHookContext(messages: LLMMessage[]): BeforeRunHookContext {
534
- let prompt = ''
535
- for (let i = messages.length - 1; i >= 0; i--) {
536
- if (messages[i]!.role === 'user') {
537
- prompt = messages[i]!.content
538
- .filter((b): b is import('../types.js').TextBlock => b.type === 'text')
539
- .map(b => b.text)
540
- .join('')
541
- break
542
- }
543
- }
544
- // Strip hook functions to avoid circular self-references in the context
545
- const { beforeRun, afterRun, ...agentInfo } = this.config
546
- return { prompt, agent: agentInfo as AgentConfig }
547
- }
548
-
549
- /**
550
- * Apply a (possibly modified) hook context back to the messages array.
551
- *
552
- * Only text blocks in the last user message are replaced; non-text content
553
- * (images, tool results) is preserved. The array element is replaced (not
554
- * mutated in place) so that shallow copies of the original array (e.g. from
555
- * `prompt()`) are not affected.
556
- */
557
- private applyHookContext(messages: LLMMessage[], ctx: BeforeRunHookContext, originalPrompt: string): void {
558
- if (ctx.prompt === originalPrompt) return
559
-
560
- for (let i = messages.length - 1; i >= 0; i--) {
561
- if (messages[i]!.role === 'user') {
562
- const nonTextBlocks = messages[i]!.content.filter(b => b.type !== 'text')
563
- messages[i] = {
564
- role: 'user',
565
- content: [{ type: 'text', text: ctx.prompt }, ...nonTextBlocks],
566
- }
567
- break
568
- }
569
- }
570
- }
571
-
572
- // -------------------------------------------------------------------------
573
- // State transition helpers
574
- // -------------------------------------------------------------------------
575
-
576
- private transitionTo(status: 'idle' | 'running' | 'completed' | 'error'): void {
577
- this.state = { ...this.state, status }
578
- }
579
-
580
- private transitionToError(error: Error): void {
581
- this.state = { ...this.state, status: 'error', error }
582
- }
583
-
584
- // -------------------------------------------------------------------------
585
- // Result mapping
586
- // -------------------------------------------------------------------------
587
-
588
- private toAgentRunResult(
589
- result: RunResult,
590
- success: boolean,
591
- structured?: unknown,
592
- ): AgentRunResult {
593
- return {
594
- success,
595
- output: result.output,
596
- messages: result.messages,
597
- tokenUsage: result.tokenUsage,
598
- toolCalls: result.toolCalls,
599
- structured,
600
- ...(result.loopDetected ? { loopDetected: true } : {}),
601
- }
602
- }
603
-
604
- // -------------------------------------------------------------------------
605
- // ToolUseContext builder (for direct use by subclasses or advanced callers)
606
- // -------------------------------------------------------------------------
607
-
608
- /**
609
- * Build a {@link ToolUseContext} that identifies this agent.
610
- * Exposed so team orchestrators can inject richer context (e.g. `TeamInfo`).
611
- */
612
- buildToolContext(abortSignal?: AbortSignal): ToolUseContext {
613
- return {
614
- agent: {
615
- name: this.name,
616
- role: this.config.systemPrompt?.slice(0, 60) ?? 'assistant',
617
- model: this.config.model,
618
- },
619
- abortSignal,
620
- }
621
- }
622
- }