@cuylabs/agent-core 0.4.0 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/README.md +81 -323
  2. package/dist/builder-BKkipazh.d.ts +34 -0
  3. package/dist/capabilities/index.d.ts +97 -0
  4. package/dist/capabilities/index.js +46 -0
  5. package/dist/chunk-3C4VKG4P.js +2149 -0
  6. package/dist/chunk-6TDTQJ4P.js +116 -0
  7. package/dist/chunk-7MUFEN4K.js +559 -0
  8. package/dist/chunk-BDBZ3SLK.js +745 -0
  9. package/dist/chunk-DWYX7ASF.js +26 -0
  10. package/dist/chunk-FG4MD5MU.js +54 -0
  11. package/dist/chunk-IVUJDISU.js +556 -0
  12. package/dist/chunk-LRHOS4ZN.js +584 -0
  13. package/dist/chunk-O2ZCFQL6.js +764 -0
  14. package/dist/chunk-P6YF7USR.js +182 -0
  15. package/dist/chunk-QAQADS4X.js +258 -0
  16. package/dist/chunk-QWFMX226.js +879 -0
  17. package/dist/{chunk-6VKLWNRE.js → chunk-SDSBEQXG.js} +1 -132
  18. package/dist/chunk-VBWWUHWI.js +724 -0
  19. package/dist/chunk-VEKUXUVF.js +41 -0
  20. package/dist/chunk-X635CM2F.js +305 -0
  21. package/dist/chunk-YUUJK53A.js +91 -0
  22. package/dist/chunk-ZXAKHMWH.js +283 -0
  23. package/dist/config-D2xeGEHK.d.ts +52 -0
  24. package/dist/context/index.d.ts +259 -0
  25. package/dist/context/index.js +26 -0
  26. package/dist/identifiers-BLUxFqV_.d.ts +12 -0
  27. package/dist/index-DZQJD_hp.d.ts +1067 -0
  28. package/dist/index-ipP3_ztp.d.ts +198 -0
  29. package/dist/index.d.ts +210 -5736
  30. package/dist/index.js +2132 -7767
  31. package/dist/mcp/index.d.ts +26 -0
  32. package/dist/mcp/index.js +14 -0
  33. package/dist/messages-BYWGn8TY.d.ts +110 -0
  34. package/dist/middleware/index.d.ts +8 -0
  35. package/dist/middleware/index.js +12 -0
  36. package/dist/models/index.d.ts +33 -0
  37. package/dist/models/index.js +12 -0
  38. package/dist/network-D76DS5ot.d.ts +5 -0
  39. package/dist/prompt/index.d.ts +225 -0
  40. package/dist/prompt/index.js +45 -0
  41. package/dist/reasoning/index.d.ts +71 -0
  42. package/dist/reasoning/index.js +47 -0
  43. package/dist/registry-CuRWWtcT.d.ts +164 -0
  44. package/dist/resolver-DOfZ-xuk.d.ts +254 -0
  45. package/dist/runner-G1wxEgac.d.ts +852 -0
  46. package/dist/runtime/index.d.ts +357 -0
  47. package/dist/runtime/index.js +64 -0
  48. package/dist/session-manager-Uawm2Le7.d.ts +274 -0
  49. package/dist/skill/index.d.ts +103 -0
  50. package/dist/skill/index.js +39 -0
  51. package/dist/storage/index.d.ts +167 -0
  52. package/dist/storage/index.js +50 -0
  53. package/dist/sub-agent/index.d.ts +14 -0
  54. package/dist/sub-agent/index.js +15 -0
  55. package/dist/tool/index.d.ts +174 -1
  56. package/dist/tool/index.js +12 -3
  57. package/dist/tool-DYp6-cC3.d.ts +239 -0
  58. package/dist/tool-pFAnJc5Y.d.ts +419 -0
  59. package/dist/tracker-DClqYqTj.d.ts +96 -0
  60. package/dist/tracking/index.d.ts +109 -0
  61. package/dist/tracking/index.js +20 -0
  62. package/dist/types-BWo810L_.d.ts +648 -0
  63. package/dist/types-CQaXbRsS.d.ts +47 -0
  64. package/dist/types-VQgymC1N.d.ts +156 -0
  65. package/package.json +89 -5
  66. package/dist/index-BlSTfS-W.d.ts +0 -470
@@ -0,0 +1,852 @@
1
+ import * as ai from 'ai';
2
+ import { LanguageModel, ModelMessage, TelemetrySettings } from 'ai';
3
+ import { T as TokenUsage, M as Message } from './messages-BYWGn8TY.js';
4
+ import { d as SkillConfig, T as Tool } from './tool-pFAnJc5Y.js';
5
+ import { b as ToolContext } from './tool-DYp6-cC3.js';
6
+ import { R as ReasoningLevel } from './types-CQaXbRsS.js';
7
+
8
+ /**
9
+ * Stream types for @cuylabs/agent-core
10
+ *
11
+ * Defines the canonical StreamChunk union and related types
12
+ * for both AI SDK native and custom stream providers.
13
+ */
14
+ /**
15
+ * Stream chunk types (AI SDK compatible + custom streams)
16
+ *
17
+ * This is the single canonical definition — used by both the
18
+ * streaming module and custom stream providers.
19
+ */
20
+ type StreamChunk = {
21
+ type: "text-start";
22
+ } | {
23
+ type: "text-delta";
24
+ text: string;
25
+ } | {
26
+ type: "text-end";
27
+ } | {
28
+ type: "reasoning-start";
29
+ id: string;
30
+ } | {
31
+ type: "reasoning-delta";
32
+ id: string;
33
+ text: string;
34
+ } | {
35
+ type: "reasoning-end";
36
+ id: string;
37
+ } | {
38
+ type: "tool-call";
39
+ toolName: string;
40
+ toolCallId: string;
41
+ input: unknown;
42
+ } | {
43
+ type: "tool-result";
44
+ toolName: string;
45
+ toolCallId: string;
46
+ output: unknown;
47
+ } | {
48
+ type: "tool-error";
49
+ toolName: string;
50
+ toolCallId: string;
51
+ error: unknown;
52
+ } | {
53
+ type: "finish-step";
54
+ usage?: {
55
+ inputTokens?: number;
56
+ outputTokens?: number;
57
+ totalTokens?: number;
58
+ };
59
+ finishReason?: string;
60
+ } | {
61
+ type: "finish";
62
+ totalUsage?: {
63
+ inputTokens?: number;
64
+ outputTokens?: number;
65
+ totalTokens?: number;
66
+ };
67
+ } | {
68
+ type: "error";
69
+ error: unknown;
70
+ } | {
71
+ type: "start-step";
72
+ } | {
73
+ type: "start";
74
+ } | {
75
+ type: "abort";
76
+ } | {
77
+ type: "computer-call";
78
+ callId: string;
79
+ action: unknown;
80
+ pendingSafetyChecks?: unknown[];
81
+ } | {
82
+ type: "step-usage";
83
+ usage: {
84
+ inputTokens: number;
85
+ outputTokens: number;
86
+ totalTokens: number;
87
+ };
88
+ };
89
+ /**
90
+ * Custom stream provider function type.
91
+ *
92
+ * This matches the signature needed for agent-core's LLM module,
93
+ * returning a StreamProviderResult-compatible object.
94
+ */
95
+ type StreamProvider = (input: StreamProviderInput) => Promise<StreamProviderResult>;
96
+ /**
97
+ * Input for custom stream providers
98
+ */
99
+ interface StreamProviderInput {
100
+ /** System prompt */
101
+ system: string;
102
+ /** Messages to send */
103
+ messages: Array<{
104
+ role: string;
105
+ content: unknown;
106
+ }>;
107
+ /** Abort signal */
108
+ abortSignal?: AbortSignal;
109
+ /** Max iterations */
110
+ maxSteps?: number;
111
+ }
112
+ /**
113
+ * Result from custom stream providers (AI SDK StreamTextResult compatible)
114
+ */
115
+ interface StreamProviderResult {
116
+ /** Async iterable of stream chunks */
117
+ fullStream: AsyncIterable<StreamChunk>;
118
+ /** Promise resolving to final text */
119
+ text: Promise<string>;
120
+ /** Promise resolving to usage stats */
121
+ usage: Promise<{
122
+ inputTokens: number;
123
+ outputTokens: number;
124
+ totalTokens: number;
125
+ }>;
126
+ /** Promise resolving to finish reason */
127
+ finishReason: Promise<string>;
128
+ }
129
+ /**
130
+ * Configuration for stream provider factory.
131
+ * Contains everything needed to create a stream provider for a specific model.
132
+ */
133
+ interface StreamProviderConfig {
134
+ /** API key to use */
135
+ apiKey?: string;
136
+ /** Display dimensions */
137
+ display?: {
138
+ width: number;
139
+ height: number;
140
+ };
141
+ /** Environment type */
142
+ environment?: string;
143
+ /** Enable debug logging */
144
+ debug?: boolean;
145
+ }
146
+ /**
147
+ * Stream provider factory - creates a stream provider for a given model.
148
+ *
149
+ * This is attached to enhanced tools (like computer tools) to allow
150
+ * the Agent to automatically configure the right stream provider
151
+ * when a model requires custom handling (e.g., OpenAI computer-use-preview).
152
+ */
153
+ type StreamProviderFactory = (modelId: string, config: StreamProviderConfig) => StreamProvider;
154
+ /**
155
+ * Enhanced tools array with additional capabilities.
156
+ *
157
+ * This extends the standard Tool.AnyInfo[] with optional metadata
158
+ * that the Agent can use for automatic configuration.
159
+ */
160
+ interface EnhancedTools extends Array<unknown> {
161
+ /**
162
+ * Factory to create a stream provider for models that need custom streaming.
163
+ * Called by the Agent when it detects a model that requires special handling.
164
+ */
165
+ __streamProviderFactory?: StreamProviderFactory;
166
+ /**
167
+ * Model patterns that require the custom stream provider.
168
+ * Used by the Agent to detect when to use the factory.
169
+ * Default patterns: ["computer-use-preview"]
170
+ */
171
+ __customStreamModels?: string[];
172
+ }
173
+
174
+ /** Agent status for UI display */
175
+ type AgentStatus = "idle" | "processing" | "thinking" | "reasoning" | "calling-tool" | "waiting-approval" | "error";
176
+ /** Approval request for UI */
177
+ interface ApprovalEvent {
178
+ id: string;
179
+ tool: string;
180
+ args: unknown;
181
+ description: string;
182
+ risk: "safe" | "moderate" | "dangerous";
183
+ }
184
+ /** Neutral turn-commit boundaries for runtime/durability integrations */
185
+ type AgentTurnBoundaryKind = "input-commit-start" | "input-commit-finish" | "intervention-commit-start" | "intervention-commit-finish" | "step-commit-start" | "step-commit-finish" | "output-commit-start" | "output-commit-finish";
186
+ /**
187
+ * Events emitted during agent execution
188
+ *
189
+ * These events are designed for UI consumption:
190
+ * - status: Overall agent state for status indicators
191
+ * - approval-request: User confirmation needed
192
+ * - progress: Step counts for progress bars
193
+ */
194
+ type AgentEvent = {
195
+ type: "status";
196
+ status: AgentStatus;
197
+ } | {
198
+ type: "approval-request";
199
+ request: ApprovalEvent;
200
+ } | {
201
+ type: "approval-resolved";
202
+ id: string;
203
+ action: "allow" | "deny" | "remember";
204
+ } | {
205
+ type: "step-start";
206
+ step: number;
207
+ maxSteps: number;
208
+ } | {
209
+ type: "step-finish";
210
+ step: number;
211
+ usage?: TokenUsage;
212
+ finishReason?: string;
213
+ } | {
214
+ type: "turn-boundary";
215
+ boundary: AgentTurnBoundaryKind;
216
+ step?: number;
217
+ messageRole?: Message["role"];
218
+ pendingToolCallCount?: number;
219
+ } | {
220
+ type: "message";
221
+ message: Message;
222
+ } | {
223
+ type: "text-start";
224
+ } | {
225
+ type: "text-delta";
226
+ text: string;
227
+ } | {
228
+ type: "text-end";
229
+ } | {
230
+ type: "reasoning-start";
231
+ id: string;
232
+ } | {
233
+ type: "reasoning-delta";
234
+ id: string;
235
+ text: string;
236
+ } | {
237
+ type: "reasoning-end";
238
+ id: string;
239
+ } | {
240
+ type: "tool-start";
241
+ toolName: string;
242
+ toolCallId: string;
243
+ input: unknown;
244
+ } | {
245
+ type: "tool-result";
246
+ toolName: string;
247
+ toolCallId: string;
248
+ result: unknown;
249
+ } | {
250
+ type: "tool-error";
251
+ toolName: string;
252
+ toolCallId: string;
253
+ error: string;
254
+ } | {
255
+ type: "computer-call";
256
+ callId: string;
257
+ action: unknown;
258
+ pendingSafetyChecks?: unknown[];
259
+ } | {
260
+ type: "computer-result";
261
+ callId: string;
262
+ result: unknown;
263
+ } | {
264
+ type: "intervention-applied";
265
+ id: string;
266
+ message: string;
267
+ } | {
268
+ type: "doom-loop";
269
+ toolName: string;
270
+ repeatCount: number;
271
+ } | {
272
+ type: "context-overflow";
273
+ inputTokens: number;
274
+ limit: number;
275
+ } | {
276
+ type: "turn-summary";
277
+ turnId: string;
278
+ files: Array<{
279
+ path: string;
280
+ type: "created" | "modified" | "deleted" | "unchanged";
281
+ additions: number;
282
+ deletions: number;
283
+ }>;
284
+ additions: number;
285
+ deletions: number;
286
+ } | {
287
+ type: "retry";
288
+ attempt: number;
289
+ delayMs: number;
290
+ error: Error;
291
+ } | {
292
+ type: "error";
293
+ error: Error;
294
+ } | {
295
+ type: "complete";
296
+ usage?: TokenUsage;
297
+ output?: string;
298
+ };
299
+ /**
300
+ * Processor result - what happens after processing a turn
301
+ */
302
+ type ProcessorResult = "continue" | "stop" | "compact";
303
+ /**
304
+ * Stream input for the LLM
305
+ */
306
+ interface StreamInput {
307
+ sessionID: string;
308
+ model: ai.LanguageModel;
309
+ system: string[];
310
+ messages: ai.ModelMessage[];
311
+ abort: AbortSignal;
312
+ tools: Record<string, unknown>;
313
+ }
314
+
315
+ /**
316
+ * Prompt Pipeline Types
317
+ *
318
+ * Types for the layered system prompt architecture.
319
+ * The prompt pipeline composes a system prompt from multiple sources:
320
+ *
321
+ * Base Template → Environment → Instructions → Custom Sections → Per-Turn
322
+ *
323
+ * Each layer is optional, composable, and can be toggled on/off.
324
+ */
325
+
326
+ /**
327
+ * Model family identifier for prompt template selection.
328
+ *
329
+ * Each family gets a base template optimized for its strengths:
330
+ * - `anthropic`: Claude models — structured sections with XML tags
331
+ * - `openai`: GPT/o-series models — clear directives with markdown
332
+ * - `google`: Gemini models — balanced approach
333
+ * - `deepseek`: DeepSeek models — code-focused emphasis
334
+ * - `default`: Generic template for any model
335
+ */
336
+ type ModelFamily = "anthropic" | "openai" | "google" | "deepseek" | "default";
337
+ /**
338
+ * Runtime environment information injected into the system prompt.
339
+ * Gives the model awareness of the working context.
340
+ */
341
+ interface EnvironmentInfo {
342
+ /** Current working directory */
343
+ cwd: string;
344
+ /** Operating system (e.g. "macOS (darwin arm64)") */
345
+ platform: string;
346
+ /** Current date/time formatted string */
347
+ date: string;
348
+ /** User's shell (e.g. "/bin/zsh") */
349
+ shell?: string;
350
+ /** Active git branch, if inside a repo */
351
+ gitBranch?: string;
352
+ /** Whether the working tree is clean */
353
+ gitClean?: boolean;
354
+ /** Git repo root path */
355
+ gitRoot?: string;
356
+ }
357
+ /**
358
+ * An instruction file discovered on disk (e.g. AGENTS.md).
359
+ *
360
+ * Instruction files provide project-specific or workspace-level guidance
361
+ * that gets injected into every prompt. They're discovered by walking up
362
+ * the directory tree from cwd.
363
+ */
364
+ interface InstructionFile {
365
+ /** Absolute path to the file */
366
+ path: string;
367
+ /** File content (trimmed) */
368
+ content: string;
369
+ /** How the file was discovered */
370
+ source: "project" | "workspace" | "global";
371
+ /** Depth from cwd (0 = same directory) */
372
+ depth: number;
373
+ }
374
+ /**
375
+ * A composable section in the prompt pipeline.
376
+ *
377
+ * Sections are the building blocks of the final system prompt.
378
+ * Each section has a priority that determines its position in the output.
379
+ *
380
+ * Default priority ranges:
381
+ * - 10: Base template
382
+ * - 20: Environment block
383
+ * - 30: Instruction files
384
+ * - 50: Custom sections (default for user-added)
385
+ * - 70: Reserved for future use (e.g. Skills)
386
+ * - 90: Per-turn overrides
387
+ */
388
+ interface PromptSection {
389
+ /** Unique identifier for this section */
390
+ id: string;
391
+ /** Human-readable label (useful for debugging/logging) */
392
+ label: string;
393
+ /** The text content of this section */
394
+ content: string;
395
+ /** Sort priority — lower values appear earlier (default: 50) */
396
+ priority?: number;
397
+ /** Whether this section is active (default: true) */
398
+ enabled?: boolean;
399
+ }
400
+ /**
401
+ * Context passed to the builder for each prompt build.
402
+ *
403
+ * This provides the runtime information needed to compose
404
+ * the system prompt for a specific conversation turn.
405
+ */
406
+ interface PromptBuildContext {
407
+ /** Current working directory */
408
+ cwd: string;
409
+ /** The language model being used */
410
+ model: LanguageModel;
411
+ /** Names of available tools (for template customization) */
412
+ toolNames?: string[];
413
+ /** Per-turn additional instructions (from chat options) */
414
+ override?: string;
415
+ /** Current session ID */
416
+ sessionId?: string;
417
+ }
418
+ /**
419
+ * Configuration for the prompt pipeline.
420
+ *
421
+ * Controls which layers are active and how they behave.
422
+ * All options have sensible defaults — an empty config `{}` gives you
423
+ * the full pipeline with auto-detection.
424
+ *
425
+ * @example
426
+ * ```typescript
427
+ * // Minimal — use all defaults
428
+ * const builder = createPromptBuilder();
429
+ *
430
+ * // Custom base template but keep environment + instructions
431
+ * const builder = createPromptBuilder({
432
+ * baseTemplate: "You are a security auditor...",
433
+ * includeEnvironment: true,
434
+ * includeInstructions: true,
435
+ * });
436
+ *
437
+ * // Fully custom — disable auto features, add your own sections
438
+ * const builder = createPromptBuilder({
439
+ * includeEnvironment: false,
440
+ * includeInstructions: false,
441
+ * sections: [
442
+ * { id: "role", label: "Role", content: "You audit code.", priority: 10 },
443
+ * { id: "rules", label: "Rules", content: "Never modify files.", priority: 20 },
444
+ * ],
445
+ * });
446
+ * ```
447
+ */
448
+ interface PromptConfig {
449
+ /**
450
+ * Override the base template entirely.
451
+ * If set, replaces the model-family-specific template.
452
+ */
453
+ baseTemplate?: string;
454
+ /**
455
+ * Force a specific model family for template selection.
456
+ * Auto-detected from the model if not provided.
457
+ */
458
+ modelFamily?: ModelFamily;
459
+ /**
460
+ * Inject runtime environment info (cwd, platform, git, date).
461
+ * @default true
462
+ */
463
+ includeEnvironment?: boolean;
464
+ /**
465
+ * Discover and include instruction files (AGENTS.md, etc.).
466
+ * @default true
467
+ */
468
+ includeInstructions?: boolean;
469
+ /**
470
+ * File name patterns to search for when discovering instructions.
471
+ * Searched in each directory walking up from cwd.
472
+ *
473
+ * @default ["AGENTS.md", "CLAUDE.md", "COPILOT.md", ".cuylabs/instructions.md"]
474
+ */
475
+ instructionPatterns?: string[];
476
+ /**
477
+ * Absolute paths to global instruction files (always included
478
+ * regardless of cwd). Useful for organization-wide rules.
479
+ */
480
+ globalInstructions?: string[];
481
+ /**
482
+ * Maximum depth to walk up from cwd when searching for instructions.
483
+ * Prevents scanning the entire filesystem.
484
+ * @default 10
485
+ */
486
+ instructionMaxDepth?: number;
487
+ /**
488
+ * Maximum file size in bytes for instruction files.
489
+ * Files larger than this are skipped to avoid bloating the prompt.
490
+ * @default 51200 (50KB)
491
+ */
492
+ instructionMaxSize?: number;
493
+ /**
494
+ * Pre-defined sections to include in every prompt build.
495
+ * These are merged with auto-generated sections (template, environment, etc.).
496
+ */
497
+ sections?: PromptSection[];
498
+ /**
499
+ * Separator between sections in the final composed prompt.
500
+ * @default "\n\n"
501
+ */
502
+ separator?: string;
503
+ /**
504
+ * Skill discovery and loading configuration.
505
+ *
506
+ * When provided, the prompt pipeline will:
507
+ * 1. Discover SKILL.md files from project, user, and global directories
508
+ * 2. Inject skill summaries at PRIORITY_SKILLS (70) in the system prompt
509
+ * 3. Make `skill` and `skill_resource` tools available to the agent
510
+ *
511
+ * Skills use progressive disclosure:
512
+ * - L1 (summary): always in system prompt — names + descriptions
513
+ * - L2 (content): loaded on demand via `skill` tool
514
+ * - L3 (resources): loaded on demand via `skill_resource` tool
515
+ *
516
+ * @example
517
+ * ```typescript
518
+ * const agent = createAgent({
519
+ * model: anthropic("claude-sonnet-4-20250514"),
520
+ * prompt: {
521
+ * skills: {
522
+ * externalDirs: [".agents", ".claude"],
523
+ * roots: ["./company-skills"],
524
+ * },
525
+ * },
526
+ * });
527
+ * ```
528
+ */
529
+ skills?: SkillConfig;
530
+ }
531
+
532
+ /**
533
+ * Middleware Types
534
+ *
535
+ * Defines the composable middleware interface for agent lifecycle hooks.
536
+ *
537
+ * Middleware is just a plain object with optional hook methods — no
538
+ * base classes, no discovery, no installation. Pass it in code:
539
+ *
540
+ * ```typescript
541
+ * const agent = createAgent({
542
+ * middleware: [myLoggerMiddleware, myApprovalMiddleware],
543
+ * });
544
+ * ```
545
+ *
546
+ * Hooks run in array order for "before" operations and reverse order
547
+ * for "after" operations (like middleware stacks everywhere).
548
+ */
549
+
550
+ /**
551
+ * Action returned by `beforeToolCall` — determines whether
552
+ * the tool call proceeds or is blocked.
553
+ */
554
+ interface ToolCallDecision {
555
+ /** Whether to allow or deny the tool call */
556
+ action: "allow" | "deny";
557
+ /** Reason for denial — returned to the model as the tool output */
558
+ reason?: string;
559
+ }
560
+ /**
561
+ * Mutable model invocation payload presented to middleware before an LLM call.
562
+ */
563
+ interface ModelCallInput {
564
+ model: LanguageModel;
565
+ system: string[];
566
+ messages: ModelMessage[];
567
+ temperature?: number;
568
+ topP?: number;
569
+ maxOutputTokens?: number;
570
+ maxSteps?: number;
571
+ reasoningLevel?: ReasoningLevel;
572
+ telemetry?: TelemetrySettings;
573
+ customStreamProvider?: StreamProvider;
574
+ toolExecutionMode?: "auto" | "plan";
575
+ }
576
+ /**
577
+ * Context shared across model middleware hooks for a single step.
578
+ */
579
+ interface ModelCallContext {
580
+ sessionID: string;
581
+ step: number;
582
+ cwd: string;
583
+ abort: AbortSignal;
584
+ model: LanguageModel;
585
+ toolNames: string[];
586
+ mcpToolNames: string[];
587
+ }
588
+ /**
589
+ * Return this from `model.input(...)` to stop an LLM call entirely.
590
+ */
591
+ interface BlockedModelCall {
592
+ block: true;
593
+ reason: string;
594
+ }
595
+ /**
596
+ * Final step result exposed to model middleware after the stream is processed.
597
+ */
598
+ interface ModelCallOutput {
599
+ text: string;
600
+ usage?: TokenUsage;
601
+ finishReason?: string;
602
+ }
603
+ /**
604
+ * Optional model-specific hooks grouped under `middleware.model`.
605
+ */
606
+ interface AgentModelHooks {
607
+ /**
608
+ * Review or rewrite the model request before the LLM is invoked.
609
+ *
610
+ * Return:
611
+ * - `undefined` to keep the current request
612
+ * - a `ModelCallInput` object to replace the request
613
+ * - `{ block: true, reason }` to cancel the model call
614
+ *
615
+ * Hooks run in middleware array order. The first block stops the call.
616
+ */
617
+ input?(input: ModelCallInput, ctx: ModelCallContext): Promise<ModelCallInput | BlockedModelCall | void>;
618
+ /**
619
+ * Review or rewrite raw stream chunks as they arrive from the model.
620
+ *
621
+ * Return:
622
+ * - `undefined` to keep the current chunk
623
+ * - a `StreamChunk` to replace it
624
+ * - `null` to drop it from downstream processing
625
+ *
626
+ * Hooks run in middleware array order.
627
+ */
628
+ chunk?(chunk: StreamChunk, ctx: ModelCallContext): Promise<StreamChunk | null | void>;
629
+ /**
630
+ * Review or rewrite the fully processed model result for the current step.
631
+ *
632
+ * Hooks run in reverse middleware order so outer middleware sees the final
633
+ * shaped result, matching `afterToolCall(...)`.
634
+ */
635
+ output?(output: ModelCallOutput, ctx: ModelCallContext): Promise<ModelCallOutput | void>;
636
+ }
637
+ /**
638
+ * Agent middleware — composable lifecycle hooks.
639
+ *
640
+ * All methods are optional. Implement only what you need.
641
+ *
642
+ * Ordering:
643
+ * - `beforeToolCall`: runs in array order, first "deny" wins
644
+ * - `afterToolCall`: runs in reverse order (innermost first)
645
+ * - `model.input`: runs in array order, first block wins
646
+ * - `model.chunk`: runs in array order on raw stream chunks
647
+ * - `model.output`: runs in reverse order
648
+ * - `promptSections`: all run, sections merged
649
+ * - `onEvent`: all run in parallel (non-blocking)
650
+ * - `onChatStart` / `onChatEnd`: run in array order, awaited sequentially
651
+ *
652
+ * @example
653
+ * ```typescript
654
+ * // A simple logging middleware
655
+ * const logger: AgentMiddleware = {
656
+ * name: "logger",
657
+ * beforeToolCall: async (tool, args) => {
658
+ * console.log(`→ ${tool}`, args);
659
+ * return { action: "allow" };
660
+ * },
661
+ * afterToolCall: async (tool, args, result) => {
662
+ * console.log(`← ${tool}`, result.title);
663
+ * return result;
664
+ * },
665
+ * };
666
+ * ```
667
+ */
668
+ interface AgentMiddleware {
669
+ /** Middleware name (for logging and debugging) */
670
+ name: string;
671
+ /**
672
+ * Optional hooks for shaping model requests, raw stream chunks,
673
+ * and final step outputs.
674
+ */
675
+ model?: AgentModelHooks;
676
+ /**
677
+ * Intercept a tool call before execution.
678
+ *
679
+ * Return `{ action: "allow" }` to proceed, or `{ action: "deny", reason }` to
680
+ * block the call. When denied, `reason` is returned to the model as the tool
681
+ * output so it can adjust its approach.
682
+ *
683
+ * Runs in array order. The first middleware that returns "deny" short-circuits
684
+ * the chain — remaining middleware and the tool itself are skipped.
685
+ *
686
+ * @param tool - Tool name (e.g. "bash", "write_file")
687
+ * @param args - Parsed tool arguments
688
+ * @param ctx - Tool execution context (cwd, sessionID, host, etc.)
689
+ */
690
+ beforeToolCall?(tool: string, args: unknown, ctx: ToolContext): Promise<ToolCallDecision>;
691
+ /**
692
+ * Transform or observe a tool result after execution.
693
+ *
694
+ * Receives the result and must return a result (can be the same object
695
+ * or a modified copy). Runs in reverse array order so the outermost
696
+ * middleware sees the final transformed result.
697
+ *
698
+ * @param tool - Tool name
699
+ * @param args - Original tool arguments
700
+ * @param result - Tool execution result
701
+ * @param ctx - Tool execution context
702
+ */
703
+ afterToolCall?(tool: string, args: unknown, result: Tool.ExecuteResult, ctx: ToolContext): Promise<Tool.ExecuteResult>;
704
+ /**
705
+ * Inject dynamic prompt sections at build time.
706
+ *
707
+ * Called during `PromptBuilder.build()` for each middleware. Return one
708
+ * or more sections to inject into the system prompt. Return `undefined`
709
+ * or an empty array to inject nothing.
710
+ *
711
+ * Sections follow the same priority system as static sections — use
712
+ * `priority` to control placement relative to base template (10),
713
+ * environment (20), instructions (30), custom (50), skills (70),
714
+ * and per-turn overrides (90).
715
+ *
716
+ * @param ctx - Build context with cwd, model, toolNames, sessionId
717
+ */
718
+ promptSections?(ctx: PromptBuildContext): PromptSection | PromptSection[] | undefined;
719
+ /**
720
+ * Observe agent events (read-only, non-blocking).
721
+ *
722
+ * Fires for every event emitted during `chat()`. Errors thrown by
723
+ * handlers are caught and logged — they never interrupt the stream.
724
+ *
725
+ * This is intentionally synchronous (void return) to prevent
726
+ * event observers from blocking the streaming pipeline.
727
+ *
728
+ * @param event - The agent event
729
+ */
730
+ onEvent?(event: AgentEvent): void;
731
+ /**
732
+ * Called when `chat()` starts, before the LLM stream is created.
733
+ *
734
+ * Use this for setup: initializing loggers, recording start time,
735
+ * resetting per-turn state, etc. Runs in array order, awaited
736
+ * sequentially.
737
+ *
738
+ * @param sessionId - Session identifier
739
+ * @param message - The user message being sent
740
+ */
741
+ onChatStart?(sessionId: string, message: string): Promise<void>;
742
+ /**
743
+ * Called when `chat()` completes (or errors), after all events
744
+ * have been yielded.
745
+ *
746
+ * Use this for teardown: flushing logs, recording metrics, etc.
747
+ * Runs in array order, awaited sequentially. Always called, even
748
+ * if the stream errored.
749
+ *
750
+ * @param sessionId - Session identifier
751
+ * @param result - Completion info (usage stats and optional error)
752
+ */
753
+ onChatEnd?(sessionId: string, result: {
754
+ usage?: TokenUsage;
755
+ error?: Error;
756
+ output?: string;
757
+ }): Promise<void>;
758
+ /**
759
+ * Get the active OTel context for a session.
760
+ *
761
+ * Used internally by the LLM layer to wrap `streamText()` so that
762
+ * AI SDK spans are nested under the middleware's parent span.
763
+ * Only implemented by the `otelMiddleware`.
764
+ */
765
+ getOtelContext?(sessionId: string): unknown;
766
+ }
767
+
768
+ /**
769
+ * Middleware Runner
770
+ *
771
+ * Executes middleware hooks in the correct order with proper
772
+ * error handling and short-circuit semantics.
773
+ *
774
+ * This is the internal engine — consumers never see it.
775
+ * They interact with middleware through AgentConfig.middleware.
776
+ */
777
+
778
+ /**
779
+ * Middleware runner — holds an ordered list of middleware and
780
+ * exposes methods to run each hook type with correct semantics.
781
+ *
782
+ * Immutable after construction. Fork creates a new runner
783
+ * (with inherited + additional middleware).
784
+ */
785
+ declare class MiddlewareRunner {
786
+ private readonly stack;
787
+ constructor(middleware?: AgentMiddleware[]);
788
+ /** Number of registered middleware */
789
+ get count(): number;
790
+ /** Whether any middleware is registered */
791
+ get hasMiddleware(): boolean;
792
+ /** Get the middleware list (for fork inheritance) */
793
+ getMiddleware(): readonly AgentMiddleware[];
794
+ runModelInput(input: ModelCallInput, ctx: ModelCallContext): Promise<ModelCallInput | BlockedModelCall>;
795
+ runModelChunk(chunk: StreamChunk, ctx: ModelCallContext): Promise<StreamChunk | undefined>;
796
+ runModelOutput(output: ModelCallOutput, ctx: ModelCallContext): Promise<ModelCallOutput>;
797
+ /**
798
+ * Run all `beforeToolCall` hooks in order.
799
+ *
800
+ * Returns `{ action: "allow" }` if all middleware allow (or have no hook).
801
+ * Returns `{ action: "deny", reason }` on first denial — remaining
802
+ * middleware are skipped.
803
+ */
804
+ runBeforeToolCall(tool: string, args: unknown, ctx: ToolContext): Promise<ToolCallDecision>;
805
+ /**
806
+ * Run all `afterToolCall` hooks in reverse order.
807
+ *
808
+ * Each hook receives the result from the previous hook (or the
809
+ * original tool result for the first hook). Errors are caught
810
+ * and logged — the original result passes through.
811
+ */
812
+ runAfterToolCall(tool: string, args: unknown, result: Tool.ExecuteResult, ctx: ToolContext): Promise<Tool.ExecuteResult>;
813
+ /**
814
+ * Collect prompt sections from all middleware.
815
+ *
816
+ * Returns a flat array of sections. Each middleware can return a single
817
+ * section, an array of sections, or undefined/empty.
818
+ */
819
+ collectPromptSections(ctx: PromptBuildContext): PromptSection[];
820
+ /**
821
+ * Broadcast an event to all middleware observers.
822
+ *
823
+ * Non-blocking — errors are caught and logged. This never
824
+ * slows down the streaming pipeline.
825
+ */
826
+ emitEvent(event: AgentEvent): void;
827
+ /**
828
+ * Get the OTel context for a session from the telemetry middleware.
829
+ * Returns undefined if no telemetry middleware is registered.
830
+ */
831
+ getOtelContext(sessionId: string): unknown | undefined;
832
+ /**
833
+ * Run all `onChatStart` hooks in order.
834
+ *
835
+ * Errors are caught and logged — a broken logger should not
836
+ * prevent the chat from starting.
837
+ */
838
+ runChatStart(sessionId: string, message: string): Promise<void>;
839
+ /**
840
+ * Run all `onChatEnd` hooks in order.
841
+ *
842
+ * Always called, even when the stream errored. Errors in handlers
843
+ * are caught and logged.
844
+ */
845
+ runChatEnd(sessionId: string, result: {
846
+ usage?: TokenUsage;
847
+ error?: Error;
848
+ output?: string;
849
+ }): Promise<void>;
850
+ }
851
+
852
+ export { type AgentEvent as A, type BlockedModelCall as B, type EnhancedTools as E, type InstructionFile as I, MiddlewareRunner as M, type PromptConfig as P, type StreamChunk as S, type ToolCallDecision as T, type PromptBuildContext as a, type PromptSection as b, type ModelFamily as c, type ProcessorResult as d, type StreamProvider as e, type ModelCallInput as f, type AgentTurnBoundaryKind as g, type AgentMiddleware as h, type AgentModelHooks as i, type AgentStatus as j, type ApprovalEvent as k, type EnvironmentInfo as l, type ModelCallContext as m, type ModelCallOutput as n, type StreamInput as o, type StreamProviderConfig as p, type StreamProviderFactory as q, type StreamProviderInput as r, type StreamProviderResult as s };