@cuylabs/agent-core 0.6.0 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. package/README.md +5 -1
  2. package/dist/{builder-BKkipazh.d.ts → builder-UpOWQMW3.d.ts} +2 -2
  3. package/dist/{chunk-3C4VKG4P.js → chunk-4BDA7DQY.js} +273 -807
  4. package/dist/chunk-7VKQ4WPB.js +73 -0
  5. package/dist/chunk-BFM2YHNM.js +222 -0
  6. package/dist/chunk-CAA7FHIH.js +280 -0
  7. package/dist/chunk-KUVSERLJ.js +50 -0
  8. package/dist/chunk-N6HWIEEA.js +423 -0
  9. package/dist/chunk-N7P4PN3O.js +84 -0
  10. package/dist/{chunk-QWFMX226.js → chunk-RFEKJKTO.js} +252 -13
  11. package/dist/chunk-RZITT45F.js +202 -0
  12. package/dist/{chunk-X635CM2F.js → chunk-SQU2AJHO.js} +1 -1
  13. package/dist/chunk-VNQBHPCT.js +398 -0
  14. package/dist/{chunk-QAQADS4X.js → chunk-WWYYNWEW.js} +2 -1
  15. package/dist/{chunk-O2ZCFQL6.js → chunk-YSLSEQ6B.js} +105 -220
  16. package/dist/context/index.js +1 -1
  17. package/dist/errors/index.d.ts +11 -0
  18. package/dist/errors/index.js +16 -0
  19. package/dist/events-CE72w8W4.d.ts +149 -0
  20. package/dist/host/index.d.ts +45 -0
  21. package/dist/host/index.js +8 -0
  22. package/dist/{index-DZQJD_hp.d.ts → index-CWSchSql.d.ts} +42 -51
  23. package/dist/index.d.ts +98 -190
  24. package/dist/index.js +476 -939
  25. package/dist/inference/index.d.ts +62 -0
  26. package/dist/inference/index.js +27 -0
  27. package/dist/llm-error-D93FNNLY.d.ts +32 -0
  28. package/dist/middleware/index.d.ts +246 -5
  29. package/dist/middleware/index.js +7 -3
  30. package/dist/models/index.d.ts +226 -3
  31. package/dist/models/index.js +41 -3
  32. package/dist/presets/index.d.ts +53 -0
  33. package/dist/presets/index.js +28 -0
  34. package/dist/prompt/index.d.ts +12 -7
  35. package/dist/reasoning/index.d.ts +53 -8
  36. package/dist/reasoning/index.js +2 -7
  37. package/dist/{registry-CuRWWtcT.d.ts → registry-DwYqsQkX.d.ts} +1 -1
  38. package/dist/{runner-G1wxEgac.d.ts → runner-e2YRcUoX.d.ts} +82 -148
  39. package/dist/runtime/index.d.ts +44 -7
  40. package/dist/runtime/index.js +16 -5
  41. package/dist/safety/index.d.ts +38 -0
  42. package/dist/safety/index.js +12 -0
  43. package/dist/scope/index.d.ts +10 -0
  44. package/dist/scope/index.js +14 -0
  45. package/dist/{session-manager-Uawm2Le7.d.ts → session-manager-B_CWGTsl.d.ts} +1 -1
  46. package/dist/signal/index.d.ts +28 -0
  47. package/dist/signal/index.js +6 -0
  48. package/dist/skill/index.d.ts +8 -5
  49. package/dist/storage/index.d.ts +2 -2
  50. package/dist/sub-agent/index.d.ts +17 -8
  51. package/dist/tool/index.d.ts +9 -4
  52. package/dist/tool/index.js +4 -3
  53. package/dist/tool-BHbyUAy3.d.ts +150 -0
  54. package/dist/{tool-DYp6-cC3.d.ts → tool-DLXAR9Ce.d.ts} +5 -99
  55. package/dist/tracking/index.d.ts +3 -1
  56. package/dist/{tool-pFAnJc5Y.d.ts → types-BfNpU8NS.d.ts} +1 -150
  57. package/dist/types-BnpEOYV-.d.ts +50 -0
  58. package/dist/types-CHiPh8U2.d.ts +100 -0
  59. package/dist/types-CQL-SvTn.d.ts +29 -0
  60. package/dist/types-CWm-7rvB.d.ts +55 -0
  61. package/dist/types-KKDrdU9Y.d.ts +325 -0
  62. package/dist/{resolver-DOfZ-xuk.d.ts → types-QA4WhEfz.d.ts} +1 -117
  63. package/dist/types-QKHHQLLq.d.ts +336 -0
  64. package/dist/types-YuWV4ag7.d.ts +72 -0
  65. package/package.json +74 -8
  66. package/dist/capabilities/index.d.ts +0 -97
  67. package/dist/capabilities/index.js +0 -46
  68. package/dist/chunk-6TDTQJ4P.js +0 -116
  69. package/dist/chunk-FG4MD5MU.js +0 -54
  70. package/dist/config-D2xeGEHK.d.ts +0 -52
  71. package/dist/identifiers-BLUxFqV_.d.ts +0 -12
  72. package/dist/index-ipP3_ztp.d.ts +0 -198
  73. package/dist/network-D76DS5ot.d.ts +0 -5
  74. package/dist/types-BWo810L_.d.ts +0 -648
@@ -0,0 +1,100 @@
1
+ /**
2
+ * ToolHost — execution environment abstraction for agent tools.
3
+ *
4
+ * A ToolHost defines *where* tools execute: local machine, Docker container,
5
+ * SSH remote, etc. Every host provides two surfaces:
6
+ *
7
+ * 1. **File system** — read, write, stat, list, check existence
8
+ * 2. **Process execution** — spawn commands, kill processes
9
+ *
10
+ * Tools call `ctx.host.readFile()` / `ctx.host.exec()` instead of importing
11
+ * `fs` and `child_process` directly. The host implementation handles the rest.
12
+ *
13
+ * @example
14
+ * ```typescript
15
+ * // Default: runs everything locally
16
+ * const agent = createAgent({ host: localHost() });
17
+ *
18
+ * // Future: run tools inside a Docker container
19
+ * const agent = createAgent({ host: dockerHost("my-container") });
20
+ * ```
21
+ */
22
+ /** Options for spawning a process. */
23
+ interface ExecOptions {
24
+ /** Working directory for the command. */
25
+ cwd?: string;
26
+ /** Environment variables (merged with host defaults). */
27
+ env?: Record<string, string | undefined>;
28
+ /** Abort signal for cancellation. */
29
+ signal?: AbortSignal;
30
+ /** Timeout in milliseconds. 0 = no timeout. */
31
+ timeout?: number;
32
+ /** Callback for stdout data as it arrives. */
33
+ onStdout?: (data: Buffer) => void;
34
+ /** Callback for stderr data as it arrives. */
35
+ onStderr?: (data: Buffer) => void;
36
+ }
37
+ /** Result of a process execution. */
38
+ interface ExecResult {
39
+ /** Combined stdout output. */
40
+ stdout: string;
41
+ /** Combined stderr output. */
42
+ stderr: string;
43
+ /** Exit code (null if killed by signal). */
44
+ exitCode: number | null;
45
+ /** Whether the process was killed due to timeout. */
46
+ timedOut: boolean;
47
+ }
48
+ /** Minimal stat result — only the fields tools actually need. */
49
+ interface FileStat {
50
+ /** File size in bytes. */
51
+ size: number;
52
+ /** Last modification time. */
53
+ mtime: Date;
54
+ /** Whether this entry is a directory. */
55
+ isDirectory: boolean;
56
+ /** Whether this entry is a regular file. */
57
+ isFile: boolean;
58
+ }
59
+ /** A directory entry returned by `readdir`. */
60
+ interface DirEntry {
61
+ /** Entry name (not full path). */
62
+ name: string;
63
+ /** Whether this entry is a directory. */
64
+ isDirectory: boolean;
65
+ /** Whether this entry is a regular file. */
66
+ isFile: boolean;
67
+ }
68
+ /**
69
+ * The execution environment for agent tools.
70
+ *
71
+ * Abstracts filesystem and process operations so tools work identically
72
+ * whether running locally, in Docker, over SSH, or in any other environment.
73
+ */
74
+ interface ToolHost {
75
+ /** Human-readable host identifier (e.g. "local", "docker:my-container"). */
76
+ readonly name: string;
77
+ /** Read a file as a UTF-8 string. Throws if the file doesn't exist. */
78
+ readFile(path: string): Promise<string>;
79
+ /** Read raw bytes from a file. Throws if the file doesn't exist. */
80
+ readBytes(path: string, offset: number, length: number): Promise<Buffer>;
81
+ /** Write a UTF-8 string to a file. Creates parent directories as needed. */
82
+ writeFile(path: string, content: string): Promise<void>;
83
+ /** Check if a path exists. Never throws. */
84
+ exists(path: string): Promise<boolean>;
85
+ /** Get file metadata. Throws if the path doesn't exist. */
86
+ stat(path: string): Promise<FileStat>;
87
+ /** List directory entries. Throws if the path is not a directory. */
88
+ readdir(path: string): Promise<DirEntry[]>;
89
+ /** Create directories recursively. No-op if they already exist. */
90
+ mkdir(path: string): Promise<void>;
91
+ /**
92
+ * Execute a shell command.
93
+ *
94
+ * The host decides which shell to use (e.g. local host uses the user's
95
+ * `$SHELL`, Docker host uses `docker exec`, SSH host uses remote shell).
96
+ */
97
+ exec(command: string, options?: ExecOptions): Promise<ExecResult>;
98
+ }
99
+
100
+ export type { DirEntry as D, ExecOptions as E, FileStat as F, ToolHost as T, ExecResult as a };
@@ -0,0 +1,29 @@
1
+ type ScopeKind = "task" | "turn" | "model" | "tool" | "commit" | "sub-agent" | "activity";
2
+ type ScopeAttributeValue = string | number | boolean | null;
3
+ type ScopeAttributes = Record<string, ScopeAttributeValue | undefined>;
4
+ interface Scope {
5
+ id: string;
6
+ rootId: string;
7
+ kind: ScopeKind;
8
+ name: string;
9
+ parentId?: string;
10
+ depth: number;
11
+ startedAt: string;
12
+ sessionId?: string;
13
+ taskId?: string;
14
+ step?: number;
15
+ attributes: ScopeAttributes;
16
+ }
17
+ type ScopeSnapshot = Scope;
18
+ interface ScopeOptions {
19
+ id?: string;
20
+ kind: ScopeKind;
21
+ name: string;
22
+ parent?: ScopeSnapshot | null;
23
+ sessionId?: string;
24
+ taskId?: string;
25
+ step?: number;
26
+ attributes?: ScopeAttributes;
27
+ }
28
+
29
+ export type { ScopeSnapshot as S, Scope as a, ScopeAttributeValue as b, ScopeAttributes as c, ScopeKind as d, ScopeOptions as e };
@@ -0,0 +1,55 @@
1
+ /**
2
+ * Risk level for operations.
3
+ */
4
+ type RiskLevel = "safe" | "moderate" | "dangerous";
5
+ /**
6
+ * User response to an approval request.
7
+ */
8
+ type ApprovalAction = "allow" | "deny" | "remember";
9
+ /**
10
+ * Approval request sent to the UI/handler.
11
+ */
12
+ interface ApprovalRequest {
13
+ /** Unique request ID */
14
+ id: string;
15
+ /** Session ID */
16
+ sessionId: string;
17
+ /** Tool name */
18
+ tool: string;
19
+ /** Tool arguments */
20
+ args: unknown;
21
+ /** Human-readable description */
22
+ description: string;
23
+ /** Risk level */
24
+ risk: RiskLevel;
25
+ /** Patterns that would be remembered if "remember" is chosen */
26
+ patterns: string[];
27
+ /** Timestamp */
28
+ timestamp: number;
29
+ }
30
+ /**
31
+ * Rule for auto-approving/denying operations.
32
+ */
33
+ interface ApprovalRule {
34
+ /** Pattern to match (glob-style) */
35
+ pattern: string;
36
+ /** Tool to match (`*` for all) */
37
+ tool: string;
38
+ /** Action to take */
39
+ action: "allow" | "deny";
40
+ }
41
+ /**
42
+ * Configuration for the approval handler.
43
+ */
44
+ interface ApprovalConfig {
45
+ /** Default action when no rule matches (default: "ask") */
46
+ defaultAction?: "allow" | "deny" | "ask";
47
+ /** Pre-configured rules */
48
+ rules?: ApprovalRule[];
49
+ /** Handler for approval requests */
50
+ onRequest?: (request: ApprovalRequest) => Promise<ApprovalAction>;
51
+ /** Timeout for approval requests in ms (default: 5 minutes) */
52
+ timeout?: number;
53
+ }
54
+
55
+ export type { ApprovalAction as A, RiskLevel as R, ApprovalConfig as a, ApprovalRequest as b, ApprovalRule as c };
@@ -0,0 +1,325 @@
1
+ import { StreamTextResult, ToolSet, Output, LanguageModel, ModelMessage, TelemetrySettings } from 'ai';
2
+ import { T as Tool } from './tool-BHbyUAy3.js';
3
+ import { T as ToolHost } from './types-CHiPh8U2.js';
4
+ import { S as StreamChunk, d as StreamProvider, M as MiddlewareRunner, e as ModelCallInput } from './runner-e2YRcUoX.js';
5
+ import { R as ReasoningLevel } from './types-CQaXbRsS.js';
6
+ import { d as TurnTrackerContext } from './tool-DLXAR9Ce.js';
7
+ import { L as LLMError } from './llm-error-D93FNNLY.js';
8
+
9
+ /**
10
+ * Intervention Controller for @cuylabs/agent-core
11
+ *
12
+ * Manages mid-turn message injection into running agent turns.
13
+ * Uses Vercel AI SDK v6's `prepareStep` hook for zero-overhead
14
+ * integration at step boundaries (between LLM calls in a multi-step turn).
15
+ *
16
+ * Instead of polling a queue after each tool execution, this leverages
17
+ * the SDK's native step lifecycle. The
18
+ * `prepareStep` callback fires before every LLM call, giving us a
19
+ * natural injection point with no custom loop machinery.
20
+ *
21
+ * Two injection modes:
22
+ * - **Immediate**: `intervene(msg)` — injected at the next step boundary
23
+ * - **Deferred**: `queueNext(msg)` — held until the turn completes
24
+ */
25
+ /** A queued intervention message waiting to be applied */
26
+ interface PendingIntervention {
27
+ /** Unique identifier for tracking */
28
+ readonly id: string;
29
+ /** The user message to inject */
30
+ readonly message: string;
31
+ /** When the intervention was created */
32
+ readonly createdAt: Date;
33
+ }
34
+ /**
35
+ * Callback fired when an intervention is applied to a step.
36
+ *
37
+ * The agent uses this to push `intervention-applied` events into
38
+ * the streaming event queue so they reach the consumer in the
39
+ * correct order relative to other events.
40
+ */
41
+ type OnInterventionApplied = (intervention: PendingIntervention) => void;
42
+ /**
43
+ * Manages mid-turn message injection for agents.
44
+ *
45
+ * This is the core primitive that connects user redirection intent
46
+ * to the AI SDK's multi-step loop. The flow:
47
+ *
48
+ * 1. Consumer calls `agent.intervene("focus on auth.ts")` from any async context
49
+ * 2. The message is queued as a `PendingIntervention`
50
+ * 3. At the next step boundary, `prepareStep` drains the queue
51
+ * 4. The message is appended to the LLM's input messages
52
+ * 5. The LLM responds to the redirect naturally
53
+ *
54
+ * Thread safety: All operations are synchronous and run on the
55
+ * same event loop. No locks needed — Node.js guarantees ordering.
56
+ *
57
+ * @example
58
+ * ```typescript
59
+ * const ctrl = new InterventionController();
60
+ *
61
+ * // Queue an intervention (from a UI handler, WebSocket, etc.)
62
+ * const id = ctrl.intervene("stop refactoring, fix the failing test first");
63
+ *
64
+ * // Later, in prepareStep:
65
+ * const pending = ctrl.drainImmediate();
66
+ * // → [{ id, message: "stop refactoring...", createdAt }]
67
+ * ```
68
+ */
69
+ declare class InterventionController {
70
+ /** Immediate interventions — applied at the next step boundary */
71
+ private immediate;
72
+ /** Deferred messages — held until the turn completes */
73
+ private deferred;
74
+ /**
75
+ * Callback fired when an intervention is wired into a step.
76
+ * Set by the Agent before starting a chat turn, cleared after.
77
+ */
78
+ onApplied?: OnInterventionApplied;
79
+ /**
80
+ * Inject a message at the next LLM step boundary.
81
+ *
82
+ * The message is appended as a user message to the conversation
83
+ * before the next LLM call in the current multi-step turn. The
84
+ * LLM will see it and can adjust its behavior accordingly.
85
+ *
86
+ * Safe to call from any async context while `chat()` is running.
87
+ * If called when no turn is active, the message will be picked up
88
+ * by the first step of the next `chat()` call.
89
+ *
90
+ * @param message - The user message to inject
91
+ * @returns Intervention ID for tracking
92
+ */
93
+ intervene(message: string): string;
94
+ /**
95
+ * Drain and return all pending immediate interventions.
96
+ * The internal queue is cleared atomically.
97
+ *
98
+ * @internal Called by the LLM stream's `prepareStep` hook
99
+ */
100
+ drainImmediate(): PendingIntervention[];
101
+ /** Whether there are pending immediate interventions */
102
+ get hasPending(): boolean;
103
+ /** Number of pending immediate interventions */
104
+ get pendingCount(): number;
105
+ /**
106
+ * Queue a message for after the current turn completes.
107
+ *
108
+ * Unlike `intervene()`, this does **not** inject mid-turn. The
109
+ * message is held and available via `drainDeferred()` after
110
+ * `chat()` finishes. The consumer decides whether to send it
111
+ * as a new turn.
112
+ *
113
+ * @param message - The message to queue
114
+ * @returns Intervention ID for tracking
115
+ */
116
+ queueNext(message: string): string;
117
+ /**
118
+ * Drain and return all deferred messages.
119
+ * The internal queue is cleared atomically.
120
+ */
121
+ drainDeferred(): PendingIntervention[];
122
+ /** Whether there are deferred messages */
123
+ get hasDeferred(): boolean;
124
+ /** Number of deferred messages */
125
+ get deferredCount(): number;
126
+ /** Clear all queues (immediate + deferred) */
127
+ clear(): void;
128
+ /** Reset the controller for a new turn (clears onApplied, keeps queues) */
129
+ resetCallbacks(): void;
130
+ }
131
+
132
+ /**
133
+ * Retry logic with exponential backoff for @cuylabs/agent-core
134
+ *
135
+ * Provides configurable retry behavior with header-aware delays,
136
+ * exponential backoff, and abort signal support.
137
+ */
138
+
139
+ /**
140
+ * Retry configuration
141
+ */
142
+ interface RetryConfig {
143
+ /** Maximum number of retry attempts (default: 3) */
144
+ maxAttempts?: number;
145
+ /** Initial delay in ms (default: 2000) */
146
+ initialDelayMs?: number;
147
+ /** Backoff multiplier (default: 2) */
148
+ backoffFactor?: number;
149
+ /** Maximum delay without headers in ms (default: 30000) */
150
+ maxDelayMs?: number;
151
+ /** Whether to jitter delays (default: true) */
152
+ jitter?: boolean;
153
+ /** Callback when retrying */
154
+ onRetry?: (attempt: number, delayMs: number, error: LLMError) => void;
155
+ }
156
+ /**
157
+ * Default retry configuration
158
+ */
159
+ declare const DEFAULT_RETRY_CONFIG: Required<Omit<RetryConfig, "onRetry">>;
160
+ /**
161
+ * Tracks retry state across attempts
162
+ */
163
+ interface RetryState {
164
+ /** Current attempt number (1-indexed) */
165
+ attempt: number;
166
+ /** Total errors encountered */
167
+ errors: LLMError[];
168
+ /** Whether more retries are available */
169
+ canRetry: boolean;
170
+ /** Delay until next retry (if applicable) */
171
+ nextDelayMs?: number;
172
+ }
173
+ /**
174
+ * Creates initial retry state
175
+ */
176
+ declare function createRetryState(): RetryState;
177
+ /**
178
+ * Calculate delay for a retry attempt
179
+ */
180
+ declare function calculateDelay(attempt: number, error: LLMError | undefined, config: Required<Omit<RetryConfig, "onRetry">>): number;
181
+ /**
182
+ * Sleep for a duration, respecting abort signal
183
+ */
184
+ declare function sleep(ms: number, signal?: AbortSignal): Promise<void>;
185
+ /**
186
+ * Execute a function with retry logic
187
+ */
188
+ declare function withRetry<T>(fn: (attempt: number) => Promise<T>, config?: RetryConfig, signal?: AbortSignal): Promise<T>;
189
+ /**
190
+ * Options for retry handler
191
+ */
192
+ interface RetryHandlerOptions extends RetryConfig {
193
+ /** Abort signal */
194
+ signal?: AbortSignal;
195
+ }
196
+ /**
197
+ * Creates a retry handler that wraps stream creation
198
+ */
199
+ declare function createRetryHandler(options?: RetryHandlerOptions): <T>(createStream: (attempt: number) => Promise<T>) => Promise<T>;
200
+ /**
201
+ * Checks if more retries should be attempted
202
+ */
203
+ declare function shouldRetry(error: unknown, attempt: number, maxAttempts?: number): boolean;
204
+
205
+ /** Stream result type - uses default Output for text streaming. */
206
+ type InferenceStreamResult = StreamTextResult<ToolSet, Output.Output<string, string, never>>;
207
+ /**
208
+ * @deprecated Use `InferenceStreamResult`.
209
+ */
210
+ type LLMStreamResult = InferenceStreamResult;
211
+
212
+ /**
213
+ * Custom stream result type - compatible shape from external providers.
214
+ */
215
+ type InferenceCustomResult = {
216
+ fullStream: AsyncIterable<StreamChunk>;
217
+ text: Promise<string>;
218
+ usage: Promise<{
219
+ inputTokens: number;
220
+ outputTokens: number;
221
+ totalTokens: number;
222
+ }>;
223
+ finishReason: Promise<string>;
224
+ };
225
+ /** Union type for stream results - either AI SDK or custom. */
226
+ type AnyInferenceResult = InferenceStreamResult | InferenceCustomResult;
227
+ /**
228
+ * @deprecated Use `InferenceCustomResult`.
229
+ */
230
+ type CustomStreamResult = InferenceCustomResult;
231
+ /**
232
+ * @deprecated Use `AnyInferenceResult`.
233
+ */
234
+ type AnyStreamResult = AnyInferenceResult;
235
+ /** Default max output tokens. */
236
+ declare const DEFAULT_MAX_OUTPUT_TOKENS = 32000;
237
+ /**
238
+ * @deprecated Use `DEFAULT_MAX_OUTPUT_TOKENS`.
239
+ */
240
+ declare const OUTPUT_TOKEN_MAX = 32000;
241
+ /**
242
+ * @deprecated Use StreamProvider from types instead.
243
+ */
244
+ type CustomStreamProvider = StreamProvider;
245
+ /** Control whether AI SDK tool definitions auto-execute or only expose calls. */
246
+ type ToolExecutionMode = "auto" | "plan";
247
+ /**
248
+ * Input for model inference stream creation.
249
+ */
250
+ interface InferenceStreamInput {
251
+ /** Session ID */
252
+ sessionID: string;
253
+ /** Step number within the current turn */
254
+ step?: number;
255
+ /** Model to use */
256
+ model: LanguageModel;
257
+ /** System prompt parts */
258
+ system: string[];
259
+ /** Messages to send */
260
+ messages: ModelMessage[];
261
+ /** Abort signal */
262
+ abort: AbortSignal;
263
+ /** Tools to use */
264
+ tools: Record<string, Tool.Info>;
265
+ /** Whether tools should auto-execute or only be surfaced as tool calls. */
266
+ toolExecutionMode?: ToolExecutionMode;
267
+ /** Working directory */
268
+ cwd: string;
269
+ /** Execution environment for tools */
270
+ host?: ToolHost;
271
+ /** Temperature */
272
+ temperature?: number;
273
+ /** Top-p */
274
+ topP?: number;
275
+ /** Max output tokens */
276
+ maxOutputTokens?: number;
277
+ /** Max steps (tool iterations) */
278
+ maxSteps?: number;
279
+ /** Reasoning level for extended-thinking models */
280
+ reasoningLevel?: ReasoningLevel;
281
+ /** Retry configuration */
282
+ retry?: RetryConfig;
283
+ /** Callback for step completion */
284
+ onStepFinish?: (step: InferenceStepInfo) => void | Promise<void>;
285
+ /** Custom stream provider */
286
+ customStreamProvider?: StreamProvider;
287
+ /** Turn tracker for automatic file baseline capture */
288
+ turnTracker?: TurnTrackerContext;
289
+ /** Pre-built MCP tools */
290
+ mcpTools?: ToolSet;
291
+ /** Intervention controller for mid-turn message injection */
292
+ intervention?: InterventionController;
293
+ /** Middleware runner for lifecycle hooks */
294
+ middleware?: MiddlewareRunner;
295
+ /** AI SDK telemetry settings */
296
+ telemetry?: TelemetrySettings;
297
+ /** Internal snapshot of the resolved model request after middleware input hooks. */
298
+ activeModelCall?: ModelCallInput;
299
+ }
300
+ /**
301
+ * Step information surfaced to callers.
302
+ */
303
+ interface InferenceStepInfo {
304
+ toolResults?: Array<{
305
+ toolName: string;
306
+ toolCallId: string;
307
+ output: unknown;
308
+ }>;
309
+ usage?: {
310
+ inputTokens?: number;
311
+ outputTokens?: number;
312
+ totalTokens?: number;
313
+ };
314
+ finishReason?: string;
315
+ }
316
+ /**
317
+ * @deprecated Use `InferenceStreamInput`.
318
+ */
319
+ type LLMStreamInput = InferenceStreamInput;
320
+ /**
321
+ * @deprecated Use `InferenceStepInfo`.
322
+ */
323
+ type StepInfo = InferenceStepInfo;
324
+
325
+ export { type AnyInferenceResult as A, type CustomStreamProvider as C, DEFAULT_MAX_OUTPUT_TOKENS as D, type InferenceStreamInput as I, type LLMStreamInput as L, OUTPUT_TOKEN_MAX as O, type PendingIntervention as P, type RetryConfig as R, type StepInfo as S, type ToolExecutionMode as T, type AnyStreamResult as a, type CustomStreamResult as b, type InferenceCustomResult as c, type InferenceStepInfo as d, type InferenceStreamResult as e, type LLMStreamResult as f, InterventionController as g, DEFAULT_RETRY_CONFIG as h, type OnInterventionApplied as i, type RetryHandlerOptions as j, type RetryState as k, calculateDelay as l, createRetryHandler as m, createRetryState as n, sleep as o, shouldRetry as s, withRetry as w };
@@ -1,5 +1,3 @@
1
- import { LanguageModel } from 'ai';
2
-
3
1
  /**
4
2
  * Model Capability Types for @cuylabs/agent-core
5
3
  *
@@ -137,118 +135,4 @@ interface ResolverOptions {
137
135
  */
138
136
  declare const DEFAULT_RESOLVER_OPTIONS: Required<ResolverOptions>;
139
137
 
140
- interface NetworkStatus {
141
- online: boolean;
142
- lastSuccess?: number;
143
- lastError?: string;
144
- failureCount: number;
145
- }
146
-
147
- /**
148
- * Model Capability Resolver for @cuylabs/agent-core
149
- *
150
- * Main orchestrator that combines multiple capability sources:
151
- * 1. User overrides (highest priority)
152
- * 2. Local cache (fast, persisted)
153
- * 3. Pattern matching (always available)
154
- * 4. Remote API (optional, network-dependent)
155
- *
156
- * Designed for the Vercel AI SDK v6 ecosystem.
157
- */
158
-
159
- /**
160
- * Extract model ID from LanguageModel
161
- */
162
- declare function extractModelId(model: LanguageModel): string;
163
- /**
164
- * Extract provider from LanguageModel
165
- */
166
- declare function extractProvider(model: LanguageModel): string | undefined;
167
- /**
168
- * Resolution result with source information
169
- */
170
- interface ResolutionResult {
171
- /** The resolved model entry */
172
- entry: ModelEntry;
173
- /** Which source provided the primary result */
174
- source: SourcePriority;
175
- /** Whether this is a confident match */
176
- confident: boolean;
177
- /** Resolution timing in ms */
178
- resolveTimeMs: number;
179
- }
180
- /**
181
- * Model Capability Resolver
182
- *
183
- * Provides a unified API for querying model capabilities with
184
- * automatic fallback through multiple sources.
185
- */
186
- declare class ModelCapabilityResolver {
187
- private options;
188
- private cache;
189
- private sources;
190
- private initialized;
191
- private initPromise;
192
- constructor(options?: Partial<ResolverOptions>);
193
- /**
194
- * Initialize the resolver (load cache, optionally fetch remote)
195
- */
196
- initialize(): Promise<void>;
197
- /**
198
- * Resolve capabilities for a model
199
- */
200
- resolve(model: LanguageModel): Promise<ResolutionResult>;
201
- /**
202
- * Quick check if a model supports reasoning
203
- * Uses cache/patterns only for speed
204
- */
205
- supportsReasoning(model: LanguageModel): Promise<boolean>;
206
- /**
207
- * Get capabilities for a model
208
- */
209
- getCapabilities(model: LanguageModel): Promise<ModelCapabilities>;
210
- /**
211
- * Get provider compatibility settings
212
- */
213
- getCompatibility(model: LanguageModel): Promise<ProviderCompatibility | undefined>;
214
- /**
215
- * Force refresh from remote API
216
- */
217
- refreshRemote(): Promise<void>;
218
- /**
219
- * Get current network status
220
- */
221
- getNetworkStatus(): NetworkStatus;
222
- /**
223
- * Get resolver statistics
224
- */
225
- getStats(): {
226
- cacheSize: number;
227
- cacheLoaded: boolean;
228
- remoteFetchEnabled: boolean;
229
- networkOnline: boolean;
230
- };
231
- /**
232
- * Clear all cached data
233
- */
234
- clearCache(): Promise<void>;
235
- /**
236
- * List all available models
237
- * Fetches from remote if cache is empty and remote is enabled
238
- */
239
- listModels(): Promise<ModelEntry[]>;
240
- /**
241
- * List all available models grouped by provider
242
- */
243
- listModelsByProvider(): Promise<Record<string, ModelEntry[]>>;
244
- }
245
- /**
246
- * Get the default resolver instance
247
- */
248
- declare function getDefaultResolver(): ModelCapabilityResolver;
249
- /**
250
- * Configure the default resolver with custom options
251
- */
252
- declare function configureResolver(options: Partial<ResolverOptions>): void;
253
-
254
- export { type CapabilitySource as C, DEFAULT_RESOLVER_OPTIONS as D, type InputModality as I, type ModelEntry as M, type NetworkStatus as N, type OutputModality as O, type ProviderCompatibility as P, type ResolverOptions as R, SourcePriority as S, extractProvider as a, type SourceResult as b, type ModelCapabilities as c, ModelCapabilityResolver as d, extractModelId as e, type ResolutionResult as f, configureResolver as g, getDefaultResolver as h };
138
+ export { type CapabilitySource as C, DEFAULT_RESOLVER_OPTIONS as D, type InputModality as I, type ModelCapabilities as M, type OutputModality as O, type ProviderCompatibility as P, type ResolverOptions as R, SourcePriority as S, type ModelEntry as a, type SourceResult as b };