mirascope 2.0.0-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/README.md +397 -0
  2. package/dist/bun.cjs +447 -0
  3. package/dist/bun.cjs.map +1 -0
  4. package/dist/bun.d.cts +53 -0
  5. package/dist/bun.d.ts +53 -0
  6. package/dist/bun.js +94 -0
  7. package/dist/bun.js.map +1 -0
  8. package/dist/chunk-2R5IW35Y.js +116 -0
  9. package/dist/chunk-2R5IW35Y.js.map +1 -0
  10. package/dist/chunk-A6ZCB7BU.js +6826 -0
  11. package/dist/chunk-A6ZCB7BU.js.map +1 -0
  12. package/dist/chunk-NSBPE2FW.js +15 -0
  13. package/dist/chunk-NSBPE2FW.js.map +1 -0
  14. package/dist/chunk-RMNCGJYW.js +49 -0
  15. package/dist/chunk-RMNCGJYW.js.map +1 -0
  16. package/dist/chunk-U4MFJ4DP.js +358 -0
  17. package/dist/chunk-U4MFJ4DP.js.map +1 -0
  18. package/dist/index.cjs +7705 -0
  19. package/dist/index.cjs.map +1 -0
  20. package/dist/index.d.cts +4859 -0
  21. package/dist/index.d.ts +4859 -0
  22. package/dist/index.js +324 -0
  23. package/dist/index.js.map +1 -0
  24. package/dist/model-T6IQ7UUA.js +4 -0
  25. package/dist/model-T6IQ7UUA.js.map +1 -0
  26. package/dist/tool-schema-Dh-RLHhC.d.cts +45 -0
  27. package/dist/tool-schema-Dh-RLHhC.d.ts +45 -0
  28. package/dist/transform/index.cjs +525 -0
  29. package/dist/transform/index.cjs.map +1 -0
  30. package/dist/transform/index.d.cts +89 -0
  31. package/dist/transform/index.d.ts +89 -0
  32. package/dist/transform/index.js +6 -0
  33. package/dist/transform/index.js.map +1 -0
  34. package/dist/transform/plugins/esbuild.cjs +472 -0
  35. package/dist/transform/plugins/esbuild.cjs.map +1 -0
  36. package/dist/transform/plugins/esbuild.d.cts +46 -0
  37. package/dist/transform/plugins/esbuild.d.ts +46 -0
  38. package/dist/transform/plugins/esbuild.js +5 -0
  39. package/dist/transform/plugins/esbuild.js.map +1 -0
  40. package/dist/transform/plugins/vite.cjs +405 -0
  41. package/dist/transform/plugins/vite.cjs.map +1 -0
  42. package/dist/transform/plugins/vite.d.cts +50 -0
  43. package/dist/transform/plugins/vite.d.ts +50 -0
  44. package/dist/transform/plugins/vite.js +5 -0
  45. package/dist/transform/plugins/vite.js.map +1 -0
  46. package/package.json +127 -0
package/dist/index.d.cts
@@ -0,0 +1,4859 @@
1
+ import { T as ToolSchema, a as ToolParameterSchema, J as JsonSchemaProperty } from './tool-schema-Dh-RLHhC.cjs';
2
+
3
+ /**
4
+ * A type that represents JSON-serializable values.
5
+ *
6
+ * This includes primitives (null, string, number, boolean),
7
+ * arrays of Jsonable values, and objects with string keys and Jsonable values.
8
+ */
9
+ type Jsonable = null | string | number | boolean | readonly Jsonable[] | {
10
+ readonly [key: string]: Jsonable;
11
+ };
12
+
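
For illustration, a value that satisfies `Jsonable` (the names below are hypothetical):

```typescript
// Hypothetical tool result: primitives, arrays, and string-keyed objects
// compose freely, while functions, Dates, class instances, etc. do not fit.
const result: Jsonable = {
  city: 'Tokyo',
  temperatureC: 21.5,
  conditions: ['cloudy', 'light rain'],
  alerts: null,
};
```
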
13
+ /**
14
+ * Type representing no variables for prompts and calls.
15
+ * `Record<never, never>` creates an object type with no keys,
16
+ * ensuring `keyof NoVars` is `never`.
17
+ */
18
+ type NoVars = Record<never, never>;
19
+
20
+ /**
21
+ * Anthropic model information.
22
+ *
23
+ * This file is auto-generated by typescript/scripts/codegen/anthropic.ts
24
+ * Do not edit manually - run `bun run codegen` to update.
25
+ */
26
+ /**
27
+ * Array of all known Anthropic model IDs.
28
+ * This is the source of truth - the type and Set are derived from it.
29
+ */
30
+ declare const ANTHROPIC_KNOWN_MODELS_ARRAY: readonly ["anthropic/claude-3-5-haiku", "anthropic/claude-3-5-haiku-20241022", "anthropic/claude-3-5-haiku-latest", "anthropic/claude-3-7-sonnet", "anthropic/claude-3-7-sonnet-20250219", "anthropic/claude-3-7-sonnet-latest", "anthropic/claude-3-haiku", "anthropic/claude-3-haiku-20240307", "anthropic/claude-3-haiku-latest", "anthropic/claude-3-opus", "anthropic/claude-3-opus-20240229", "anthropic/claude-3-opus-latest", "anthropic/claude-haiku-4-5", "anthropic/claude-haiku-4-5-0", "anthropic/claude-haiku-4-5-0-20251001", "anthropic/claude-haiku-4-5-0-latest", "anthropic/claude-haiku-4-5-20251001", "anthropic/claude-haiku-4-5-latest", "anthropic/claude-opus-4", "anthropic/claude-opus-4-0", "anthropic/claude-opus-4-0-20250514", "anthropic/claude-opus-4-0-latest", "anthropic/claude-opus-4-1", "anthropic/claude-opus-4-1-0", "anthropic/claude-opus-4-1-0-20250805", "anthropic/claude-opus-4-1-0-latest", "anthropic/claude-opus-4-1-20250805", "anthropic/claude-opus-4-1-latest", "anthropic/claude-opus-4-20250514", "anthropic/claude-opus-4-5", "anthropic/claude-opus-4-5-0", "anthropic/claude-opus-4-5-0-20251101", "anthropic/claude-opus-4-5-0-latest", "anthropic/claude-opus-4-5-20251101", "anthropic/claude-opus-4-5-latest", "anthropic/claude-opus-4-latest", "anthropic/claude-sonnet-4", "anthropic/claude-sonnet-4-0", "anthropic/claude-sonnet-4-0-20250514", "anthropic/claude-sonnet-4-0-latest", "anthropic/claude-sonnet-4-20250514", "anthropic/claude-sonnet-4-5", "anthropic/claude-sonnet-4-5-0", "anthropic/claude-sonnet-4-5-0-20250929", "anthropic/claude-sonnet-4-5-0-latest", "anthropic/claude-sonnet-4-5-20250929", "anthropic/claude-sonnet-4-5-latest", "anthropic/claude-sonnet-4-latest"];
31
+ /**
32
+ * Valid Anthropic model IDs.
33
+ */
34
+ type AnthropicKnownModels = (typeof ANTHROPIC_KNOWN_MODELS_ARRAY)[number];
35
+
36
+ /**
37
+ * Anthropic registered LLM models.
38
+ */
39
+
40
+ /**
41
+ * The Anthropic model IDs registered with Mirascope.
42
+ */
43
+ type AnthropicModelId = AnthropicKnownModels | (string & {});
44
+
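
A minimal sketch of how the `AnthropicKnownModels | (string & {})` union behaves: known IDs keep editor autocompletion, while arbitrary strings remain assignable (the second ID below is hypothetical):

```typescript
const known: AnthropicModelId = 'anthropic/claude-sonnet-4-5';   // autocompleted known ID
const custom: AnthropicModelId = 'anthropic/some-future-model';  // hypothetical, still accepted
```
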
45
+ /**
46
+ * Context for LLM calls with dependency injection.
47
+ *
48
+ * Context allows you to pass dependencies (like database connections, user info,
49
+ * configuration, etc.) through to your prompts and tools in a type-safe way.
50
+ */
51
+ /**
52
+ * Symbol marker used to identify Context objects at runtime.
53
+ * This enables reliable detection of Context instances in unified call/prompt functions.
54
+ */
55
+ declare const CONTEXT_MARKER: unique symbol;
56
+ /**
57
+ * Context for LLM calls with dependency injection.
58
+ *
59
+ * @template DepsT - The type of dependencies contained in the context.
60
+ *
61
+ * @example
62
+ * ```typescript
63
+ * interface MyDeps {
64
+ * userId: string;
65
+ * db: Database;
66
+ * }
67
+ *
68
+ * const ctx = createContext<MyDeps>({ userId: '123', db: myDb });
69
+ * const response = await myPrompt(model, ctx, { greeting: 'Hello' });
70
+ * ```
71
+ */
72
+ interface Context<DepsT> {
73
+ /**
74
+ * Marker property for runtime Context detection.
75
+ * @internal
76
+ */
77
+ readonly [CONTEXT_MARKER]: true;
78
+ /**
79
+ * The dependencies available in this context.
80
+ */
81
+ readonly deps: DepsT;
82
+ }
83
+ /**
84
+ * Type guard to check if a value is a Context object.
85
+ *
86
+ * @param value - The value to check.
87
+ * @returns True if the value is a Context, false otherwise.
88
+ *
89
+ * @example
90
+ * ```typescript
91
+ * const maybeCtx = getArgument();
92
+ * if (isContext(maybeCtx)) {
93
+ * console.log(maybeCtx.deps); // TypeScript knows this is a Context
94
+ * }
95
+ * ```
96
+ */
97
+ declare function isContext(value: unknown): value is Context<unknown>;
98
+ /**
99
+ * Create a context with the given dependencies.
100
+ *
101
+ * @template DepsT - The type of dependencies.
102
+ * @param deps - The dependencies to include in the context.
103
+ * @returns A Context containing the provided dependencies.
104
+ *
105
+ * @example
106
+ * ```typescript
107
+ * interface MyDeps {
108
+ * userId: string;
109
+ * db: Database;
110
+ * }
111
+ *
112
+ * const ctx = createContext<MyDeps>({ userId: '123', db: myDb });
113
+ * ```
114
+ */
115
+ declare function createContext<DepsT>(deps: DepsT): Context<DepsT>;
116
+
117
+ /**
118
+ * Tool call content from an assistant message.
119
+ *
120
+ * Represents a request from the assistant to call a tool/function.
121
+ * The args field contains the stringified JSON arguments.
122
+ */
123
+ type ToolCall = {
124
+ readonly type: "tool_call";
125
+ /** A unique identifier for this tool call. */
126
+ readonly id: string;
127
+ /** The name of the tool to call. */
128
+ readonly name: string;
129
+ /** The arguments to pass to the tool, stored as stringified JSON. */
130
+ readonly args: string;
131
+ };
132
+ /**
133
+ * Signals the start of a tool call in the stream.
134
+ */
135
+ type ToolCallStartChunk = {
136
+ readonly type: "tool_call_start_chunk";
137
+ readonly contentType: "tool_call";
138
+ /** Unique identifier for this tool call. */
139
+ readonly id: string;
140
+ /** The name of the tool to call. */
141
+ readonly name: string;
142
+ };
143
+ /**
144
+ * Contains incremental tool call arguments (JSON).
145
+ */
146
+ type ToolCallChunk = {
147
+ readonly type: "tool_call_chunk";
148
+ readonly contentType: "tool_call";
149
+ /** Unique identifier for this tool call. */
150
+ readonly id: string;
151
+ /** The incremental JSON args added in this chunk. */
152
+ readonly delta: string;
153
+ };
154
+ /**
155
+ * Signals the end of a tool call in the stream.
156
+ */
157
+ type ToolCallEndChunk = {
158
+ readonly type: "tool_call_end_chunk";
159
+ readonly contentType: "tool_call";
160
+ /** Unique identifier for this tool call. */
161
+ readonly id: string;
162
+ };
163
+ /** Create a ToolCallStartChunk */
164
+ declare function toolCallStart(id: string, name: string): ToolCallStartChunk;
165
+ /** Create a ToolCallChunk */
166
+ declare function toolCallChunk(id: string, delta: string): ToolCallChunk;
167
+ /** Create a ToolCallEndChunk */
168
+ declare function toolCallEnd(id: string): ToolCallEndChunk;
169
+
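
A sketch of how the streamed chunk factories above might be folded back into a complete `ToolCall`; the `collectToolCall` reducer is a hypothetical helper for illustration, not part of this package:

```typescript
function collectToolCall(
  chunks: readonly (ToolCallStartChunk | ToolCallChunk | ToolCallEndChunk)[]
): ToolCall {
  let id = '';
  let name = '';
  let args = '';
  for (const chunk of chunks) {
    if (chunk.type === 'tool_call_start_chunk') {
      id = chunk.id;
      name = chunk.name;
    } else if (chunk.type === 'tool_call_chunk') {
      args += chunk.delta; // accumulate the incremental JSON fragments
    }
    // tool_call_end_chunk carries no extra data beyond signaling completion
  }
  return { type: 'tool_call', id, name, args };
}

const call = collectToolCall([
  toolCallStart('call_1', 'get_weather'),
  toolCallChunk('call_1', '{"city":'),
  toolCallChunk('call_1', '"Tokyo"}'),
  toolCallEnd('call_1'),
]);
// call.args === '{"city":"Tokyo"}'
```
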
170
+ /**
171
+ * Tool output content representing the result of a tool call.
172
+ *
173
+ * Contains the result of executing a tool, or an error if the tool failed.
174
+ * The generic type T allows for typed results when the tool output type is known.
175
+ */
176
+ type ToolOutput<T extends Jsonable = Jsonable> = {
177
+ readonly type: "tool_output";
178
+ /** The ID of the tool call that this output is for. */
179
+ readonly id: string;
180
+ /** The name of the tool that created this output. */
181
+ readonly name: string;
182
+ /**
183
+ * The result of calling the tool.
184
+ *
185
+ * If the tool executed successfully, this will be the tool output.
186
+ * If the tool errored, this will be the error message, as a string.
187
+ */
188
+ readonly result: T | string;
189
+ /** The error from calling the tool, if any. */
190
+ readonly error: Error | null;
191
+ };
192
+ /**
193
+ * Factory methods for creating ToolOutput instances.
194
+ */
195
+ declare const ToolOutput: {
196
+ /**
197
+ * Create a ToolOutput with explicit parameters.
198
+ */
199
+ create: <T extends Jsonable>(id: string, name: string, result: T | string, error?: Error | null) => ToolOutput<T>;
200
+ /**
201
+ * Create a successful ToolOutput.
202
+ */
203
+ success: <T extends Jsonable>(id: string, name: string, result: T) => ToolOutput<T>;
204
+ /**
205
+ * Create a failed ToolOutput.
206
+ */
207
+ failure: (id: string, name: string, error: Error) => ToolOutput;
208
+ };
209
+
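
Based on the factory signatures above, usage looks roughly like this (IDs and values are placeholders):

```typescript
// Successful result: `result` holds the tool's Jsonable output, `error` is null.
const ok = ToolOutput.success('call_1', 'get_weather', { temp: 72, city: 'Tokyo' });

// Failed result: `result` is the error message string and `error` is set.
const failed = ToolOutput.failure('call_2', 'get_weather', new Error('city not found'));

// Explicit form; with a null error this is roughly equivalent to `success`.
const explicit = ToolOutput.create('call_3', 'get_weather', { temp: 72 }, null);
```
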
210
+ /**
211
+ * Base class for provider-native tools.
212
+ *
213
+ * Unlike regular tools which define functions that you execute locally,
214
+ * provider tools are capabilities built into the provider's API.
215
+ * The provider handles execution entirely server-side.
216
+ */
217
+ declare class ProviderTool {
218
+ readonly name: string;
219
+ constructor(name: string);
220
+ }
221
+ /**
222
+ * Type guard to check if a value is a ProviderTool.
223
+ */
224
+ declare function isProviderTool(value: unknown): value is ProviderTool;
225
+
226
+ /**
227
+ * Tool type definitions for LLM function calling.
228
+ *
229
+ * Provides interfaces for tools that can be called by LLMs,
230
+ * including base types for toolkit storage and callable types
231
+ * for direct invocation.
232
+ */
233
+
234
+ /**
235
+ * Duck-typed Zod schema interface for optional Zod support.
236
+ *
237
+ * This allows accepting Zod schemas without requiring Zod as a dependency.
238
+ * We detect Zod schemas at runtime by checking for these properties.
239
+ *
240
+ * Compatible with both Zod 3 (description in _def.description) and
241
+ * Zod 4 (description may be at schema.description or _def.description).
242
+ */
243
+ interface ZodLike {
244
+ /** Internal Zod definition - structure varies by version */
245
+ readonly _def: any;
246
+ /** Description may be at top level in Zod 4 */
247
+ readonly description?: string;
248
+ /** Output type for type inference (Zod internal) */
249
+ readonly _output?: unknown;
250
+ safeParse(data: unknown): {
251
+ success: boolean;
252
+ data?: unknown;
253
+ error?: unknown;
254
+ };
255
+ }
256
+ /**
257
+ * Infer the output type from a Zod-like schema.
258
+ *
259
+ * Uses Zod's internal `_output` property for type inference.
260
+ * This allows TypeScript to infer tool argument types from Zod schemas.
261
+ *
262
+ * @template Z - The Zod schema type.
263
+ */
264
+ type InferZod<Z extends ZodLike> = Z extends {
265
+ _output: infer T;
266
+ } ? T & Record<string, unknown> : Record<string, unknown>;
267
+ /**
268
+ * Type discriminator for regular tools.
269
+ * Used at runtime to distinguish from context tools.
270
+ */
271
+ declare const TOOL_TYPE: "tool";
272
+ /**
273
+ * Type discriminator for context tools.
274
+ * Used at runtime to distinguish from regular tools.
275
+ */
276
+ declare const CONTEXT_TOOL_TYPE: "context_tool";
277
+ /**
278
+ * Base interface for tools without the callable signature.
279
+ *
280
+ * This is used by Toolkit to store tools without variance issues.
281
+ * The callable signature in Tool<T> causes contravariance problems
282
+ * when storing tools with different argument types in a collection.
283
+ */
284
+ interface BaseTool extends ToolSchema {
285
+ /**
286
+ * Type discriminator - always 'tool' for regular tools.
287
+ * Used at runtime to distinguish from context tools.
288
+ */
289
+ readonly __toolType: typeof TOOL_TYPE;
290
+ /**
291
+ * Execute the tool from a ToolCall.
292
+ *
293
+ * @param toolCall - The tool call from the LLM.
294
+ * @returns A ToolOutput with the result or error.
295
+ */
296
+ execute(toolCall: ToolCall): Promise<ToolOutput<Jsonable>>;
297
+ /**
298
+ * The Zod validator used for this tool, if any.
299
+ * Present when tool was created with `validator` option.
300
+ */
301
+ readonly validator: ZodLike | undefined;
302
+ }
303
+ /**
304
+ * A defined tool that can be called by an LLM.
305
+ *
306
+ * Tools extend ToolSchema (a tool IS a schema) and are callable.
307
+ * Calling a tool directly executes it with the provided arguments.
308
+ *
309
+ * @template T - The type of arguments the tool accepts.
310
+ */
311
+ interface Tool<T extends Record<string, unknown> = Record<string, unknown>> extends BaseTool {
312
+ /**
313
+ * Call the tool directly with arguments.
314
+ * This is equivalent to Python's `tool.__call__(*args, **kwargs)`.
315
+ *
316
+ * @param args - The arguments to pass to the tool.
317
+ * @returns The tool result.
318
+ */
319
+ (args: T): Promise<Jsonable>;
320
+ }
321
+ /**
322
+ * Base interface for context tools without the callable signature.
323
+ *
324
+ * This is used by ContextToolkit to store tools without variance issues.
325
+ *
326
+ * @template DepsT - The type of dependencies in the context.
327
+ */
328
+ interface BaseContextTool<DepsT = unknown> extends ToolSchema {
329
+ /**
330
+ * Type discriminator - always 'context_tool' for context tools.
331
+ * Used at runtime to distinguish from regular tools.
332
+ */
333
+ readonly __toolType: typeof CONTEXT_TOOL_TYPE;
334
+ /**
335
+ * Execute the tool from a ToolCall with context.
336
+ *
337
+ * @param ctx - The context containing dependencies.
338
+ * @param toolCall - The tool call from the LLM.
339
+ * @returns A ToolOutput with the result or error.
340
+ */
341
+ execute(ctx: Context<DepsT>, toolCall: ToolCall): Promise<ToolOutput<Jsonable>>;
342
+ /**
343
+ * The Zod validator used for this tool, if any.
344
+ * Present when tool was created with `validator` option.
345
+ */
346
+ readonly validator: ZodLike | undefined;
347
+ }
348
+ /**
349
+ * A defined context tool with dependency injection.
350
+ *
351
+ * Context tools extend ToolSchema and are callable with a context argument.
352
+ *
353
+ * @template T - The type of arguments the tool accepts.
354
+ * @template DepsT - The type of dependencies in the context.
355
+ */
356
+ interface ContextTool<T extends Record<string, unknown> = Record<string, unknown>, DepsT = unknown> extends BaseContextTool<DepsT> {
357
+ /**
358
+ * Call the tool directly with context and arguments.
359
+ * This is equivalent to Python's `tool.__call__(ctx, *args, **kwargs)`.
360
+ *
361
+ * @param ctx - The context containing dependencies.
362
+ * @param args - The arguments to pass to the tool.
363
+ * @returns The tool result.
364
+ */
365
+ (ctx: Context<DepsT>, args: T): Promise<Jsonable>;
366
+ }
367
+ /**
368
+ * Union type for any tool (regular or context).
369
+ */
370
+ type AnyTool = Tool | ContextTool;
371
+ /**
372
+ * Union type for tools that can be used in context paths.
373
+ *
374
+ * Context paths accept BOTH regular tools AND context tools,
375
+ * matching Python's `ContextTools[DepsT] = Tool | ContextTool[DepsT]`.
376
+ *
377
+ * @template DepsT - The type of dependencies in the context.
378
+ */
379
+ type AnyContextTool<DepsT = unknown> = BaseTool | BaseContextTool<DepsT>;
380
+ /**
381
+ * Type alias for an array of regular tools.
382
+ * Matches Python's `Tools = Sequence[Tool | ProviderTool]`.
383
+ */
384
+ type Tools = readonly (BaseTool | ProviderTool)[];
385
+ /**
386
+ * Type alias for an array of tools usable in context paths.
387
+ * Accepts both regular tools AND context tools, plus provider tools.
388
+ * Matches Python's `ContextTools[DepsT] = Sequence[Tool | ContextTool[DepsT] | ProviderTool]`.
389
+ *
390
+ * @template DepsT - The type of dependencies in the context.
391
+ */
392
+ type ContextTools<DepsT = unknown> = readonly (AnyContextTool<DepsT> | ProviderTool)[];
393
+ type ToolFn<T extends Record<string, unknown> = Record<string, unknown>, R extends Jsonable = Jsonable> = (args: T) => Promise<R> | R;
394
+ type ContextToolFn<T extends Record<string, unknown> = Record<string, unknown>, DepsT = unknown, R extends Jsonable = Jsonable> = (ctx: Context<DepsT>, args: T) => Promise<R> | R;
395
+ type AnyToolFn = ToolFn | ContextToolFn;
396
+ /**
397
+ * Type guard to check if a tool is a context tool.
398
+ *
399
+ * @param tool - The tool to check.
400
+ * @returns True if the tool is a context tool.
401
+ */
402
+ declare function isContextTool<DepsT = unknown>(tool: AnyContextTool<DepsT>): tool is BaseContextTool<DepsT>;
403
+
404
+ /**
405
+ * Tool definition functions for creating LLM-callable tools.
406
+ *
407
+ * Provides `defineTool()` and `defineContextTool()` for creating tools
408
+ * with type-safe argument inference.
409
+ *
410
+ * Two patterns are supported:
411
+ * 1. **Zod-native**: Use `validator` option with a Zod schema (no transformer needed)
412
+ * 2. **Transformer-based**: Use generic type parameter (requires transformer for schema injection)
413
+ */
414
+
415
+ /**
416
+ * Check if a value is a Zod-like schema.
417
+ */
418
+ declare function isZodLike(value: unknown): value is ZodLike;
419
+ /**
420
+ * Arguments for defining a tool using a Zod schema.
421
+ *
422
+ * This pattern does NOT require the compile-time transformer.
423
+ * The schema is extracted from the Zod validator at runtime.
424
+ *
425
+ * @template Z - The Zod schema type.
426
+ */
427
+ interface ZodToolArgs<Z extends ZodLike> {
428
+ /** The name of the tool. */
429
+ name: string;
430
+ /** A description of what the tool does. */
431
+ description: string;
432
+ /**
433
+ * Whether to use strict mode for the tool schema.
434
+ * When true, providers that support it will enforce strict schema validation.
435
+ */
436
+ strict?: boolean;
437
+ /**
438
+ * Zod schema for both schema generation AND runtime validation.
439
+ * Use `.describe()` on fields to add descriptions.
440
+ */
441
+ validator: Z;
442
+ /** The tool implementation function. */
443
+ tool: (args: InferZod<Z>) => Jsonable | Promise<Jsonable>;
444
+ }
445
+ /**
446
+ * Arguments for defining a context tool using a Zod schema.
447
+ *
448
+ * This pattern does NOT require the compile-time transformer.
449
+ *
450
+ * @template Z - The Zod schema type.
451
+ * @template DepsT - The type of dependencies in the context.
452
+ */
453
+ interface ZodContextToolArgs<Z extends ZodLike, DepsT = unknown> {
454
+ /** The name of the tool. */
455
+ name: string;
456
+ /** A description of what the tool does. */
457
+ description: string;
458
+ /**
459
+ * Whether to use strict mode for the tool schema.
460
+ * When true, providers that support it will enforce strict schema validation.
461
+ */
462
+ strict?: boolean;
463
+ /**
464
+ * Zod schema for both schema generation AND runtime validation.
465
+ * Use `.describe()` on fields to add descriptions.
466
+ */
467
+ validator: Z;
468
+ /** The tool implementation function with context. */
469
+ tool: (ctx: Context<DepsT>, args: InferZod<Z>) => Jsonable | Promise<Jsonable>;
470
+ }
471
+ /**
472
+ * Arguments for defining a tool using the compile-time transformer.
473
+ *
474
+ * @template T - The type of arguments the tool accepts.
475
+ */
476
+ interface ToolArgs<T extends Record<string, unknown>> {
477
+ /** The name of the tool. */
478
+ name: string;
479
+ /** A description of what the tool does. */
480
+ description: string;
481
+ /**
482
+ * Whether to use strict mode for the tool schema.
483
+ * When true, providers that support it will enforce strict schema validation.
484
+ */
485
+ strict?: boolean;
486
+ /** The tool implementation function. */
487
+ tool: (args: T) => Jsonable | Promise<Jsonable>;
488
+ /**
489
+ * Internal: JSON schema injected by the compile-time transformer.
490
+ * Users should not set this directly - it's populated automatically.
491
+ */
492
+ __schema?: ToolParameterSchema;
493
+ }
494
+ /**
495
+ * Arguments for defining a context tool using the compile-time transformer.
496
+ *
497
+ * @template T - The type of arguments the tool accepts.
498
+ * @template DepsT - The type of dependencies in the context.
499
+ */
500
+ interface ContextToolArgs<T extends Record<string, unknown>, DepsT = unknown> {
501
+ /** The name of the tool. */
502
+ name: string;
503
+ /** A description of what the tool does. */
504
+ description: string;
505
+ /**
506
+ * Whether to use strict mode for the tool schema.
507
+ * When true, providers that support it will enforce strict schema validation.
508
+ */
509
+ strict?: boolean;
510
+ /** The tool implementation function with context. */
511
+ tool: (ctx: Context<DepsT>, args: T) => Jsonable | Promise<Jsonable>;
512
+ /**
513
+ * Internal: JSON schema injected by the compile-time transformer.
514
+ * Users should not set this directly - it's populated automatically.
515
+ */
516
+ __schema?: ToolParameterSchema;
517
+ }
518
+ /**
519
+ * Define a tool using a Zod schema (no transformer needed).
520
+ *
521
+ * @template Z - The Zod schema type.
522
+ * @param args - The tool definition arguments with validator.
523
+ * @returns A Tool instance with args inferred from the Zod schema.
524
+ *
525
+ * @example
526
+ * ```typescript
527
+ * import { z } from 'zod';
528
+ *
529
+ * const getWeather = defineTool({
530
+ * name: 'get_weather',
531
+ * description: 'Get weather for a city',
532
+ * validator: z.object({
533
+ * city: z.string().describe('The city name'),
534
+ * }),
535
+ * tool: ({ city }) => ({ temp: 72, city }),
536
+ * });
537
+ * ```
538
+ */
539
+ declare function defineTool<Z extends ZodLike>(args: ZodToolArgs<Z>): Tool<InferZod<Z>>;
540
+ /**
541
+ * Define a tool using the compile-time transformer.
542
+ *
543
+ * @template T - The type of arguments the tool accepts.
544
+ * @param args - The tool definition arguments.
545
+ * @returns A Tool instance.
546
+ *
547
+ * @example
548
+ * ```typescript
549
+ * interface WeatherArgs {
550
+ * // JSDoc comments become field descriptions via transformer
551
+ * city: string;
552
+ * }
553
+ *
554
+ * const getWeather = defineTool<WeatherArgs>({
555
+ * name: 'get_weather',
556
+ * description: 'Get weather for a city',
557
+ * tool: ({ city }) => ({ temp: 72, city }),
558
+ * });
559
+ * ```
560
+ */
561
+ declare function defineTool<T extends Record<string, unknown>>(args: ToolArgs<T>): Tool<T>;
562
+ /**
563
+ * Define a context tool using a Zod schema (no transformer needed).
564
+ *
565
+ * @template Z - The Zod schema type.
566
+ * @template DepsT - The type of dependencies in the context.
567
+ * @param args - The tool definition arguments with validator.
568
+ * @returns A ContextTool instance with args inferred from the Zod schema.
569
+ *
570
+ * @example
571
+ * ```typescript
572
+ * import { z } from 'zod';
573
+ *
574
+ * const searchDatabase = defineContextTool({
575
+ * name: 'search_database',
576
+ * description: 'Search the database',
577
+ * validator: z.object({
578
+ * query: z.string().describe('The search query'),
579
+ * }),
580
+ * tool: (ctx, { query }) => ctx.deps.db.search(query),
581
+ * });
582
+ * ```
583
+ */
584
+ declare function defineContextTool<Z extends ZodLike, DepsT = unknown>(args: ZodContextToolArgs<Z, DepsT>): ContextTool<InferZod<Z>, DepsT>;
585
+ /**
586
+ * Define a context tool using the compile-time transformer.
587
+ *
588
+ * @template T - The type of arguments the tool accepts.
589
+ * @template DepsT - The type of dependencies in the context.
590
+ * @param args - The tool definition arguments.
591
+ * @returns A ContextTool instance.
592
+ *
593
+ * @example
594
+ * ```typescript
595
+ * interface MyDeps {
596
+ * db: Database;
597
+ * }
598
+ *
599
+ * interface SearchArgs {
600
+ * query: string;
601
+ * }
602
+ *
603
+ * const searchDatabase = defineContextTool<SearchArgs, MyDeps>({
604
+ * name: 'search_database',
605
+ * description: 'Search the database',
606
+ * tool: (ctx, { query }) => ctx.deps.db.search(query),
607
+ * });
608
+ * ```
609
+ */
610
+ declare function defineContextTool<T extends Record<string, unknown>, DepsT = unknown>(args: ContextToolArgs<T, DepsT>): ContextTool<T, DepsT>;
611
+
612
+ /**
613
+ * Toolkit class for managing collections of tools.
614
+ *
615
+ * Provides a unified interface for executing tool calls against
616
+ * a collection of registered tools.
617
+ */
618
+
619
+ /**
620
+ * Base interface that all toolkit types implement.
621
+ *
622
+ * This provides a unified interface for accessing tool schemas and looking
623
+ * up tools by name. Matches Python's `BaseToolkit[ToolSchemaT]` pattern.
624
+ *
625
+ * @template T - The type of tools stored in this toolkit.
626
+ */
627
+ interface BaseToolkit<T extends ToolSchema = ToolSchema> {
628
+ /**
629
+ * Get all tools in the toolkit (including provider tools).
630
+ */
631
+ readonly tools: readonly (T | ProviderTool)[];
632
+ /**
633
+ * Get the schemas for all tools in the toolkit.
634
+ * Does NOT include provider tools (they have no schema).
635
+ */
636
+ readonly schemas: readonly ToolSchema[];
637
+ /**
638
+ * Get the provider tools in the toolkit.
639
+ */
640
+ readonly providerTools: readonly ProviderTool[];
641
+ /**
642
+ * Get a tool by name.
643
+ *
644
+ * @param name - The name of the tool.
645
+ * @returns The tool, or undefined if not found.
646
+ */
647
+ get(name: string): T | undefined;
648
+ /**
649
+ * Check if a tool with the given name exists.
650
+ *
651
+ * @param name - The name of the tool.
652
+ * @returns True if the tool exists.
653
+ */
654
+ has(name: string): boolean;
655
+ }
656
+ /**
657
+ * A toolkit for managing and executing regular tools.
658
+ *
659
+ * Implements BaseToolkit<BaseTool> for type-safe tool access.
660
+ *
661
+ * @example
662
+ * ```typescript
663
+ * const toolkit = new Toolkit([getWeather, searchWeb]);
664
+ *
665
+ * // Execute a tool call from the LLM
666
+ * const output = await toolkit.execute(toolCall);
667
+ * ```
668
+ */
669
+ declare class Toolkit implements BaseToolkit<BaseTool> {
670
+ private readonly toolMap;
671
+ private readonly providerToolMap;
672
+ private readonly _tools;
673
+ /**
674
+ * Create a new Toolkit with the given tools.
675
+ *
676
+ * @param tools - The tools to include in the toolkit, or null/undefined for empty.
677
+ * @throws Error if multiple tools have the same name.
678
+ */
679
+ constructor(tools: readonly (BaseTool | ProviderTool)[] | null | undefined);
680
+ /**
681
+ * Get all tools in the toolkit (including provider tools).
682
+ */
683
+ get tools(): readonly (BaseTool | ProviderTool)[];
684
+ /**
685
+ * Get the schemas for all tools in the toolkit.
686
+ * Does NOT include provider tools (they have no schema).
687
+ */
688
+ get schemas(): readonly ToolSchema[];
689
+ /**
690
+ * Get the provider tools in the toolkit.
691
+ */
692
+ get providerTools(): readonly ProviderTool[];
693
+ /**
694
+ * Get a tool by name.
695
+ *
696
+ * @param name - The name of the tool.
697
+ * @returns The tool, or undefined if not found.
698
+ */
699
+ get(name: string): BaseTool | undefined;
700
+ /**
701
+ * Check if a tool with the given name exists.
702
+ *
703
+ * @param name - The name of the tool.
704
+ * @returns True if the tool exists.
705
+ */
706
+ has(name: string): boolean;
707
+ /**
708
+ * Execute a tool call.
709
+ *
710
+ * Finds the tool matching the call's name and executes it.
711
+ * Returns a ToolOutput with the result or error.
712
+ *
713
+ * @param toolCall - The tool call from the LLM.
714
+ * @returns A ToolOutput with the result or error.
715
+ */
716
+ execute(toolCall: ToolCall): Promise<ToolOutput<Jsonable>>;
717
+ }
718
+ /**
719
+ * A toolkit for managing and executing context tools.
720
+ *
721
+ * This toolkit supports BOTH regular tools (BaseTool) AND context tools
722
+ * (BaseContextTool), matching Python's `ContextToolkit[DepsT]` pattern.
723
+ * Regular tools are executed without context; context tools receive the
724
+ * context for dependency injection.
725
+ *
726
+ * Implements BaseToolkit<AnyContextTool<DepsT>> for type-safe tool access.
727
+ *
728
+ * @template DepsT - The type of dependencies in the context.
729
+ *
730
+ * @example
731
+ * ```typescript
732
+ * interface MyDeps { db: Database; }
733
+ *
734
+ * // Can mix regular and context tools
735
+ * const toolkit = new ContextToolkit<MyDeps>([
736
+ * regularTool, // BaseTool - no context needed
737
+ * searchDatabase, // BaseContextTool<MyDeps> - receives context
738
+ * ]);
739
+ *
740
+ * // Execute with context - polymorphic dispatch handles both types
741
+ * const ctx = createContext<MyDeps>({ db: myDatabase });
742
+ * const output = await toolkit.execute(ctx, toolCall);
743
+ * ```
744
+ */
745
+ declare class ContextToolkit<DepsT = unknown> implements BaseToolkit<AnyContextTool<DepsT>> {
746
+ private readonly toolMap;
747
+ private readonly providerToolMap;
748
+ private readonly _tools;
749
+ /**
750
+ * Create a new ContextToolkit with the given tools.
751
+ *
752
+ * Accepts regular tools (BaseTool), context tools (BaseContextTool), and provider tools.
753
+ *
754
+ * @param tools - The tools to include in the toolkit, or null/undefined for empty.
755
+ * @throws Error if multiple tools have the same name.
756
+ */
757
+ constructor(tools: readonly (AnyContextTool<DepsT> | ProviderTool)[] | null | undefined);
758
+ /**
759
+ * Get all tools in the toolkit (including provider tools).
760
+ */
761
+ get tools(): readonly (AnyContextTool<DepsT> | ProviderTool)[];
762
+ /**
763
+ * Get the schemas for all tools in the toolkit.
764
+ * Does NOT include provider tools (they have no schema).
765
+ */
766
+ get schemas(): readonly ToolSchema[];
767
+ /**
768
+ * Get the provider tools in the toolkit.
769
+ */
770
+ get providerTools(): readonly ProviderTool[];
771
+ /**
772
+ * Get a tool by name.
773
+ *
774
+ * @param name - The name of the tool.
775
+ * @returns The tool, or undefined if not found.
776
+ */
777
+ get(name: string): AnyContextTool<DepsT> | undefined;
778
+ /**
779
+ * Check if a tool with the given name exists.
780
+ *
781
+ * @param name - The name of the tool.
782
+ * @returns True if the tool exists.
783
+ */
784
+ has(name: string): boolean;
785
+ /**
786
+ * Execute a tool call with context.
787
+ *
788
+ * Uses polymorphic dispatch to handle both regular tools and context tools:
789
+ * - Regular tools (BaseTool) are executed without context
790
+ * - Context tools (BaseContextTool) receive the context for dependency injection
791
+ *
792
+ * @param ctx - The context containing dependencies.
793
+ * @param toolCall - The tool call from the LLM.
794
+ * @returns A ToolOutput with the result or error.
795
+ */
796
+ execute(ctx: Context<DepsT>, toolCall: ToolCall): Promise<ToolOutput<Jsonable>>;
797
+ /**
798
+ * Execute multiple tool calls in parallel with context.
799
+ *
800
+ * @param ctx - The context containing dependencies.
801
+ * @param toolCalls - The tool calls to execute.
802
+ * @returns An array of ToolOutputs in the same order as the input.
803
+ */
804
+ executeAll(ctx: Context<DepsT>, toolCalls: readonly ToolCall[]): Promise<ToolOutput<Jsonable>[]>;
805
+ }
806
+ /**
807
+ * Create a toolkit from an array of tools.
808
+ *
809
+ * This is a convenience function for creating Toolkit instances.
810
+ *
811
+ * @param tools - The tools to include in the toolkit.
812
+ * @returns A new Toolkit instance.
813
+ */
814
+ declare function createToolkit(tools: readonly (BaseTool | ProviderTool)[]): Toolkit;
815
+ /**
816
+ * Create a context toolkit from an array of tools.
817
+ *
818
+ * This is a convenience function for creating ContextToolkit instances.
819
+ * Accepts regular tools (BaseTool), context tools (BaseContextTool), and provider tools.
820
+ *
821
+ * @template DepsT - The type of dependencies in the context.
822
+ * @param tools - The tools to include in the toolkit.
823
+ * @returns A new ContextToolkit instance.
824
+ */
825
+ declare function createContextToolkit<DepsT = unknown>(tools: readonly (AnyContextTool<DepsT> | ProviderTool)[]): ContextToolkit<DepsT>;
826
+
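
A minimal sketch tying the pieces together, assuming the `getWeather` and `searchDatabase` tools from the `defineTool`/`defineContextTool` examples above and a hypothetical database dependency:

```typescript
interface MyDeps {
  db: { search: (query: string) => Promise<string[]> }; // hypothetical dependency shape
}

declare const getWeather: Tool;                                        // regular tool
declare const searchDatabase: ContextTool<{ query: string }, MyDeps>;  // context tool
declare const myDatabase: MyDeps['db'];

async function runToolCalls(toolCalls: readonly ToolCall[]) {
  // Regular and context tools can be mixed in one ContextToolkit.
  const toolkit = createContextToolkit<MyDeps>([getWeather, searchDatabase]);

  // Dependencies are injected once via the context...
  const ctx = createContext<MyDeps>({ db: myDatabase });

  // ...and executeAll runs the calls in parallel, preserving input order.
  return toolkit.executeAll(ctx, toolCalls);
}
```
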
827
+ /**
828
+ * Web search tool for provider-native web search capabilities.
829
+ */
830
+
831
+ /**
832
+ * Web search tool that allows the model to search the internet.
833
+ *
834
+ * This is a provider tool - the search is executed server-side by the provider,
835
+ * not by your code. The model decides when to search based on the prompt,
836
+ * and the provider returns search results with citations.
837
+ *
838
+ * Supported providers include Anthropic, Google, and OpenAI (when using the Responses API).
839
+ *
840
+ * @example
841
+ * ```typescript
842
+ * import { defineCall, WebSearchTool } from 'mirascope';
843
+ *
844
+ * const searchWeb = defineCall({
845
+ * model: 'anthropic/claude-sonnet-4-5',
846
+ * tools: [new WebSearchTool()],
847
+ * }, () => 'Search the web for: Who won the 2024 Super Bowl?');
848
+ *
849
+ * const response = await searchWeb();
850
+ * console.log(response.text()); // Response includes citations from web search
851
+ * ```
852
+ */
853
+ declare class WebSearchTool extends ProviderTool {
854
+ constructor();
855
+ }
856
+ /**
857
+ * Type guard to check if a value is a WebSearchTool.
858
+ */
859
+ declare function isWebSearchTool(value: unknown): value is WebSearchTool;
860
+
861
+ /**
862
+ * Supported audio MIME types.
863
+ */
864
+ type AudioMimeType = "audio/wav" | "audio/mp3" | "audio/aiff" | "audio/aac" | "audio/ogg" | "audio/flac";
865
+ /**
866
+ * Audio data encoded as base64.
867
+ */
868
+ type Base64AudioSource = {
869
+ readonly type: "base64_audio_source";
870
+ /** The audio data, as a base64 encoded string. */
871
+ readonly data: string;
872
+ /** The mime type of the audio (e.g. audio/mp3). */
873
+ readonly mimeType: AudioMimeType;
874
+ };
875
+ /**
876
+ * Audio content for a message.
877
+ *
878
+ * Audio can be included in user messages for multimodal models that support it.
879
+ */
880
+ type Audio = {
881
+ readonly type: "audio";
882
+ readonly source: Base64AudioSource;
883
+ };
884
+ /**
885
+ * Factory methods for creating Audio instances.
886
+ */
887
+ declare const Audio: {
888
+ /**
889
+ * Download audio from a URL and encode as base64.
890
+ *
891
+ * @throws Error if download fails or audio exceeds maxSize
892
+ */
893
+ download: (url: string, maxSize?: number) => Promise<Audio>;
894
+ /**
895
+ * Create Audio from raw bytes.
896
+ *
897
+ * @throws Error if data exceeds maxSize or type cannot be inferred
898
+ */
899
+ fromBytes: (data: Uint8Array, maxSize?: number) => Audio;
900
+ };
901
+
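
A short sketch of attaching audio to a user message with the factories above (the URL is a placeholder; run inside an async function):

```typescript
// Download and base64-encode a clip, then include it next to text content.
const clip = await Audio.download('https://example.com/meeting.mp3');
const message = user(['Please transcribe this recording:', clip]);
```
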
902
+ /**
903
+ * MIME types for text-based documents.
904
+ */
905
+ type DocumentTextMimeType = "application/json" | "text/plain" | "application/x-javascript" | "text/javascript" | "application/x-python" | "text/x-python" | "text/html" | "text/css" | "text/xml" | "text/rtf";
906
+ /**
907
+ * MIME types for binary documents (base64 encoded).
908
+ */
909
+ type DocumentBase64MimeType = "application/pdf";
910
+ /**
911
+ * Document data encoded as base64 (for binary formats like PDF).
912
+ */
913
+ type Base64DocumentSource = {
914
+ readonly type: "base64_document_source";
915
+ /** The document data, as a base64 encoded string. */
916
+ readonly data: string;
917
+ /** The media type of the document (e.g. application/pdf). */
918
+ readonly mediaType: DocumentBase64MimeType;
919
+ };
920
+ /**
921
+ * Document data as plain text.
922
+ */
923
+ type TextDocumentSource = {
924
+ readonly type: "text_document_source";
925
+ /** The document data, as plain text. */
926
+ readonly data: string;
927
+ /** The media type of the document (e.g. text/plain, text/csv). */
928
+ readonly mediaType: DocumentTextMimeType;
929
+ };
930
+ /**
931
+ * Document referenced by URL.
932
+ */
933
+ type URLDocumentSource = {
934
+ readonly type: "url_document_source";
935
+ /** The url of the document (e.g. https://example.com/paper.pdf). */
936
+ readonly url: string;
937
+ };
938
+ /**
939
+ * Document content for a message.
940
+ *
941
+ * Documents can be included in user messages for models that support them.
942
+ * Supports text documents (JSON, plain text, code) and binary documents (PDF).
943
+ */
944
+ type Document = {
945
+ readonly type: "document";
946
+ readonly source: Base64DocumentSource | TextDocumentSource | URLDocumentSource;
947
+ };
948
+ /**
949
+ * Factory methods for creating Document instances.
950
+ *
951
+ * Note: These are stubs that throw NotImplementedError.
952
+ * Full implementation is deferred to a later phase.
953
+ */
954
+ declare const Document: {
955
+ /**
956
+ * Create a Document from a URL reference.
957
+ *
958
+ * @throws Error (not implemented)
959
+ */
960
+ fromUrl: (_url: string, _options?: {
961
+ download?: boolean;
962
+ }) => Document;
963
+ /**
964
+ * Create a Document from raw bytes.
965
+ *
966
+ * @throws Error (not implemented)
967
+ */
968
+ fromBytes: (_data: Uint8Array, _options?: {
969
+ mimeType?: DocumentTextMimeType | DocumentBase64MimeType;
970
+ }) => Document;
971
+ };
972
+
973
+ /**
974
+ * Supported image MIME types.
975
+ */
976
+ type ImageMimeType = "image/png" | "image/jpeg" | "image/webp" | "image/gif" | "image/heic" | "image/heif";
977
+ /**
978
+ * Image data encoded as base64.
979
+ */
980
+ type Base64ImageSource = {
981
+ readonly type: "base64_image_source";
982
+ /** The image data, as a base64 encoded string. */
983
+ readonly data: string;
984
+ /** The mime type of the image (e.g. image/png). */
985
+ readonly mimeType: ImageMimeType;
986
+ };
987
+ /**
988
+ * Image referenced by URL.
989
+ */
990
+ type URLImageSource = {
991
+ readonly type: "url_image_source";
992
+ /** The url of the image (e.g. https://example.com/image.png). */
993
+ readonly url: string;
994
+ };
995
+ /**
996
+ * Image content for a message.
997
+ *
998
+ * Images can be included in user messages for multimodal models.
999
+ * The source can be either base64-encoded data or a URL reference.
1000
+ */
1001
+ type Image = {
1002
+ readonly type: "image";
1003
+ readonly source: Base64ImageSource | URLImageSource;
1004
+ };
1005
+ /**
1006
+ * Factory methods for creating Image instances.
1007
+ */
1008
+ declare const Image: {
1009
+ /**
1010
+ * Create an Image from a URL reference (no download).
1011
+ */
1012
+ fromUrl: (url: string) => Image;
1013
+ /**
1014
+ * Download an image from a URL and encode as base64.
1015
+ *
1016
+ * @throws Error if download fails or image exceeds maxSize
1017
+ */
1018
+ download: (url: string, maxSize?: number) => Promise<Image>;
1019
+ /**
1020
+ * Create an Image from raw bytes.
1021
+ *
1022
+ * @throws Error if data exceeds maxSize or type cannot be inferred
1023
+ */
1024
+ fromBytes: (data: Uint8Array, maxSize?: number) => Image;
1025
+ };
1026
+
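
Similarly for images, sketched with placeholder URLs (run inside an async function):

```typescript
// Reference an image by URL without downloading it...
const remote = Image.fromUrl('https://example.com/chart.png');

// ...or download it and embed the bytes as base64.
const embedded = await Image.download('https://example.com/chart.png');

// Either form is a UserContentPart and can go straight into a user message.
const message = user(['What does this chart show?', remote]);
```
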
1027
+ /**
1028
+ * Text content for a message.
1029
+ *
1030
+ * Represents plain text content in a message. This is the most common
1031
+ * content type for both user and assistant messages.
1032
+ */
1033
+ type Text = {
1034
+ readonly type: "text";
1035
+ /** The text content. */
1036
+ readonly text: string;
1037
+ };
1038
+ /**
1039
+ * Signals the start of a text content block in the stream.
1040
+ */
1041
+ type TextStartChunk = {
1042
+ readonly type: "text_start_chunk";
1043
+ readonly contentType: "text";
1044
+ };
1045
+ /**
1046
+ * Contains incremental text content.
1047
+ */
1048
+ type TextChunk = {
1049
+ readonly type: "text_chunk";
1050
+ readonly contentType: "text";
1051
+ /** The incremental text added in this chunk. */
1052
+ readonly delta: string;
1053
+ };
1054
+ /**
1055
+ * Signals the end of a text content block in the stream.
1056
+ */
1057
+ type TextEndChunk = {
1058
+ readonly type: "text_end_chunk";
1059
+ readonly contentType: "text";
1060
+ };
1061
+ /** Create a TextStartChunk */
1062
+ declare function textStart(): TextStartChunk;
1063
+ /** Create a TextChunk */
1064
+ declare function textChunk(delta: string): TextChunk;
1065
+ /** Create a TextEndChunk */
1066
+ declare function textEnd(): TextEndChunk;
1067
+
1068
+ /**
1069
+ * Thought content from an assistant's extended thinking.
1070
+ *
1071
+ * Represents the reasoning or thinking process of an assistant model
1072
+ * that supports extended thinking (e.g., Claude with thinking enabled).
1073
+ */
1074
+ type Thought = {
1075
+ readonly type: "thought";
1076
+ /** The thoughts or reasoning of the assistant. */
1077
+ readonly thought: string;
1078
+ };
1079
+ /**
1080
+ * Signals the start of a thought content block in the stream.
1081
+ */
1082
+ type ThoughtStartChunk = {
1083
+ readonly type: "thought_start_chunk";
1084
+ readonly contentType: "thought";
1085
+ };
1086
+ /**
1087
+ * Contains incremental thought content.
1088
+ */
1089
+ type ThoughtChunk = {
1090
+ readonly type: "thought_chunk";
1091
+ readonly contentType: "thought";
1092
+ /** The incremental thought text added in this chunk. */
1093
+ readonly delta: string;
1094
+ };
1095
+ /**
1096
+ * Signals the end of a thought content block in the stream.
1097
+ */
1098
+ type ThoughtEndChunk = {
1099
+ readonly type: "thought_end_chunk";
1100
+ readonly contentType: "thought";
1101
+ };
1102
+ /** Create a ThoughtStartChunk */
1103
+ declare function thoughtStart(): ThoughtStartChunk;
1104
+ /** Create a ThoughtChunk */
1105
+ declare function thoughtChunk(delta: string): ThoughtChunk;
1106
+ /** Create a ThoughtEndChunk */
1107
+ declare function thoughtEnd(): ThoughtEndChunk;
1108
+
1109
+ /**
1110
+ * All content types that can appear in messages.
1111
+ */
1112
+ type ContentPart = Text | Image | Audio | Document | ToolOutput | ToolCall | Thought;
1113
+ /**
1114
+ * Content types that can appear in user messages.
1115
+ */
1116
+ type UserContentPart = Text | Image | Audio | Document | ToolOutput;
1117
+ /**
1118
+ * Content types that can appear in assistant messages.
1119
+ */
1120
+ type AssistantContentPart = Text | ToolCall | Thought;
1121
+ /**
1122
+ * Chunks of assistant content that may be streamed as generated by the LLM.
1123
+ */
1124
+ type AssistantContentChunk = TextStartChunk | TextChunk | TextEndChunk | ThoughtStartChunk | ThoughtChunk | ThoughtEndChunk | ToolCallStartChunk | ToolCallChunk | ToolCallEndChunk;
1125
+
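
An illustrative consumer of a chunk stream, switching on the `type` discriminant; the `AsyncIterable` source is assumed, not provided by this section:

```typescript
async function printStream(stream: AsyncIterable<AssistantContentChunk>): Promise<string> {
  let text = '';
  for await (const chunk of stream) {
    switch (chunk.type) {
      case 'text_chunk':
        text += chunk.delta; // accumulate visible response text
        break;
      case 'thought_chunk':
        console.log('[thinking]', chunk.delta);
        break;
      case 'tool_call_start_chunk':
        console.log('tool requested:', chunk.name);
        break;
      // start/end markers for text and thoughts need no handling here
    }
  }
  return text;
}
```
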
1126
+ /**
1127
+ * The `Message` types and utility constructors.
1128
+ */
1129
+
1130
+ /**
1131
+ * A system message that sets context and instructions for the conversation.
1132
+ */
1133
+ type SystemMessage = {
1134
+ /** The role of this message. Always "system". */
1135
+ readonly role: "system";
1136
+ /** The content of this SystemMessage. */
1137
+ readonly content: Text;
1138
+ };
1139
+ /**
1140
+ * A user message containing input from the user.
1141
+ */
1142
+ type UserMessage = {
1143
+ /** The role of this message. Always "user". */
1144
+ readonly role: "user";
1145
+ /** The content of the user message. */
1146
+ readonly content: readonly UserContentPart[];
1147
+ /** A name identifying the creator of this message. */
1148
+ readonly name: string | null;
1149
+ };
1150
+ /**
1151
+ * An assistant message containing the model's response.
1152
+ */
1153
+ type AssistantMessage = {
1154
+ /** The role of this message. Always "assistant". */
1155
+ readonly role: "assistant";
1156
+ /** The content of the assistant message. */
1157
+ readonly content: readonly AssistantContentPart[];
1158
+ /** A name identifying the creator of this message. */
1159
+ readonly name: string | null;
1160
+ /** The LLM provider that generated this assistant message, if available. */
1161
+ readonly providerId: ProviderId | null;
1162
+ /** The model identifier of the LLM that generated this assistant message, if available. */
1163
+ readonly modelId: ModelId | null;
1164
+ /** The provider-specific model identifier (e.g. "gpt-5:responses"), if available. */
1165
+ readonly providerModelName: string | null;
1166
+ /**
1167
+ * The provider-specific raw representation of this assistant message, if available.
1168
+ *
1169
+ * If rawMessage is truthy, then it may be used for provider-specific behavior when
1170
+ * resuming an LLM interaction that included this assistant message. For example, we can
1171
+ * reuse the provider-specific raw encoding rather than re-encoding the message from its
1172
+ * Mirascope content representation. This may also take advantage of server-side provider
1173
+ * context, e.g. identifiers of reasoning context tokens that the provider generated.
1174
+ *
1175
+ * If present, the content should be encoded as JSON-serializable data, and in a format
1176
+ * that matches the representation the provider expects for the Mirascope data.
1177
+ *
1178
+ * Raw content is not required, as the Mirascope content can also be used to generate
1179
+ * a valid input to the provider (potentially without taking advantage of provider-specific
1180
+ * reasoning caches, etc.). In that case rawMessage should be left null.
1181
+ */
1182
+ readonly rawMessage: Jsonable | null;
1183
+ };
1184
+ /**
1185
+ * A message in an LLM interaction.
1186
+ *
1187
+ * Messages have a role (system, user, or assistant) and content that is a sequence
1188
+ * of content parts. The content can include text, images, audio, documents, and
1189
+ * tool interactions.
1190
+ *
1191
+ * For most use cases, prefer the convenience functions `system()`, `user()`, and
1192
+ * `assistant()` instead of directly creating `Message` objects.
1193
+ *
1194
+ * @example
1195
+ * ```typescript
1196
+ * import { messages } from 'mirascope/llm';
1197
+ *
1198
+ * const msgs = [
1199
+ * messages.system("You are a helpful assistant."),
1200
+ * messages.user("Hello, how are you?"),
1201
+ * ];
1202
+ * ```
1203
+ */
1204
+ type Message = SystemMessage | UserMessage | AssistantMessage;
1205
+ /**
1206
+ * Type alias for content that can fit into a `UserMessage`.
1207
+ */
1208
+ type UserContent = string | UserContentPart | readonly (string | UserContentPart)[];
1209
+ /**
1210
+ * Type alias for content that can fit into an `AssistantMessage`.
1211
+ */
1212
+ type AssistantContent = string | AssistantContentPart | readonly (string | AssistantContentPart)[];
1213
+ /**
1214
+ * Type alias for content that can fit into a `SystemMessage`.
1215
+ */
1216
+ type SystemContent = string | Text;
1217
+ /**
1218
+ * Creates a system message.
1219
+ *
1220
+ * @param content - The content of the message, which must be a string or Text content.
1221
+ * @returns A SystemMessage.
1222
+ */
1223
+ declare function system(content: SystemContent): SystemMessage;
1224
+ /**
1225
+ * Creates a user message.
1226
+ *
1227
+ * @param content - The content of the message, which can be a string or any UserContent,
1228
+ * or a sequence of such user content pieces.
1229
+ * @param options - Optional parameters.
1230
+ * @param options.name - Optional name to identify a specific user in multi-party conversations.
1231
+ * @returns A UserMessage.
1232
+ */
1233
+ declare function user(content: UserContent, options?: {
1234
+ name?: string | null;
1235
+ }): UserMessage;
1236
+ /**
1237
+ * Creates an assistant message.
1238
+ *
1239
+ * @param content - The content of the message, which can be a string or any AssistantContent,
1240
+ * or a sequence of assistant content pieces.
1241
+ * @param options - Required and optional parameters.
1242
+ * @param options.modelId - Optional id of the model that produced this message.
1243
+ * @param options.providerId - Optional identifier of the provider that produced this message.
1244
+ * @param options.providerModelName - Optional provider-specific model name.
1245
+ * @param options.rawMessage - Optional Jsonable object with provider-specific raw data.
1246
+ * @param options.name - Optional name to identify a specific assistant in multi-party conversations.
1247
+ * @returns An AssistantMessage.
1248
+ */
1249
+ declare function assistant(content: AssistantContent, options: {
1250
+ modelId: ModelId | null;
1251
+ providerId: ProviderId | null;
1252
+ providerModelName?: string | null;
1253
+ rawMessage?: Jsonable | null;
1254
+ name?: string | null;
1255
+ }): AssistantMessage;
1256
+
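
A small sketch of building a conversation with the constructors above; `modelId` and `providerId` are required options on `assistant()` but may be null when unknown, e.g. for hand-written few-shot examples:

```typescript
const conversation: Message[] = [
  system('You are a helpful assistant.'),
  user('What is the capital of France?'),
  assistant('The capital of France is Paris.', { modelId: null, providerId: null }),
];
```
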
1257
+ /**
1258
+ * Utility functions for message handling.
1259
+ */
1260
+
1261
+ /**
1262
+ * Type guard that checks if the content is a sequence of Messages.
1263
+ *
1264
+ * @param content - Either user content or a sequence of messages.
1265
+ * @returns True if content is a sequence of Messages, false otherwise.
1266
+ * @throws Error if an empty array is provided.
1267
+ */
1268
+ declare function isMessages(content: UserContent | readonly Message[]): content is readonly Message[];
1269
+ /**
1270
+ * Promote a prompt result to a list of messages.
1271
+ *
1272
+ * If the result is already a list of Messages, returns it as-is.
1273
+ * If the result is str/UserContentPart/Sequence of content parts, wraps it in a user message.
1274
+ *
1275
+ * @param content - Either user content or a sequence of messages.
1276
+ * @returns A sequence of Messages.
1277
+ */
1278
+ declare function promoteToMessages(content: UserContent | readonly Message[]): readonly Message[];
1279
+
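
For illustration, `promoteToMessages` normalizes either input form:

```typescript
// A bare string (or content parts) is wrapped in a single user message...
const fromText = promoteToMessages('Hello, how are you?');

// ...while an existing message list is returned unchanged.
const fromMessages = promoteToMessages([
  system('You are terse.'),
  user('Summarize this document.'),
]);
```
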
1280
+ /**
1281
+ * The messages module for LLM interactions.
1282
+ *
1283
+ * This module defines the message types used in LLM interactions. Messages are represented
1284
+ * as a unified `Message` type with different roles (system, user, assistant) and flexible
1285
+ * content arrays that can include text, images, audio, documents, and tool interactions.
1286
+ */
1287
+
1288
+ type index$1_AssistantContent = AssistantContent;
1289
+ type index$1_AssistantMessage = AssistantMessage;
1290
+ type index$1_Message = Message;
1291
+ type index$1_SystemContent = SystemContent;
1292
+ type index$1_SystemMessage = SystemMessage;
1293
+ type index$1_UserContent = UserContent;
1294
+ type index$1_UserMessage = UserMessage;
1295
+ declare const index$1_assistant: typeof assistant;
1296
+ declare const index$1_isMessages: typeof isMessages;
1297
+ declare const index$1_promoteToMessages: typeof promoteToMessages;
1298
+ declare const index$1_system: typeof system;
1299
+ declare const index$1_user: typeof user;
1300
+ declare namespace index$1 {
1301
+ export { type index$1_AssistantContent as AssistantContent, type index$1_AssistantMessage as AssistantMessage, type index$1_Message as Message, type index$1_SystemContent as SystemContent, type index$1_SystemMessage as SystemMessage, type index$1_UserContent as UserContent, type index$1_UserMessage as UserMessage, index$1_assistant as assistant, index$1_isMessages as isMessages, index$1_promoteToMessages as promoteToMessages, index$1_system as system, index$1_user as user };
1302
+ }
1303
+
1304
+ /**
1305
+ * Configuration for extended reasoning/thinking in LLM responses.
1306
+ */
1307
+ /**
1308
+ * Level of effort/reasoning to apply to thinking.
1309
+ */
1310
+ type ThinkingLevel = "none" | "default" | "minimal" | "low" | "medium" | "high" | "max";
1311
+ /**
1312
+ * Configuration for extended reasoning/thinking in LLM responses.
1313
+ *
1314
+ * Thinking is a process where the model spends additional tokens reasoning about
1315
+ * the prompt before generating a response. Providing any `ThinkingConfig` will enable
1316
+ * thinking (unless it is specifically disabled via level="none"). Depending on
1317
+ * the provider and model, thinking may always be active regardless of user settings.
1318
+ */
1319
+ interface ThinkingConfig {
1320
+ /**
1321
+ * Level of effort/reasoning to apply to thinking.
1322
+ *
1323
+ * - none: Disable thinking entirely. Minimizes cost and latency.
1324
+ * - default: Use the provider's default
1325
+ * - minimal: Use the provider's lowest setting for reasoning
+ * - low: Use a small amount of reasoning tokens
1326
+ * - medium: Use a moderate amount of reasoning tokens
1327
+ * - high: Allow extensive resources for thinking
1328
+ * - max: Uses as much thinking as allowed by the provider.
1329
+ *
1330
+ * Mirascope makes a best effort to apply the chosen thinking level, but exact behavior
1331
+ * varies by provider and model. For example, some models may not support thinking,
1332
+ * while other models may not allow disabling it.
1333
+ */
1334
+ level: ThinkingLevel;
1335
+ /**
1336
+ * Whether to include Thought content in the model output.
1337
+ *
1338
+ * Depending on the model and provider, setting includeThoughts to true may
1339
+ * request reasoning summaries (which are not the underlying reasoning tokens,
1340
+ * but a readable summary produced by another model), or it may return the original
1341
+ * reasoning tokens.
1342
+ *
1343
+ * When includeThoughts is false, no summaries will be requested, and thoughts
1344
+ * will not be included in the output even if they were provided by the provider.
1345
+ *
1346
+ * Defaults to false.
1347
+ */
1348
+ includeThoughts?: boolean;
1349
+ /**
1350
+ * Re-encode Thought content as text for model consumption.
1351
+ *
1352
+ * If `true`, when an `AssistantMessage` contains `Thoughts` and is passed back
1353
+ * to an LLM, those `Thoughts` will be encoded as `Text`, ensuring the assistant
1354
+ * can read its prior reasoning. This contrasts with provider defaults which may
1355
+ * ignore prior thoughts, particularly if tool calls are not involved.
1356
+ *
1357
+ * When `true`, Mirascope will re-encode messages rather than reusing raw provider
1358
+ * response content, which may disable provider-specific optimizations like cached
1359
+ * reasoning tokens.
1360
+ *
1361
+ * Defaults to `false` if unset.
1362
+ */
1363
+ encodeThoughtsAsText?: boolean;
1364
+ }
1365
+
1366
+ /**
1367
+ * Base parameters for LLM providers.
1368
+ */
1369
+
1370
+ /**
1371
+ * Common parameters shared across LLM providers.
1372
+ *
1373
+ * Note: Each provider may handle these parameters differently or not support them at all.
1374
+ * Please check provider-specific documentation for parameter support and behavior.
1375
+ */
1376
+ interface Params {
1377
+ /**
1378
+ * Controls randomness in the output (0.0 to 1.0).
1379
+ *
1380
+ * Lower temperatures are good for prompts that require a less open-ended or
1381
+ * creative response, while higher temperatures can lead to more diverse or
1382
+ * creative results.
1383
+ */
1384
+ temperature?: number;
1385
+ /**
1386
+ * Maximum number of tokens to generate.
1387
+ */
1388
+ maxTokens?: number;
1389
+ /**
1390
+ * Nucleus sampling parameter (0.0 to 1.0).
1391
+ *
1392
+ * Tokens are selected from the most to least probable until the sum of their
1393
+ * probabilities equals this value. Use a lower value for less random responses and a
1394
+ * higher value for more random responses.
1395
+ */
1396
+ topP?: number;
1397
+ /**
1398
+ * Limits token selection to the k most probable tokens (typically 1 to 100).
1399
+ *
1400
+ * For each token selection step, the `topK` tokens with the
1401
+ * highest probabilities are sampled. Then tokens are further filtered based
1402
+ * on `topP` with the final token selected using temperature sampling. Use
1403
+ * a lower number for less random responses and a higher number for more
1404
+ * random responses.
1405
+ */
1406
+ topK?: number;
1407
+ /**
1408
+ * Random seed for reproducibility.
1409
+ *
1410
+ * When `seed` is fixed to a specific number, the model makes a best
1411
+ * effort to provide the same response for repeated requests.
1412
+ *
1413
+ * Not supported by all providers, and does not guarantee strict reproducibility.
1414
+ */
1415
+ seed?: number;
1416
+ /**
1417
+ * Stop sequences to end generation.
1418
+ *
1419
+ * The model will stop generating text if one of these strings is encountered in the
1420
+ * response.
1421
+ */
1422
+ stopSequences?: string[];
1423
+ /**
1424
+ * Configuration for extended reasoning/thinking.
1425
+ *
1426
+ * Pass a `ThinkingConfig` to configure thinking behavior. The `level` field controls
1427
+ * whether thinking is enabled and how much reasoning to use. Level may be one of
1428
+ * "minimal", "low", "medium", or "high". If level is unset, then thinking is enabled
1429
+ * with a provider-specific default level.
1430
+ *
1431
+ * `ThinkingConfig` can also include `encodeThoughtsAsText`, which is an advanced
1432
+ * feature for providing past thoughts back to the model as text content. This is
1433
+ * primarily useful for making thoughts transferable when passing a conversation
1434
+ * to a different model or provider than the one that generated the thinking.
1435
+ */
1436
+ thinking?: ThinkingConfig | null;
1437
+ }
1438
+
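+ /**
+ * A minimal sketch of passing `Params` when constructing a model. It assumes the
+ * `model()` helper declared later in this file; every field shown is declared on
+ * `Params` above, and providers may ignore parameters they do not support.
+ *
+ * ```typescript
+ * import { model } from 'mirascope/llm';
+ *
+ * const m = model('anthropic/claude-sonnet-4-20250514', {
+ *   temperature: 0.7,        // more open-ended output
+ *   maxTokens: 1024,         // cap the response length
+ *   stopSequences: ['END'],  // stop generation at this marker
+ *   thinking: { level: 'medium', includeThoughts: false },
+ * });
+ * const response = await m.call('Write a haiku about coding.');
+ * ```
+ */
+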
1439
+ /**
1440
+ * Google model information.
1441
+ *
1442
+ * This file is auto-generated by typescript/scripts/codegen/google.ts
1443
+ * Do not edit manually - run `bun run codegen` to update.
1444
+ */
1445
+ /**
1446
+ * Array of all known Google model IDs.
1447
+ * This is the source of truth - the type and Set are derived from it.
1448
+ */
1449
+ declare const GOOGLE_KNOWN_MODELS_ARRAY: readonly ["google/gemini-2.0-flash", "google/gemini-2.0-flash-001", "google/gemini-2.0-flash-exp", "google/gemini-2.0-flash-exp-image-generation", "google/gemini-2.0-flash-lite", "google/gemini-2.0-flash-lite-001", "google/gemini-2.0-flash-lite-preview", "google/gemini-2.0-flash-lite-preview-02-05", "google/gemini-2.5-flash", "google/gemini-2.5-flash-image", "google/gemini-2.5-flash-image-preview", "google/gemini-2.5-flash-lite", "google/gemini-2.5-flash-lite-preview-09-2025", "google/gemini-2.5-flash-preview-09-2025", "google/gemini-2.5-pro", "google/gemini-3-flash-preview", "google/gemini-3-pro-image-preview", "google/gemini-3-pro-preview", "google/gemini-flash-latest", "google/gemini-flash-lite-latest", "google/gemini-pro-latest", "google/gemini-robotics-er-1.5-preview", "google/gemma-3-12b-it", "google/gemma-3-1b-it", "google/gemma-3-27b-it", "google/gemma-3-4b-it", "google/gemma-3n-e2b-it", "google/gemma-3n-e4b-it", "google/nano-banana-pro-preview"];
1450
+ /**
1451
+ * Valid Google model IDs.
1452
+ */
1453
+ type GoogleKnownModels = (typeof GOOGLE_KNOWN_MODELS_ARRAY)[number];
1454
+
1455
+ /**
1456
+ * Google registered LLM models.
1457
+ */
1458
+
1459
+ /**
1460
+ * The Google model IDs registered with Mirascope.
1461
+ */
1462
+ type GoogleModelId = GoogleKnownModels | (string & {});
1463
+
1464
+ /**
1465
+ * Finish reason indicating why the model stopped generating.
1466
+ */
1467
+ /**
1468
+ * Possible reasons why generation stopped.
1469
+ */
1470
+ declare const FinishReason: {
1471
+ /** Model ran out of tokens. */
1472
+ readonly MAX_TOKENS: "max_tokens";
1473
+ /** Model refused to generate. */
1474
+ readonly REFUSAL: "refusal";
1475
+ /** Context length exceeded. */
1476
+ readonly CONTEXT_LENGTH_EXCEEDED: "context_length_exceeded";
1477
+ };
1478
+ /**
1479
+ * Type representing a finish reason value.
1480
+ */
1481
+ type FinishReason = (typeof FinishReason)[keyof typeof FinishReason];
1482
+
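+ /**
+ * A brief sketch of branching on `FinishReason`, assuming it is exported from
+ * 'mirascope/llm'. A `null` finish reason indicates normal completion.
+ *
+ * ```typescript
+ * import { FinishReason } from 'mirascope/llm';
+ *
+ * function describeFinish(reason: FinishReason | null): string {
+ *   switch (reason) {
+ *     case FinishReason.MAX_TOKENS:
+ *       return 'ran out of tokens; consider raising maxTokens';
+ *     case FinishReason.REFUSAL:
+ *       return 'the model refused to generate';
+ *     case FinishReason.CONTEXT_LENGTH_EXCEEDED:
+ *       return 'the context length was exceeded';
+ *     default:
+ *       return 'completed normally';
+ *   }
+ * }
+ * ```
+ */
+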
1483
+ /**
1484
+ * Token usage tracking for LLM responses.
1485
+ */
1486
+ /**
1487
+ * Token usage statistics for a response.
1488
+ */
1489
+ interface Usage {
1490
+ /**
1491
+ * Total input tokens (includes cache tokens).
1492
+ */
1493
+ readonly inputTokens: number;
1494
+ /**
1495
+ * Total output tokens (includes reasoning tokens).
1496
+ */
1497
+ readonly outputTokens: number;
1498
+ /**
1499
+ * Tokens read from cache.
1500
+ */
1501
+ readonly cacheReadTokens: number;
1502
+ /**
1503
+ * Tokens written to cache.
1504
+ */
1505
+ readonly cacheWriteTokens: number;
1506
+ /**
1507
+ * Tokens used for thinking/reasoning.
1508
+ */
1509
+ readonly reasoningTokens: number;
1510
+ /**
1511
+ * Provider-specific raw usage object.
1512
+ */
1513
+ readonly raw: unknown;
1514
+ }
1515
+ /**
1516
+ * Create a Usage object with default values.
1517
+ */
1518
+ declare function createUsage(options?: Partial<Usage>): Usage;
1519
+ /**
1520
+ * Calculate total tokens from usage.
1521
+ */
1522
+ declare function totalTokens(usage: Usage): number;
1523
+
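+ /**
+ * A small sketch of working with `Usage`, assuming `createUsage` and `totalTokens`
+ * are exported from 'mirascope/llm'. `createUsage` fills defaults for omitted
+ * fields; how `totalTokens` combines the counters is not specified here.
+ *
+ * ```typescript
+ * import { createUsage, totalTokens } from 'mirascope/llm';
+ *
+ * const usage = createUsage({ inputTokens: 120, outputTokens: 45 });
+ * console.log(totalTokens(usage)); // combined token count for the request
+ * ```
+ */
+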
1524
+ /**
1525
+ * Shared base of Response implementations.
1526
+ */
1527
+
1528
+ /**
1529
+ * Initialization options for creating a BaseResponse.
1530
+ */
1531
+ interface BaseResponseInit {
1532
+ /**
1533
+ * The raw response from the LLM provider.
1534
+ */
1535
+ raw: unknown;
1536
+ /**
1537
+ * The provider that generated this response.
1538
+ */
1539
+ providerId: ProviderId;
1540
+ /**
1541
+ * The model ID that generated this response.
1542
+ */
1543
+ modelId: ModelId;
1544
+ /**
1545
+ * Provider-specific model name (may include additional info like API mode).
1546
+ */
1547
+ providerModelName: string;
1548
+ /**
1549
+ * The parameters used to generate this response.
1550
+ */
1551
+ params: Params;
1552
+ /**
1553
+ * The toolkit containing all tools available for this response.
1554
+ * Can be a Toolkit or ContextToolkit.
1555
+ */
1556
+ toolkit: BaseToolkit;
1557
+ /**
1558
+ * The Format describing the structured response format, if any.
1559
+ */
1560
+ format?: Format | null;
1561
+ /**
1562
+ * The input messages (before the assistant response).
1563
+ */
1564
+ inputMessages: readonly Message[];
1565
+ /**
1566
+ * The assistant message containing the response content.
1567
+ */
1568
+ assistantMessage: AssistantMessage;
1569
+ /**
1570
+ * The reason generation stopped, if not normal completion.
1571
+ */
1572
+ finishReason: FinishReason | null;
1573
+ /**
1574
+ * Token usage statistics, if available.
1575
+ */
1576
+ usage: Usage | null;
1577
+ }
1578
+ /**
1579
+ * Base response class with constructor logic.
1580
+ *
1581
+ * This class processes the assistant message content, organizing it by type
1582
+ * (text, tool calls, thoughts) for easy access.
1583
+ *
1584
+ * @template F - The type of the formatted output when using structured outputs.
1585
+ */
1586
+ declare class BaseResponse<F = unknown> extends RootResponse<F> {
1587
+ readonly raw: unknown;
1588
+ readonly providerId: ProviderId;
1589
+ readonly modelId: ModelId;
1590
+ readonly providerModelName: string;
1591
+ readonly params: Params;
1592
+ readonly messages: readonly Message[];
1593
+ readonly content: readonly AssistantContentPart[];
1594
+ readonly texts: readonly Text[];
1595
+ readonly toolCalls: readonly ToolCall[];
1596
+ readonly thoughts: readonly Thought[];
1597
+ readonly finishReason: FinishReason | null;
1598
+ readonly usage: Usage | null;
1599
+ readonly format: Format | null;
1600
+ readonly toolkit: BaseToolkit;
1601
+ constructor(init: BaseResponseInit);
1602
+ }
1603
+
1604
+ /**
1605
+ * Response class for LLM calls.
1606
+ */
1607
+
1608
+ /**
1609
+ * Initialization options for creating a Response.
1610
+ *
1611
+ * Accepts `tools` as either a Toolkit or a list of tools, which gets
1612
+ * converted to a Toolkit before passing to BaseResponse.
1613
+ */
1614
+ interface ResponseInit extends Omit<BaseResponseInit, "toolkit"> {
1615
+ /**
1616
+ * The tools available for this response.
1617
+ * Can be a Toolkit instance or an array of tools.
1618
+ */
1619
+ tools?: Tools | Toolkit;
1620
+ }
1621
+ /**
1622
+ * The response generated by an LLM.
1623
+ *
1624
+ * This is the primary response class for non-streaming LLM calls. Since TypeScript
1625
+ * I/O is always async, this single class handles all async response scenarios.
1626
+ *
1627
+ * @template F - The type of the formatted output when using structured outputs.
1628
+ *
1629
+ * @example
1630
+ * ```typescript
1631
+ * const response = await model.call([user("Hello!")]);
1632
+ * console.log(response.text());
1633
+ * ```
1634
+ *
1635
+ * @example With structured output
1636
+ * ```typescript
1637
+ * interface Book { title: string; author: string; }
1638
+ * const response = await model.call<Book>('Recommend a book', { format: BookSchema });
1639
+ * const book = response.parse(); // Type: Book | DeepPartial<Book> | null
1640
+ * ```
1641
+ */
1642
+ declare class Response<F = unknown> extends BaseResponse<F> {
1643
+ /**
1644
+ * Override base toolkit with correct type for execute() support.
1645
+ */
1646
+ readonly toolkit: Toolkit;
1647
+ constructor(init: ResponseInit);
1648
+ /**
1649
+ * Execute all tool calls in this response using the registered tools.
1650
+ *
1651
+ * @returns An array of ToolOutput objects, one for each tool call.
1652
+ *
1653
+ * @example
1654
+ * ```typescript
1655
+ * const response = await model.call('What is the weather?', { tools: [weatherTool] });
1656
+ * if (response.toolCalls.length > 0) {
1657
+ * const outputs = await response.executeTools();
1658
+ * const followUp = await response.resume(outputs);
1659
+ * }
1660
+ * ```
1661
+ */
1662
+ executeTools(): Promise<ToolOutput<Jsonable>[]>;
1663
+ /**
1664
+ * Generate a new Response using this response's messages with additional user content.
1665
+ *
1666
+ * Uses this response's tools and format type. Also uses this response's provider,
1667
+ * model, and params.
1668
+ *
1669
+ * @param content - The new user message content to append to the message history.
1670
+ * @returns A new Response instance generated from the extended message history.
1671
+ *
1672
+ * @example
1673
+ * ```typescript
1674
+ * const response = await model.call('Hello!');
1675
+ * console.log(response.text());
1676
+ *
1677
+ * // Continue the conversation
1678
+ * const followUp = await response.resume('Tell me more about that');
1679
+ * console.log(followUp.text());
1680
+ * ```
1681
+ */
1682
+ resume(content: UserContent): Promise<Response<F>>;
1683
+ }
1684
+
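+ /**
+ * A sketch of the full tool loop built from the methods above: call, execute tools,
+ * and resume until the model stops requesting tools. `weatherTool` is a hypothetical
+ * tool assumed to be defined elsewhere; everything else follows the declared
+ * signatures and the examples in this file.
+ *
+ * ```typescript
+ * import { model } from 'mirascope/llm';
+ *
+ * // `weatherTool` is a placeholder for a tool defined elsewhere in your code.
+ * let response = await model('anthropic/claude-sonnet-4-20250514')
+ *   .call('What is the weather in Tokyo?', { tools: [weatherTool] });
+ *
+ * // Keep executing tools and resuming until the model answers in plain text.
+ * while (response.toolCalls.length > 0) {
+ *   const outputs = await response.executeTools();
+ *   response = await response.resume(outputs);
+ * }
+ * console.log(response.text());
+ * ```
+ */
+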
1685
+ /**
1686
+ * ContextResponse class for context-based LLM calls.
1687
+ *
1688
+ * Extends BaseResponse with context-aware functionality including:
1689
+ * - executeTools(): Execute tool calls with context dependency injection
1690
+ * - resume(): Continue the conversation with additional user content
1691
+ */
1692
+
1693
+ /**
1694
+ * Initialization options for creating a ContextResponse.
1695
+ *
1696
+ * Accepts `tools` as either a ContextToolkit or a list of tools, which gets
1697
+ * converted to a ContextToolkit before passing to BaseResponse.
1698
+ *
1699
+ * Supports both regular tools (BaseTool) and context tools (BaseContextTool),
1700
+ * matching Python's `ContextTools[DepsT]` pattern.
1701
+ *
1702
+ * @template DepsT - The type of dependencies in the context.
1703
+ */
1704
+ interface ContextResponseInit<DepsT = unknown> extends Omit<BaseResponseInit, "toolkit"> {
1705
+ /**
1706
+ * The tools available for this response.
1707
+ * Can be a ContextToolkit instance or an array of tools.
1708
+ * Accepts both regular tools and context tools.
1709
+ */
1710
+ tools?: ContextTools<DepsT> | ContextToolkit<DepsT>;
1711
+ }
1712
+ /**
1713
+ * The response generated by an LLM from a context call.
1714
+ *
1715
+ * This class provides context-aware functionality on top of the standard response:
1716
+ * - `executeTools()`: Execute all tool calls with context dependency injection
1717
+ * - `resume()`: Continue the conversation with additional user content
1718
+ *
1719
+ * @template DepsT - The type of dependencies in the context.
1720
+ * @template F - The type of the formatted output when using structured outputs.
1721
+ *
1722
+ * @example
1723
+ * ```typescript
1724
+ * interface MyDeps { userId: string; }
1725
+ *
1726
+ * const ctx = createContext<MyDeps>({ userId: '123' });
1727
+ * const response = await myPrompt(model, ctx);
1728
+ * console.log(response.text());
1729
+ *
1730
+ * // Continue the conversation
1731
+ * const followUp = await response.resume(ctx, 'Tell me more');
1732
+ * ```
1733
+ */
1734
+ declare class ContextResponse<DepsT = unknown, F = unknown> extends BaseResponse<F> {
1735
+ /**
1736
+ * The context toolkit containing tools that can receive context.
1737
+ */
1738
+ readonly contextToolkit: ContextToolkit<DepsT>;
1739
+ constructor(init: ContextResponseInit<DepsT>);
1740
+ /**
1741
+ * Execute all tool calls in this response using the registered context tools.
1742
+ *
1743
+ * @param ctx - The context containing dependencies to pass to tools.
1744
+ * @returns An array of ToolOutput objects, one for each tool call.
1745
+ *
1746
+ * @example
1747
+ * ```typescript
1748
+ * const response = await myPrompt(model, ctx, [searchTool]);
1749
+ * if (response.toolCalls.length > 0) {
1750
+ * const outputs = await response.executeTools(ctx);
1751
+ * const followUp = await response.resume(ctx, outputs);
1752
+ * }
1753
+ * ```
1754
+ */
1755
+ executeTools(ctx: Context<DepsT>): Promise<ToolOutput<Jsonable>[]>;
1756
+ /**
1757
+ * Generate a new ContextResponse using this response's messages with additional user content.
1758
+ *
1759
+ * Uses this response's tools and format type. Also uses this response's provider,
1760
+ * model, and params.
1761
+ *
1762
+ * @param ctx - A Context with the required deps type.
1763
+ * @param content - The new user message content to append to the message history.
1764
+ * @returns A new ContextResponse instance generated from the extended message history.
1765
+ *
1766
+ * @example
1767
+ * ```typescript
1768
+ * const response = await myPrompt(model, ctx);
1769
+ * console.log(response.text());
1770
+ *
1771
+ * // Continue the conversation
1772
+ * const followUp = await response.resume(ctx, 'Tell me more about that');
1773
+ * console.log(followUp.text());
1774
+ * ```
1775
+ */
1776
+ resume(ctx: Context<DepsT>, content: UserContent): Promise<ContextResponse<DepsT, F>>;
1777
+ }
1778
+
1779
+ /**
1780
+ * Streaming metadata chunk types for provider-agnostic streaming responses.
1781
+ *
1782
+ * Content-specific chunks (Text, Thought, ToolCall) are defined in content/.
1783
+ * This file only contains metadata chunks.
1784
+ */
1785
+
1786
+ /**
1787
+ * Contains the finish reason when the stream completes.
1788
+ */
1789
+ interface FinishReasonChunk {
1790
+ readonly type: "finish_reason_chunk";
1791
+ /** The reason the stream finished */
1792
+ readonly finishReason: FinishReason;
1793
+ }
1794
+ /**
1795
+ * Contains incremental token usage information.
1796
+ */
1797
+ interface UsageDeltaChunk {
1798
+ readonly type: "usage_delta_chunk";
1799
+ /** Delta in input tokens */
1800
+ readonly inputTokens: number;
1801
+ /** Delta in output tokens */
1802
+ readonly outputTokens: number;
1803
+ /** Delta in cache read tokens */
1804
+ readonly cacheReadTokens: number;
1805
+ /** Delta in cache write tokens */
1806
+ readonly cacheWriteTokens: number;
1807
+ /** Delta in reasoning/thinking tokens */
1808
+ readonly reasoningTokens: number;
1809
+ }
1810
+ /**
1811
+ * Contains a raw stream event from the underlying provider.
1812
+ */
1813
+ interface RawStreamEventChunk {
1814
+ readonly type: "raw_stream_event_chunk";
1815
+ /** The raw stream event from the underlying provider */
1816
+ readonly rawStreamEvent: unknown;
1817
+ }
1818
+ /**
1819
+ * Contains provider-specific raw message content.
1820
+ */
1821
+ interface RawMessageChunk {
1822
+ readonly type: "raw_message_chunk";
1823
+ /** Provider-specific raw content */
1824
+ readonly rawMessage: Jsonable;
1825
+ }
1826
+ /**
1827
+ * All possible chunk types in a streaming response.
1828
+ */
1829
+ type StreamResponseChunk = AssistantContentChunk | FinishReasonChunk | UsageDeltaChunk | RawStreamEventChunk | RawMessageChunk;
1830
+ /**
1831
+ * Async iterator type for streaming chunks.
1832
+ */
1833
+ type AsyncChunkIterator = AsyncIterator<StreamResponseChunk>;
1834
+
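+ /**
+ * A sketch of consuming metadata chunks from a chunk iterator, assuming the
+ * `StreamResponseChunk` type is exported from 'mirascope/llm'. Only the metadata
+ * chunk types declared above are matched explicitly; content chunks fall through
+ * to the default branch.
+ *
+ * ```typescript
+ * import type { StreamResponseChunk } from 'mirascope/llm';
+ *
+ * async function summarizeChunks(chunks: AsyncIterable<StreamResponseChunk>) {
+ *   let outputTokens = 0;
+ *   for await (const chunk of chunks) {
+ *     switch (chunk.type) {
+ *       case 'usage_delta_chunk':
+ *         outputTokens += chunk.outputTokens; // accumulate incremental usage
+ *         break;
+ *       case 'finish_reason_chunk':
+ *         console.log('finished:', chunk.finishReason);
+ *         break;
+ *       default:
+ *         break; // text/thought/tool-call chunks and raw provider events
+ *     }
+ *   }
+ *   return outputTokens;
+ * }
+ * ```
+ */
+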
1835
+ /**
1836
+ * Stream classes for streaming assistant content parts.
1837
+ *
1838
+ * Each stream wraps a chunk iterator and provides:
1839
+ * - Async iteration via [Symbol.asyncIterator]() yielding delta strings
1840
+ * - collect() to consume remaining chunks and return the final content object
1841
+ * - Partial accumulation properties to track content as it arrives
1842
+ */
1843
+
1844
+ /**
1845
+ * Stream for text content.
1846
+ *
1847
+ * Wraps a TextChunk iterator and yields text delta strings.
1848
+ * Accumulates text in partialText as chunks are consumed.
1849
+ */
1850
+ declare class TextStream {
1851
+ readonly type: "text_stream";
1852
+ readonly contentType: "text";
1853
+ /** The accumulated text content as chunks are received. */
1854
+ partialText: string;
1855
+ private _chunkIterator;
1856
+ constructor(chunkIterator: AsyncIterator<TextChunk>);
1857
+ /**
1858
+ * Iterate over text deltas as they are received.
1859
+ */
1860
+ [Symbol.asyncIterator](): AsyncGenerator<string>;
1861
+ /**
1862
+ * Collect all chunks and return the final Text content.
1863
+ */
1864
+ collect(): Promise<Text>;
1865
+ }
1866
+ /**
1867
+ * Stream for thought/reasoning content.
1868
+ *
1869
+ * Wraps a ThoughtChunk iterator and yields thought delta strings.
1870
+ * Accumulates thought in partialThought as chunks are consumed.
1871
+ */
1872
+ declare class ThoughtStream {
1873
+ readonly type: "thought_stream";
1874
+ readonly contentType: "thought";
1875
+ /** The accumulated thought content as chunks are received. */
1876
+ partialThought: string;
1877
+ private _chunkIterator;
1878
+ constructor(chunkIterator: AsyncIterator<ThoughtChunk>);
1879
+ /**
1880
+ * Iterate over thought deltas as they are received.
1881
+ */
1882
+ [Symbol.asyncIterator](): AsyncGenerator<string>;
1883
+ /**
1884
+ * Collect all chunks and return the final Thought content.
1885
+ */
1886
+ collect(): Promise<Thought>;
1887
+ }
1888
+ /**
1889
+ * Stream for tool call content.
1890
+ *
1891
+ * Wraps a ToolCallChunk iterator and yields argument delta strings.
1892
+ * Accumulates args in partialArgs as chunks are consumed.
1893
+ */
1894
+ declare class ToolCallStream {
1895
+ readonly type: "tool_call_stream";
1896
+ readonly contentType: "tool_call";
1897
+ /** A unique identifier for this tool call. */
1898
+ readonly toolId: string;
1899
+ /** The name of the tool being called. */
1900
+ readonly toolName: string;
1901
+ /** The accumulated tool arguments as chunks are received. */
1902
+ partialArgs: string;
1903
+ private _chunkIterator;
1904
+ constructor(toolId: string, toolName: string, chunkIterator: AsyncIterator<ToolCallChunk>);
1905
+ /**
1906
+ * Iterate over tool call argument deltas as they are received.
1907
+ */
1908
+ [Symbol.asyncIterator](): AsyncGenerator<string>;
1909
+ /**
1910
+ * Collect all chunks and return the final ToolCall content.
1911
+ */
1912
+ collect(): Promise<ToolCall>;
1913
+ }
1914
+ /**
1915
+ * A stream for any assistant content type.
1916
+ */
1917
+ type Stream = TextStream | ThoughtStream | ToolCallStream;
1918
+
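+ /**
+ * A sketch of iterating per-part streams via `streams()` (declared on the streaming
+ * response below). The discriminants 'text_stream', 'thought_stream', and
+ * 'tool_call_stream' are the `type` fields declared above; the model ID mirrors the
+ * other examples in this file.
+ *
+ * ```typescript
+ * import { model } from 'mirascope/llm';
+ *
+ * const response = await model('anthropic/claude-sonnet-4-20250514')
+ *   .stream('Plan a weekend trip to Kyoto.');
+ *
+ * for await (const stream of response.streams()) {
+ *   if (stream.type === 'text_stream') {
+ *     for await (const delta of stream) {
+ *       process.stdout.write(delta); // print text as it arrives
+ *     }
+ *   } else if (stream.type === 'thought_stream') {
+ *     await stream.collect();                      // drain the thought stream
+ *     console.log('thought:', stream.partialThought);
+ *   } else {
+ *     const toolCall = await stream.collect();     // completed ToolCall content
+ *     console.log('tool call:', stream.toolName, toolCall);
+ *   }
+ * }
+ * ```
+ */
+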
1919
+ /**
1920
+ * BaseStreamResponse class for handling streaming LLM responses.
1921
+ *
1922
+ * Extends RootResponse to provide the standard response interface while adding
1923
+ * streaming-specific methods:
1924
+ * - chunkStream(): Raw chunks as async iterator (with caching for replay)
1925
+ * - textStream(): Text deltas only
1926
+ * - thoughtStream(): Thought deltas only
1927
+ * - structuredStream(): Partial parsed objects for structured output streaming
1928
+ *
1929
+ * Accumulates content as chunks arrive, with final state accessible via
1930
+ * the standard RootResponse interface (text(), content, finishReason, usage, etc.).
1931
+ *
1932
+ * This base class contains all streaming logic but no resume methods.
1933
+ * StreamResponse and ContextStreamResponse extend this class and add their
1934
+ * own resume methods with appropriate signatures.
1935
+ */
1936
+
1937
+ /**
1938
+ * Arguments for constructing a BaseStreamResponse.
1939
+ */
1940
+ interface BaseStreamResponseInit {
1941
+ /** The provider ID (e.g., 'anthropic', 'openai') */
1942
+ providerId: ProviderId;
1943
+ /** The model ID (e.g., 'anthropic/claude-sonnet-4-20250514') */
1944
+ modelId: ModelId;
1945
+ /** The provider's model name from the response */
1946
+ providerModelName: string;
1947
+ /** Parameters used for the request */
1948
+ params: Params;
1949
+ /** The toolkit containing all tools available for this response */
1950
+ toolkit: BaseToolkit;
1951
+ /** The Format describing the structured response format, if any */
1952
+ format?: Format | null;
1953
+ /** Input messages sent in the request */
1954
+ inputMessages: readonly Message[];
1955
+ /** Async iterator of streaming chunks from the provider */
1956
+ chunkIterator: AsyncIterator<StreamResponseChunk>;
1957
+ }
1958
+ /**
1959
+ * Base streaming response that consumes chunks from an async iterator.
1960
+ *
1961
+ * Extends RootResponse to provide the standard response interface. Chunks are
1962
+ * processed lazily and cached for replay. Content is accumulated as chunks
1963
+ * arrive, with text/thought objects mutated in place for efficiency.
1964
+ *
1965
+ * This class contains all streaming logic but no resume methods. Subclasses
1966
+ * (StreamResponse, ContextStreamResponse) add their own resume methods.
1967
+ *
1968
+ * @template F - The type of the formatted output when using structured outputs.
1969
+ */
1970
+ declare class BaseStreamResponse<F = unknown> extends RootResponse<F> {
1971
+ /** Raw stream events from provider */
1972
+ get raw(): readonly unknown[];
1973
+ readonly providerId: ProviderId;
1974
+ readonly modelId: ModelId;
1975
+ readonly providerModelName: string;
1976
+ readonly params: Params;
1977
+ readonly toolkit: BaseToolkit;
1978
+ readonly format: Format | null;
1979
+ /** Input messages sent in the request */
1980
+ readonly inputMessages: readonly Message[];
1981
+ get messages(): readonly Message[];
1982
+ get content(): readonly AssistantContentPart[];
1983
+ get texts(): readonly Text[];
1984
+ get toolCalls(): readonly ToolCall[];
1985
+ get thoughts(): readonly Thought[];
1986
+ get finishReason(): FinishReason | null;
1987
+ get usage(): Usage | null;
1988
+ /** Cached chunks for replay */
1989
+ private readonly _chunks;
1990
+ /** Accumulated content parts */
1991
+ private readonly _content;
1992
+ /** Accumulated text parts */
1993
+ private readonly _texts;
1994
+ /** Accumulated thought parts */
1995
+ private readonly _thoughts;
1996
+ /** Accumulated tool calls */
1997
+ private readonly _toolCalls;
1998
+ /** Raw stream events from provider */
1999
+ private readonly _rawStreamEvents;
2000
+ /** The chunk iterator from the provider */
2001
+ private _chunkIterator;
2002
+ /** Current content block being built (Text or Thought) */
2003
+ private _currentContent;
2004
+ /** In-progress tool calls tracked by ID (for interleaved streaming) */
2005
+ private readonly _currentToolCalls;
2006
+ /** Whether the iterator has been fully consumed */
2007
+ private _consumed;
2008
+ /** Whether we're currently processing a FORMAT_TOOL call (transforming to text) */
2009
+ private _processingFormatTool;
2010
+ /** Finish reason once stream is complete */
2011
+ private _finishReason;
2012
+ /** Accumulated usage statistics */
2013
+ private _usage;
2014
+ /** Raw message from provider */
2015
+ private _rawMessage;
2016
+ constructor(args: BaseStreamResponseInit);
2017
+ /**
2018
+ * Get the cached chunks.
2019
+ */
2020
+ get chunks(): readonly AssistantContentChunk[];
2021
+ /**
2022
+ * Whether the stream has been fully consumed.
2023
+ */
2024
+ get consumed(): boolean;
2025
+ /**
2026
+ * Get the accumulated thought content.
2027
+ * Joins all thought parts with the specified separator.
2028
+ */
2029
+ thought(separator?: string): string;
2030
+ /**
2031
+ * Build the assistant message from accumulated content.
2032
+ */
2033
+ get assistantMessage(): AssistantMessage;
2034
+ /**
2035
+ * Stream content chunks with caching for replay.
2036
+ *
2037
+ * First yields any cached chunks, then continues consuming from the iterator.
2038
+ * All chunks are cached so subsequent calls can replay from the beginning.
2039
+ */
2040
+ chunkStream(): AsyncGenerator<AssistantContentChunk>;
2041
+ /**
2042
+ * Stream only text deltas.
2043
+ *
2044
+ * Yields the text string from each TextChunk, filtering out other chunk types.
2045
+ */
2046
+ textStream(): AsyncGenerator<string>;
2047
+ /**
2048
+ * Stream only thought deltas.
2049
+ *
2050
+ * Yields the thought string from each ThoughtChunk, filtering out other chunk types.
2051
+ */
2052
+ thoughtStream(): AsyncGenerator<string>;
2053
+ /**
2054
+ * Stream partial parsed objects as they arrive during structured output streaming.
2055
+ *
2056
+ * This method drives the stream forward, parsing the accumulated text content
2057
+ * as partial JSON after each text chunk arrives. The yielded objects may have
2058
+ * missing or incomplete fields early in the stream, gradually becoming complete.
2059
+ *
2060
+ * @yields DeepPartial<F> - Partial objects with fields populated as they arrive.
2061
+ * @throws Error if format was not specified or uses OutputParser.
2062
+ *
2063
+ * @example
2064
+ * ```typescript
2065
+ * interface Book { title: string; author: string; }
2066
+ *
2067
+ * const stream = await model.stream<Book>('Recommend a book', {
2068
+ * format: BookSchema
2069
+ * });
2070
+ *
2071
+ * for await (const partial of stream.structuredStream()) {
2072
+ * console.log(partial.title); // May be undefined initially
2073
+ * console.log(partial.author); // Populated as it arrives
2074
+ * }
2075
+ *
2076
+ * // After stream completes, get the final validated result
2077
+ * const book = stream.parse();
2078
+ * ```
2079
+ */
2080
+ structuredStream(): AsyncGenerator<DeepPartial<F>>;
2081
+ /**
2082
+ * Consume the entire stream without processing chunks.
2083
+ * Useful when you just want the final accumulated state.
2084
+ */
2085
+ consume(): Promise<void>;
2086
+ /**
2087
+ * Returns an async iterator that yields streams for each content part in the response.
2088
+ *
2089
+ * Each content part in the response will correspond to one stream, which will yield
2090
+ * chunks of content as they come in from the underlying LLM.
2091
+ *
2092
+ * Fully iterating through this iterator will fully consume the underlying stream,
2093
+ * updating the Response with all collected content.
2094
+ *
2095
+ * As content is consumed, it is cached on the StreamResponse. If a new iterator
2096
+ * is constructed via calling `streams()`, it will start by replaying the cached
2097
+ * content from the response, and (if there is still more content to consume from
2098
+ * the LLM), it will proceed to consume it once it has iterated through all the
2099
+ * cached chunks.
2100
+ */
2101
+ streams(): AsyncGenerator<Stream>;
2102
+ /**
2103
+ * Replace the chunk iterator with a wrapped version.
2104
+ * Used internally by providers to wrap errors during iteration.
2105
+ * @internal
2106
+ */
2107
+ wrapChunkIterator(wrapper: (iterator: AsyncIterator<StreamResponseChunk>) => AsyncIterator<StreamResponseChunk>): void;
2108
+ /**
2109
+ * Transform FORMAT_TOOL chunks to text chunks.
2110
+ *
2111
+ * When the format uses tool mode, the LLM generates tool calls to the
2112
+ * FORMAT_TOOL. These chunks are transformed to text chunks so that the
2113
+ * structured output appears as text content for parse() to consume.
2114
+ *
2115
+ * @param chunk - The original chunk from the provider.
2116
+ * @returns The transformed chunk (tool_call → text) or the original chunk.
2117
+ */
2118
+ private _transformFormatToolChunk;
2119
+ /**
2120
+ * Process a single chunk, handling metadata and content accumulation.
2121
+ * Returns the content chunk if it should be yielded, null otherwise.
2122
+ */
2123
+ private _processChunk;
2124
+ /**
2125
+ * Handle text chunks, accumulating content.
2126
+ */
2127
+ private _handleTextChunk;
2128
+ /**
2129
+ * Handle thought chunks, accumulating content.
2130
+ */
2131
+ private _handleThoughtChunk;
2132
+ /**
2133
+ * Handle tool call chunks, accumulating content.
2134
+ *
2135
+ * Unlike text/thought which are added immediately to content,
2136
+ * tool calls are only added once the end chunk is received.
2137
+ * Multiple tool calls can be in progress simultaneously (interleaved).
2138
+ */
2139
+ private _handleToolCallChunk;
2140
+ /**
2141
+ * Accumulate usage statistics from a usage delta chunk.
2142
+ */
2143
+ private _accumulateUsage;
2144
+ }
2145
+
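+ /**
+ * A sketch of draining a stream with `consume()` and then reading the accumulated
+ * state through the standard response interface, as described above. The model ID
+ * mirrors the other examples in this file.
+ *
+ * ```typescript
+ * import { model } from 'mirascope/llm';
+ *
+ * const response = await model('anthropic/claude-sonnet-4-20250514')
+ *   .stream('Summarize the plot of Dune in two sentences.');
+ *
+ * await response.consume(); // drain the stream without handling individual chunks
+ *
+ * console.log(response.text());
+ * console.log(response.finishReason, response.usage);
+ * ```
+ */
+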
2146
+ /**
2147
+ * StreamResponse class for handling streaming LLM responses.
2148
+ *
2149
+ * Extends BaseStreamResponse with resume methods for continuing conversations.
2150
+ */
2151
+
2152
+ /**
2153
+ * Arguments for constructing a StreamResponse.
2154
+ *
2155
+ * Accepts `tools` as either a Toolkit or a list of tools, which gets
2156
+ * converted to a Toolkit before passing to BaseStreamResponse.
2157
+ */
2158
+ interface StreamResponseInit extends Omit<BaseStreamResponseInit, "toolkit"> {
2159
+ /**
2160
+ * The tools available for this response.
2161
+ * Can be a Toolkit instance or an array of tools.
2162
+ */
2163
+ tools?: Tools | Toolkit;
2164
+ }
2165
+ /**
2166
+ * Streaming response that consumes chunks from an async iterator.
2167
+ *
2168
+ * Extends BaseStreamResponse to provide the standard streaming interface,
2169
+ * adding resume methods for continuing conversations.
2170
+ *
2171
+ * @template F - The type of the formatted output when using structured outputs.
2172
+ *
2173
+ * @example
2174
+ * ```typescript
2175
+ * const response = await model.stream('Hello!');
2176
+ *
2177
+ * for await (const text of response.textStream()) {
2178
+ * process.stdout.write(text);
2179
+ * }
2180
+ *
2181
+ * // Continue the conversation
2182
+ * const followUp = await response.resume('Tell me more');
2183
+ * ```
2184
+ *
2185
+ * @example With structured output
2186
+ * ```typescript
2187
+ * interface Book { title: string; author: string; }
2188
+ * const response = await model.stream<Book>('Recommend a book', { format: BookSchema });
2189
+ * for await (const partial of response.structuredStream()) {
2190
+ * console.log(partial.title); // Typed as DeepPartial<Book>
2191
+ * }
2192
+ * ```
2193
+ */
2194
+ declare class StreamResponse<F = unknown> extends BaseStreamResponse<F> {
2195
+ /**
2196
+ * Override base toolkit with correct type for execute() support.
2197
+ */
2198
+ readonly toolkit: Toolkit;
2199
+ constructor(args: StreamResponseInit);
2200
+ /**
2201
+ * Execute all tool calls in this response using the registered tools.
2202
+ *
2203
+ * Note: The stream must be consumed before calling executeTools() to ensure
2204
+ * all tool calls have been received.
2205
+ *
2206
+ * @returns An array of ToolOutput objects, one for each tool call.
2207
+ *
2208
+ * @example
2209
+ * ```typescript
2210
+ * const response = await model.stream('What is the weather?', { tools: [weatherTool] });
2211
+ *
2212
+ * // Consume the stream first
2213
+ * for await (const text of response.textStream()) {
2214
+ * process.stdout.write(text);
2215
+ * }
2216
+ *
2217
+ * // Then execute tools
2218
+ * if (response.toolCalls.length > 0) {
2219
+ * const outputs = await response.executeTools();
2220
+ * const followUp = await response.resume(outputs);
2221
+ * }
2222
+ * ```
2223
+ */
2224
+ executeTools(): Promise<ToolOutput<Jsonable>[]>;
2225
+ /**
2226
+ * Generate a new StreamResponse using this response's messages with additional user content.
2227
+ *
2228
+ * Uses this response's tools and format type. Also uses this response's provider,
2229
+ * model, and params.
2230
+ *
2231
+ * Note: The stream must be consumed before calling resume() to ensure
2232
+ * the assistant message is complete.
2233
+ *
2234
+ * @param content - The new user message content to append to the message history.
2235
+ * @returns A new StreamResponse instance generated from the extended message history.
2236
+ *
2237
+ * @example
2238
+ * ```typescript
2239
+ * const response = await model.stream('Hello!');
2240
+ *
2241
+ * // Consume the stream first
2242
+ * for await (const text of response.textStream()) {
2243
+ * process.stdout.write(text);
2244
+ * }
2245
+ *
2246
+ * // Then resume with streaming
2247
+ * const followUp = await response.resume('Tell me more about that');
2248
+ * for await (const text of followUp.textStream()) {
2249
+ * process.stdout.write(text);
2250
+ * }
2251
+ * ```
2252
+ */
2253
+ resume(content: UserContent): Promise<StreamResponse<F>>;
2254
+ }
2255
+
2256
+ /**
2257
+ * ContextStreamResponse class for context-based streaming LLM calls.
2258
+ *
2259
+ * Extends BaseStreamResponse with context-aware functionality including:
2260
+ * - executeTools(): Execute tool calls with context dependency injection
2261
+ * - resume(): Continue the conversation with additional user content
2262
+ */
2263
+
2264
+ /**
2265
+ * Arguments for constructing a ContextStreamResponse.
2266
+ *
2267
+ * Accepts `tools` as either a ContextToolkit or a list of tools, which gets
2268
+ * converted to a ContextToolkit before passing to BaseStreamResponse.
2269
+ *
2270
+ * Supports both regular tools (BaseTool) and context tools (BaseContextTool),
2271
+ * matching Python's `ContextTools[DepsT]` pattern.
2272
+ *
2273
+ * @template DepsT - The type of dependencies in the context.
2274
+ */
2275
+ interface ContextStreamResponseInit<DepsT = unknown> extends Omit<BaseStreamResponseInit, "toolkit"> {
2276
+ /**
2277
+ * The tools available for this response.
2278
+ * Can be a ContextToolkit instance or an array of tools.
2279
+ * Accepts both regular tools and context tools.
2280
+ */
2281
+ tools?: ContextTools<DepsT> | ContextToolkit<DepsT>;
2282
+ }
2283
+ /**
2284
+ * A streaming response from a context-based LLM call.
2285
+ *
2286
+ * This class provides context-aware functionality on top of the standard streaming response:
2287
+ * - `executeTools()`: Execute all tool calls with context dependency injection
2288
+ * - `resume()`: Continue the conversation with additional user content
2289
+ *
2290
+ * @template DepsT - The type of dependencies in the context.
2291
+ * @template F - The type of the formatted output when using structured outputs.
2292
+ *
2293
+ * @example
2294
+ * ```typescript
2295
+ * interface MyDeps { userId: string; }
2296
+ *
2297
+ * const ctx = createContext<MyDeps>({ userId: '123' });
2298
+ * const response = await myPrompt.stream(model, ctx);
2299
+ *
2300
+ * for await (const text of response.textStream()) {
2301
+ * process.stdout.write(text);
2302
+ * }
2303
+ *
2304
+ * // Continue the conversation
2305
+ * const followUp = await response.resume(ctx, 'Tell me more');
2306
+ * ```
2307
+ */
2308
+ declare class ContextStreamResponse<DepsT = unknown, F = unknown> extends BaseStreamResponse<F> {
2309
+ /**
2310
+ * The context toolkit containing tools that can receive context.
2311
+ */
2312
+ readonly contextToolkit: ContextToolkit<DepsT>;
2313
+ constructor(args: ContextStreamResponseInit<DepsT>);
2314
+ /**
2315
+ * Execute all tool calls in this response using the registered context tools.
2316
+ *
2317
+ * Note: The stream must be consumed before calling executeTools() to ensure
2318
+ * all tool calls have been received.
2319
+ *
2320
+ * @param ctx - The context containing dependencies to pass to tools.
2321
+ * @returns An array of ToolOutput objects, one for each tool call.
2322
+ *
2323
+ * @example
2324
+ * ```typescript
2325
+ * const response = await myPrompt.stream(model, ctx, [searchTool]);
2326
+ *
2327
+ * // Consume the stream first
2328
+ * for await (const text of response.textStream()) {
2329
+ * process.stdout.write(text);
2330
+ * }
2331
+ *
2332
+ * // Then execute tools
2333
+ * if (response.toolCalls.length > 0) {
2334
+ * const outputs = await response.executeTools(ctx);
2335
+ * const followUp = await response.resume(ctx, outputs);
2336
+ * }
2337
+ * ```
2338
+ */
2339
+ executeTools(ctx: Context<DepsT>): Promise<ToolOutput<Jsonable>[]>;
2340
+ /**
2341
+ * Generate a new ContextStreamResponse using this response's messages with additional user content.
2342
+ *
2343
+ * Uses this response's tools and format type. Also uses this response's provider,
2344
+ * model, and params.
2345
+ *
2346
+ * Note: The stream must be consumed before calling resume() to ensure
2347
+ * the assistant message is complete.
2348
+ *
2349
+ * @param ctx - A Context with the required deps type.
2350
+ * @param content - The new user message content to append to the message history.
2351
+ * @returns A new ContextStreamResponse instance generated from the extended message history.
2352
+ *
2353
+ * @example
2354
+ * ```typescript
2355
+ * const response = await myPrompt.stream(model, ctx);
2356
+ *
2357
+ * // Consume the stream first
2358
+ * for await (const text of response.textStream()) {
2359
+ * process.stdout.write(text);
2360
+ * }
2361
+ *
2362
+ * // Then resume with streaming
2363
+ * const followUp = await response.resume(ctx, 'Tell me more about that');
2364
+ * for await (const text of followUp.textStream()) {
2365
+ * process.stdout.write(text);
2366
+ * }
2367
+ * ```
2368
+ */
2369
+ resume(ctx: Context<DepsT>, content: UserContent): Promise<ContextStreamResponse<DepsT, F>>;
2370
+ }
2371
+
2372
+ /**
2373
+ * OpenAI model information.
2374
+ *
2375
+ * This file is auto-generated by typescript/scripts/codegen/openai.ts
2376
+ * Do not edit manually - run `bun run codegen` to update.
2377
+ */
2378
+ /**
2379
+ * Array of all known OpenAI model IDs.
2380
+ * This is the source of truth - the type and Set are derived from it.
2381
+ */
2382
+ declare const OPENAI_KNOWN_MODELS_ARRAY: readonly ["openai/chatgpt-4o-latest", "openai/chatgpt-4o-latest:completions", "openai/chatgpt-4o-latest:responses", "openai/codex-mini-latest", "openai/codex-mini-latest:responses", "openai/gpt-3.5-turbo", "openai/gpt-3.5-turbo:completions", "openai/gpt-3.5-turbo:responses", "openai/gpt-3.5-turbo-0125", "openai/gpt-3.5-turbo-0125:completions", "openai/gpt-3.5-turbo-0125:responses", "openai/gpt-3.5-turbo-1106", "openai/gpt-3.5-turbo-1106:completions", "openai/gpt-3.5-turbo-1106:responses", "openai/gpt-3.5-turbo-16k", "openai/gpt-3.5-turbo-16k:completions", "openai/gpt-4", "openai/gpt-4:completions", "openai/gpt-4:responses", "openai/gpt-4-0125-preview", "openai/gpt-4-0125-preview:completions", "openai/gpt-4-0125-preview:responses", "openai/gpt-4-0613", "openai/gpt-4-0613:completions", "openai/gpt-4-0613:responses", "openai/gpt-4-1106-preview", "openai/gpt-4-1106-preview:completions", "openai/gpt-4-1106-preview:responses", "openai/gpt-4-turbo", "openai/gpt-4-turbo:completions", "openai/gpt-4-turbo:responses", "openai/gpt-4-turbo-2024-04-09", "openai/gpt-4-turbo-2024-04-09:completions", "openai/gpt-4-turbo-2024-04-09:responses", "openai/gpt-4-turbo-preview", "openai/gpt-4-turbo-preview:completions", "openai/gpt-4-turbo-preview:responses", "openai/gpt-4.1", "openai/gpt-4.1:completions", "openai/gpt-4.1:responses", "openai/gpt-4.1-2025-04-14", "openai/gpt-4.1-2025-04-14:completions", "openai/gpt-4.1-2025-04-14:responses", "openai/gpt-4.1-mini", "openai/gpt-4.1-mini:completions", "openai/gpt-4.1-mini:responses", "openai/gpt-4.1-mini-2025-04-14", "openai/gpt-4.1-mini-2025-04-14:completions", "openai/gpt-4.1-mini-2025-04-14:responses", "openai/gpt-4.1-nano", "openai/gpt-4.1-nano:completions", "openai/gpt-4.1-nano:responses", "openai/gpt-4.1-nano-2025-04-14", "openai/gpt-4.1-nano-2025-04-14:completions", "openai/gpt-4.1-nano-2025-04-14:responses", "openai/gpt-4o", "openai/gpt-4o:completions", "openai/gpt-4o:responses", "openai/gpt-4o-2024-05-13", "openai/gpt-4o-2024-05-13:completions", "openai/gpt-4o-2024-05-13:responses", "openai/gpt-4o-2024-08-06", "openai/gpt-4o-2024-08-06:completions", "openai/gpt-4o-2024-08-06:responses", "openai/gpt-4o-2024-11-20", "openai/gpt-4o-2024-11-20:completions", "openai/gpt-4o-2024-11-20:responses", "openai/gpt-4o-mini", "openai/gpt-4o-mini:completions", "openai/gpt-4o-mini:responses", "openai/gpt-4o-mini-2024-07-18", "openai/gpt-4o-mini-2024-07-18:completions", "openai/gpt-4o-mini-2024-07-18:responses", "openai/gpt-4o-mini-search-preview", "openai/gpt-4o-mini-search-preview:completions", "openai/gpt-4o-mini-search-preview-2025-03-11", "openai/gpt-4o-mini-search-preview-2025-03-11:completions", "openai/gpt-4o-search-preview", "openai/gpt-4o-search-preview:completions", "openai/gpt-4o-search-preview-2025-03-11", "openai/gpt-4o-search-preview-2025-03-11:completions", "openai/gpt-5", "openai/gpt-5:completions", "openai/gpt-5:responses", "openai/gpt-5-2025-08-07", "openai/gpt-5-2025-08-07:completions", "openai/gpt-5-2025-08-07:responses", "openai/gpt-5-chat-latest", "openai/gpt-5-chat-latest:completions", "openai/gpt-5-chat-latest:responses", "openai/gpt-5-codex", "openai/gpt-5-codex:responses", "openai/gpt-5-mini", "openai/gpt-5-mini:completions", "openai/gpt-5-mini:responses", "openai/gpt-5-mini-2025-08-07", "openai/gpt-5-mini-2025-08-07:completions", "openai/gpt-5-mini-2025-08-07:responses", "openai/gpt-5-nano", "openai/gpt-5-nano:completions", "openai/gpt-5-nano:responses", "openai/gpt-5-nano-2025-08-07", 
"openai/gpt-5-nano-2025-08-07:completions", "openai/gpt-5-nano-2025-08-07:responses", "openai/gpt-5-pro", "openai/gpt-5-pro:responses", "openai/gpt-5-pro-2025-10-06", "openai/gpt-5-pro-2025-10-06:responses", "openai/gpt-5-search-api", "openai/gpt-5-search-api:completions", "openai/gpt-5-search-api-2025-10-14", "openai/gpt-5-search-api-2025-10-14:completions", "openai/gpt-5.1", "openai/gpt-5.1:responses", "openai/gpt-5.1-2025-11-13", "openai/gpt-5.1-2025-11-13:responses", "openai/gpt-5.1-chat-latest", "openai/gpt-5.1-chat-latest:completions", "openai/gpt-5.1-chat-latest:responses", "openai/gpt-5.1-codex", "openai/gpt-5.1-codex:responses", "openai/gpt-5.1-codex-max", "openai/gpt-5.1-codex-max:responses", "openai/gpt-5.1-codex-mini", "openai/gpt-5.1-codex-mini:responses", "openai/gpt-5.2", "openai/gpt-5.2:completions", "openai/gpt-5.2:responses", "openai/gpt-5.2-2025-12-11", "openai/gpt-5.2-2025-12-11:completions", "openai/gpt-5.2-2025-12-11:responses", "openai/gpt-5.2-chat-latest", "openai/gpt-5.2-chat-latest:completions", "openai/gpt-5.2-chat-latest:responses", "openai/gpt-5.2-pro", "openai/gpt-5.2-pro:responses", "openai/gpt-5.2-pro-2025-12-11", "openai/gpt-5.2-pro-2025-12-11:responses", "openai/o1", "openai/o1:completions", "openai/o1:responses", "openai/o1-2024-12-17", "openai/o1-2024-12-17:completions", "openai/o1-2024-12-17:responses", "openai/o1-pro", "openai/o1-pro:responses", "openai/o1-pro-2025-03-19", "openai/o1-pro-2025-03-19:responses", "openai/o3", "openai/o3:completions", "openai/o3:responses", "openai/o3-2025-04-16", "openai/o3-2025-04-16:completions", "openai/o3-2025-04-16:responses", "openai/o3-mini", "openai/o3-mini:completions", "openai/o3-mini:responses", "openai/o3-mini-2025-01-31", "openai/o3-mini-2025-01-31:completions", "openai/o3-mini-2025-01-31:responses", "openai/o3-pro", "openai/o3-pro:responses", "openai/o3-pro-2025-06-10", "openai/o3-pro-2025-06-10:responses", "openai/o4-mini", "openai/o4-mini:completions", "openai/o4-mini:responses", "openai/o4-mini-2025-04-16", "openai/o4-mini-2025-04-16:completions", "openai/o4-mini-2025-04-16:responses"];
2383
+ /**
2384
+ * Valid OpenAI model IDs including API-specific variants.
2385
+ */
2386
+ type OpenAIKnownModels = (typeof OPENAI_KNOWN_MODELS_ARRAY)[number];
2387
+
2388
+ /**
2389
+ * OpenAI model IDs and related utilities.
2390
+ */
2391
+
2392
+ /**
2393
+ * The OpenAI model IDs registered with Mirascope.
2394
+ */
2395
+ type OpenAIModelId = OpenAIKnownModels | (string & {});
2396
+ /**
2397
+ * API mode for OpenAI requests.
2398
+ */
2399
+ type ApiMode = "responses" | "completions";
2400
+
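+ /**
+ * The `:responses` / `:completions` suffixes in the model list above appear to select
+ * the OpenAI API mode; this sketch assumes that reading. Both IDs shown are present in
+ * OPENAI_KNOWN_MODELS_ARRAY.
+ *
+ * ```typescript
+ * import { model } from 'mirascope/llm';
+ *
+ * // Presumably routed through the Responses API.
+ * const viaResponses = model('openai/gpt-4o:responses');
+ * // Presumably routed through the Chat Completions API.
+ * const viaCompletions = model('openai/gpt-4o:completions');
+ * ```
+ */
+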
2401
+ /**
2402
+ * Union of all provider model IDs.
2403
+ */
2404
+
2405
+ /**
2406
+ * Model identifier for any supported provider.
2407
+ *
2408
+ * This is a union of all provider-specific model IDs.
2409
+ */
2410
+ type ModelId = AnthropicModelId | GoogleModelId | OpenAIModelId | (string & {});
2411
+
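+ /**
+ * A small illustration of the `(string & {})` pattern used by `ModelId`: known IDs keep
+ * editor autocompletion while arbitrary strings remain assignable. It assumes `ModelId`
+ * is exported from 'mirascope/llm'; the second ID is a hypothetical example.
+ *
+ * ```typescript
+ * import type { ModelId } from 'mirascope/llm';
+ *
+ * const known: ModelId = 'google/gemini-2.5-flash';  // autocompleted known model
+ * const custom: ModelId = 'ollama/my-local-model';   // hypothetical, still type-checks
+ * ```
+ */
+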
2412
+ /**
2413
+ * Identifiers for all registered providers.
2414
+ */
2415
+ /**
2416
+ * Array of known provider IDs for runtime checks.
2417
+ */
2418
+ declare const KNOWN_PROVIDER_IDS: readonly ["anthropic", "google", "mirascope", "ollama", "openai", "together"];
2419
+ /**
2420
+ * Known provider identifiers.
2421
+ */
2422
+ type KnownProviderId = (typeof KNOWN_PROVIDER_IDS)[number];
2423
+ /**
2424
+ * Provider identifier.
2425
+ *
2426
+ * Can be a known provider or any custom string for extensibility.
2427
+ */
2428
+ type ProviderId = KnownProviderId | (string & {});
2429
+
2430
+ /**
2431
+ * The Model class - unified interface for LLM calls.
2432
+ */
2433
+
2434
+ /**
2435
+ * The unified LLM interface that delegates to provider-specific clients.
2436
+ *
2437
+ * This class provides a consistent interface for interacting with language models
2438
+ * from various providers. It handles the common operations like generating responses
2439
+ * by delegating to the appropriate provider methods.
2440
+ *
2441
+ * @example
2442
+ * ```typescript
2443
+ * import { Model } from 'mirascope/llm';
2444
+ *
2445
+ * const model = new Model('anthropic/claude-sonnet-4-20250514');
2446
+ * const response = await model.call('Hello!');
2447
+ * console.log(response.text());
2448
+ * ```
2449
+ *
2450
+ * @example With parameters
2451
+ * ```typescript
2452
+ * const model = new Model('anthropic/claude-sonnet-4-20250514', {
2453
+ * temperature: 0.7,
2454
+ * maxTokens: 1000,
2455
+ * });
2456
+ * const response = await model.call('Write a haiku about coding.');
2457
+ * ```
2458
+ */
2459
+ declare class Model {
2460
+ /**
2461
+ * The model ID being used (e.g., "anthropic/claude-sonnet-4-20250514").
2462
+ */
2463
+ readonly modelId: ModelId;
2464
+ /**
2465
+ * The default parameters for the model (temperature, maxTokens, etc.).
2466
+ */
2467
+ readonly params: Params;
2468
+ /**
2469
+ * Initialize the Model with a model ID and optional parameters.
2470
+ *
2471
+ * @param modelId - The model ID in "provider/model-name" format.
2472
+ * @param params - Optional parameters for the model.
2473
+ * @throws Error if the model ID format is invalid.
2474
+ */
2475
+ constructor(modelId: ModelId, params?: Params);
2476
+ /**
2477
+ * The provider being used (e.g., an AnthropicProvider).
2478
+ *
2479
+ * This property dynamically looks up the provider from the registry based on
2480
+ * the current modelId. This allows provider overrides via registerProvider()
2481
+ * to take effect even after the model instance is created.
2482
+ *
2483
+ * @throws NoRegisteredProviderError if no provider is available for the modelId.
2484
+ */
2485
+ get provider(): BaseProvider;
2486
+ /**
2487
+ * The string ID of the provider being used (e.g., "anthropic").
2488
+ *
2489
+ * @throws NoRegisteredProviderError if no provider is available for the modelId.
2490
+ */
2491
+ get providerId(): ProviderId;
2492
+ /**
2493
+ * Generate a Response by calling this model's LLM provider.
2494
+ *
2495
+ * @param content - Content to send to the LLM. Can be a string (converted to user
2496
+ * message), UserContent, a sequence of UserContent, or a sequence of Messages
2497
+ * for full control.
2498
+ * @param options - Optional configuration for the call.
2499
+ * @param options.tools - Optional tools to make available to the model.
2500
+ * @param options.format - Optional format for structured output.
2501
+ * @returns A Response object containing the LLM-generated content.
2502
+ *
2503
+ * @example Simple string input
2504
+ * ```typescript
2505
+ * const response = await model.call('What is the capital of France?');
2506
+ * console.log(response.text());
2507
+ * ```
2508
+ *
2509
+ * @example With message array
2510
+ * ```typescript
2511
+ * import { system, user } from 'mirascope/llm/messages';
2512
+ *
2513
+ * const response = await model.call([
2514
+ * system('You are a helpful assistant.'),
2515
+ * user('What is the capital of France?'),
2516
+ * ]);
2517
+ * ```
2518
+ *
2519
+ * @example With structured output format
2520
+ * ```typescript
2521
+ * import { defineFormat } from 'mirascope/llm';
2522
+ *
2523
+ * interface Book { title: string; author: string; }
2524
+ * const bookFormat = defineFormat<Book>({ mode: 'tool' });
2525
+ *
2526
+ * const response = await model.call('Recommend a book', { format: bookFormat });
2527
+ * const book = response.parse<Book>();
2528
+ * ```
2529
+ */
2530
+ call(content: UserContent | readonly Message[], options?: {
2531
+ tools?: Tools;
2532
+ format?: Format | null;
2533
+ }): Promise<Response>;
2534
+ /**
2535
+ * Generate a streaming Response by calling this model's LLM provider.
2536
+ *
2537
+ * @param content - Content to send to the LLM. Can be a string (converted to user
2538
+ * message), UserContent, a sequence of UserContent, or a sequence of Messages
2539
+ * for full control.
2540
+ * @param options - Optional configuration for the stream.
2541
+ * @param options.tools - Optional tools to make available to the model.
2542
+ * @param options.format - Optional format for structured output.
2543
+ * @returns A StreamResponse object for consuming the streamed content.
2544
+ *
2545
+ * @example Simple string input
2546
+ * ```typescript
2547
+ * const response = await model.stream('What is the capital of France?');
2548
+ * for await (const text of response.textStream()) {
2549
+ * process.stdout.write(text);
2550
+ * }
2551
+ * ```
2552
+ *
2553
+ * @example With message array
2554
+ * ```typescript
2555
+ * import { system, user } from 'mirascope/llm/messages';
2556
+ *
2557
+ * const response = await model.stream([
2558
+ * system('You are a helpful assistant.'),
2559
+ * user('What is the capital of France?'),
2560
+ * ]);
2561
+ *
2562
+ * for await (const text of response.textStream()) {
2563
+ * process.stdout.write(text);
2564
+ * }
2565
+ *
2566
+ * // After consuming the stream, get the full text
2567
+ * console.log(response.text());
2568
+ * ```
2569
+ *
2570
+ * @example With structured streaming
2571
+ * ```typescript
2572
+ * import { defineFormat } from 'mirascope/llm';
2573
+ *
2574
+ * interface Book { title: string; author: string; }
2575
+ * const bookFormat = defineFormat<Book>({ mode: 'tool' });
2576
+ *
2577
+ * const response = await model.stream('Recommend a book', { format: bookFormat });
2578
+ * for await (const partial of response.structuredStream<Book>()) {
2579
+ * console.log('Partial:', partial);
2580
+ * }
2581
+ * const book = response.parse<Book>();
2582
+ * ```
2583
+ */
2584
+ stream(content: UserContent | readonly Message[], options?: {
2585
+ tools?: Tools;
2586
+ format?: Format | null;
2587
+ }): Promise<StreamResponse>;
2588
+ /**
2589
+ * Generate a ContextResponse by calling this model's LLM provider with context.
2590
+ *
2591
+ * This method accepts a context for dependency injection, enabling context-aware
2592
+ * tools and prompts.
2593
+ *
2594
+ * @template DepsT - The type of dependencies in the context.
2595
+ * @param ctx - The context containing dependencies for tools.
2596
+ * @param content - Content to send to the LLM.
2597
+ * @param options - Optional configuration for the call.
2598
+ * @param options.tools - Optional tools to make available to the model.
2599
+ * @param options.format - Optional format for structured output.
2600
+ * @returns A ContextResponse object containing the LLM-generated content.
2601
+ *
2602
+ * @example
2603
+ * ```typescript
2604
+ * interface MyDeps { userId: string; }
2605
+ *
2606
+ * const ctx = createContext<MyDeps>({ userId: '123' });
2607
+ * const response = await model.contextCall(ctx, 'Hello!');
2608
+ * console.log(response.text());
2609
+ * ```
2610
+ */
2611
+ contextCall<DepsT>(ctx: Context<DepsT>, content: UserContent | readonly Message[], options?: {
2612
+ tools?: ContextTools<DepsT>;
2613
+ format?: Format | null;
2614
+ }): Promise<ContextResponse<DepsT>>;
2615
+ /**
2616
+ * Generate a streaming ContextStreamResponse by calling this model's LLM provider with context.
2617
+ *
2618
+ * This method accepts a context for dependency injection, enabling context-aware
2619
+ * tools and prompts.
2620
+ *
2621
+ * @template DepsT - The type of dependencies in the context.
2622
+ * @param ctx - The context containing dependencies for tools.
2623
+ * @param content - Content to send to the LLM.
2624
+ * @param options - Optional configuration for the stream.
2625
+ * @param options.tools - Optional tools to make available to the model.
2626
+ * @param options.format - Optional format for structured output.
2627
+ * @returns A ContextStreamResponse object for consuming the streamed content.
2628
+ *
2629
+ * @example
2630
+ * ```typescript
2631
+ * interface MyDeps { userId: string; }
2632
+ *
2633
+ * const ctx = createContext<MyDeps>({ userId: '123' });
2634
+ * const response = await model.contextStream(ctx, 'Hello!');
2635
+ * for await (const text of response.textStream()) {
2636
+ * process.stdout.write(text);
2637
+ * }
2638
+ * ```
2639
+ */
2640
+ contextStream<DepsT>(ctx: Context<DepsT>, content: UserContent | readonly Message[], options?: {
2641
+ tools?: ContextTools<DepsT>;
2642
+ format?: Format | null;
2643
+ }): Promise<ContextStreamResponse<DepsT>>;
2644
+ /**
2645
+ * Generate a new Response by extending a previous response's messages with additional user content.
2646
+ *
2647
+ * Uses the previous response's tools and output format, and this model's params.
2648
+ *
2649
+ * @param response - Previous response to extend.
2650
+ * @param content - Additional user content to append.
2651
+ * @returns A new Response object containing the extended conversation.
2652
+ *
2653
+ * @example
2654
+ * ```typescript
2655
+ * const response = await model.call('Hello!');
2656
+ * const followUp = await model.resume(response, 'Tell me more');
2657
+ * console.log(followUp.text());
2658
+ * ```
2659
+ */
2660
+ resume(response: RootResponse, content: UserContent): Promise<Response>;
2661
+ /**
2662
+ * Generate a new StreamResponse by extending a previous response's messages with additional user content.
2663
+ *
2664
+ * Uses the previous response's tools and output format, and this model's params.
2665
+ *
2666
+ * @param response - Previous response to extend.
2667
+ * @param content - Additional user content to append.
2668
+ * @returns A new StreamResponse object for consuming the streamed content.
2669
+ *
2670
+ * @example
2671
+ * ```typescript
2672
+ * const response = await model.call('Hello!');
2673
+ * const followUp = await model.resumeStream(response, 'Tell me more');
2674
+ * for await (const text of followUp.textStream()) {
2675
+ * process.stdout.write(text);
2676
+ * }
2677
+ * ```
2678
+ */
2679
+ resumeStream(response: RootResponse, content: UserContent): Promise<StreamResponse>;
2680
+ /**
2681
+ * Generate a new ContextResponse by extending a previous response's messages with additional user content.
2682
+ *
2683
+ * Uses the previous response's tools and output format, and this model's params.
2684
+ *
2685
+ * @template DepsT - The type of dependencies in the context.
2686
+ * @param ctx - The context containing dependencies for tools.
2687
+ * @param response - Previous response to extend.
2688
+ * @param content - Additional user content to append.
2689
+ * @returns A new ContextResponse object containing the extended conversation.
2690
+ *
2691
+ * @example
2692
+ * ```typescript
2693
+ * const response = await model.contextCall(ctx, 'Hello!');
2694
+ * const followUp = await model.contextResume(ctx, response, 'Tell me more');
2695
+ * console.log(followUp.text());
2696
+ * ```
2697
+ */
2698
+ contextResume<DepsT>(ctx: Context<DepsT>, response: RootResponse, content: UserContent): Promise<ContextResponse<DepsT>>;
2699
+ /**
2700
+ * Generate a new ContextStreamResponse by extending a previous response's messages with additional user content.
2701
+ *
2702
+ * Uses the previous response's tools and output format, and this model's params.
2703
+ *
2704
+ * @template DepsT - The type of dependencies in the context.
2705
+ * @param ctx - The context containing dependencies for tools.
2706
+ * @param response - Previous response to extend.
2707
+ * @param content - Additional user content to append.
2708
+ * @returns A new ContextStreamResponse object for consuming the streamed content.
2709
+ *
2710
+ * @example
2711
+ * ```typescript
2712
+ * const response = await model.contextStream(ctx, 'Hello!');
2713
+ * await response.consume();
2714
+ * const followUp = await model.contextResumeStream(ctx, response, 'Tell me more');
2715
+ * for await (const text of followUp.textStream()) {
2716
+ * process.stdout.write(text);
2717
+ * }
2718
+ * ```
2719
+ */
2720
+ contextResumeStream<DepsT>(ctx: Context<DepsT>, response: RootResponse, content: UserContent): Promise<ContextStreamResponse<DepsT>>;
2721
+ }
2722
+ /**
2723
+ * Helper for creating a Model instance.
2724
+ *
2725
+ * This is just an alias for the Model constructor, added for convenience.
2726
+ *
2727
+ * @param modelId - A model ID string (e.g., "anthropic/claude-sonnet-4-20250514").
2728
+ * @param params - Optional parameters to configure the model.
2729
+ * @returns A Model instance.
2730
+ *
2731
+ * @example
2732
+ * ```typescript
2733
+ * import { model } from 'mirascope/llm';
2734
+ *
2735
+ * const m = model('anthropic/claude-sonnet-4-20250514');
2736
+ * const response = await m.call('Hello!');
2737
+ * ```
2738
+ */
2739
+ declare function model(modelId: ModelId, params?: Params): Model;
2740
+
2741
+ /**
2742
+ * Model context management for runtime model switching.
2743
+ *
2744
+ * This module provides a way to set a model context that overrides
2745
+ * the default model in `defineCall` and `definePrompt`, similar to
2746
+ * Python's `with llm.model(...):` context manager.
2747
+ *
2748
+ * ## Cross-Platform Implementation
2749
+ *
2750
+ * - **Node.js**: Uses native `AsyncLocalStorage` from `async_hooks` for proper
2751
+ * async context propagation. This handles concurrent async operations correctly.
2752
+ *
2753
+ * - **Browser**: Uses a stack-based fallback. This works correctly for:
2754
+ * - Synchronous code
2755
+ * - Sequential async/await chains
2756
+ * - Nested `withModel` calls (as long as they are sequential)
2757
+ *
2758
+ * ## Browser Limitations
2759
+ *
2760
+ * The browser fallback has a known limitation with **concurrent** async operations:
2761
+ *
2762
+ * ```typescript
2763
+ * // WARNING: In browsers, this may have unexpected behavior:
2764
+ * await Promise.all([
2765
+ * llm.withModel(modelA, async () => { await call(); }), // May see modelB!
2766
+ * llm.withModel(modelB, async () => { await call(); }), // May see modelA!
2767
+ * ]);
2768
+ * ```
2769
+ *
2770
+ * This is because the browser lacks a native async context mechanism. The stack
2771
+ * can become interleaved when multiple async operations run concurrently.
2772
+ *
2773
+ * ## Future: TC39 AsyncContext Proposal
2774
+ *
2775
+ * The TC39 "Async Context" proposal (https://github.com/tc39/proposal-async-context)
2776
+ * aims to bring `AsyncLocalStorage`-like functionality to all JavaScript environments.
2777
+ * Once this proposal is standardized and widely supported, we can update the browser
2778
+ * implementation to use native async context, eliminating the concurrent operation
2779
+ * limitation.
2780
+ *
2781
+ * @see https://github.com/tc39/proposal-async-context
2782
+ *
2783
+ * @example
2784
+ * ```typescript
2785
+ * const call = llm.defineCall({
2786
+ * model: llm.model("openai/gpt-4o"),
2787
+ * template: () => "Hello"
2788
+ * });
2789
+ *
2790
+ * // Without context - uses default model
2791
+ * await call();
2792
+ *
2793
+ * // With context - overrides default
2794
+ * await llm.withModel(llm.model("anthropic/claude-sonnet-4-0"), async () => {
2795
+ * await call(); // Uses Claude
2796
+ * });
2797
+ * ```
2798
+ */
2799
+
2800
+ /**
2801
+ * Get the model currently set via context, if any.
2802
+ *
2803
+ * @returns The current context model, or undefined if none is set.
2804
+ *
2805
+ * @example
2806
+ * ```typescript
2807
+ * await llm.withModel(llm.model("anthropic/claude-sonnet-4-0"), async () => {
2808
+ * const model = llm.modelFromContext();
2809
+ * console.log(model?.modelId); // "anthropic/claude-sonnet-4-0"
2810
+ * });
2811
+ *
2812
+ * const outsideModel = llm.modelFromContext(); // undefined
2813
+ * ```
2814
+ */
2815
+ declare function modelFromContext(): Model | undefined;
2816
+ /**
2817
+ * Execute a function with a model set in context.
2818
+ *
2819
+ * All calls to `defineCall` and `definePrompt` within the callback
2820
+ * will use the context model instead of their default model.
2821
+ *
2822
+ * @param model - The model to set in context (Model instance or model ID string).
2823
+ * @param paramsOrFn - Either params object (if using string model ID) or the function to execute.
2824
+ * @param fn - The function to execute (if params were provided).
2825
+ * @returns The return value of the function.
2826
+ *
2827
+ * @example With Model instance
2828
+ * ```typescript
2829
+ * const response = await llm.withModel(llm.model("openai/gpt-4o"), async () => {
2830
+ * return await call();
2831
+ * });
2832
+ * ```
2833
+ *
2834
+ * @example With model ID string
2835
+ * ```typescript
2836
+ * const response = await llm.withModel("openai/gpt-4o", async () => {
2837
+ * return await call();
2838
+ * });
2839
+ * ```
2840
+ *
2841
+ * @example With model ID and params
2842
+ * ```typescript
2843
+ * const response = await llm.withModel("anthropic/claude-sonnet-4-0", { temperature: 0.9 }, async () => {
2844
+ * return await call();
2845
+ * });
2846
+ * ```
2847
+ *
2848
+ * @example Nested contexts
2849
+ * ```typescript
2850
+ * await llm.withModel("anthropic/claude-sonnet-4-0", async () => {
2851
+ * const model1 = llm.modelFromContext(); // Claude
2852
+ *
2853
+ * await llm.withModel("openai/gpt-4o", async () => {
2854
+ * const model2 = llm.modelFromContext(); // GPT-4o
2855
+ * });
2856
+ *
2857
+ * const model3 = llm.modelFromContext(); // Claude (restored)
2858
+ * });
2859
+ * ```
2860
+ */
2861
+ declare function withModel<T>(model: Model, fn: () => T): T;
2862
+ declare function withModel<T>(modelId: ModelId, fn: () => T): T;
2863
+ declare function withModel<T>(modelId: ModelId, params: Params, fn: () => T): T;
2864
+ /**
2865
+ * Get the model from context if available, otherwise use the provided model.
2866
+ *
2867
+ * This function implements the fallback pattern:
2868
+ * 1. If a model is set in context, return it (context takes precedence)
2869
+ * 2. Otherwise, if a Model instance is provided, return it
2870
+ * 3. Otherwise, if a string model ID is provided, create a new Model
2871
+ *
2872
+ * @param modelOrId - A Model instance or model ID string.
2873
+ * @param params - Optional parameters when creating a new Model from string ID.
2874
+ * @returns The resolved Model instance.
2875
+ *
2876
+ * @example
2877
+ * ```typescript
2878
+ * // Outside context - returns the provided model
2879
+ * const model1 = llm.useModel("openai/gpt-4o"); // Creates new Model
2880
+ *
2881
+ * // Inside context - returns context model
2882
+ * await llm.withModel(llm.model("anthropic/claude-sonnet-4-0"), async () => {
2883
+ * const model2 = llm.useModel("openai/gpt-4o"); // Returns Claude model
2884
+ * });
2885
+ * ```
2886
+ */
2887
+ declare function useModel(model: Model | ModelId, params?: Params): Model;
2888
+
2889
+ /**
2890
+ * Base interface for all LLM responses.
2891
+ */
2892
+
2893
+ /**
2894
+ * Type alias for RootResponse with any format type.
2895
+ * Useful for functions that accept any response type.
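+ *
+ * @example
+ * A minimal sketch of a helper that accepts any response regardless of its format type:
+ * ```typescript
+ * function logUsage(response: AnyResponse): void {
+ *   console.log(response.modelId, response.usage);
+ * }
+ * ```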
2896
+ */
2897
+ type AnyResponse = RootResponse<unknown>;
2898
+ /**
2899
+ * Base class for LLM responses.
2900
+ *
2901
+ * This abstract class defines the core interface that all response types must implement.
2902
+ * It provides the common properties and methods shared across all response variants.
2903
+ *
2904
+ * @template F - The type of the formatted output when using structured outputs.
2905
+ */
2906
+ declare abstract class RootResponse<F = unknown> {
2907
+ /**
2908
+ * The raw response from the LLM.
2909
+ */
2910
+ abstract readonly raw: unknown;
2911
+ /**
2912
+ * The provider that generated this response.
2913
+ */
2914
+ abstract readonly providerId: ProviderId;
2915
+ /**
2916
+ * The model ID that generated this response.
2917
+ */
2918
+ abstract readonly modelId: ModelId;
2919
+ /**
2920
+ * Provider-specific model name (may include additional info like API mode).
2921
+ */
2922
+ abstract readonly providerModelName: string;
2923
+ /**
2924
+ * The parameters used to generate this response.
2925
+ */
2926
+ abstract readonly params: Params;
2927
+ /**
2928
+ * The message history, including the most recent assistant message.
2929
+ */
2930
+ abstract readonly messages: readonly Message[];
2931
+ /**
2932
+ * The content generated by the LLM.
2933
+ */
2934
+ abstract readonly content: readonly AssistantContentPart[];
2935
+ /**
2936
+ * The text content in the generated response, if any.
2937
+ */
2938
+ abstract readonly texts: readonly Text[];
2939
+ /**
2940
+ * The tools the LLM wants called on its behalf, if any.
2941
+ */
2942
+ abstract readonly toolCalls: readonly ToolCall[];
2943
+ /**
2944
+ * The readable thoughts from the model's thinking process, if any.
2945
+ *
2946
+ * The thoughts may be direct output from the model's thinking process, or may be a
2947
+ * generated summary. (This depends on the provider; newer models tend to summarize.)
2948
+ */
2949
+ abstract readonly thoughts: readonly Thought[];
2950
+ /**
2951
+ * The reason why the LLM finished generating a response, if set.
2952
+ *
2953
+ * `finishReason` is only set if the response did not finish generating normally,
2954
+ * e.g. `FinishReason.MAX_TOKENS` if the model ran out of tokens before completing.
2955
+ * When the response generates normally, `response.finishReason` will be `null`.
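+ *
+ * @example
+ * A hedged sketch (assuming `FinishReason` is importable from this module):
+ * ```typescript
+ * const response = await model.call('Write a long essay');
+ * if (response.finishReason === FinishReason.MAX_TOKENS) {
+ *   console.warn('Response was truncated before completion.');
+ * }
+ * ```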
2956
+ */
2957
+ abstract readonly finishReason: FinishReason | null;
2958
+ /**
2959
+ * Token usage statistics for this response, if available.
2960
+ */
2961
+ abstract readonly usage: Usage | null;
2962
+ /**
2963
+ * The Format describing the structured response format, if available.
2964
+ *
2965
+ * When a format is specified in the call, this contains the resolved Format
2966
+ * with schema and validator information used for parsing.
2967
+ */
2968
+ abstract readonly format: Format | null;
2969
+ /**
2970
+ * Return all text content from this response as a single string.
2971
+ *
2972
+ * Joins the text from all `Text` parts in the response content using the
2973
+ * specified separator.
2974
+ *
2975
+ * @param sep - The separator to use when joining multiple text parts.
2976
+ * Defaults to newline ("\n").
2977
+ * @returns A string containing all text content joined by the separator.
2978
+ * Returns an empty string if the response contains no text parts.
2979
+ *
2980
+ * @example
2981
+ * ```typescript
2982
+ * response.text() // Join with newlines (default): "Hello\nWorld"
2983
+ * response.text(" ") // Join with spaces: "Hello World"
2984
+ * response.text("") // Concatenate directly: "HelloWorld"
2985
+ * ```
2986
+ */
2987
+ text(sep?: string): string;
2988
+ /**
2989
+ * Return a string representation of all response content.
2990
+ *
2991
+ * The response content will be represented in a way that emphasizes clarity and
2992
+ * readability, but may not include all metadata (like thinking signatures or tool
2993
+ * call ids), and thus cannot be used to reconstruct the response.
2994
+ *
2995
+ * @example
2996
+ * ```
2997
+ * **Thinking:**
2998
+ * The user is asking a math problem. I should use the calculator tool.
2999
+ *
3000
+ * **ToolCall (calculator):** {"operation": "mult", "a": 1337, "b": 4242}
3001
+ *
3002
+ * I am going to use the calculator and answer your question for you!
3003
+ * ```
3004
+ */
3005
+ pretty(): string;
3006
+ /**
3007
+ * A Model with parameters matching this response.
3008
+ *
3009
+ * Creates a new Model instance with the same model ID and parameters that
3010
+ * were used to generate this response. This is useful for resuming
3011
+ * conversations or making follow-up calls with consistent settings.
3012
+ *
3013
+ * Uses dynamic import to avoid circular dependencies.
3014
+ *
3015
+ * @returns A Promise resolving to a Model instance configured with this response's model ID and params.
3016
+ *
3017
+ * @example
3018
+ * ```typescript
3019
+ * const response = await model.call('Hello!');
3020
+ * const sameModel = await response.model;
3021
+ * const followUp = await sameModel.call('Tell me more');
3022
+ * ```
3023
+ */
3024
+ get model(): Promise<Model>;
3025
+ /**
3026
+ * Parse the response with partial parsing enabled (for streaming).
3027
+ *
3028
+ * Use this during streaming to parse incomplete JSON as it arrives.
3029
+ * Returns a DeepPartial version where all fields are optional.
3030
+ *
3031
+ * @returns The partially parsed response with optional fields.
3032
+ *
3033
+ * @example
3034
+ * ```typescript
3035
+ * for await (const chunk of streamResponse.chunkStream()) {
3036
+ * const partial = streamResponse.parse({ partial: true });
3037
+ * // partial.title may be undefined initially
3038
+ * }
3039
+ * ```
3040
+ */
3041
+ parse(options: {
3042
+ partial: true;
3043
+ }): DeepPartial<F>;
3044
+ /**
3045
+ * Parse the response according to the response format.
3046
+ *
3047
+ * If a format was specified in the call, parses the response content
3048
+ * according to that format. For JSON-based formats, extracts and parses
3049
+ * the JSON. For OutputParser formats, calls the custom parser.
3050
+ *
3051
+ * When a format is specified, returns the parsed value of type F.
3052
+ * When no format is specified (F = unknown), returns null at runtime.
3053
+ *
3054
+ * @returns The parsed response.
3055
+ * @throws ParseError if parsing fails.
3056
+ *
3057
+ * @example
3058
+ * ```typescript
3059
+ * interface Book { title: string; author: string; }
3060
+ *
3061
+ * const response = await model.call<Book>('Recommend a book', {
3062
+ * format: BookSchema
3063
+ * });
3064
+ *
3065
+ * const book = response.parse(); // Type: Book
3066
+ * console.log(book.title);
3067
+ * ```
3068
+ */
3069
+ parse(options?: {
3070
+ partial?: false;
3071
+ }): F;
3072
+ }
3073
+
3074
+ /**
3075
+ * Output parser for custom format parsing.
3076
+ *
3077
+ * Provides OutputParser type and defineOutputParser function for creating
3078
+ * custom parsers for non-JSON formats like XML, YAML, CSV, or any custom
3079
+ * text structure.
3080
+ */
3081
+
3082
+ /**
3083
+ * Type discriminator symbol for OutputParser.
3084
+ * Used at runtime to distinguish OutputParsers from other format types.
3085
+ */
3086
+ declare const OUTPUT_PARSER_TYPE: unique symbol;
3087
+ /**
3088
+ * Represents a custom output parser for non-JSON formats.
3089
+ *
3090
+ * OutputParser wraps a parsing function and stores formatting instructions.
3091
+ * Unlike structured outputs (JSON schema, tools, strict mode), OutputParser
3092
+ * works with raw text responses and custom parsing logic.
3093
+ *
3094
+ * @template T - The type of the parsed output.
3095
+ *
3096
+ * @example
3097
+ * ```typescript
3098
+ * const xmlParser = defineOutputParser({
3099
+ * formattingInstructions: 'Return XML: <book><title>...</title></book>',
3100
+ * parser: (response) => {
3101
+ * const text = response.text();
3102
+ * // Parse XML and return Book
3103
+ * return { title: '...', author: '...' };
3104
+ * },
3105
+ * });
3106
+ *
3107
+ * const response = await model.call('Recommend a book', { format: xmlParser });
3108
+ * const book = response.parse(); // Returns parsed Book
3109
+ * ```
3110
+ */
3111
+ interface OutputParser<T = unknown> {
3112
+ /**
3113
+ * Type discriminator for runtime identification.
3114
+ */
3115
+ readonly __outputParserType: typeof OUTPUT_PARSER_TYPE;
3116
+ /**
3117
+ * The name of the output parser (derived from parser function name).
3118
+ */
3119
+ readonly name: string;
3120
+ /**
3121
+ * Instructions for the LLM on how to format its output.
3122
+ * These are added to the system prompt.
3123
+ */
3124
+ readonly formattingInstructions: string;
3125
+ /**
3126
+ * Parse the response using the wrapped function.
3127
+ *
3128
+ * @param response - The response from the LLM call.
3129
+ * @returns The parsed output of type T.
3130
+ */
3131
+ (response: AnyResponse): T;
3132
+ }
3133
+ /**
3134
+ * Arguments for defining an output parser.
3135
+ *
3136
+ * @template T - The type of the parsed output.
3137
+ */
3138
+ interface OutputParserArgs<T> {
3139
+ /**
3140
+ * Instructions for the LLM on how to format the output.
3141
+ * These will be added to the system prompt.
3142
+ */
3143
+ formattingInstructions: string;
3144
+ /**
3145
+ * The parsing function that takes a Response and returns the parsed output.
3146
+ *
3147
+ * @param response - The response from the LLM call.
3148
+ * @returns The parsed output of type T.
3149
+ */
3150
+ parser: (response: AnyResponse) => T;
3151
+ }
3152
+ /**
3153
+ * Create an output parser for custom format parsing.
3154
+ *
3155
+ * Use this function to create custom parsers for non-JSON formats like
3156
+ * XML, YAML, CSV, or any custom text structure. The parser function
3157
+ * receives the full Response object and returns the parsed output.
3158
+ *
3159
+ * @template T - The type of the parsed output.
3160
+ * @param args - The output parser arguments.
3161
+ * @returns An OutputParser instance.
3162
+ *
3163
+ * @example XML parsing
3164
+ * ```typescript
3165
+ * interface Book {
3166
+ * title: string;
3167
+ * author: string;
3168
+ * }
3169
+ *
3170
+ * const bookXmlParser = defineOutputParser<Book>({
3171
+ * formattingInstructions: `
3172
+ * Return the book information in this XML structure:
3173
+ * <book>
3174
+ * <title>Book Title</title>
3175
+ * <author>Author Name</author>
3176
+ * </book>
3177
+ * `,
3178
+ * parser: (response) => {
3179
+ * const text = response.text();
3180
+ * // Parse XML (example using regex, use proper parser in production)
3181
+ * const titleMatch = text.match(/<title>([^<]+)<\/title>/);
3182
+ * const authorMatch = text.match(/<author>([^<]+)<\/author>/);
3183
+ * return {
3184
+ * title: titleMatch?.[1] ?? '',
3185
+ * author: authorMatch?.[1] ?? '',
3186
+ * };
3187
+ * },
3188
+ * });
3189
+ *
3190
+ * const response = await model.call('Recommend a fantasy book', {
3191
+ * format: bookXmlParser,
3192
+ * });
3193
+ * const book = response.parse(); // Type: Book
3194
+ * ```
3195
+ *
3196
+ * @example CSV parsing
3197
+ * ```typescript
3198
+ * interface Book {
3199
+ * title: string;
3200
+ * author: string;
3201
+ * rating: number;
3202
+ * }
3203
+ *
3204
+ * const booksCsvParser = defineOutputParser<Book[]>({
3205
+ * formattingInstructions: `
3206
+ * Return book information as CSV format with header:
3207
+ * title,author,rating
3208
+ * Book 1,Author 1,5
3209
+ * Book 2,Author 2,4
3210
+ * `,
3211
+ * parser: (response) => {
3212
+ * const text = response.text();
3213
+ * const lines = text.trim().split('\n').slice(1); // Skip header
3214
+ * return lines.map((line) => {
3215
+ * const [title, author, rating] = line.split(',').map((s) => s.trim());
3216
+ * return { title, author, rating: parseInt(rating, 10) };
3217
+ * });
3218
+ * },
3219
+ * });
3220
+ * ```
3221
+ */
3222
+ declare function defineOutputParser<T>(args: OutputParserArgs<T>): OutputParser<T>;
3223
+ /**
3224
+ * Check if an object is an OutputParser.
3225
+ *
3226
+ * This is a type guard function that narrows the type of `obj` to
3227
+ * `OutputParser<unknown>` when it returns true.
3228
+ *
3229
+ * @param obj - The object to check.
3230
+ * @returns True if the object is an OutputParser, false otherwise.
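+ *
+ * @example
+ * A minimal narrowing sketch (the `maybeFormat` value is hypothetical):
+ * ```typescript
+ * declare const maybeFormat: unknown;
+ * if (isOutputParser(maybeFormat)) {
+ *   // Narrowed to OutputParser<unknown>
+ *   console.log(maybeFormat.name, maybeFormat.formattingInstructions);
+ * }
+ * ```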
3231
+ */
3232
+ declare function isOutputParser(obj: unknown): obj is OutputParser<unknown>;
3233
+
3234
+ /**
3235
+ * Format class and utilities for structured LLM outputs.
3236
+ *
3237
+ * Provides the Format interface and functions for creating and resolving
3238
+ * structured output formats.
3239
+ */
3240
+
3241
+ /**
3242
+ * Reserved tool name for formatted output tools.
3243
+ * This is used internally to convert formatted output tool calls to textual output.
3244
+ */
3245
+ declare const FORMAT_TOOL_NAME = "__mirascope_formatted_output_tool__";
3246
+ /**
3247
+ * System prompt instructions for tool mode formatting.
3248
+ */
3249
+ declare const TOOL_MODE_INSTRUCTIONS = "Always respond to the user's query using the __mirascope_formatted_output_tool__ tool for structured output.";
3250
+ /**
3251
+ * System prompt instructions for JSON mode formatting.
3252
+ * The {json_schema} placeholder will be replaced with the actual schema.
3253
+ */
3254
+ declare const JSON_MODE_INSTRUCTIONS = "Respond only with valid JSON that matches this exact schema:\n{json_schema}";
3255
+ /**
3256
+ * Type discriminator symbol for Format.
3257
+ */
3258
+ declare const FORMAT_TYPE: unique symbol;
3259
+ /**
3260
+ * Represents a resolved structured output format.
3261
+ *
3262
+ * A Format contains all metadata needed to describe a structured output type
3263
+ * to the LLM, including the JSON schema, formatting mode, and optional validator.
3264
+ *
3265
+ * Format objects are created by `defineFormat` or `resolveFormat` and are used
3266
+ * internally by providers to configure structured output requests.
3267
+ *
3268
+ * @template T - The type of the formatted output.
3269
+ */
3270
+ interface Format<T = unknown> {
3271
+ /**
3272
+ * Type discriminator for runtime identification.
3273
+ */
3274
+ readonly __formatType: typeof FORMAT_TYPE;
3275
+ /**
3276
+ * The name of the format (derived from the type name).
3277
+ */
3278
+ readonly name: string;
3279
+ /**
3280
+ * A description of the format, if available.
3281
+ */
3282
+ readonly description: string | null;
3283
+ /**
3284
+ * JSON schema representation of the structured output format.
3285
+ */
3286
+ readonly schema: ToolParameterSchema;
3287
+ /**
3288
+ * The formatting mode determining how the LLM is configured.
3289
+ */
3290
+ readonly mode: FormattingMode;
3291
+ /**
3292
+ * Optional Zod schema for runtime validation.
3293
+ */
3294
+ readonly validator: ZodLike | null;
3295
+ /**
3296
+ * Optional OutputParser for custom parsing.
3297
+ * When set, indicates this format uses custom parsing instead of JSON.
3298
+ */
3299
+ readonly outputParser: OutputParser<T> | null;
3300
+ /**
3301
+ * The formatting instructions to add to the system prompt.
3302
+ */
3303
+ readonly formattingInstructions: string | null;
3304
+ /**
3305
+ * Create a ToolSchema for this format.
3306
+ * Used when mode is 'tool' to generate a tool for structured output.
3307
+ */
3308
+ createToolSchema(): ToolSchema;
3309
+ }
3310
+ /**
3311
+ * Check if an object is a Format.
3312
+ *
3313
+ * @param obj - The object to check.
3314
+ * @returns True if the object is a Format.
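+ *
+ * @example
+ * A minimal narrowing sketch (the `value` variable is hypothetical):
+ * ```typescript
+ * declare const value: unknown;
+ * if (isFormat(value)) {
+ *   // Narrowed to Format<unknown>
+ *   console.log(value.name, value.mode);
+ * }
+ * ```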
3315
+ */
3316
+ declare function isFormat(obj: unknown): obj is Format<unknown>;
3317
+ /**
3318
+ * Options for defineFormat when using TypeScript interfaces.
3319
+ *
3320
+ * The `__schema` field is injected by the compile-time transformer based on
3321
+ * the generic type parameter T.
3322
+ */
3323
+ interface DefineFormatOptions<T = unknown> {
3324
+ /**
3325
+ * The formatting mode to use.
3326
+ */
3327
+ mode: FormattingMode;
3328
+ /**
3329
+ * Optional Zod schema for runtime validation.
3330
+ */
3331
+ validator?: ZodLike;
3332
+ /**
3333
+ * Internal: JSON schema injected by compile-time transformer.
3334
+ * Do not set this manually - it is populated automatically when using
3335
+ * the Mirascope transformer with TypeScript interfaces.
3336
+ */
3337
+ __schema?: ToolParameterSchema;
3338
+ }
3339
+ /**
3340
+ * Create a Format with explicit mode control.
3341
+ *
3342
+ * Use this to create structured output formats for LLM responses. The format
3343
+ * can be derived from a Zod schema (via `validator`) or a TypeScript interface
3344
+ * (via the compile-time transformer which injects `__schema`).
3345
+ *
3346
+ * @template T - The type of the formatted output.
3347
+ * @param options - The format options including mode and optional validator.
3348
+ * @returns A Format object.
3349
+ *
3350
+ * @example With Zod schema
3351
+ * ```typescript
3352
+ * const BookSchema = z.object({ title: z.string(), author: z.string() });
3353
+ * const bookFormat = defineFormat<Book>({ mode: 'tool', validator: BookSchema });
3354
+ * const response = await model.call('Recommend a book', { format: bookFormat });
3355
+ * const book = response.parse(); // Typed as Book
3356
+ * ```
3357
+ *
3358
+ * @example With TypeScript interface (requires transformer)
3359
+ * ```typescript
3360
+ * interface Book { title: string; author: string; }
3361
+ *
3362
+ * // Transformer injects __schema at compile time
3363
+ * const bookFormat = defineFormat<Book>({ mode: 'json' });
3364
+ * ```
3365
+ */
3366
+ declare function defineFormat<T>(options: DefineFormatOptions<T>): Format<T>;
3367
+ /**
3368
+ * Resolve any format input to internal Format representation.
3369
+ *
3370
+ * This function handles all possible format inputs and converts them to
3371
+ * a Format object. It's used internally by providers to normalize format
3372
+ * specifications.
3373
+ *
3374
+ * @template T - The type of the formatted output.
3375
+ * @param formatArg - The format specification (can be Format, FormatSpec, ZodLike, OutputParser, or null).
3376
+ * @param defaultMode - The default mode to use when not explicitly specified.
3377
+ * @returns A Format object or null.
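+ *
+ * @example
+ * A hedged sketch (assuming a Zod schema and `'tool'` as the desired default mode):
+ * ```typescript
+ * const BookSchema = z.object({ title: z.string(), author: z.string() });
+ * const format = resolveFormat(BookSchema, 'tool');
+ * // format is expected to carry mode 'tool' and the Zod validator.
+ * ```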
3378
+ */
3379
+ declare function resolveFormat<T>(formatArg: Format<T> | FormatSpec<T> | ZodLike | OutputParser<T> | null | undefined, defaultMode: FormattingMode): Format<T> | null;
3380
+
3381
+ /**
3382
+ * Formatting types for structured LLM outputs.
3383
+ *
3384
+ * Provides types for defining structured output formats that LLMs
3385
+ * should follow when generating responses.
3386
+ */
3387
+
3388
+ /**
3389
+ * Formatting mode determines how the structured output is requested from the LLM.
3390
+ *
3391
+ * - `strict`: Use provider's strict structured output mode (requires provider support)
3392
+ * - `json`: Use JSON mode with schema instructions in system prompt
3393
+ * - `tool`: Use tool calling with a format tool to get structured output
3394
+ * - `parser`: Use custom OutputParser for non-JSON formats
3395
+ */
3396
+ type FormattingMode = "strict" | "json" | "tool" | "parser";
3397
+ /**
3398
+ * User-facing format specification.
3399
+ *
3400
+ * Can be one of:
3401
+ * - A TypeScript interface/type (transformer injects __schema)
3402
+ * - A Zod schema (used for both schema and validation)
3403
+ * - { schema: T, validator: ZodSchema } for explicit control
3404
+ * - Format (from defineFormat)
3405
+ * - OutputParser (from defineOutputParser)
3406
+ *
3407
+ * @template T - The type of the formatted output.
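+ *
+ * @example
+ * A hedged sketch of the explicit form with a runtime validator (assuming a Zod schema):
+ * ```typescript
+ * interface Book { title: string; author: string; }
+ * const BookSchema = z.object({ title: z.string(), author: z.string() });
+ *
+ * const bookSpec: FormatSpec<Book> = { validator: BookSchema };
+ * const response = await model.call('Recommend a book', { format: bookSpec });
+ * ```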
3408
+ */
3409
+ interface FormatSpec<T = unknown> {
3410
+ /**
3411
+ * The schema marker for the TypeScript type.
3412
+ * The compile-time transformer uses this to determine which type to extract the schema from.
3413
+ */
3414
+ schema?: T;
3415
+ /**
3416
+ * Optional Zod schema for runtime validation.
3417
+ * When provided, parsed output will be validated against this schema.
3418
+ */
3419
+ validator?: ZodLike;
3420
+ /**
3421
+ * Internal: JSON schema injected by the compile-time transformer.
3422
+ * Users should not set this directly.
3423
+ */
3424
+ __schema?: ToolParameterSchema;
3425
+ }
3426
+ /**
3427
+ * Any format input that can be provided to defineCall/definePrompt.
3428
+ * Used for constraining format type parameters.
3429
+ *
3430
+ * @template F - The output type of the format. Defaults to unknown.
3431
+ */
3432
+ type FormatInput<F = unknown> = Format<F> | FormatSpec<F> | OutputParser<F> | ZodLike | null | undefined;
3433
+ /**
3434
+ * Union of all possible format input types (non-generic version).
3435
+ * Used for type narrowing in resolveFormat.
3436
+ */
3437
+ type AnyFormatInput = Format<unknown> | FormatSpec<unknown> | OutputParser<unknown> | ZodLike | null | undefined;
3438
+ /**
3439
+ * Extract the format output type from a format input.
3440
+ *
3441
+ * This utility type enables automatic type inference when using formats
3442
+ * with defineCall and definePrompt, eliminating the need to specify the
3443
+ * format type parameter explicitly.
3444
+ *
3445
+ * @template F - The format input type.
3446
+ * @returns The extracted output type, or `unknown` if not determinable.
3447
+ *
3448
+ * @example
3449
+ * ```typescript
3450
+ * // ExtractFormatType<Format<Book>> = Book
3451
+ * // ExtractFormatType<OutputParser<Recipe>> = Recipe
3452
+ * // ExtractFormatType<typeof BookSchema> = z.infer<typeof BookSchema>
3453
+ * // ExtractFormatType<null> = unknown
3454
+ * ```
3455
+ */
3456
+ type ExtractFormatType<F> = F extends Format<infer F> ? F : F extends OutputParser<infer F> ? F : F extends FormatSpec<infer F> ? F : F extends {
3457
+ _output: infer O;
3458
+ } ? O : F extends null | undefined ? unknown : unknown;
3459
+
3460
+ /**
3461
+ * Deep partial type for streaming structured outputs.
3462
+ *
3463
+ * Used when parsing incomplete JSON during structured streaming,
3464
+ * where fields arrive incrementally.
3465
+ */
3466
+ /**
3467
+ * Deep partial type - all properties become optional recursively.
3468
+ *
3469
+ * This type is used for streaming structured outputs where fields
3470
+ * arrive incrementally. During streaming, the parsed output may
3471
+ * not have all fields populated yet.
3472
+ *
3473
+ * @template T - The original type to make deeply partial.
3474
+ *
3475
+ * @example
3476
+ * ```typescript
3477
+ * interface Book {
3478
+ * title: string;
3479
+ * author: string;
3480
+ * chapters: { name: string; pages: number }[];
3481
+ * }
3482
+ *
3483
+ * // DeepPartial<Book> =
3484
+ * // {
3485
+ * // title?: string | null;
3486
+ * // author?: string | null;
3487
+ * // chapters?: ({ name?: string | null; pages?: number | null } | null)[] | null;
3488
+ * // }
3489
+ *
3490
+ * // During streaming:
3491
+ * for await (const partial of response.structuredStream()) {
3492
+ * // partial.title may be undefined at first
3493
+ * if (partial.title) {
3494
+ * console.log('Got title:', partial.title);
3495
+ * }
3496
+ * }
3497
+ * ```
3498
+ */
3499
+ type DeepPartial<T> = T extends (infer U)[] ? (DeepPartial<U> | null)[] | null : T extends object ? {
3500
+ [P in keyof T]?: DeepPartial<T[P]> | null;
3501
+ } : T | null;
3502
+
3503
+ /**
3504
+ * Mirascope LLM exception hierarchy for unified error handling across providers.
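+ *
+ * @example
+ * A minimal sketch of unified error handling across providers:
+ * ```typescript
+ * try {
+ *   const response = await model.call('Hello!');
+ *   console.log(response.text());
+ * } catch (e) {
+ *   if (e instanceof RateLimitError) {
+ *     console.warn(`Rate limited (status ${e.statusCode}); retry later.`);
+ *   } else if (e instanceof ProviderError) {
+ *     console.error(`Provider ${e.provider} failed:`, e.message);
+ *   } else {
+ *     throw e;
+ *   }
+ * }
+ * ```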
3505
+ */
3506
+
3507
+ /**
3508
+ * Base exception for all Mirascope LLM errors.
3509
+ */
3510
+ declare class MirascopeError extends Error {
3511
+ constructor(message: string);
3512
+ }
3513
+ /**
3514
+ * Options for constructing a ProviderError.
3515
+ */
3516
+ interface ProviderErrorOptions {
3517
+ provider: ProviderId;
3518
+ originalException?: Error | null;
3519
+ }
3520
+ /**
3521
+ * Base class for errors that originate from a provider SDK.
3522
+ *
3523
+ * This wraps exceptions from provider libraries (OpenAI, Anthropic, etc.)
3524
+ * and provides a unified interface for error handling.
3525
+ */
3526
+ declare class ProviderError extends MirascopeError {
3527
+ /**
3528
+ * The provider that raised this error.
3529
+ */
3530
+ readonly provider: ProviderId;
3531
+ /**
3532
+ * The original exception from the provider SDK, if available.
3533
+ */
3534
+ readonly originalException: Error | null;
3535
+ constructor(message: string, options: ProviderErrorOptions);
3536
+ }
3537
+ /**
3538
+ * Options for constructing an APIError.
3539
+ */
3540
+ interface APIErrorOptions extends ProviderErrorOptions {
3541
+ statusCode?: number | null;
3542
+ }
3543
+ /**
3544
+ * Base class for HTTP-level API errors.
3545
+ */
3546
+ declare class APIError extends ProviderError {
3547
+ /**
3548
+ * The HTTP status code, if available.
3549
+ */
3550
+ readonly statusCode: number | null;
3551
+ constructor(message: string, options: APIErrorOptions);
3552
+ }
3553
+ /**
3554
+ * Raised for authentication failures (401, invalid API keys).
3555
+ */
3556
+ declare class AuthenticationError extends APIError {
3557
+ constructor(message: string, options: APIErrorOptions);
3558
+ }
3559
+ /**
3560
+ * Raised for permission/authorization failures (403).
3561
+ */
3562
+ declare class PermissionError extends APIError {
3563
+ constructor(message: string, options: APIErrorOptions);
3564
+ }
3565
+ /**
3566
+ * Raised for malformed requests (400, 422).
3567
+ */
3568
+ declare class BadRequestError extends APIError {
3569
+ constructor(message: string, options: APIErrorOptions);
3570
+ }
3571
+ /**
3572
+ * Raised when the requested resource is not found (404).
3573
+ */
3574
+ declare class NotFoundError extends APIError {
3575
+ constructor(message: string, options: APIErrorOptions);
3576
+ }
3577
+ /**
3578
+ * Raised when rate limits are exceeded (429).
3579
+ */
3580
+ declare class RateLimitError extends APIError {
3581
+ constructor(message: string, options: APIErrorOptions);
3582
+ }
3583
+ /**
3584
+ * Raised for server-side errors (500+).
3585
+ */
3586
+ declare class ServerError extends APIError {
3587
+ constructor(message: string, options: APIErrorOptions);
3588
+ }
3589
+ /**
3590
+ * Raised when unable to connect to the API (network issues, timeouts).
3591
+ */
3592
+ declare class ConnectionError extends ProviderError {
3593
+ constructor(message: string, options: ProviderErrorOptions);
3594
+ }
3595
+ /**
3596
+ * Raised when a request times out or a deadline is exceeded.
3597
+ */
3598
+ declare class TimeoutError extends ProviderError {
3599
+ constructor(message: string, options: ProviderErrorOptions);
3600
+ }
3601
+ /**
3602
+ * Raised when API response fails validation.
3603
+ *
3604
+ * This wraps the APIResponseValidationError exceptions that the OpenAI and Anthropic SDKs both raise.
3605
+ */
3606
+ declare class ResponseValidationError extends ProviderError {
3607
+ constructor(message: string, options: ProviderErrorOptions);
3608
+ }
3609
+ /**
3610
+ * Base class for errors that occur during tool execution.
3611
+ */
3612
+ declare class ToolError extends MirascopeError {
3613
+ }
3614
+ /**
3615
+ * Raised if an uncaught exception is thrown while executing a tool.
3616
+ */
3617
+ declare class ToolExecutionError extends ToolError {
3618
+ /**
3619
+ * The exception that was thrown while executing the tool.
3620
+ */
3621
+ readonly toolException: Error;
3622
+ constructor(toolException: Error | string);
3623
+ }
3624
+ /**
3625
+ * Raised if a tool call does not match any registered tool.
3626
+ */
3627
+ declare class ToolNotFoundError extends ToolError {
3628
+ /**
3629
+ * The name of the tool that was not found.
3630
+ */
3631
+ readonly toolName: string;
3632
+ constructor(toolName: string);
3633
+ }
3634
+ /**
3635
+ * Raised when response.parse() fails to parse the response content.
3636
+ *
3637
+ * This wraps errors from JSON extraction, JSON parsing, Zod validation,
3638
+ * or custom OutputParser functions.
3639
+ */
3640
+ declare class ParseError extends MirascopeError {
3641
+ /**
3642
+ * The original exception that caused the parse failure.
3643
+ */
3644
+ readonly originalException: Error;
3645
+ constructor(message: string, originalException: Error);
3646
+ /**
3647
+ * Generate a message suitable for retrying with the LLM.
3648
+ *
3649
+ * Returns a user-friendly message describing what went wrong,
3650
+ * suitable for including in a retry prompt.
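+ *
+ * @example
+ * A hedged retry sketch (`bookFormat` is a hypothetical Format; `model.resume` is declared in this module):
+ * ```typescript
+ * const response = await model.call('Recommend a book', { format: bookFormat });
+ * try {
+ *   console.log(response.parse());
+ * } catch (e) {
+ *   if (e instanceof ParseError) {
+ *     const retry = await model.resume(response, e.retryMessage());
+ *     console.log(retry.parse());
+ *   } else {
+ *     throw e;
+ *   }
+ * }
+ * ```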
3651
+ */
3652
+ retryMessage(): string;
3653
+ }
3654
+ /**
3655
+ * Raised if a Mirascope feature is unsupported by chosen provider.
3656
+ *
3657
+ * If compatibility is model-specific, then `modelId` should be specified.
3658
+ * If the feature is not supported by the provider at all, then it may be `null`.
3659
+ */
3660
+ declare class FeatureNotSupportedError extends MirascopeError {
3661
+ /**
3662
+ * The provider that does not support this feature.
3663
+ */
3664
+ readonly providerId: ProviderId;
3665
+ /**
3666
+ * The model that does not support this feature, if model-specific.
3667
+ */
3668
+ readonly modelId: ModelId | null;
3669
+ /**
3670
+ * The name of the unsupported feature.
3671
+ */
3672
+ readonly feature: string;
3673
+ constructor(feature: string, providerId: ProviderId, modelId?: ModelId | null, message?: string | null);
3674
+ }
3675
+ /**
3676
+ * Raised when no provider is registered for a given model ID.
3677
+ */
3678
+ declare class NoRegisteredProviderError extends MirascopeError {
3679
+ /**
3680
+ * The model ID that has no registered provider.
3681
+ */
3682
+ readonly modelId: string;
3683
+ constructor(modelId: string);
3684
+ }
3685
+ /**
3686
+ * Raised when no API key is available for a provider.
3687
+ *
3688
+ * This error is raised during auto-registration when the required API key
3689
+ * environment variable is not set. If a Mirascope fallback is available,
3690
+ * the error message will suggest using MIRASCOPE_API_KEY as an alternative.
3691
+ */
3692
+ declare class MissingAPIKeyError extends MirascopeError {
3693
+ /**
3694
+ * The provider that requires an API key.
3695
+ */
3696
+ readonly providerId: string;
3697
+ /**
3698
+ * The environment variable that should contain the API key.
3699
+ */
3700
+ readonly envVar: string;
3701
+ constructor(providerId: string, envVar: string, hasMirascopeFallback?: boolean);
3702
+ }
3703
+
3704
+ /**
3705
+ * Base abstract interface for provider clients.
3706
+ */
3707
+
3708
+ /**
3709
+ * A Mirascope error class constructor.
3710
+ */
3711
+ type MirascopeErrorClass = (new (message: string, options: ProviderErrorOptions) => ProviderError) | (new (message: string, options: APIErrorOptions) => APIError);
3712
+ /**
3713
+ * Mapping from provider SDK exception types to Mirascope error classes.
3714
+ */
3715
+ type ProviderErrorMap = Array<[
3716
+ new (...args: any[]) => Error,
3717
+ MirascopeErrorClass
3718
+ ]>;
3719
+ /**
3720
+ * Base abstract provider for LLM interactions.
3721
+ *
3722
+ * This class defines the interface for provider implementations.
3723
+ * Each provider (Anthropic, OpenAI, etc.) extends this class.
3724
+ */
3725
+ declare abstract class BaseProvider {
3726
+ /**
3727
+ * Provider identifier (e.g., "anthropic", "openai").
3728
+ */
3729
+ abstract readonly id: ProviderId;
3730
+ /**
3731
+ * Mapping from provider SDK exceptions to Mirascope error types.
3732
+ */
3733
+ protected abstract readonly errorMap: ProviderErrorMap;
3734
+ /**
3735
+ * Generate a Response by calling this provider's LLM.
3736
+ *
3737
+ * This method wraps the provider-specific implementation with error handling,
3738
+ * converting provider SDK exceptions to Mirascope error types.
3739
+ */
3740
+ call(args: {
3741
+ modelId: string;
3742
+ messages: readonly Message[];
3743
+ tools?: Tools;
3744
+ format?: Format | null;
3745
+ params?: Params;
3746
+ }): Promise<Response>;
3747
+ /**
3748
+ * Provider-specific implementation of call().
3749
+ *
3750
+ * Subclasses implement this method to handle the actual API call.
3751
+ */
3752
+ protected abstract _call(args: {
3753
+ modelId: string;
3754
+ messages: readonly Message[];
3755
+ tools?: Tools;
3756
+ format?: Format | null;
3757
+ params?: Params;
3758
+ }): Promise<Response>;
3759
+ /**
3760
+ * Generate a StreamResponse by calling this provider's LLM with streaming.
3761
+ *
3762
+ * This method wraps the provider-specific implementation with error handling,
3763
+ * converting provider SDK exceptions to Mirascope error types. It also wraps
3764
+ * the chunk iterator to catch errors during iteration (not just initial call).
3765
+ */
3766
+ stream(args: {
3767
+ modelId: string;
3768
+ messages: readonly Message[];
3769
+ tools?: Tools;
3770
+ format?: Format | null;
3771
+ params?: Params;
3772
+ }): Promise<StreamResponse>;
3773
+ /**
3774
+ * Wrap an async iterator to catch errors during iteration.
3775
+ * Converts provider SDK exceptions to Mirascope error types.
3776
+ */
3777
+ private _wrapIteratorErrors;
3778
+ /**
3779
+ * Provider-specific implementation of stream().
3780
+ *
3781
+ * Subclasses implement this method to handle the actual streaming API call.
3782
+ */
3783
+ protected abstract _stream(args: {
3784
+ modelId: string;
3785
+ messages: readonly Message[];
3786
+ tools?: Tools;
3787
+ format?: Format | null;
3788
+ params?: Params;
3789
+ }): Promise<StreamResponse>;
3790
+ /**
3791
+ * Generate a ContextResponse by calling this provider's LLM with context.
3792
+ *
3793
+ * This method accepts a context for dependency injection, enabling context-aware
3794
+ * tools and prompts.
3795
+ *
3796
+ * @template DepsT - The type of dependencies in the context.
3797
+ */
3798
+ contextCall<DepsT>(args: {
3799
+ ctx: Context<DepsT>;
3800
+ modelId: string;
3801
+ messages: readonly Message[];
3802
+ tools?: ContextTools<DepsT>;
3803
+ format?: Format | null;
3804
+ params?: Params;
3805
+ }): Promise<ContextResponse<DepsT>>;
3806
+ /**
3807
+ * Provider-specific implementation of contextCall().
3808
+ *
3809
+ * Subclasses implement this method to handle the actual API call with context.
3810
+ * Currently functionally equivalent to _call() since context-aware tools are
3811
+ * not yet implemented. When context-aware tools are added, this method will
3812
+ * handle passing context to tools during execution.
3813
+ */
3814
+ protected abstract _contextCall<DepsT>(args: {
3815
+ ctx: Context<DepsT>;
3816
+ modelId: string;
3817
+ messages: readonly Message[];
3818
+ tools?: ContextTools<DepsT>;
3819
+ format?: Format | null;
3820
+ params?: Params;
3821
+ }): Promise<ContextResponse<DepsT>>;
3822
+ /**
3823
+ * Generate a ContextStreamResponse by calling this provider's LLM with context and streaming.
3824
+ *
3825
+ * This method accepts a context for dependency injection, enabling context-aware
3826
+ * tools and prompts.
3827
+ *
3828
+ * @template DepsT - The type of dependencies in the context.
3829
+ */
3830
+ contextStream<DepsT>(args: {
3831
+ ctx: Context<DepsT>;
3832
+ modelId: string;
3833
+ messages: readonly Message[];
3834
+ tools?: ContextTools<DepsT>;
3835
+ format?: Format | null;
3836
+ params?: Params;
3837
+ }): Promise<ContextStreamResponse<DepsT>>;
3838
+ /**
3839
+ * Provider-specific implementation of contextStream().
3840
+ *
3841
+ * Subclasses implement this method to handle the actual streaming API call with context.
3842
+ * Currently functionally equivalent to _stream() since context-aware tools are
3843
+ * not yet implemented. When context-aware tools are added, this method will
3844
+ * handle passing context to tools during execution.
3845
+ */
3846
+ protected abstract _contextStream<DepsT>(args: {
3847
+ ctx: Context<DepsT>;
3848
+ modelId: string;
3849
+ messages: readonly Message[];
3850
+ tools?: ContextTools<DepsT>;
3851
+ format?: Format | null;
3852
+ params?: Params;
3853
+ }): Promise<ContextStreamResponse<DepsT>>;
3854
+ /**
3855
+ * Generate a new Response by extending a previous response's messages with additional user content.
3856
+ *
3857
+ * Uses the previous response's tools and output format. The default implementation
3858
+ * appends a user message and delegates to call().
3859
+ *
3860
+ * @param args.modelId - The model ID to use.
3861
+ * @param args.response - The previous response to extend.
3862
+ * @param args.content - The new user content to append.
3863
+ * @param args.params - Optional parameters for the request.
3864
+ * @returns A new Response containing the extended conversation.
3865
+ */
3866
+ resume(args: {
3867
+ modelId: string;
3868
+ response: RootResponse;
3869
+ content: UserContent;
3870
+ params?: Params;
3871
+ }): Promise<Response>;
3872
+ /**
3873
+ * Generate a new StreamResponse by extending a previous response's messages with additional user content.
3874
+ *
3875
+ * Uses the previous response's tools and output format. The default implementation
3876
+ * appends a user message and delegates to stream().
3877
+ *
3878
+ * @param args.modelId - The model ID to use.
3879
+ * @param args.response - The previous response to extend.
3880
+ * @param args.content - The new user content to append.
3881
+ * @param args.params - Optional parameters for the request.
3882
+ * @returns A new StreamResponse for consuming the extended conversation.
3883
+ */
3884
+ resumeStream(args: {
3885
+ modelId: string;
3886
+ response: RootResponse;
3887
+ content: UserContent;
3888
+ params?: Params;
3889
+ }): Promise<StreamResponse>;
3890
+ /**
3891
+ * Generate a new ContextResponse by extending a previous response's messages with additional user content.
3892
+ *
3893
+ * Uses the previous response's tools and output format. The default implementation
3894
+ * appends a user message and delegates to contextCall().
3895
+ *
3896
+ * @template DepsT - The type of dependencies in the context.
3897
+ * @param args.ctx - The context containing dependencies for tools.
3898
+ * @param args.modelId - The model ID to use.
3899
+ * @param args.response - The previous response to extend.
3900
+ * @param args.content - The new user content to append.
3901
+ * @param args.params - Optional parameters for the request.
3902
+ * @returns A new ContextResponse containing the extended conversation.
3903
+ */
3904
+ contextResume<DepsT>(args: {
3905
+ ctx: Context<DepsT>;
3906
+ modelId: string;
3907
+ response: RootResponse;
3908
+ content: UserContent;
3909
+ params?: Params;
3910
+ }): Promise<ContextResponse<DepsT>>;
3911
+ /**
3912
+ * Generate a new ContextStreamResponse by extending a previous response's messages with additional user content.
3913
+ *
3914
+ * Uses the previous response's tools and output format. The default implementation
3915
+ * appends a user message and delegates to contextStream().
3916
+ *
3917
+ * @template DepsT - The type of dependencies in the context.
3918
+ * @param args.ctx - The context containing dependencies for tools.
3919
+ * @param args.modelId - The model ID to use.
3920
+ * @param args.response - The previous response to extend.
3921
+ * @param args.content - The new user content to append.
3922
+ * @param args.params - Optional parameters for the request.
3923
+ * @returns A new ContextStreamResponse for consuming the extended conversation.
3924
+ */
3925
+ contextResumeStream<DepsT>(args: {
3926
+ ctx: Context<DepsT>;
3927
+ modelId: string;
3928
+ response: RootResponse;
3929
+ content: UserContent;
3930
+ params?: Params;
3931
+ }): Promise<ContextStreamResponse<DepsT>>;
3932
+ /**
3933
+ * Wrap a provider SDK exception in the appropriate Mirascope error type.
3934
+ */
3935
+ protected wrapError(e: unknown): Error;
3936
+ /**
3937
+ * Extract HTTP status code from provider-specific exception.
3938
+ *
3939
+ * Different SDKs store status codes differently (e.g., .status_code vs .code).
3940
+ * Each provider implements this to handle their SDK's convention.
3941
+ *
3942
+ * @param e - The exception to extract status code from.
3943
+ * @returns The HTTP status code if available, undefined otherwise.
3944
+ */
3945
+ protected abstract getErrorStatus(e: Error): number | undefined;
3946
+ }
3947
+ /**
3948
+ * Type alias for any provider instance.
3949
+ * Equivalent to Python's `Provider = BaseProvider[Any]`.
3950
+ */
3951
+ type Provider = BaseProvider;
3952
+
3953
+ /**
3954
+ * Provider registry for managing provider instances.
3955
+ *
3956
+ * Provides automatic provider resolution based on model ID prefixes.
3957
+ */
3958
+
3959
+ /**
3960
+ * Reset the provider registry, clearing all registered providers.
3961
+ * Primarily useful for testing.
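+ *
+ * @example
+ * A minimal test-isolation sketch (assuming a test runner that provides `beforeEach`):
+ * ```typescript
+ * beforeEach(() => {
+ *   resetProviderRegistry();
+ *   registerProvider('anthropic', { apiKey: 'test-key' });
+ * });
+ * ```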
3962
+ */
3963
+ declare function resetProviderRegistry(): void;
3964
+ /**
3965
+ * Register a provider with scope(s) in the global registry.
3966
+ *
3967
+ * Scopes use prefix matching on model IDs:
3968
+ * - "anthropic/" matches "anthropic/*"
3969
+ * - "anthropic/claude-sonnet-4" matches "anthropic/claude-sonnet-4*"
3970
+ *
3971
+ * When multiple scopes match a model ID, the longest match wins.
3972
+ *
3973
+ * @example
3974
+ * ```typescript
3975
+ * // Register with default scope
3976
+ * registerProvider('anthropic', { apiKey: 'key' });
3977
+ *
3978
+ * // Register for specific models
3979
+ * registerProvider('anthropic', { scope: 'anthropic/claude-sonnet-4' });
3980
+ *
3981
+ * // Register a custom instance
3982
+ * const custom = new AnthropicProvider({ apiKey: 'team-key' });
3983
+ * registerProvider(custom, { scope: 'anthropic/claude-sonnet-4' });
3984
+ * ```
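+ *
+ * @example Longest prefix wins (hedged sketch)
+ * ```typescript
+ * registerProvider('anthropic', { apiKey: 'key' });
+ * const custom = new AnthropicProvider({ apiKey: 'team-key' });
+ * registerProvider(custom, { scope: 'anthropic/claude-opus-4-5' });
+ *
+ * // "anthropic/claude-opus-4-5-latest" matches the longer scope, so it is expected to
+ * // resolve to `custom`; other "anthropic/..." models resolve to the default registration.
+ * getProviderForModel('anthropic/claude-opus-4-5-latest');
+ * ```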
3985
+ */
3986
+ declare function registerProvider(provider: ProviderId | BaseProvider, options?: {
3987
+ scope?: string | string[];
3988
+ apiKey?: string;
3989
+ baseURL?: string;
3990
+ }): BaseProvider;
3991
+ /**
3992
+ * Get the provider for a model ID based on the registry.
3993
+ *
3994
+ * Uses longest prefix matching to find the most specific provider for the model.
3995
+ * If no explicit registration is found, checks for auto-registration defaults
3996
+ * and automatically registers the provider on first use.
3997
+ *
3998
+ * @param modelId - The full model ID (e.g., "anthropic/claude-sonnet-4-20250514").
3999
+ * @returns The provider instance registered for this model.
4000
+ * @throws NoRegisteredProviderError if no provider scope matches the model ID.
4001
+ * @throws MissingAPIKeyError if no provider has its API key set.
4002
+ *
4003
+ * @example
4004
+ * ```typescript
4005
+ * // Auto-registration on first use:
4006
+ * const provider = getProviderForModel('anthropic/claude-sonnet-4-20250514');
4007
+ * // Automatically loads and registers AnthropicProvider for "anthropic/"
4008
+ * ```
4009
+ */
4010
+ declare function getProviderForModel(modelId: string): BaseProvider;
4011
+
4012
+ /**
4013
+ * Prompt definition and creation utilities.
4014
+ *
4015
+ * This module provides a unified `definePrompt` function that automatically
4016
+ * detects whether a prompt is context-aware based on the template type parameter.
4017
+ * If T includes `ctx: Context<DepsT>`, a ContextPrompt is returned; otherwise
4018
+ * a regular Prompt is returned.
4019
+ */
4020
+
4021
+ /**
4022
+ * Extract DepsT from T if T has a `ctx: Context<DepsT>` property.
4023
+ * Returns `never` if T doesn't have a context property.
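+ *
+ * @example
+ * Type-level behavior (using a hypothetical `MyDeps` interface):
+ * ```typescript
+ * // ExtractDeps<{ ctx: Context<MyDeps>; genre: string }> = MyDeps
+ * // ExtractDeps<{ genre: string }> = never
+ * ```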
4024
+ */
4025
+ type ExtractDeps<T> = T extends {
4026
+ ctx: Context<infer D>;
4027
+ } ? D : never;
4028
+ /**
4029
+ * Extract the variables type from T by removing the `ctx` property.
4030
+ */
4031
+ type ExtractVars<T> = Omit<T, "ctx">;
4032
+ /**
4033
+ * A template function that generates message content from variables.
4034
+ *
4035
+ * @template T - The type of variables the template accepts.
4036
+ */
4037
+ type MessageTemplate<T> = (vars: T) => UserContent | readonly Message[];
4038
+ /**
4039
+ * Template function type - either takes no args or takes vars of type T.
4040
+ */
4041
+ type TemplateFunc<T> = (() => UserContent | readonly Message[]) | ((vars: T) => UserContent | readonly Message[]);
4042
+ /**
4043
+ * The combined context and variables object passed to context template functions.
4044
+ *
4045
+ * @template T - The type of variables the template accepts.
4046
+ * @template DepsT - The type of dependencies in the context.
4047
+ */
4048
+ type ContextMessageTemplate<T, DepsT> = {
4049
+ ctx: Context<DepsT>;
4050
+ } & T;
4051
+ /**
4052
+ * Context template function type - takes an object with ctx and vars.
4053
+ *
4054
+ * @template T - The type of variables the template accepts.
4055
+ * @template DepsT - The type of dependencies in the context.
4056
+ */
4057
+ type ContextTemplateFunc<T, DepsT> = (args: ContextMessageTemplate<T, DepsT>) => UserContent | readonly Message[];
4058
+ /**
4059
+ * Arguments for defining a prompt.
4060
+ *
4061
+ * @template T - The type of variables the template accepts. Defaults to NoVars.
4062
+ * @template F - The format input type. The output type F is derived via ExtractFormatType<F>.
4063
+ */
4064
+ interface PromptArgs<T = NoVars, F extends AnyFormatInput = undefined> {
4065
+ /** Optional tools to make available to the model. */
4066
+ tools?: Tools;
4067
+ /**
4068
+ * Optional format specification for structured output.
4069
+ * Can be a Zod schema, Format, FormatSpec, or OutputParser.
4070
+ * The output type is automatically inferred from this format.
4071
+ */
4072
+ format?: F;
4073
+ /** A function that generates message content (optionally from variables). */
4074
+ template: TemplateFunc<T>;
4075
+ }
4076
+ /**
4077
+ * Arguments for defining a context-aware prompt.
4078
+ * Used when T includes `ctx: Context<DepsT>`.
4079
+ *
4080
+ * @template T - The full template parameter type including ctx.
4081
+ * @template F - The format input type.
4082
+ */
4083
+ interface ContextPromptArgs<T = NoVars, F extends AnyFormatInput = undefined> {
4084
+ /** Optional tools to make available to the model. */
4085
+ tools?: ExtractDeps<T> extends never ? Tools : ContextTools<ExtractDeps<T>>;
4086
+ /**
4087
+ * Optional format specification for structured output.
4088
+ * Can be a Zod schema, Format, FormatSpec, or OutputParser.
4089
+ */
4090
+ format?: F;
4091
+ /** A function that generates message content from context (and optionally variables). */
4092
+ template: TemplateFunc<T>;
4093
+ }
4094
+ /**
4095
+ * A prompt that can be called with a model to generate a response.
4096
+ *
4097
+ * Created by `definePrompt()`. The prompt is callable and also has a
4098
+ * `messages()` method for getting the raw messages without calling the LLM.
4099
+ *
4100
+ * @template T - The type of variables the prompt accepts. Defaults to empty object.
4101
+ * @template F - The format input type. The output type F is derived via ExtractFormatType<F>.
4102
+ *
4103
+ * @example With variables
4104
+ * ```typescript
4105
+ * const recommendBook = definePrompt<{ genre: string }>({
4106
+ * template: ({ genre }) => `Recommend a ${genre} book`,
4107
+ * });
4108
+ *
4109
+ * const response = await recommendBook(model, { genre: 'fantasy' });
4110
+ * const messages = recommendBook.messages({ genre: 'fantasy' });
4111
+ * ```
4112
+ *
4113
+ * @example Without variables
4114
+ * ```typescript
4115
+ * const sayHello = definePrompt({
4116
+ * template: () => 'Hello!',
4117
+ * });
4118
+ * const response = await sayHello(model);
4119
+ * ```
4120
+ */
4121
+ interface Prompt<T = NoVars, F extends AnyFormatInput = undefined> {
4122
+ /**
4123
+ * Call the prompt with a model and variables to generate a response.
4124
+ *
4125
+ * @param model - The model to use, either a Model instance or model ID string.
4126
+ * @param vars - The variables to pass to the template.
4127
+ * @returns A promise that resolves to the LLM response.
4128
+ */
4129
+ (model: Model | ModelId, ...args: keyof T extends never ? [] : [vars: T]): Promise<Response<ExtractFormatType<F>>>;
4130
+ /**
4131
+ * Call the prompt with a model and variables to generate a response.
4132
+ * This is the method form of the callable interface.
4133
+ *
4134
+ * @param model - The model to use, either a Model instance or model ID string.
4135
+ * @param vars - The variables to pass to the template.
4136
+ * @returns A promise that resolves to the LLM response.
4137
+ */
4138
+ call(model: Model | ModelId, ...args: keyof T extends never ? [] : [vars: T]): Promise<Response<ExtractFormatType<F>>>;
4139
+ /**
4140
+ * Stream the prompt with a model and variables to generate a streaming response.
4141
+ *
4142
+ * @param model - The model to use, either a Model instance or model ID string.
4143
+ * @param vars - The variables to pass to the template.
4144
+ * @returns A promise that resolves to the streaming LLM response.
4145
+ *
4146
+ * @example
4147
+ * ```typescript
4148
+ * const response = await prompt.stream(model, { genre: 'fantasy' });
4149
+ * for await (const text of response.textStream()) {
4150
+ * process.stdout.write(text);
4151
+ * }
4152
+ * ```
4153
+ */
4154
+ stream(model: Model | ModelId, ...args: keyof T extends never ? [] : [vars: T]): Promise<StreamResponse<ExtractFormatType<F>>>;
4155
+ /**
4156
+ * Get the messages for this prompt without calling the LLM.
4157
+ *
4158
+ * @param vars - The variables to pass to the template.
4159
+ * @returns The messages that would be sent to the LLM.
4160
+ */
4161
+ messages(...args: keyof T extends never ? [] : [vars: T]): readonly Message[];
4162
+ /**
4163
+ * The tools available to this prompt.
4164
+ */
4165
+ readonly tools: Tools | undefined;
4166
+ /**
4167
+ * The format specification for structured output, if any.
4168
+ */
4169
+ readonly format: FormatInput<ExtractFormatType<F>>;
4170
+ /**
4171
+ * The underlying template function.
4172
+ */
4173
+ readonly template: TemplateFunc<T>;
4174
+ }
4175
+ /**
4176
+ * A context-aware prompt that can be called with a model and context to generate a response.
4177
+ *
4178
+ * Created by `definePrompt()` when the template type includes `ctx: Context<DepsT>`.
4179
+ * The prompt is callable and also has a `messages()` method for getting the raw messages.
4180
+ *
4181
+ * @template T - The type of variables the prompt accepts. Defaults to empty object.
4182
+ * @template DepsT - The type of dependencies in the context.
4183
+ * @template F - The format input type. The output type is derived via ExtractFormatType<F>.
4184
+ *
4185
+ * @example With variables
4186
+ * ```typescript
4187
+ * interface MyDeps { userId: string; }
4188
+ *
4189
+ * const greetUser = definePrompt<{ ctx: Context<MyDeps>; greeting: string }>({
4190
+ * template: ({ ctx, greeting }) => `${greeting}, user ${ctx.deps.userId}!`,
4191
+ * });
4192
+ *
4193
+ * const ctx = createContext<MyDeps>({ userId: '123' });
4194
+ * const response = await greetUser(model, ctx, { greeting: 'Hello' });
4195
+ * const messages = greetUser.messages(ctx, { greeting: 'Hello' });
4196
+ * ```
4197
+ *
4198
+ * @example Without variables
4199
+ * ```typescript
4200
+ * interface MyDeps { userId: string; }
4201
+ *
4202
+ * const sayHello = definePrompt<{ ctx: Context<MyDeps> }>({
4203
+ * template: ({ ctx }) => `Hello, user ${ctx.deps.userId}!`,
4204
+ * });
4205
+ *
4206
+ * const ctx = createContext<MyDeps>({ userId: '123' });
+ * const response = await sayHello(model, ctx);
4207
+ * ```
4208
+ */
4209
+ interface ContextPrompt<T = NoVars, DepsT = unknown, F extends AnyFormatInput = undefined> {
4210
+ /**
4211
+ * Call the prompt with a model, context, and variables to generate a response.
4212
+ *
4213
+ * @param model - The model to use, either a Model instance or model ID string.
4214
+ * @param ctx - The context containing dependencies.
4215
+ * @param vars - The variables to pass to the template.
4216
+ * @returns A promise that resolves to the LLM response.
4217
+ */
4218
+ (model: Model | ModelId, ctx: Context<DepsT>, ...args: keyof T extends never ? [] : [vars: T]): Promise<ContextResponse<DepsT, ExtractFormatType<F>>>;
4219
+ /**
4220
+ * Call the prompt with a model, context, and variables to generate a response.
4221
+ * This is the method form of the callable interface.
4222
+ *
4223
+ * @param model - The model to use, either a Model instance or model ID string.
4224
+ * @param ctx - The context containing dependencies.
4225
+ * @param vars - The variables to pass to the template.
4226
+ * @returns A promise that resolves to the LLM response.
4227
+ */
4228
+ call(model: Model | ModelId, ctx: Context<DepsT>, ...args: keyof T extends never ? [] : [vars: T]): Promise<ContextResponse<DepsT, ExtractFormatType<F>>>;
4229
+ /**
4230
+ * Stream the prompt with a model, context, and variables to generate a streaming response.
4231
+ *
4232
+ * @param model - The model to use, either a Model instance or model ID string.
4233
+ * @param ctx - The context containing dependencies.
4234
+ * @param vars - The variables to pass to the template.
4235
+ * @returns A promise that resolves to the streaming LLM response.
4236
+ *
4237
+ * @example
4238
+ * ```typescript
4239
+ * const response = await prompt.stream(model, ctx, { greeting: 'Hello' });
4240
+ * for await (const text of response.textStream()) {
4241
+ * process.stdout.write(text);
4242
+ * }
4243
+ * ```
4244
+ */
4245
+ stream(model: Model | ModelId, ctx: Context<DepsT>, ...args: keyof T extends never ? [] : [vars: T]): Promise<ContextStreamResponse<DepsT, ExtractFormatType<F>>>;
4246
+ /**
4247
+ * Get the messages for this prompt without calling the LLM.
4248
+ *
4249
+ * @param ctx - The context containing dependencies.
4250
+ * @param vars - The variables to pass to the template.
4251
+ * @returns The messages that would be sent to the LLM.
4252
+ */
4253
+ messages(ctx: Context<DepsT>, ...args: keyof T extends never ? [] : [vars: T]): readonly Message[];
4254
+ /**
4255
+ * The tools available to this prompt.
4256
+ */
4257
+ readonly tools: ContextTools<DepsT> | undefined;
4258
+ /**
4259
+ * The format specification for structured output, if any.
4260
+ */
4261
+ readonly format: Format<ExtractFormatType<F>> | FormatSpec<ExtractFormatType<F>> | ZodLike | OutputParser<ExtractFormatType<F>> | null | undefined;
4262
+ /**
4263
+ * The underlying template function.
4264
+ */
4265
+ readonly template: ContextTemplateFunc<T, DepsT>;
4266
+ }
4267
+ /**
4268
+ * Unified prompt type that returns either Prompt or ContextPrompt
4269
+ * based on whether T includes `ctx: Context<DepsT>`.
4270
+ */
4271
+ type UnifiedPrompt<T, F extends AnyFormatInput = undefined> = ExtractDeps<T> extends never ? Prompt<T, F> : ContextPrompt<ExtractVars<T>, ExtractDeps<T>, F>;
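+ /**
+ * A sketch of how this conditional resolves for the two shapes used in the
+ * examples below, assuming `ExtractDeps` / `ExtractVars` behave as their names
+ * and the surrounding docs describe (illustrative, not part of the package):
+ *
+ * @example Illustrative resolution
+ * ```typescript
+ * interface MyDeps { userId: string; }
+ *
+ * // No `ctx` key, so ExtractDeps<T> is never and the first branch applies:
+ * type A = UnifiedPrompt<{ genre: string }>; // Prompt<{ genre: string }>
+ *
+ * // `ctx: Context<MyDeps>` is present, so the second branch applies:
+ * type B = UnifiedPrompt<{ ctx: Context<MyDeps>; greeting: string }>;
+ * // roughly ContextPrompt<{ greeting: string }, MyDeps>
+ * ```
+ */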
4272
+ /**
4273
+ * Define a prompt that automatically detects context from the template type.
4274
+ *
4275
+ * When T includes `ctx: Context<DepsT>`, returns a ContextPrompt.
4276
+ * Otherwise returns a regular Prompt.
4277
+ *
4278
+ * @template T - The type of the template parameter (including ctx if context-aware).
4279
+ * @template F - The format input type. The output type is derived via ExtractFormatType<F>.
4280
+ * @param args - The prompt arguments including the template.
4281
+ * @returns A callable prompt (context-aware if T includes ctx).
4282
+ *
4283
+ * @example Prompt without variables
4284
+ * ```typescript
4285
+ * const sayHello = definePrompt({
4286
+ * template: () => 'Hello!',
4287
+ * });
4288
+ * const response = await sayHello(model);
4289
+ * ```
4290
+ *
4291
+ * @example Regular prompt with variables
4292
+ * ```typescript
4293
+ * const recommendBook = definePrompt<{ genre: string }>({
4294
+ * template: ({ genre }) => `Recommend a ${genre} book`,
4295
+ * });
4296
+ * const response = await recommendBook(model, { genre: 'fantasy' });
4297
+ * ```
4298
+ *
4299
+ * @example Context-aware prompt
4300
+ * ```typescript
4301
+ * interface MyDeps { userId: string; }
4302
+ *
4303
+ * const greetUser = definePrompt<{ ctx: Context<MyDeps>; greeting: string }>({
4304
+ * template: ({ ctx, greeting }) => `${greeting}, user ${ctx.deps.userId}!`,
4305
+ * });
4306
+ *
4307
+ * const ctx = createContext<MyDeps>({ userId: '123' });
4308
+ * const response = await greetUser(model, ctx, { greeting: 'Hello' });
4309
+ * ```
4310
+ */
4311
+ declare function definePrompt<T extends Record<string, unknown> = NoVars, F extends AnyFormatInput = undefined>(args: PromptArgs<T, F> | ContextPromptArgs<T, F>): UnifiedPrompt<T, F>;
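+ /**
+ * Prompts can also carry a `format` for structured output, mirroring the
+ * `defineCall` example further below. A hedged sketch: the `Book` interface is
+ * illustrative, and variables inference from the typed template parameter is
+ * assumed to work here as it does for `defineCall`.
+ *
+ * @example Illustrative prompt with structured output
+ * ```typescript
+ * interface Book { title: string; author: string; }
+ *
+ * const recommendBook = definePrompt({
+ *   format: defineFormat<Book>({ mode: 'tool' }),
+ *   template: ({ genre }: { genre: string }) => `Recommend a ${genre} book`,
+ * });
+ *
+ * const response = await recommendBook('anthropic/claude-sonnet-4-20250514', { genre: 'fantasy' });
+ * const book = response.parse(); // Typed as Book
+ * ```
+ */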
4312
+
4313
+ /**
4314
+ * Call definition and creation utilities.
4315
+ *
4316
+ * A Call is a Prompt with a bundled Model - it can be invoked directly
4317
+ * without passing a model argument.
4318
+ *
4319
+ * This module provides a unified `defineCall` function that automatically
4320
+ * detects whether a call is context-aware based on the template type parameter.
4321
+ * If T includes `ctx: Context<DepsT>`, a ContextCall is returned; otherwise
4322
+ * a regular Call is returned.
4323
+ */
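+ /**
+ * The relationship in one sketch: the same template, first as a Prompt (model
+ * passed per invocation), then as a Call (model bundled). Assembled from the
+ * examples elsewhere in this file; names are illustrative.
+ *
+ * @example Illustrative Prompt vs. Call
+ * ```typescript
+ * const asPrompt = definePrompt<{ genre: string }>({
+ *   template: ({ genre }) => `Recommend a ${genre} book`,
+ * });
+ * const viaPrompt = await asPrompt('anthropic/claude-sonnet-4-20250514', { genre: 'fantasy' });
+ *
+ * const asCall = defineCall<{ genre: string }>({
+ *   model: 'anthropic/claude-sonnet-4-20250514',
+ *   template: ({ genre }) => `Recommend a ${genre} book`,
+ * });
+ * const viaCall = await asCall({ genre: 'fantasy' });
+ * ```
+ */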
4324
+
4325
+ /**
4326
+ * Arguments for defining a call.
4327
+ *
4328
+ * @template T - The type of variables the template accepts. Defaults to NoVars.
4329
+ * @template F - The format input type. The output type is derived via ExtractFormatType<F>.
4330
+ */
4331
+ interface CallArgs<T = NoVars, F extends AnyFormatInput = undefined> extends Params {
4332
+ /** The model to use, either a Model instance or model ID string. */
4333
+ model: Model | ModelId;
4334
+ /** Optional tools to make available to the model. */
4335
+ tools?: Tools;
4336
+ /**
4337
+ * Optional format specification for structured output.
4338
+ * Can be a Zod schema, Format, FormatSpec, or OutputParser.
4339
+ * The output type is automatically inferred from this format.
4340
+ */
4341
+ format?: F;
4342
+ /** A function that generates message content (optionally from variables). */
4343
+ template: TemplateFunc<T>;
4344
+ }
4345
+ /**
4346
+ * Arguments for defining a context-aware call.
4347
+ * Used when T includes `ctx: Context<DepsT>`.
4348
+ *
4349
+ * @template T - The full template parameter type including ctx.
4350
+ * @template F - The format input type.
4351
+ */
4352
+ interface ContextCallArgs<T = NoVars, F extends AnyFormatInput = undefined> extends Params {
4353
+ /** The model to use, either a Model instance or model ID string. */
4354
+ model: Model | ModelId;
4355
+ /** Optional tools to make available to the model. */
4356
+ tools?: ExtractDeps<T> extends never ? Tools : ContextTools<ExtractDeps<T>>;
4357
+ /**
4358
+ * Optional format specification for structured output.
4359
+ * Can be a Zod schema, Format, FormatSpec, or OutputParser.
4360
+ */
4361
+ format?: F;
4362
+ /** A function that generates message content from context (and optionally variables). */
4363
+ template: TemplateFunc<T>;
4364
+ }
4365
+ /**
4366
+ * A call that can be invoked directly to generate a response.
4367
+ *
4368
+ * Created by `defineCall()`. Unlike a `Prompt`, a `Call` has a model bundled in,
4369
+ * so it can be invoked without passing a model argument.
4370
+ *
4371
+ * @template T - The type of variables the call accepts. Defaults to empty object.
4372
+ * @template F - The format input type. The output type is derived via ExtractFormatType<F>.
4373
+ *
4374
+ * @example With variables
4375
+ * ```typescript
4376
+ * const recommendBook = defineCall<{ genre: string }>({
4377
+ * model: 'anthropic/claude-sonnet-4-20250514',
4378
+ * template: ({ genre }) => `Recommend a ${genre} book`,
4379
+ * });
4380
+ *
4381
+ * const response = await recommendBook({ genre: 'fantasy' });
4382
+ * ```
4383
+ *
4384
+ * @example Without variables
4385
+ * ```typescript
4386
+ * const sayHello = defineCall({
4387
+ * model: 'anthropic/claude-sonnet-4-20250514',
4388
+ * template: () => 'Hello!',
4389
+ * });
4390
+ * const response = await sayHello();
4391
+ * ```
4392
+ */
4393
+ interface Call<T = NoVars, F extends AnyFormatInput = undefined> {
4394
+ /**
4395
+ * Call directly to generate a response (model is bundled).
4396
+ *
4397
+ * @param vars - The variables to pass to the template.
4398
+ * @returns A promise that resolves to the LLM response.
4399
+ */
4400
+ (...args: keyof T extends never ? [] : [vars: T]): Promise<Response<ExtractFormatType<F>>>;
4401
+ /**
4402
+ * Call directly to generate a response (model is bundled).
4403
+ * This is the method form of the callable interface.
4404
+ *
4405
+ * @param vars - The variables to pass to the template.
4406
+ * @returns A promise that resolves to the LLM response.
4407
+ */
4408
+ call(...args: keyof T extends never ? [] : [vars: T]): Promise<Response<ExtractFormatType<F>>>;
4409
+ /**
4410
+ * Stream directly to generate a streaming response (model is bundled).
4411
+ *
4412
+ * @param vars - The variables to pass to the template.
4413
+ * @returns A promise that resolves to the streaming LLM response.
4414
+ *
4415
+ * @example
4416
+ * ```typescript
4417
+ * const response = await call.stream({ genre: 'fantasy' });
4418
+ * for await (const text of response.textStream()) {
4419
+ * process.stdout.write(text);
4420
+ * }
4421
+ * ```
4422
+ */
4423
+ stream(...args: keyof T extends never ? [] : [vars: T]): Promise<StreamResponse<ExtractFormatType<F>>>;
4424
+ /**
4425
+ * The model used for generating responses.
4426
+ * Returns the context model if one is set via `withModel`, otherwise returns `defaultModel`.
4427
+ */
4428
+ readonly model: Model;
4429
+ /**
4430
+ * The default model configured when defining this call.
4431
+ * Use `model` to get the effective model (which respects context).
4432
+ */
4433
+ readonly defaultModel: Model;
4434
+ /**
4435
+ * The tools available to this call.
4436
+ */
4437
+ readonly tools: Tools | undefined;
4438
+ /**
4439
+ * The format specification for structured output, if any.
4440
+ */
4441
+ readonly format: FormatInput<ExtractFormatType<F>>;
4442
+ /**
4443
+ * The underlying prompt.
4444
+ */
4445
+ readonly prompt: Prompt<T, F>;
4446
+ /**
4447
+ * The underlying template function.
4448
+ */
4449
+ readonly template: TemplateFunc<T>;
4450
+ }
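+ /**
+ * A sketch of the `model` / `defaultModel` distinction described above.
+ * Illustrative only; the identity check assumes `model` simply returns
+ * `defaultModel` when no `withModel` override is active, as documented.
+ *
+ * @example Illustrative model resolution
+ * ```typescript
+ * const sayHello = defineCall({
+ *   model: 'anthropic/claude-sonnet-4-20250514',
+ *   template: () => 'Hello!',
+ * });
+ *
+ * // Outside any context override, both getters refer to the configured model.
+ * console.log(sayHello.model === sayHello.defaultModel); // true when no withModel override is active (assumption)
+ *
+ * // Inside a scope where a different model is set via `withModel`,
+ * // `sayHello.model` reflects the override while `defaultModel` stays fixed.
+ * ```
+ */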
4451
+ /**
4452
+ * A context-aware call that can be invoked directly with a context to generate a response.
4453
+ *
4454
+ * Created by `defineCall()` when the template type includes `ctx: Context<DepsT>`.
4455
+ * Unlike a `ContextPrompt`, a `ContextCall` has a model bundled in, so it can be
4456
+ * invoked without passing a model argument.
4457
+ *
4458
+ * @template T - The type of variables the call accepts. Defaults to empty object.
4459
+ * @template DepsT - The type of dependencies in the context.
4460
+ * @template F - The format input type. The output type is derived via ExtractFormatType<F>.
4461
+ *
4462
+ * @example With variables
4463
+ * ```typescript
4464
+ * interface MyDeps { userId: string; }
4465
+ *
4466
+ * const greetUser = defineCall<{ ctx: Context<MyDeps>; greeting: string }>({
4467
+ * model: 'anthropic/claude-sonnet-4-20250514',
4468
+ * template: ({ ctx, greeting }) => `${greeting}, user ${ctx.deps.userId}!`,
4469
+ * });
4470
+ *
4471
+ * const ctx = createContext<MyDeps>({ userId: '123' });
4472
+ * const response = await greetUser(ctx, { greeting: 'Hello' });
4473
+ * ```
4474
+ *
4475
+ * @example Without variables
4476
+ * ```typescript
4477
+ * interface MyDeps { userId: string; }
4478
+ *
4479
+ * const sayHello = defineCall<{ ctx: Context<MyDeps> }>({
4480
+ * model: 'anthropic/claude-sonnet-4-20250514',
4481
+ * template: ({ ctx }) => `Hello, user ${ctx.deps.userId}!`,
4482
+ * });
4483
+ *
4484
+ * const ctx = createContext<MyDeps>({ userId: '123' });
4485
+ * const response = await sayHello(ctx);
4486
+ * ```
4487
+ */
4488
+ interface ContextCall<T = NoVars, DepsT = unknown, F extends AnyFormatInput = undefined> {
4489
+ /**
4490
+ * Call directly with context to generate a response (model is bundled).
4491
+ *
4492
+ * @param ctx - The context containing dependencies.
4493
+ * @param vars - The variables to pass to the template.
4494
+ * @returns A promise that resolves to the LLM response.
4495
+ */
4496
+ (ctx: Context<DepsT>, ...args: keyof T extends never ? [] : [vars: T]): Promise<ContextResponse<DepsT, ExtractFormatType<F>>>;
4497
+ /**
4498
+ * Call directly with context to generate a response (model is bundled).
4499
+ * This is the method form of the callable interface.
4500
+ *
4501
+ * @param ctx - The context containing dependencies.
4502
+ * @param vars - The variables to pass to the template.
4503
+ * @returns A promise that resolves to the LLM response.
4504
+ */
4505
+ call(ctx: Context<DepsT>, ...args: keyof T extends never ? [] : [vars: T]): Promise<ContextResponse<DepsT, ExtractFormatType<F>>>;
4506
+ /**
4507
+ * Stream directly with context to generate a streaming response (model is bundled).
4508
+ *
4509
+ * @param ctx - The context containing dependencies.
4510
+ * @param vars - The variables to pass to the template.
4511
+ * @returns A promise that resolves to the streaming LLM response.
4512
+ *
4513
+ * @example
4514
+ * ```typescript
4515
+ * const response = await call.stream(ctx, { greeting: 'Hello' });
4516
+ * for await (const text of response.textStream()) {
4517
+ * process.stdout.write(text);
4518
+ * }
4519
+ * ```
4520
+ */
4521
+ stream(ctx: Context<DepsT>, ...args: keyof T extends never ? [] : [vars: T]): Promise<ContextStreamResponse<DepsT, ExtractFormatType<F>>>;
4522
+ /**
4523
+ * The model used for generating responses.
4524
+ * Returns the context model if one is set via `withModel`, otherwise returns `defaultModel`.
4525
+ */
4526
+ readonly model: Model;
4527
+ /**
4528
+ * The default model configured when defining this call.
4529
+ * Use `model` to get the effective model (which respects context).
4530
+ */
4531
+ readonly defaultModel: Model;
4532
+ /**
4533
+ * The tools available to this call.
4534
+ */
4535
+ readonly tools: ContextTools<DepsT> | undefined;
4536
+ /**
4537
+ * The format specification for structured output, if any.
4538
+ */
4539
+ readonly format: Format<ExtractFormatType<F>> | FormatSpec<ExtractFormatType<F>> | ZodLike | OutputParser<ExtractFormatType<F>> | null | undefined;
4540
+ /**
4541
+ * The underlying context prompt.
4542
+ */
4543
+ readonly prompt: ContextPrompt<T, DepsT, F>;
4544
+ /**
4545
+ * The underlying template function.
4546
+ */
4547
+ readonly template: ContextTemplateFunc<T, DepsT>;
4548
+ }
4549
+ /**
4550
+ * Unified call type that returns either Call or ContextCall
4551
+ * based on whether T includes `ctx: Context<DepsT>`.
4552
+ */
4553
+ type UnifiedCall<T, F extends AnyFormatInput = undefined> = ExtractDeps<T> extends never ? Call<T, F> : ContextCall<ExtractVars<T>, ExtractDeps<T>, F>;
4554
+ /**
4555
+ * A builder function returned when defineCall is called with only a type parameter.
4556
+ * Allows specifying the variables type explicitly while inferring the format type.
4557
+ *
4558
+ * @template T - The type of variables the call accepts (may include ctx for context calls).
4559
+ */
4560
+ interface CallBuilder<T extends Record<string, unknown>> {
4561
+ <F extends AnyFormatInput = undefined>(args: CallArgs<T, F> | ContextCallArgs<T, F>): UnifiedCall<T, F>;
4562
+ }
4563
+ /**
4564
+ * Define a call with explicit variables type, returning a builder that infers format.
4565
+ *
4566
+ * This overload enables specifying the variables type upfront while still allowing
4567
+ * the format type to be inferred from the `format` property.
4568
+ *
4569
+ * @template T - The type of variables the template accepts (may include ctx for context calls).
4570
+ * @returns A builder function that accepts call arguments and infers the format type.
4571
+ *
4572
+ * @example With explicit variables and inferred format
4573
+ * ```typescript
4574
+ * const recommendBook = defineCall<{ genre: string }>()({
4575
+ * model: 'anthropic/claude-sonnet-4-20250514',
4576
+ * format: defineFormat<Book>({ mode: 'tool' }),
4577
+ * template: ({ genre }) => `Recommend a ${genre} book`,
4578
+ * });
4579
+ *
4580
+ * const response = await recommendBook({ genre: 'fantasy' });
4581
+ * const book = response.parse(); // Typed as Book
4582
+ * ```
4583
+ *
4584
+ * @example Context-aware call with explicit type
4585
+ * ```typescript
4586
+ * interface MyDeps { userId: string; }
4587
+ *
4588
+ * const greetUser = defineCall<{ ctx: Context<MyDeps>; greeting: string }>()({
4589
+ * model: 'anthropic/claude-sonnet-4-20250514',
4590
+ * template: ({ ctx, greeting }) => `${greeting}, user ${ctx.deps.userId}!`,
4591
+ * });
4592
+ *
4593
+ * const ctx = createContext<MyDeps>({ userId: '123' });
4594
+ * const response = await greetUser(ctx, { greeting: 'Hello' });
4595
+ * ```
4596
+ */
4597
+ declare function defineCall<T extends Record<string, unknown>>(): CallBuilder<T>;
4598
+ /**
4599
+ * Define a call that automatically detects context from the template type.
4600
+ *
4601
+ * When T includes `ctx: Context<DepsT>`, returns a ContextCall.
4602
+ * Otherwise returns a regular Call.
4603
+ *
4604
+ * Both the variables type and format type are inferred from the arguments.
4605
+ * Type the template parameter to enable variables inference.
4606
+ *
4607
+ * @template T - The type of variables the template accepts (may include ctx for context calls).
4608
+ * @template F - The format input type (inferred from format property).
4609
+ * @param args - The call arguments including model, template, and optional parameters.
4610
+ * @returns A callable that can be invoked directly with variables (and context if T includes ctx).
4611
+ *
4612
+ * @example Call without variables
4613
+ * ```typescript
4614
+ * const sayHello = defineCall({
4615
+ * model: 'anthropic/claude-sonnet-4-20250514',
4616
+ * template: () => 'Hello!',
4617
+ * });
4618
+ * const response = await sayHello();
4619
+ * ```
4620
+ *
4621
+ * @example Regular call with variables
4622
+ * ```typescript
4623
+ * const recommendBook = defineCall({
4624
+ * model: 'anthropic/claude-sonnet-4-20250514',
4625
+ * format: defineFormat<Book>({ mode: 'tool' }),
4626
+ * template: ({ genre }: { genre: string }) => `Recommend a ${genre} book`,
4627
+ * });
4628
+ * const response = await recommendBook({ genre: 'fantasy' });
4629
+ * ```
4630
+ *
4631
+ * @example Context-aware call
4632
+ * ```typescript
4633
+ * interface MyDeps { userId: string; }
4634
+ *
4635
+ * const greetUser = defineCall({
4636
+ * model: 'anthropic/claude-sonnet-4-20250514',
4637
+ * template: ({ ctx, greeting }: { ctx: Context<MyDeps>; greeting: string }) =>
4638
+ * `${greeting}, user ${ctx.deps.userId}!`,
4639
+ * });
4640
+ *
4641
+ * const ctx = createContext<MyDeps>({ userId: '123' });
4642
+ * const response = await greetUser(ctx, { greeting: 'Hello' });
4643
+ * ```
4644
+ */
4645
+ declare function defineCall<T extends Record<string, unknown> = NoVars, F extends AnyFormatInput = undefined>(args: CallArgs<T, F> | ContextCallArgs<T, F>): UnifiedCall<T, F>;
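+ /**
+ * One combination not shown above: a context-aware call that also declares a
+ * format, using the builder overload so the variables type is explicit while
+ * the structured output is inferred. Illustrative sketch; `MyDeps` and `Book`
+ * are assumptions, and `parse()` on the context response is assumed to behave
+ * as it does on `Response`.
+ *
+ * @example Illustrative context-aware call with structured output
+ * ```typescript
+ * interface MyDeps { userId: string; }
+ * interface Book { title: string; author: string; }
+ *
+ * const recommendForUser = defineCall<{ ctx: Context<MyDeps>; genre: string }>()({
+ *   model: 'anthropic/claude-sonnet-4-20250514',
+ *   format: defineFormat<Book>({ mode: 'tool' }),
+ *   template: ({ ctx, genre }) => `Recommend a ${genre} book for user ${ctx.deps.userId}`,
+ * });
+ *
+ * const ctx = createContext<MyDeps>({ userId: '123' });
+ * const response = await recommendForUser(ctx, { genre: 'fantasy' });
+ * const book = response.parse(); // Typed as Book
+ * ```
+ */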
4646
+
4647
+ type index_APIError = APIError;
4648
+ declare const index_APIError: typeof APIError;
4649
+ type index_AnthropicModelId = AnthropicModelId;
4650
+ type index_AnyContextTool<DepsT = unknown> = AnyContextTool<DepsT>;
4651
+ type index_AnyResponse = AnyResponse;
4652
+ type index_AnyTool = AnyTool;
4653
+ type index_AnyToolFn = AnyToolFn;
4654
+ type index_ApiMode = ApiMode;
4655
+ type index_AssistantContent = AssistantContent;
4656
+ type index_AssistantContentChunk = AssistantContentChunk;
4657
+ type index_AssistantContentPart = AssistantContentPart;
4658
+ type index_AssistantMessage = AssistantMessage;
4659
+ type index_AsyncChunkIterator = AsyncChunkIterator;
4660
+ declare const index_Audio: typeof Audio;
4661
+ type index_AudioMimeType = AudioMimeType;
4662
+ type index_AuthenticationError = AuthenticationError;
4663
+ declare const index_AuthenticationError: typeof AuthenticationError;
4664
+ type index_BadRequestError = BadRequestError;
4665
+ declare const index_BadRequestError: typeof BadRequestError;
4666
+ type index_Base64AudioSource = Base64AudioSource;
4667
+ type index_Base64DocumentSource = Base64DocumentSource;
4668
+ type index_Base64ImageSource = Base64ImageSource;
4669
+ type index_BaseContextTool<DepsT = unknown> = BaseContextTool<DepsT>;
4670
+ type index_BaseResponse<F = unknown> = BaseResponse<F>;
4671
+ declare const index_BaseResponse: typeof BaseResponse;
4672
+ type index_BaseResponseInit = BaseResponseInit;
4673
+ type index_BaseTool = BaseTool;
4674
+ declare const index_CONTEXT_MARKER: typeof CONTEXT_MARKER;
4675
+ declare const index_CONTEXT_TOOL_TYPE: typeof CONTEXT_TOOL_TYPE;
4676
+ type index_Call<T = NoVars, F extends AnyFormatInput = undefined> = Call<T, F>;
4677
+ type index_CallArgs<T = NoVars, F extends AnyFormatInput = undefined> = CallArgs<T, F>;
4678
+ type index_ConnectionError = ConnectionError;
4679
+ declare const index_ConnectionError: typeof ConnectionError;
4680
+ type index_ContentPart = ContentPart;
4681
+ type index_Context<DepsT> = Context<DepsT>;
4682
+ type index_ContextCall<T = NoVars, DepsT = unknown, F extends AnyFormatInput = undefined> = ContextCall<T, DepsT, F>;
4683
+ type index_ContextCallArgs<T = NoVars, F extends AnyFormatInput = undefined> = ContextCallArgs<T, F>;
4684
+ type index_ContextMessageTemplate<T, DepsT> = ContextMessageTemplate<T, DepsT>;
4685
+ type index_ContextPrompt<T = NoVars, DepsT = unknown, F extends AnyFormatInput = undefined> = ContextPrompt<T, DepsT, F>;
4686
+ type index_ContextPromptArgs<T = NoVars, F extends AnyFormatInput = undefined> = ContextPromptArgs<T, F>;
4687
+ type index_ContextResponse<DepsT = unknown, F = unknown> = ContextResponse<DepsT, F>;
4688
+ declare const index_ContextResponse: typeof ContextResponse;
4689
+ type index_ContextResponseInit<DepsT = unknown> = ContextResponseInit<DepsT>;
4690
+ type index_ContextStreamResponse<DepsT = unknown, F = unknown> = ContextStreamResponse<DepsT, F>;
4691
+ declare const index_ContextStreamResponse: typeof ContextStreamResponse;
4692
+ type index_ContextStreamResponseInit<DepsT = unknown> = ContextStreamResponseInit<DepsT>;
4693
+ type index_ContextTemplateFunc<T, DepsT> = ContextTemplateFunc<T, DepsT>;
4694
+ type index_ContextTool<T extends Record<string, unknown> = Record<string, unknown>, DepsT = unknown> = ContextTool<T, DepsT>;
4695
+ type index_ContextToolArgs<T extends Record<string, unknown>, DepsT = unknown> = ContextToolArgs<T, DepsT>;
4696
+ type index_ContextToolFn<T extends Record<string, unknown> = Record<string, unknown>, DepsT = unknown, R extends Jsonable = Jsonable> = ContextToolFn<T, DepsT, R>;
4697
+ type index_ContextToolkit<DepsT = unknown> = ContextToolkit<DepsT>;
4698
+ declare const index_ContextToolkit: typeof ContextToolkit;
4699
+ type index_ContextTools<DepsT = unknown> = ContextTools<DepsT>;
4700
+ type index_DeepPartial<T> = DeepPartial<T>;
4701
+ declare const index_Document: typeof Document;
4702
+ type index_DocumentBase64MimeType = DocumentBase64MimeType;
4703
+ type index_DocumentTextMimeType = DocumentTextMimeType;
4704
+ type index_ExtractDeps<T> = ExtractDeps<T>;
4705
+ type index_ExtractVars<T> = ExtractVars<T>;
4706
+ declare const index_FORMAT_TOOL_NAME: typeof FORMAT_TOOL_NAME;
4707
+ type index_FeatureNotSupportedError = FeatureNotSupportedError;
4708
+ declare const index_FeatureNotSupportedError: typeof FeatureNotSupportedError;
4709
+ type index_FinishReason = FinishReason;
4710
+ type index_FinishReasonChunk = FinishReasonChunk;
4711
+ type index_Format<T = unknown> = Format<T>;
4712
+ type index_FormatSpec<T = unknown> = FormatSpec<T>;
4713
+ type index_FormattingMode = FormattingMode;
4714
+ type index_GoogleModelId = GoogleModelId;
4715
+ declare const index_Image: typeof Image;
4716
+ type index_ImageMimeType = ImageMimeType;
4717
+ type index_InferZod<Z extends ZodLike> = InferZod<Z>;
4718
+ declare const index_JSON_MODE_INSTRUCTIONS: typeof JSON_MODE_INSTRUCTIONS;
4719
+ declare const index_JsonSchemaProperty: typeof JsonSchemaProperty;
4720
+ type index_Jsonable = Jsonable;
4721
+ declare const index_KNOWN_PROVIDER_IDS: typeof KNOWN_PROVIDER_IDS;
4722
+ type index_KnownProviderId = KnownProviderId;
4723
+ type index_Message = Message;
4724
+ type index_MessageTemplate<T> = MessageTemplate<T>;
4725
+ type index_MirascopeError = MirascopeError;
4726
+ declare const index_MirascopeError: typeof MirascopeError;
4727
+ type index_MissingAPIKeyError = MissingAPIKeyError;
4728
+ declare const index_MissingAPIKeyError: typeof MissingAPIKeyError;
4729
+ type index_Model = Model;
4730
+ declare const index_Model: typeof Model;
4731
+ type index_ModelId = ModelId;
4732
+ type index_NoRegisteredProviderError = NoRegisteredProviderError;
4733
+ declare const index_NoRegisteredProviderError: typeof NoRegisteredProviderError;
4734
+ type index_NotFoundError = NotFoundError;
4735
+ declare const index_NotFoundError: typeof NotFoundError;
4736
+ type index_OpenAIModelId = OpenAIModelId;
4737
+ type index_OutputParser<T = unknown> = OutputParser<T>;
4738
+ type index_OutputParserArgs<T> = OutputParserArgs<T>;
4739
+ type index_Params = Params;
4740
+ type index_ParseError = ParseError;
4741
+ declare const index_ParseError: typeof ParseError;
4742
+ type index_PermissionError = PermissionError;
4743
+ declare const index_PermissionError: typeof PermissionError;
4744
+ type index_Prompt<T = NoVars, F extends AnyFormatInput = undefined> = Prompt<T, F>;
4745
+ type index_PromptArgs<T = NoVars, F extends AnyFormatInput = undefined> = PromptArgs<T, F>;
4746
+ type index_Provider = Provider;
4747
+ type index_ProviderError = ProviderError;
4748
+ declare const index_ProviderError: typeof ProviderError;
4749
+ type index_ProviderId = ProviderId;
4750
+ type index_ProviderTool = ProviderTool;
4751
+ declare const index_ProviderTool: typeof ProviderTool;
4752
+ type index_RateLimitError = RateLimitError;
4753
+ declare const index_RateLimitError: typeof RateLimitError;
4754
+ type index_RawMessageChunk = RawMessageChunk;
4755
+ type index_RawStreamEventChunk = RawStreamEventChunk;
4756
+ type index_Response<F = unknown> = Response<F>;
4757
+ declare const index_Response: typeof Response;
4758
+ type index_ResponseInit = ResponseInit;
4759
+ type index_ResponseValidationError = ResponseValidationError;
4760
+ declare const index_ResponseValidationError: typeof ResponseValidationError;
4761
+ type index_RootResponse<F = unknown> = RootResponse<F>;
4762
+ declare const index_RootResponse: typeof RootResponse;
4763
+ type index_ServerError = ServerError;
4764
+ declare const index_ServerError: typeof ServerError;
4765
+ type index_StreamResponse<F = unknown> = StreamResponse<F>;
4766
+ declare const index_StreamResponse: typeof StreamResponse;
4767
+ type index_StreamResponseChunk = StreamResponseChunk;
4768
+ type index_StreamResponseInit = StreamResponseInit;
4769
+ type index_SystemContent = SystemContent;
4770
+ type index_SystemMessage = SystemMessage;
4771
+ declare const index_TOOL_MODE_INSTRUCTIONS: typeof TOOL_MODE_INSTRUCTIONS;
4772
+ declare const index_TOOL_TYPE: typeof TOOL_TYPE;
4773
+ type index_TemplateFunc<T> = TemplateFunc<T>;
4774
+ type index_Text = Text;
4775
+ type index_TextChunk = TextChunk;
4776
+ type index_TextDocumentSource = TextDocumentSource;
4777
+ type index_TextEndChunk = TextEndChunk;
4778
+ type index_TextStartChunk = TextStartChunk;
4779
+ type index_ThinkingConfig = ThinkingConfig;
4780
+ type index_ThinkingLevel = ThinkingLevel;
4781
+ type index_Thought = Thought;
4782
+ type index_ThoughtChunk = ThoughtChunk;
4783
+ type index_ThoughtEndChunk = ThoughtEndChunk;
4784
+ type index_ThoughtStartChunk = ThoughtStartChunk;
4785
+ type index_TimeoutError = TimeoutError;
4786
+ declare const index_TimeoutError: typeof TimeoutError;
4787
+ type index_Tool<T extends Record<string, unknown> = Record<string, unknown>> = Tool<T>;
4788
+ type index_ToolArgs<T extends Record<string, unknown>> = ToolArgs<T>;
4789
+ type index_ToolCall = ToolCall;
4790
+ type index_ToolCallChunk = ToolCallChunk;
4791
+ type index_ToolCallEndChunk = ToolCallEndChunk;
4792
+ type index_ToolCallStartChunk = ToolCallStartChunk;
4793
+ type index_ToolError = ToolError;
4794
+ declare const index_ToolError: typeof ToolError;
4795
+ type index_ToolExecutionError = ToolExecutionError;
4796
+ declare const index_ToolExecutionError: typeof ToolExecutionError;
4797
+ type index_ToolFn<T extends Record<string, unknown> = Record<string, unknown>, R extends Jsonable = Jsonable> = ToolFn<T, R>;
4798
+ type index_ToolNotFoundError = ToolNotFoundError;
4799
+ declare const index_ToolNotFoundError: typeof ToolNotFoundError;
4800
+ declare const index_ToolOutput: typeof ToolOutput;
4801
+ declare const index_ToolParameterSchema: typeof ToolParameterSchema;
4802
+ declare const index_ToolSchema: typeof ToolSchema;
4803
+ type index_Toolkit = Toolkit;
4804
+ declare const index_Toolkit: typeof Toolkit;
4805
+ type index_Tools = Tools;
4806
+ type index_URLDocumentSource = URLDocumentSource;
4807
+ type index_URLImageSource = URLImageSource;
4808
+ type index_UnifiedCall<T, F extends AnyFormatInput = undefined> = UnifiedCall<T, F>;
4809
+ type index_UnifiedPrompt<T, F extends AnyFormatInput = undefined> = UnifiedPrompt<T, F>;
4810
+ type index_Usage = Usage;
4811
+ type index_UsageDeltaChunk = UsageDeltaChunk;
4812
+ type index_UserContent = UserContent;
4813
+ type index_UserContentPart = UserContentPart;
4814
+ type index_UserMessage = UserMessage;
4815
+ type index_WebSearchTool = WebSearchTool;
4816
+ declare const index_WebSearchTool: typeof WebSearchTool;
4817
+ type index_ZodContextToolArgs<Z extends ZodLike, DepsT = unknown> = ZodContextToolArgs<Z, DepsT>;
4818
+ type index_ZodLike = ZodLike;
4819
+ type index_ZodToolArgs<Z extends ZodLike> = ZodToolArgs<Z>;
4820
+ declare const index_createContext: typeof createContext;
4821
+ declare const index_createContextToolkit: typeof createContextToolkit;
4822
+ declare const index_createToolkit: typeof createToolkit;
4823
+ declare const index_createUsage: typeof createUsage;
4824
+ declare const index_defineCall: typeof defineCall;
4825
+ declare const index_defineContextTool: typeof defineContextTool;
4826
+ declare const index_defineFormat: typeof defineFormat;
4827
+ declare const index_defineOutputParser: typeof defineOutputParser;
4828
+ declare const index_definePrompt: typeof definePrompt;
4829
+ declare const index_defineTool: typeof defineTool;
4830
+ declare const index_getProviderForModel: typeof getProviderForModel;
4831
+ declare const index_isContext: typeof isContext;
4832
+ declare const index_isContextTool: typeof isContextTool;
4833
+ declare const index_isFormat: typeof isFormat;
4834
+ declare const index_isOutputParser: typeof isOutputParser;
4835
+ declare const index_isProviderTool: typeof isProviderTool;
4836
+ declare const index_isWebSearchTool: typeof isWebSearchTool;
4837
+ declare const index_isZodLike: typeof isZodLike;
4838
+ declare const index_model: typeof model;
4839
+ declare const index_modelFromContext: typeof modelFromContext;
4840
+ declare const index_registerProvider: typeof registerProvider;
4841
+ declare const index_resetProviderRegistry: typeof resetProviderRegistry;
4842
+ declare const index_resolveFormat: typeof resolveFormat;
4843
+ declare const index_textChunk: typeof textChunk;
4844
+ declare const index_textEnd: typeof textEnd;
4845
+ declare const index_textStart: typeof textStart;
4846
+ declare const index_thoughtChunk: typeof thoughtChunk;
4847
+ declare const index_thoughtEnd: typeof thoughtEnd;
4848
+ declare const index_thoughtStart: typeof thoughtStart;
4849
+ declare const index_toolCallChunk: typeof toolCallChunk;
4850
+ declare const index_toolCallEnd: typeof toolCallEnd;
4851
+ declare const index_toolCallStart: typeof toolCallStart;
4852
+ declare const index_totalTokens: typeof totalTokens;
4853
+ declare const index_useModel: typeof useModel;
4854
+ declare const index_withModel: typeof withModel;
4855
+ declare namespace index {
4856
+ export { index_APIError as APIError, type index_AnthropicModelId as AnthropicModelId, type index_AnyContextTool as AnyContextTool, type index_AnyResponse as AnyResponse, type index_AnyTool as AnyTool, type index_AnyToolFn as AnyToolFn, type index_ApiMode as ApiMode, type index_AssistantContent as AssistantContent, type index_AssistantContentChunk as AssistantContentChunk, type index_AssistantContentPart as AssistantContentPart, type index_AssistantMessage as AssistantMessage, type index_AsyncChunkIterator as AsyncChunkIterator, index_Audio as Audio, type index_AudioMimeType as AudioMimeType, index_AuthenticationError as AuthenticationError, index_BadRequestError as BadRequestError, type index_Base64AudioSource as Base64AudioSource, type index_Base64DocumentSource as Base64DocumentSource, type index_Base64ImageSource as Base64ImageSource, type index_BaseContextTool as BaseContextTool, index_BaseResponse as BaseResponse, type index_BaseResponseInit as BaseResponseInit, type index_BaseTool as BaseTool, index_CONTEXT_MARKER as CONTEXT_MARKER, index_CONTEXT_TOOL_TYPE as CONTEXT_TOOL_TYPE, type index_Call as Call, type index_CallArgs as CallArgs, index_ConnectionError as ConnectionError, type index_ContentPart as ContentPart, type index_Context as Context, type index_ContextCall as ContextCall, type index_ContextCallArgs as ContextCallArgs, type index_ContextMessageTemplate as ContextMessageTemplate, type index_ContextPrompt as ContextPrompt, type index_ContextPromptArgs as ContextPromptArgs, index_ContextResponse as ContextResponse, type index_ContextResponseInit as ContextResponseInit, index_ContextStreamResponse as ContextStreamResponse, type index_ContextStreamResponseInit as ContextStreamResponseInit, type index_ContextTemplateFunc as ContextTemplateFunc, type index_ContextTool as ContextTool, type index_ContextToolArgs as ContextToolArgs, type index_ContextToolFn as ContextToolFn, index_ContextToolkit as ContextToolkit, type index_ContextTools as ContextTools, type index_DeepPartial as DeepPartial, index_Document as Document, type index_DocumentBase64MimeType as DocumentBase64MimeType, type index_DocumentTextMimeType as DocumentTextMimeType, type index_ExtractDeps as ExtractDeps, type index_ExtractVars as ExtractVars, index_FORMAT_TOOL_NAME as FORMAT_TOOL_NAME, index_FeatureNotSupportedError as FeatureNotSupportedError, type index_FinishReason as FinishReason, type index_FinishReasonChunk as FinishReasonChunk, FinishReason as FinishReasonType, type index_Format as Format, type index_FormatSpec as FormatSpec, type index_FormattingMode as FormattingMode, type index_GoogleModelId as GoogleModelId, index_Image as Image, type index_ImageMimeType as ImageMimeType, type index_InferZod as InferZod, index_JSON_MODE_INSTRUCTIONS as JSON_MODE_INSTRUCTIONS, index_JsonSchemaProperty as JsonSchemaProperty, type index_Jsonable as Jsonable, index_KNOWN_PROVIDER_IDS as KNOWN_PROVIDER_IDS, type index_KnownProviderId as KnownProviderId, type index_Message as Message, type index_MessageTemplate as MessageTemplate, index_MirascopeError as MirascopeError, index_MissingAPIKeyError as MissingAPIKeyError, index_Model as Model, type index_ModelId as ModelId, index_NoRegisteredProviderError as NoRegisteredProviderError, index_NotFoundError as NotFoundError, type index_OpenAIModelId as OpenAIModelId, type index_OutputParser as OutputParser, type index_OutputParserArgs as OutputParserArgs, type index_Params as Params, index_ParseError as ParseError, index_PermissionError as PermissionError, type index_Prompt as 
Prompt, type index_PromptArgs as PromptArgs, type index_Provider as Provider, index_ProviderError as ProviderError, type index_ProviderId as ProviderId, index_ProviderTool as ProviderTool, index_RateLimitError as RateLimitError, type index_RawMessageChunk as RawMessageChunk, type index_RawStreamEventChunk as RawStreamEventChunk, index_Response as Response, type index_ResponseInit as ResponseInit, index_ResponseValidationError as ResponseValidationError, index_RootResponse as RootResponse, index_ServerError as ServerError, index_StreamResponse as StreamResponse, type index_StreamResponseChunk as StreamResponseChunk, type index_StreamResponseInit as StreamResponseInit, type index_SystemContent as SystemContent, type index_SystemMessage as SystemMessage, index_TOOL_MODE_INSTRUCTIONS as TOOL_MODE_INSTRUCTIONS, index_TOOL_TYPE as TOOL_TYPE, type index_TemplateFunc as TemplateFunc, type index_Text as Text, type index_TextChunk as TextChunk, type index_TextDocumentSource as TextDocumentSource, type index_TextEndChunk as TextEndChunk, type index_TextStartChunk as TextStartChunk, type index_ThinkingConfig as ThinkingConfig, type index_ThinkingLevel as ThinkingLevel, type index_Thought as Thought, type index_ThoughtChunk as ThoughtChunk, type index_ThoughtEndChunk as ThoughtEndChunk, type index_ThoughtStartChunk as ThoughtStartChunk, index_TimeoutError as TimeoutError, type index_Tool as Tool, type index_ToolArgs as ToolArgs, type index_ToolCall as ToolCall, type index_ToolCallChunk as ToolCallChunk, type index_ToolCallEndChunk as ToolCallEndChunk, type index_ToolCallStartChunk as ToolCallStartChunk, index_ToolError as ToolError, index_ToolExecutionError as ToolExecutionError, type index_ToolFn as ToolFn, index_ToolNotFoundError as ToolNotFoundError, index_ToolOutput as ToolOutput, index_ToolParameterSchema as ToolParameterSchema, index_ToolSchema as ToolSchema, index_Toolkit as Toolkit, type index_Tools as Tools, type index_URLDocumentSource as URLDocumentSource, type index_URLImageSource as URLImageSource, type index_UnifiedCall as UnifiedCall, type index_UnifiedPrompt as UnifiedPrompt, type index_Usage as Usage, type index_UsageDeltaChunk as UsageDeltaChunk, type index_UserContent as UserContent, type index_UserContentPart as UserContentPart, type index_UserMessage as UserMessage, index_WebSearchTool as WebSearchTool, type index_ZodContextToolArgs as ZodContextToolArgs, type index_ZodLike as ZodLike, type index_ZodToolArgs as ZodToolArgs, index_createContext as createContext, index_createContextToolkit as createContextToolkit, index_createToolkit as createToolkit, index_createUsage as createUsage, index_defineCall as defineCall, index_defineContextTool as defineContextTool, index_defineFormat as defineFormat, index_defineOutputParser as defineOutputParser, index_definePrompt as definePrompt, index_defineTool as defineTool, index_getProviderForModel as getProviderForModel, index_isContext as isContext, index_isContextTool as isContextTool, index_isFormat as isFormat, index_isOutputParser as isOutputParser, index_isProviderTool as isProviderTool, index_isWebSearchTool as isWebSearchTool, index_isZodLike as isZodLike, index$1 as messages, index_model as model, index_modelFromContext as modelFromContext, index_registerProvider as registerProvider, index_resetProviderRegistry as resetProviderRegistry, index_resolveFormat as resolveFormat, index_textChunk as textChunk, index_textEnd as textEnd, index_textStart as textStart, index_thoughtChunk as thoughtChunk, index_thoughtEnd as thoughtEnd, 
index_thoughtStart as thoughtStart, index_toolCallChunk as toolCallChunk, index_toolCallEnd as toolCallEnd, index_toolCallStart as toolCallStart, index_totalTokens as totalTokens, index_useModel as useModel, index_withModel as withModel };
4857
+ }
4858
+
4859
+ export { index as llm };
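+ /**
+ * Everything above is re-exported under the `llm` namespace. A usage sketch,
+ * assuming the package's main entry resolves to this declaration file:
+ *
+ * @example Illustrative consumer usage
+ * ```typescript
+ * import { llm } from 'mirascope';
+ *
+ * const sayHello = llm.defineCall({
+ *   model: 'anthropic/claude-sonnet-4-20250514',
+ *   template: () => 'Hello!',
+ * });
+ *
+ * const response = await sayHello();
+ * ```
+ */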