@providerprotocol/ai 0.0.17 → 0.0.19

This diff reflects the published contents of these package versions as they appear in their public registries, and is provided for informational purposes only.
Files changed (45)
  1. package/README.md +294 -114
  2. package/dist/anthropic/index.d.ts +1 -1
  3. package/dist/anthropic/index.js +5 -3
  4. package/dist/anthropic/index.js.map +1 -1
  5. package/dist/{chunk-MOU4U3PO.js → chunk-5FEAOEXV.js} +4 -68
  6. package/dist/chunk-5FEAOEXV.js.map +1 -0
  7. package/dist/chunk-DZQHVGNV.js +71 -0
  8. package/dist/chunk-DZQHVGNV.js.map +1 -0
  9. package/dist/chunk-SKY2JLA7.js +59 -0
  10. package/dist/chunk-SKY2JLA7.js.map +1 -0
  11. package/dist/{chunk-SVYROCLD.js → chunk-UMKWXGO3.js} +1 -1
  12. package/dist/chunk-UMKWXGO3.js.map +1 -0
  13. package/dist/chunk-WAKD3OO5.js +224 -0
  14. package/dist/chunk-WAKD3OO5.js.map +1 -0
  15. package/dist/content-DEl3z_W2.d.ts +276 -0
  16. package/dist/google/index.d.ts +3 -1
  17. package/dist/google/index.js +122 -4
  18. package/dist/google/index.js.map +1 -1
  19. package/dist/http/index.d.ts +2 -2
  20. package/dist/http/index.js +2 -1
  21. package/dist/image-Dhq-Yuq4.d.ts +456 -0
  22. package/dist/index.d.ts +59 -1460
  23. package/dist/index.js +89 -267
  24. package/dist/index.js.map +1 -1
  25. package/dist/ollama/index.d.ts +1 -1
  26. package/dist/ollama/index.js +5 -3
  27. package/dist/ollama/index.js.map +1 -1
  28. package/dist/openai/index.d.ts +47 -20
  29. package/dist/openai/index.js +309 -4
  30. package/dist/openai/index.js.map +1 -1
  31. package/dist/openrouter/index.d.ts +1 -1
  32. package/dist/openrouter/index.js +5 -3
  33. package/dist/openrouter/index.js.map +1 -1
  34. package/dist/{provider-D5MO3-pS.d.ts → provider-BBMBZuGn.d.ts} +11 -11
  35. package/dist/proxy/index.d.ts +652 -0
  36. package/dist/proxy/index.js +565 -0
  37. package/dist/proxy/index.js.map +1 -0
  38. package/dist/{retry-DZ4Sqmxp.d.ts → retry-DR7YRJDz.d.ts} +1 -1
  39. package/dist/stream-DRHy6q1a.d.ts +1013 -0
  40. package/dist/xai/index.d.ts +29 -1
  41. package/dist/xai/index.js +118 -4
  42. package/dist/xai/index.js.map +1 -1
  43. package/package.json +6 -1
  44. package/dist/chunk-MOU4U3PO.js.map +0 -1
  45. package/dist/chunk-SVYROCLD.js.map +0 -1
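The headline addition in this list is an image-generation surface: a new image-Dhq-Yuq4.d.ts type bundle, a new proxy entry point, and substantially larger openai, google, and xai builds. The reworked index.d.ts diffed below documents a new image() factory; the following is a minimal usage sketch based on that JSDoc, where the model id, params, and the provider subpath import are illustrative assumptions rather than verbatim package documentation:

```typescript
// Sketch of the image() factory added in this release, following the JSDoc
// example further down in this diff. The provider subpath import is an
// assumption based on the dist/openai entry; check the package's exports
// map for the actual path.
import { image } from '@providerprotocol/ai';
import { openai } from '@providerprotocol/ai/openai';

const dalle = image({
  model: openai('dall-e-3'),
  params: { size: '1024x1024', quality: 'hd' },
});

// generate() resolves with the images produced for the prompt.
const result = await dalle.generate('A sunset over mountains');
console.log(result.images.length);
```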
package/dist/index.d.ts CHANGED
@@ -1,1274 +1,12 @@
1
- import { P as ProviderConfig, L as LLMProvider, E as EmbeddingInput, a as EmbeddingUsage, B as BoundEmbeddingModel, b as LLMHandler$1, c as EmbeddingHandler, I as ImageHandler, d as Provider } from './provider-D5MO3-pS.js';
2
- export { i as BoundImageModel, g as EmbeddingProvider, j as EmbeddingRequest, k as EmbeddingResponse, l as EmbeddingVector, e as ErrorCode, h as ImageProvider, K as KeyStrategy, M as Modality, f as ModelReference, R as RetryStrategy, U as UPPError } from './provider-D5MO3-pS.js';
3
- export { D as DynamicKey, E as ExponentialBackoff, L as LinearBackoff, N as NoRetry, a as RetryAfterStrategy, R as RoundRobinKeys, T as TokenBucket, W as WeightedKeys } from './retry-DZ4Sqmxp.js';
4
-
5
- /**
6
- * @fileoverview Content block types for multimodal messages.
7
- *
8
- * Defines the various content block types that can be included in
9
- * user and assistant messages, supporting text, images, audio, video,
10
- * and arbitrary binary data.
11
- *
12
- * @module types/content
13
- */
14
- /**
15
- * Image source variants for ImageBlock.
16
- *
17
- * Images can be provided as base64-encoded strings, URLs, or raw bytes.
18
- *
19
- * @example
20
- * ```typescript
21
- * // Base64 encoded image
22
- * const base64Source: ImageSource = {
23
- * type: 'base64',
24
- * data: 'iVBORw0KGgo...'
25
- * };
26
- *
27
- * // URL reference
28
- * const urlSource: ImageSource = {
29
- * type: 'url',
30
- * url: 'https://example.com/image.png'
31
- * };
32
- *
33
- * // Raw bytes
34
- * const bytesSource: ImageSource = {
35
- * type: 'bytes',
36
- * data: new Uint8Array([...])
37
- * };
38
- * ```
39
- */
40
- type ImageSource = {
41
- type: 'base64';
42
- data: string;
43
- } | {
44
- type: 'url';
45
- url: string;
46
- } | {
47
- type: 'bytes';
48
- data: Uint8Array;
49
- };
50
- /**
51
- * Text content block.
52
- *
53
- * The most common content block type, containing plain text content.
54
- *
55
- * @example
56
- * ```typescript
57
- * const textBlock: TextBlock = {
58
- * type: 'text',
59
- * text: 'Hello, world!'
60
- * };
61
- * ```
62
- */
63
- interface TextBlock {
64
- /** Discriminator for text blocks */
65
- type: 'text';
66
- /** The text content */
67
- text: string;
68
- }
69
- /**
70
- * Image content block.
71
- *
72
- * Contains an image with its source data and metadata.
73
- *
74
- * @example
75
- * ```typescript
76
- * const imageBlock: ImageBlock = {
77
- * type: 'image',
78
- * source: { type: 'url', url: 'https://example.com/photo.jpg' },
79
- * mimeType: 'image/jpeg',
80
- * width: 1920,
81
- * height: 1080
82
- * };
83
- * ```
84
- */
85
- interface ImageBlock {
86
- /** Discriminator for image blocks */
87
- type: 'image';
88
- /** The image data source */
89
- source: ImageSource;
90
- /** MIME type of the image (e.g., 'image/png', 'image/jpeg') */
91
- mimeType: string;
92
- /** Image width in pixels */
93
- width?: number;
94
- /** Image height in pixels */
95
- height?: number;
96
- }
97
- /**
98
- * Audio content block.
99
- *
100
- * Contains audio data with its metadata.
101
- *
102
- * @example
103
- * ```typescript
104
- * const audioBlock: AudioBlock = {
105
- * type: 'audio',
106
- * data: audioBytes,
107
- * mimeType: 'audio/mp3',
108
- * duration: 120.5
109
- * };
110
- * ```
111
- */
112
- interface AudioBlock {
113
- /** Discriminator for audio blocks */
114
- type: 'audio';
115
- /** Raw audio data */
116
- data: Uint8Array;
117
- /** MIME type of the audio (e.g., 'audio/mp3', 'audio/wav') */
118
- mimeType: string;
119
- /** Duration in seconds */
120
- duration?: number;
121
- }
122
- /**
123
- * Video content block.
124
- *
125
- * Contains video data with its metadata.
126
- *
127
- * @example
128
- * ```typescript
129
- * const videoBlock: VideoBlock = {
130
- * type: 'video',
131
- * data: videoBytes,
132
- * mimeType: 'video/mp4',
133
- * duration: 30,
134
- * width: 1920,
135
- * height: 1080
136
- * };
137
- * ```
138
- */
139
- interface VideoBlock {
140
- /** Discriminator for video blocks */
141
- type: 'video';
142
- /** Raw video data */
143
- data: Uint8Array;
144
- /** MIME type of the video (e.g., 'video/mp4', 'video/webm') */
145
- mimeType: string;
146
- /** Duration in seconds */
147
- duration?: number;
148
- /** Video width in pixels */
149
- width?: number;
150
- /** Video height in pixels */
151
- height?: number;
152
- }
153
- /**
154
- * Binary content block for arbitrary data.
155
- *
156
- * A generic block type for data that doesn't fit other categories.
157
- *
158
- * @example
159
- * ```typescript
160
- * const binaryBlock: BinaryBlock = {
161
- * type: 'binary',
162
- * data: pdfBytes,
163
- * mimeType: 'application/pdf',
164
- * metadata: { filename: 'document.pdf', pages: 10 }
165
- * };
166
- * ```
167
- */
168
- interface BinaryBlock {
169
- /** Discriminator for binary blocks */
170
- type: 'binary';
171
- /** Raw binary data */
172
- data: Uint8Array;
173
- /** MIME type of the data */
174
- mimeType: string;
175
- /** Additional metadata about the binary content */
176
- metadata?: Record<string, unknown>;
177
- }
178
- /**
179
- * Union of all content block types.
180
- *
181
- * Used when a function or property can accept any type of content block.
182
- */
183
- type ContentBlock = TextBlock | ImageBlock | AudioBlock | VideoBlock | BinaryBlock;
184
- /**
185
- * Content types allowed in user messages.
186
- *
187
- * Users can send any type of content block including binary data.
188
- */
189
- type UserContent = TextBlock | ImageBlock | AudioBlock | VideoBlock | BinaryBlock;
190
- /**
191
- * Content types allowed in assistant messages.
192
- *
193
- * Assistants can generate text and media but not arbitrary binary data.
194
- */
195
- type AssistantContent = TextBlock | ImageBlock | AudioBlock | VideoBlock;
196
- /**
197
- * Creates a text content block from a string.
198
- *
199
- * @param content - The text content
200
- * @returns A TextBlock containing the provided text
201
- *
202
- * @example
203
- * ```typescript
204
- * const block = text('Hello, world!');
205
- * // { type: 'text', text: 'Hello, world!' }
206
- * ```
207
- */
208
- declare function text(content: string): TextBlock;
209
- /**
210
- * Type guard for TextBlock.
211
- *
212
- * @param block - The content block to check
213
- * @returns True if the block is a TextBlock
214
- *
215
- * @example
216
- * ```typescript
217
- * if (isTextBlock(block)) {
218
- * console.log(block.text);
219
- * }
220
- * ```
221
- */
222
- declare function isTextBlock(block: ContentBlock): block is TextBlock;
223
- /**
224
- * Type guard for ImageBlock.
225
- *
226
- * @param block - The content block to check
227
- * @returns True if the block is an ImageBlock
228
- *
229
- * @example
230
- * ```typescript
231
- * if (isImageBlock(block)) {
232
- * console.log(block.mimeType, block.width, block.height);
233
- * }
234
- * ```
235
- */
236
- declare function isImageBlock(block: ContentBlock): block is ImageBlock;
237
- /**
238
- * Type guard for AudioBlock.
239
- *
240
- * @param block - The content block to check
241
- * @returns True if the block is an AudioBlock
242
- *
243
- * @example
244
- * ```typescript
245
- * if (isAudioBlock(block)) {
246
- * console.log(block.mimeType, block.duration);
247
- * }
248
- * ```
249
- */
250
- declare function isAudioBlock(block: ContentBlock): block is AudioBlock;
251
- /**
252
- * Type guard for VideoBlock.
253
- *
254
- * @param block - The content block to check
255
- * @returns True if the block is a VideoBlock
256
- *
257
- * @example
258
- * ```typescript
259
- * if (isVideoBlock(block)) {
260
- * console.log(block.mimeType, block.duration);
261
- * }
262
- * ```
263
- */
264
- declare function isVideoBlock(block: ContentBlock): block is VideoBlock;
265
- /**
266
- * Type guard for BinaryBlock.
267
- *
268
- * @param block - The content block to check
269
- * @returns True if the block is a BinaryBlock
270
- *
271
- * @example
272
- * ```typescript
273
- * if (isBinaryBlock(block)) {
274
- * console.log(block.mimeType, block.metadata);
275
- * }
276
- * ```
277
- */
278
- declare function isBinaryBlock(block: ContentBlock): block is BinaryBlock;
279
-
280
- /**
281
- * @fileoverview JSON Schema types for tool parameters and structured outputs.
282
- *
283
- * Provides TypeScript interfaces for defining JSON Schema objects used in
284
- * LLM tool definitions and structured output specifications.
285
- *
286
- * @module types/schema
287
- */
288
- /**
289
- * Primitive and composite JSON Schema property types.
290
- *
291
- * These types correspond to the JSON Schema specification's allowed type values.
292
- */
293
- type JSONSchemaPropertyType =
294
- /** String values */
295
- 'string'
296
- /** Floating point numbers */
297
- | 'number'
298
- /** Whole numbers */
299
- | 'integer'
300
- /** Boolean true/false values */
301
- | 'boolean'
302
- /** Ordered lists of values */
303
- | 'array'
304
- /** Key-value mappings */
305
- | 'object'
306
- /** Explicit null value */
307
- | 'null';
308
- /**
309
- * JSON Schema property definition.
310
- *
311
- * Describes a single property within a JSON Schema object, including
312
- * type constraints, validation rules, and nested structure definitions.
313
- *
314
- * @example
315
- * ```typescript
316
- * const nameProperty: JSONSchemaProperty = {
317
- * type: 'string',
318
- * description: 'User name',
319
- * minLength: 1,
320
- * maxLength: 100
321
- * };
322
- * ```
323
- *
324
- * @example
325
- * ```typescript
326
- * const tagsProperty: JSONSchemaProperty = {
327
- * type: 'array',
328
- * description: 'List of tags',
329
- * items: { type: 'string' },
330
- * minItems: 1,
331
- * uniqueItems: true
332
- * };
333
- * ```
334
- */
335
- interface JSONSchemaProperty {
336
- /** The JSON type of this property */
337
- type: JSONSchemaPropertyType;
338
- /** Human-readable description for the LLM */
339
- description?: string;
340
- /** Allowed values (enumeration) */
341
- enum?: unknown[];
342
- /** Constant value this property must equal */
343
- const?: unknown;
344
- /** Default value if not provided */
345
- default?: unknown;
346
- /** Minimum string length (string type only) */
347
- minLength?: number;
348
- /** Maximum string length (string type only) */
349
- maxLength?: number;
350
- /** Regular expression pattern for validation (string type only) */
351
- pattern?: string;
352
- /** Semantic format hint (string type only) */
353
- format?: 'email' | 'uri' | 'date' | 'date-time' | 'uuid';
354
- /** Minimum value inclusive (number/integer types only) */
355
- minimum?: number;
356
- /** Maximum value inclusive (number/integer types only) */
357
- maximum?: number;
358
- /** Minimum value exclusive (number/integer types only) */
359
- exclusiveMinimum?: number;
360
- /** Maximum value exclusive (number/integer types only) */
361
- exclusiveMaximum?: number;
362
- /** Value must be divisible by this (number/integer types only) */
363
- multipleOf?: number;
364
- /** Schema for array elements (array type only) */
365
- items?: JSONSchemaProperty;
366
- /** Minimum array length (array type only) */
367
- minItems?: number;
368
- /** Maximum array length (array type only) */
369
- maxItems?: number;
370
- /** Whether array elements must be unique (array type only) */
371
- uniqueItems?: boolean;
372
- /** Nested property definitions (object type only) */
373
- properties?: Record<string, JSONSchemaProperty>;
374
- /** List of required property names (object type only) */
375
- required?: string[];
376
- /** Whether additional properties are allowed (object type only) */
377
- additionalProperties?: boolean;
378
- }
379
- /**
380
- * Root JSON Schema for tool parameters or structured outputs.
381
- *
382
- * This is the top-level schema definition used when defining tool
383
- * parameters or requesting structured output from an LLM.
384
- *
385
- * @example
386
- * ```typescript
387
- * const weatherToolSchema: JSONSchema = {
388
- * type: 'object',
389
- * description: 'Parameters for getting weather information',
390
- * properties: {
391
- * location: {
392
- * type: 'string',
393
- * description: 'City name or coordinates'
394
- * },
395
- * units: {
396
- * type: 'string',
397
- * enum: ['celsius', 'fahrenheit'],
398
- * description: 'Temperature units'
399
- * }
400
- * },
401
- * required: ['location']
402
- * };
403
- * ```
404
- */
405
- interface JSONSchema {
406
- /** Root schemas are always objects */
407
- type: 'object';
408
- /** Property definitions for the object */
409
- properties: Record<string, JSONSchemaProperty>;
410
- /** List of required property names */
411
- required?: string[];
412
- /** Whether additional properties are allowed beyond those defined */
413
- additionalProperties?: boolean;
414
- /** Human-readable description of the schema's purpose */
415
- description?: string;
416
- }
417
-
418
- /**
419
- * @fileoverview Tool types for LLM function calling.
420
- *
421
- * Defines the interfaces for registering tools with LLMs, handling
422
- * tool calls from the model, and managing tool execution strategies.
423
- *
424
- * @module types/tool
425
- */
426
-
427
- /**
428
- * Provider-namespaced metadata for tools.
429
- *
430
- * Each provider can attach its own metadata under its namespace,
431
- * enabling provider-specific features like caching, strict mode, etc.
432
- *
433
- * @example
434
- * ```typescript
435
- * const metadata: ToolMetadata = {
436
- * anthropic: { cache_control: { type: 'ephemeral' } },
437
- * openrouter: { cache_control: { type: 'ephemeral', ttl: '1h' } }
438
- * };
439
- * ```
440
- */
441
- interface ToolMetadata {
442
- [provider: string]: Record<string, unknown> | undefined;
443
- }
444
- /**
445
- * Tool call requested by the model.
446
- *
447
- * Represents a single function call request from the LLM, including
448
- * the tool name and parsed arguments.
449
- *
450
- * @example
451
- * ```typescript
452
- * const toolCall: ToolCall = {
453
- * toolCallId: 'call_abc123',
454
- * toolName: 'get_weather',
455
- * arguments: { location: 'San Francisco', units: 'celsius' }
456
- * };
457
- * ```
458
- */
459
- interface ToolCall {
460
- /** Unique identifier for this tool call, used to match results */
461
- toolCallId: string;
462
- /** Name of the tool being called */
463
- toolName: string;
464
- /** Parsed arguments for the tool call */
465
- arguments: Record<string, unknown>;
466
- }
467
- /**
468
- * Result of tool execution.
469
- *
470
- * Returned after executing a tool, containing the result data
471
- * and whether an error occurred.
472
- *
473
- * @example
474
- * ```typescript
475
- * const result: ToolResult = {
476
- * toolCallId: 'call_abc123',
477
- * result: { temperature: 72, conditions: 'sunny' }
478
- * };
479
- *
480
- * // Error result
481
- * const errorResult: ToolResult = {
482
- * toolCallId: 'call_abc123',
483
- * result: 'Location not found',
484
- * isError: true
485
- * };
486
- * ```
487
- */
488
- interface ToolResult {
489
- /** The tool call ID this result corresponds to */
490
- toolCallId: string;
491
- /** The result data (can be any serializable value) */
492
- result: unknown;
493
- /** Whether the tool execution resulted in an error */
494
- isError?: boolean;
495
- }
496
- /**
497
- * Tool definition for LLM function calling.
498
- *
499
- * Defines a tool that can be called by the LLM, including its
500
- * name, description, parameter schema, and execution function.
501
- *
502
- * @typeParam TParams - The type of parameters the tool accepts
503
- * @typeParam TResult - The type of result the tool returns
504
- *
505
- * @example
506
- * ```typescript
507
- * const weatherTool: Tool<{ location: string }, WeatherData> = {
508
- * name: 'get_weather',
509
- * description: 'Get current weather for a location',
510
- * parameters: {
511
- * type: 'object',
512
- * properties: {
513
- * location: { type: 'string', description: 'City name' }
514
- * },
515
- * required: ['location']
516
- * },
517
- * run: async (params) => {
518
- * return fetchWeather(params.location);
519
- * }
520
- * };
521
- * ```
522
- */
523
- interface Tool<TParams = unknown, TResult = unknown> {
524
- /** Tool name (must be unique within an llm() instance) */
525
- name: string;
526
- /** Human-readable description for the model to understand when to use this tool */
527
- description: string;
528
- /** JSON Schema defining the tool's parameters */
529
- parameters: JSONSchema;
530
- /**
531
- * Provider-specific metadata, namespaced by provider name.
532
- *
533
- * Used for provider-specific features like prompt caching:
534
- * @example
535
- * ```typescript
536
- * const tool: Tool = {
537
- * name: 'search_docs',
538
- * description: 'Search documentation',
539
- * parameters: {...},
540
- * run: async (params) => {...},
541
- * metadata: {
542
- * anthropic: { cache_control: { type: 'ephemeral' } }
543
- * }
544
- * };
545
- * ```
546
- */
547
- metadata?: ToolMetadata;
548
- /**
549
- * Executes the tool with the provided parameters.
550
- *
551
- * @param params - The parameters passed by the model
552
- * @returns The tool result, synchronously or as a Promise
553
- */
554
- run(params: TParams): TResult | Promise<TResult>;
555
- /**
556
- * Optional approval handler for sensitive operations.
557
- *
558
- * If provided, this function is called before the tool executes.
559
- * Return false to prevent execution.
560
- *
561
- * @param params - The parameters the tool would be called with
562
- * @returns Whether to approve the execution
563
- */
564
- approval?(params: TParams): boolean | Promise<boolean>;
565
- }
566
- /**
567
- * Result from onBeforeCall hook indicating whether to proceed and optionally transformed params.
568
- */
569
- interface BeforeCallResult {
570
- /** Whether to proceed with tool execution */
571
- proceed: boolean;
572
- /** Transformed parameters to use instead of the original (optional) */
573
- params?: unknown;
574
- }
575
- /**
576
- * Result from onAfterCall hook optionally containing a transformed result.
577
- */
578
- interface AfterCallResult {
579
- /** Transformed result to use instead of the original */
580
- result: unknown;
581
- }
582
- /**
583
- * Strategy for controlling tool execution behavior.
584
- *
585
- * Provides hooks for monitoring, controlling, and transforming the tool execution
586
- * loop during LLM inference.
587
- *
588
- * @example
589
- * ```typescript
590
- * const strategy: ToolUseStrategy = {
591
- * maxIterations: 5,
592
- * onToolCall: (tool, params) => {
593
- * console.log(`Calling ${tool.name} with`, params);
594
- * },
595
- * // Transform input parameters
596
- * onBeforeCall: (tool, params) => {
597
- * if (tool.name === 'search') {
598
- * return { proceed: true, params: { ...params, limit: 10 } };
599
- * }
600
- * return true;
601
- * },
602
- * // Transform output results
603
- * onAfterCall: (tool, params, result) => {
604
- * if (tool.name === 'fetch_data') {
605
- * return { result: sanitize(result) };
606
- * }
607
- * },
608
- * onMaxIterations: (iterations) => {
609
- * console.warn(`Reached max iterations: ${iterations}`);
610
- * }
611
- * };
612
- * ```
613
- */
614
- interface ToolUseStrategy {
615
- /** Maximum number of tool execution rounds (default: 10) */
616
- maxIterations?: number;
617
- /**
618
- * Called when the model requests a tool call.
619
- *
620
- * @param tool - The tool being called
621
- * @param params - The parameters for the call
622
- */
623
- onToolCall?(tool: Tool, params: unknown): void | Promise<void>;
624
- /**
625
- * Called before tool execution. Can skip execution or transform parameters.
626
- *
627
- * @param tool - The tool about to be executed
628
- * @param params - The parameters for the call
629
- * @returns One of:
630
- * - `false` to skip execution
631
- * - `true` to proceed with original params
632
- * - `BeforeCallResult` object to control execution and optionally transform params
633
- */
634
- onBeforeCall?(tool: Tool, params: unknown): boolean | BeforeCallResult | Promise<boolean | BeforeCallResult>;
635
- /**
636
- * Called after tool execution completes. Can transform the result.
637
- *
638
- * @param tool - The tool that was executed
639
- * @param params - The parameters that were used
640
- * @param result - The result from the tool
641
- * @returns Void to use original result, or `AfterCallResult` to transform it
642
- */
643
- onAfterCall?(tool: Tool, params: unknown, result: unknown): void | AfterCallResult | Promise<void | AfterCallResult>;
644
- /**
645
- * Called when a tool execution throws an error.
646
- *
647
- * @param tool - The tool that failed
648
- * @param params - The parameters that were used
649
- * @param error - The error that was thrown
650
- */
651
- onError?(tool: Tool, params: unknown, error: Error): void | Promise<void>;
652
- /**
653
- * Called when the maximum iteration limit is reached.
654
- *
655
- * @param iterations - The number of iterations that were performed
656
- */
657
- onMaxIterations?(iterations: number): void | Promise<void>;
658
- }
659
- /**
660
- * Record of a completed tool execution.
661
- *
662
- * Contains all information about a tool call that was executed,
663
- * including timing and result data.
664
- *
665
- * @example
666
- * ```typescript
667
- * const execution: ToolExecution = {
668
- * toolName: 'get_weather',
669
- * toolCallId: 'call_abc123',
670
- * arguments: { location: 'San Francisco' },
671
- * result: { temperature: 72 },
672
- * isError: false,
673
- * duration: 150,
674
- * approved: true
675
- * };
676
- * ```
677
- */
678
- interface ToolExecution {
679
- /** Name of the tool that was called */
680
- toolName: string;
681
- /** Unique identifier for this tool call */
682
- toolCallId: string;
683
- /** Arguments that were passed to the tool */
684
- arguments: Record<string, unknown>;
685
- /** Result returned by the tool */
686
- result: unknown;
687
- /** Whether the tool execution resulted in an error */
688
- isError: boolean;
689
- /** Execution duration in milliseconds */
690
- duration: number;
691
- /** Whether approval was required and granted (undefined if no approval handler) */
692
- approved?: boolean;
693
- }
694
-
695
- /**
696
- * @fileoverview Message types for conversation history.
697
- *
698
- * Defines the message classes used to represent conversation turns
699
- * between users and assistants, including support for multimodal
700
- * content and tool calls.
701
- *
702
- * @module types/messages
703
- */
704
-
705
- /**
706
- * Message type discriminator.
707
- *
708
- * Used to distinguish between different message types in a conversation.
709
- */
710
- type MessageType = 'user' | 'assistant' | 'tool_result';
711
- /**
712
- * Provider-namespaced metadata for messages.
713
- *
714
- * Each provider can attach its own metadata under its namespace,
715
- * preventing conflicts between different providers.
716
- *
717
- * @example
718
- * ```typescript
719
- * const metadata: MessageMetadata = {
720
- * openai: { model: 'gpt-4', finishReason: 'stop' },
721
- * anthropic: { model: 'claude-3', stopReason: 'end_turn' }
722
- * };
723
- * ```
724
- */
725
- interface MessageMetadata {
726
- [provider: string]: Record<string, unknown> | undefined;
727
- }
728
- /**
729
- * Options for constructing messages.
730
- */
731
- interface MessageOptions {
732
- /** Custom message ID (auto-generated if not provided) */
733
- id?: string;
734
- /** Provider-specific metadata */
735
- metadata?: MessageMetadata;
736
- }
737
- /**
738
- * Abstract base class for all message types.
739
- *
740
- * Provides common functionality for user, assistant, and tool result
741
- * messages, including content accessors and metadata handling.
742
- *
743
- * @example
744
- * ```typescript
745
- * // Access text content from any message
746
- * const text = message.text;
747
- *
748
- * // Access images
749
- * const images = message.images;
750
- * ```
751
- */
752
- declare abstract class Message {
753
- /** Unique message identifier */
754
- readonly id: string;
755
- /** Timestamp when the message was created */
756
- readonly timestamp: Date;
757
- /** Provider-specific metadata, namespaced by provider name */
758
- readonly metadata?: MessageMetadata;
759
- /** Message type discriminator (implemented by subclasses) */
760
- abstract readonly type: MessageType;
761
- /**
762
- * Returns the content blocks for this message.
763
- * Implemented by subclasses to provide type-specific content.
764
- */
765
- protected abstract getContent(): ContentBlock[];
766
- /**
767
- * Creates a new message instance.
768
- *
769
- * @param options - Optional message ID and metadata
770
- */
771
- constructor(options?: MessageOptions);
772
- /**
773
- * Concatenated text content from all text blocks.
774
- * Blocks are joined with double newlines.
775
- */
776
- get text(): string;
777
- /**
778
- * All image content blocks in this message.
779
- */
780
- get images(): ImageBlock[];
781
- /**
782
- * All audio content blocks in this message.
783
- */
784
- get audio(): AudioBlock[];
785
- /**
786
- * All video content blocks in this message.
787
- */
788
- get video(): VideoBlock[];
789
- }
790
- /**
791
- * User input message.
792
- *
793
- * Represents a message from the user, which can contain text and/or
794
- * multimodal content like images, audio, or video.
795
- *
796
- * @example
797
- * ```typescript
798
- * // Simple text message
799
- * const msg = new UserMessage('Hello, world!');
800
- *
801
- * // Multimodal message
802
- * const msg = new UserMessage([
803
- * { type: 'text', text: 'What is in this image?' },
804
- * { type: 'image', source: { type: 'url', url: '...' }, mimeType: 'image/png' }
805
- * ]);
806
- * ```
807
- */
808
- declare class UserMessage extends Message {
809
- /** Message type discriminator */
810
- readonly type: "user";
811
- /** Content blocks in this message */
812
- readonly content: UserContent[];
813
- /**
814
- * Creates a new user message.
815
- *
816
- * @param content - String (converted to TextBlock) or array of content blocks
817
- * @param options - Optional message ID and metadata
818
- */
819
- constructor(content: string | UserContent[], options?: MessageOptions);
820
- protected getContent(): ContentBlock[];
821
- }
822
- /**
823
- * Assistant response message.
824
- *
825
- * Represents a response from the AI assistant, which may contain
826
- * text, media content, and/or tool call requests.
827
- *
828
- * @example
829
- * ```typescript
830
- * // Simple text response
831
- * const msg = new AssistantMessage('Hello! How can I help?');
832
- *
833
- * // Response with tool calls
834
- * const msg = new AssistantMessage(
835
- * 'Let me check the weather...',
836
- * [{ toolCallId: 'call_1', toolName: 'get_weather', arguments: { location: 'NYC' } }]
837
- * );
838
- * ```
839
- */
840
- declare class AssistantMessage extends Message {
841
- /** Message type discriminator */
842
- readonly type: "assistant";
843
- /** Content blocks in this message */
844
- readonly content: AssistantContent[];
845
- /** Tool calls requested by the model (if any) */
846
- readonly toolCalls?: ToolCall[];
847
- /**
848
- * Creates a new assistant message.
849
- *
850
- * @param content - String (converted to TextBlock) or array of content blocks
851
- * @param toolCalls - Tool calls requested by the model
852
- * @param options - Optional message ID and metadata
853
- */
854
- constructor(content: string | AssistantContent[], toolCalls?: ToolCall[], options?: MessageOptions);
855
- protected getContent(): ContentBlock[];
856
- /**
857
- * Whether this message contains tool call requests.
858
- */
859
- get hasToolCalls(): boolean;
860
- }
861
- /**
862
- * Tool execution result message.
863
- *
864
- * Contains the results of executing one or more tool calls,
865
- * sent back to the model for further processing.
866
- *
867
- * @example
868
- * ```typescript
869
- * const msg = new ToolResultMessage([
870
- * { toolCallId: 'call_1', result: { temperature: 72, conditions: 'sunny' } },
871
- * { toolCallId: 'call_2', result: 'File not found', isError: true }
872
- * ]);
873
- * ```
874
- */
875
- declare class ToolResultMessage extends Message {
876
- /** Message type discriminator */
877
- readonly type: "tool_result";
878
- /** Results from tool executions */
879
- readonly results: ToolResult[];
880
- /**
881
- * Creates a new tool result message.
882
- *
883
- * @param results - Array of tool execution results
884
- * @param options - Optional message ID and metadata
885
- */
886
- constructor(results: ToolResult[], options?: MessageOptions);
887
- protected getContent(): ContentBlock[];
888
- }
889
- /**
890
- * Type guard for UserMessage.
891
- *
892
- * @param msg - The message to check
893
- * @returns True if the message is a UserMessage
894
- *
895
- * @example
896
- * ```typescript
897
- * if (isUserMessage(msg)) {
898
- * console.log('User said:', msg.text);
899
- * }
900
- * ```
901
- */
902
- declare function isUserMessage(msg: Message): msg is UserMessage;
903
- /**
904
- * Type guard for AssistantMessage.
905
- *
906
- * @param msg - The message to check
907
- * @returns True if the message is an AssistantMessage
908
- *
909
- * @example
910
- * ```typescript
911
- * if (isAssistantMessage(msg)) {
912
- * console.log('Assistant said:', msg.text);
913
- * if (msg.hasToolCalls) {
914
- * console.log('Tool calls:', msg.toolCalls);
915
- * }
916
- * }
917
- * ```
918
- */
919
- declare function isAssistantMessage(msg: Message): msg is AssistantMessage;
920
- /**
921
- * Type guard for ToolResultMessage.
922
- *
923
- * @param msg - The message to check
924
- * @returns True if the message is a ToolResultMessage
925
- *
926
- * @example
927
- * ```typescript
928
- * if (isToolResultMessage(msg)) {
929
- * for (const result of msg.results) {
930
- * console.log(`Tool ${result.toolCallId}:`, result.result);
931
- * }
932
- * }
933
- * ```
934
- */
935
- declare function isToolResultMessage(msg: Message): msg is ToolResultMessage;
936
-
937
- /**
938
- * @fileoverview Turn types for inference results.
939
- *
940
- * A Turn represents the complete result of one inference call, including
941
- * all messages produced during tool execution loops, token usage, and
942
- * optional structured output data.
943
- *
944
- * @module types/turn
945
- */
946
-
947
- /**
948
- * Token usage information for an inference request.
949
- *
950
- * Tracks input and output tokens across all inference cycles,
951
- * with optional per-cycle breakdown and cache metrics.
952
- *
953
- * @example
954
- * ```typescript
955
- * const usage: TokenUsage = {
956
- * inputTokens: 150,
957
- * outputTokens: 50,
958
- * totalTokens: 200,
959
- * cacheReadTokens: 100,
960
- * cacheWriteTokens: 50,
961
- * cycles: [
962
- * { inputTokens: 100, outputTokens: 30, cacheReadTokens: 0, cacheWriteTokens: 50 },
963
- * { inputTokens: 50, outputTokens: 20, cacheReadTokens: 100, cacheWriteTokens: 0 }
964
- * ]
965
- * };
966
- * ```
967
- */
968
- interface TokenUsage {
969
- /** Total input tokens across all cycles */
970
- inputTokens: number;
971
- /** Total output tokens across all cycles */
972
- outputTokens: number;
973
- /** Sum of input and output tokens */
974
- totalTokens: number;
975
- /**
976
- * Tokens read from cache (cache hits).
977
- * Returns 0 for providers that don't support or report cache metrics.
978
- */
979
- cacheReadTokens: number;
980
- /**
981
- * Tokens written to cache (cache misses that were cached).
982
- * Only Anthropic reports this metric; returns 0 for other providers.
983
- */
984
- cacheWriteTokens: number;
985
- /** Per-cycle token breakdown (if multiple cycles occurred) */
986
- cycles?: Array<{
987
- inputTokens: number;
988
- outputTokens: number;
989
- cacheReadTokens: number;
990
- cacheWriteTokens: number;
991
- }>;
992
- }
993
- /**
994
- * A Turn represents the complete result of one inference call.
995
- *
996
- * Includes all messages produced during tool execution loops,
997
- * the final assistant response, token usage, and optional
998
- * structured output data.
999
- *
1000
- * @typeParam TData - Type of the structured output data
1001
- *
1002
- * @example
1003
- * ```typescript
1004
- * const turn = await instance.generate('Hello');
1005
- * console.log(turn.response.text);
1006
- * console.log(`Used ${turn.usage.totalTokens} tokens in ${turn.cycles} cycles`);
1007
- *
1008
- * // With structured output
1009
- * interface WeatherData { temperature: number; conditions: string; }
1010
- * const turn = await instance.generate<WeatherData>('Get weather');
1011
- * console.log(turn.data?.temperature);
1012
- * ```
1013
- */
1014
- interface Turn<TData = unknown> {
1015
- /**
1016
- * All messages produced during this inference, in chronological order.
1017
- * Includes UserMessage, AssistantMessage (may include toolCalls), and ToolResultMessage.
1018
- */
1019
- readonly messages: Message[];
1020
- /** The final assistant response (last AssistantMessage in the turn) */
1021
- readonly response: AssistantMessage;
1022
- /** Tool executions that occurred during this turn */
1023
- readonly toolExecutions: ToolExecution[];
1024
- /** Aggregate token usage for the entire turn */
1025
- readonly usage: TokenUsage;
1026
- /** Total number of inference cycles (1 + number of tool rounds) */
1027
- readonly cycles: number;
1028
- /**
1029
- * Structured output data (if a structure schema was provided).
1030
- * Type is inferred from the schema when using TypeScript.
1031
- */
1032
- readonly data?: TData;
1033
- }
1034
- /**
1035
- * Creates a Turn from accumulated inference data.
1036
- *
1037
- * @typeParam TData - Type of the structured output data
1038
- * @param messages - All messages produced during the inference
1039
- * @param toolExecutions - Record of all tool executions
1040
- * @param usage - Aggregate token usage
1041
- * @param cycles - Number of inference cycles
1042
- * @param data - Optional structured output data
1043
- * @returns A complete Turn object
1044
- * @throws Error if no assistant message is found in the messages
1045
- *
1046
- * @example
1047
- * ```typescript
1048
- * const turn = createTurn(
1049
- * [userMsg, assistantMsg],
1050
- * [],
1051
- * { inputTokens: 100, outputTokens: 50, totalTokens: 150 },
1052
- * 1
1053
- * );
1054
- * ```
1055
- */
1056
- declare function createTurn<TData = unknown>(messages: Message[], toolExecutions: ToolExecution[], usage: TokenUsage, cycles: number, data?: TData): Turn<TData>;
1057
- /**
1058
- * Creates an empty TokenUsage object.
1059
- *
1060
- * @returns A TokenUsage with all values set to zero
1061
- *
1062
- * @example
1063
- * ```typescript
1064
- * const usage = emptyUsage();
1065
- * // { inputTokens: 0, outputTokens: 0, totalTokens: 0, cacheReadTokens: 0, cacheWriteTokens: 0, cycles: [] }
1066
- * ```
1067
- */
1068
- declare function emptyUsage(): TokenUsage;
1069
- /**
1070
- * Aggregates token usage from multiple inference cycles.
1071
- *
1072
- * @param usages - Array of TokenUsage objects to aggregate
1073
- * @returns Combined TokenUsage with per-cycle breakdown
1074
- *
1075
- * @example
1076
- * ```typescript
1077
- * const cycle1 = { inputTokens: 100, outputTokens: 30, totalTokens: 130, cacheReadTokens: 50, cacheWriteTokens: 0 };
1078
- * const cycle2 = { inputTokens: 150, outputTokens: 40, totalTokens: 190, cacheReadTokens: 100, cacheWriteTokens: 0 };
1079
- * const total = aggregateUsage([cycle1, cycle2]);
1080
- * // { inputTokens: 250, outputTokens: 70, totalTokens: 320, cacheReadTokens: 150, cacheWriteTokens: 0, cycles: [...] }
1081
- * ```
1082
- */
1083
- declare function aggregateUsage(usages: TokenUsage[]): TokenUsage;
1084
-
1085
- /**
1086
- * @fileoverview Streaming types for real-time LLM responses.
1087
- *
1088
- * Defines the event types and interfaces for streaming LLM inference,
1089
- * including text deltas, tool call deltas, and control events.
1090
- *
1091
- * @module types/stream
1092
- */
1093
-
1094
- /**
1095
- * Stream event type discriminators.
1096
- *
1097
- * Each event type represents a different kind of streaming update
1098
- * from the LLM provider.
1099
- */
1100
- type StreamEventType =
1101
- /** Incremental text output */
1102
- 'text_delta'
1103
- /** Incremental reasoning/thinking output */
1104
- | 'reasoning_delta'
1105
- /** Incremental image data */
1106
- | 'image_delta'
1107
- /** Incremental audio data */
1108
- | 'audio_delta'
1109
- /** Incremental video data */
1110
- | 'video_delta'
1111
- /** Incremental tool call data (arguments being streamed) */
1112
- | 'tool_call_delta'
1113
- /** Tool execution has started */
1114
- | 'tool_execution_start'
1115
- /** Tool execution has completed */
1116
- | 'tool_execution_end'
1117
- /** Beginning of a message */
1118
- | 'message_start'
1119
- /** End of a message */
1120
- | 'message_stop'
1121
- /** Beginning of a content block */
1122
- | 'content_block_start'
1123
- /** End of a content block */
1124
- | 'content_block_stop';
1125
- /**
1126
- * Event delta data payload.
1127
- *
1128
- * Contains the type-specific data for a streaming event.
1129
- * Different fields are populated depending on the event type.
1130
- */
1131
- interface EventDelta {
1132
- /** Incremental text content (for text_delta, reasoning_delta) */
1133
- text?: string;
1134
- /** Incremental binary data (for image_delta, audio_delta, video_delta) */
1135
- data?: Uint8Array;
1136
- /** Tool call identifier (for tool_call_delta, tool_execution_start/end) */
1137
- toolCallId?: string;
1138
- /** Tool name (for tool_call_delta, tool_execution_start/end) */
1139
- toolName?: string;
1140
- /** Incremental JSON arguments string (for tool_call_delta) */
1141
- argumentsJson?: string;
1142
- /** Tool execution result (for tool_execution_end) */
1143
- result?: unknown;
1144
- /** Whether tool execution resulted in an error (for tool_execution_end) */
1145
- isError?: boolean;
1146
- /** Timestamp in milliseconds (for tool_execution_start/end) */
1147
- timestamp?: number;
1148
- }
1149
- /**
1150
- * A single streaming event from the LLM.
1151
- *
1152
- * Events are emitted in order as the model generates output,
1153
- * allowing for real-time display of responses.
1154
- *
1155
- * @example
1156
- * ```typescript
1157
- * for await (const event of stream) {
1158
- * if (event.type === 'text_delta') {
1159
- * process.stdout.write(event.delta.text ?? '');
1160
- * } else if (event.type === 'tool_call_delta') {
1161
- * console.log('Tool:', event.delta.toolName);
1162
- * }
1163
- * }
1164
- * ```
1165
- */
1166
- interface StreamEvent {
1167
- /** Event type discriminator */
1168
- type: StreamEventType;
1169
- /** Index of the content block this event belongs to */
1170
- index: number;
1171
- /** Event-specific data payload */
1172
- delta: EventDelta;
1173
- }
1174
- /**
1175
- * Stream result - an async iterable that also provides the final turn.
1176
- *
1177
- * Allows consuming streaming events while also awaiting the complete
1178
- * Turn result after streaming finishes.
1179
- *
1180
- * @typeParam TData - Type of the structured output data
1181
- *
1182
- * @example
1183
- * ```typescript
1184
- * const stream = instance.stream('Tell me a story');
1185
- *
1186
- * // Consume streaming events
1187
- * for await (const event of stream) {
1188
- * if (event.type === 'text_delta') {
1189
- * process.stdout.write(event.delta.text ?? '');
1190
- * }
1191
- * }
1192
- *
1193
- * // Get the complete turn after streaming
1194
- * const turn = await stream.turn;
1195
- * console.log('\n\nTokens used:', turn.usage.totalTokens);
1196
- * ```
1197
- */
1198
- interface StreamResult<TData = unknown> extends AsyncIterable<StreamEvent> {
1199
- /**
1200
- * Promise that resolves to the complete Turn after streaming finishes.
1201
- */
1202
- readonly turn: Promise<Turn<TData>>;
1203
- /**
1204
- * Aborts the stream, stopping further events and cancelling the request.
1205
- */
1206
- abort(): void;
1207
- }
1208
- /**
1209
- * Creates a StreamResult from an async generator and completion promise.
1210
- *
1211
- * @typeParam TData - Type of the structured output data
1212
- * @param generator - Async generator that yields stream events
1213
- * @param turnPromise - Promise that resolves to the complete Turn
1214
- * @param abortController - Controller for aborting the stream
1215
- * @returns A StreamResult that can be iterated and awaited
1216
- *
1217
- * @example
1218
- * ```typescript
1219
- * const abortController = new AbortController();
1220
- * const stream = createStreamResult(
1221
- * eventGenerator(),
1222
- * turnPromise,
1223
- * abortController
1224
- * );
1225
- * ```
1226
- */
1227
- declare function createStreamResult<TData = unknown>(generator: AsyncGenerator<StreamEvent, void, unknown>, turnPromise: Promise<Turn<TData>>, abortController: AbortController): StreamResult<TData>;
1228
- /**
1229
- * Creates a text delta stream event.
1230
- *
1231
- * @param text - The incremental text content
1232
- * @param index - Content block index (default: 0)
1233
- * @returns A text_delta StreamEvent
1234
- */
1235
- declare function textDelta(text: string, index?: number): StreamEvent;
1236
- /**
1237
- * Creates a tool call delta stream event.
1238
- *
1239
- * @param toolCallId - Unique identifier for the tool call
1240
- * @param toolName - Name of the tool being called
1241
- * @param argumentsJson - Incremental JSON arguments string
1242
- * @param index - Content block index (default: 0)
1243
- * @returns A tool_call_delta StreamEvent
1244
- */
1245
- declare function toolCallDelta(toolCallId: string, toolName: string, argumentsJson: string, index?: number): StreamEvent;
1246
- /**
1247
- * Creates a message start stream event.
1248
- *
1249
- * @returns A message_start StreamEvent
1250
- */
1251
- declare function messageStart(): StreamEvent;
1252
- /**
1253
- * Creates a message stop stream event.
1254
- *
1255
- * @returns A message_stop StreamEvent
1256
- */
1257
- declare function messageStop(): StreamEvent;
1258
- /**
1259
- * Creates a content block start stream event.
1260
- *
1261
- * @param index - The content block index starting
1262
- * @returns A content_block_start StreamEvent
1263
- */
1264
- declare function contentBlockStart(index: number): StreamEvent;
1265
- /**
1266
- * Creates a content block stop stream event.
1267
- *
1268
- * @param index - The content block index stopping
1269
- * @returns A content_block_stop StreamEvent
1270
- */
1271
- declare function contentBlockStop(index: number): StreamEvent;
1
+ import { M as Message, T as Turn, a as MessageType, b as MessageJSON, c as Tool, d as ToolUseStrategy, J as JSONSchema, S as StreamResult, A as AssistantMessage, e as TokenUsage, f as StreamEvent } from './stream-DRHy6q1a.js';
2
+ export { l as AfterCallResult, B as BeforeCallResult, E as EventDelta, g as JSONSchemaProperty, h as JSONSchemaPropertyType, r as MessageMetadata, s as MessageOptions, w as StreamEventType, i as ToolCall, m as ToolExecution, k as ToolMetadata, j as ToolResult, n as ToolResultMessage, U as UserMessage, v as aggregateUsage, F as contentBlockStart, G as contentBlockStop, x as createStreamResult, t as createTurn, u as emptyUsage, p as isAssistantMessage, q as isToolResultMessage, o as isUserMessage, C as messageStart, D as messageStop, y as textDelta, z as toolCallDelta } from './stream-DRHy6q1a.js';
3
+ import { U as UserContent, A as AssistantContent, C as ContentBlock } from './content-DEl3z_W2.js';
4
+ export { a as AudioBlock, B as BinaryBlock, I as ImageBlock, b as ImageSource, T as TextBlock, V as VideoBlock, d as isAudioBlock, f as isBinaryBlock, c as isImageBlock, i as isTextBlock, e as isVideoBlock, t as text } from './content-DEl3z_W2.js';
5
+ import { P as ProviderConfig, L as LLMProvider, E as EmbeddingInput, a as EmbeddingUsage, B as BoundEmbeddingModel, b as LLMHandler$1, c as EmbeddingHandler, I as ImageHandler, d as Provider } from './provider-BBMBZuGn.js';
6
+ export { g as EmbeddingProvider, i as EmbeddingRequest, j as EmbeddingResponse, k as EmbeddingVector, e as ErrorCode, h as ImageProvider, K as KeyStrategy, M as Modality, f as ModelReference, R as RetryStrategy, U as UPPError } from './provider-BBMBZuGn.js';
7
+ import { I as ImageOptions, a as ImageInstance } from './image-Dhq-Yuq4.js';
8
+ export { B as BoundImageModel, G as GeneratedImage, b as Image, i as ImageCapabilities, d as ImageEditInput, k as ImageEditRequest, n as ImageHandler, c as ImageInput, o as ImageModelInput, m as ImageProviderStreamResult, j as ImageRequest, l as ImageResponse, f as ImageResult, g as ImageStreamEvent, h as ImageStreamResult, e as ImageUsage } from './image-Dhq-Yuq4.js';
9
+ export { D as DynamicKey, E as ExponentialBackoff, L as LinearBackoff, N as NoRetry, a as RetryAfterStrategy, R as RoundRobinKeys, T as TokenBucket, W as WeightedKeys } from './retry-DR7YRJDz.js';
1272
10
 
1273
11
  /**
1274
12
  * @fileoverview Thread class for managing conversation history.
@@ -1280,41 +18,14 @@ declare function contentBlockStop(index: number): StreamEvent;
1280
18
  */
1281
19
 
1282
20
  /**
1283
- * Serialized message format for JSON storage.
1284
- *
1285
- * Used when persisting messages to storage or transmitting over the network.
1286
- */
1287
- interface MessageJSON {
1288
- /** Unique message identifier */
1289
- id: string;
1290
- /** Message type discriminator */
1291
- type: MessageType;
1292
- /** Content blocks in the message */
1293
- content: ContentBlock[];
1294
- /** Tool calls (for assistant messages) */
1295
- toolCalls?: ToolCall[];
1296
- /** Tool results (for tool result messages) */
1297
- results?: ToolResult[];
1298
- /** Provider-specific metadata */
1299
- metadata?: MessageMetadata;
1300
- /** ISO timestamp string */
1301
- timestamp: string;
1302
- }
1303
- /**
1304
- * Serialized thread format for JSON storage.
1305
- *
1306
- * Contains all data needed to reconstruct a Thread instance.
21
+ * Thread serialized to JSON format.
22
+ * Picks id from Thread, converts dates to strings.
1307
23
  */
1308
- interface ThreadJSON {
1309
- /** Unique thread identifier */
1310
- id: string;
1311
- /** Serialized messages */
24
+ type ThreadJSON = Pick<Thread, 'id'> & {
1312
25
  messages: MessageJSON[];
1313
- /** ISO timestamp of thread creation */
1314
26
  createdAt: string;
1315
- /** ISO timestamp of last update */
1316
27
  updatedAt: string;
1317
- }
28
+ };
1318
29
  /**
1319
30
  * Thread - A utility class for managing conversation history.
1320
31
  *
@@ -2030,6 +741,43 @@ interface EmbeddingInstance<TParams = unknown> {
2030
741
  */
2031
742
  declare function embedding<TParams = unknown>(options: EmbeddingOptions<TParams>): EmbeddingInstance<TParams>;
2032
743
 
744
+ /**
745
+ * @fileoverview Image generation instance factory for the Universal Provider Protocol.
746
+ *
747
+ * This module provides the core functionality for creating image generation instances,
748
+ * including support for text-to-image generation, streaming, and image editing.
749
+ *
750
+ * @module core/image
751
+ */
752
+
753
+ /**
754
+ * Creates an image generation instance configured with the specified options.
755
+ *
756
+ * This is the primary factory function for creating image generation instances.
757
+ * It validates provider capabilities, binds the model, and returns an instance
758
+ * with `generate`, `stream`, and `edit` methods.
759
+ *
760
+ * @typeParam TParams - Provider-specific parameter type for model configuration
761
+ * @param options - Configuration options for the image instance
762
+ * @returns A configured image instance ready for generation
763
+ * @throws {UPPError} When the provider does not support the image modality
764
+ *
765
+ * @example
766
+ * ```typescript
767
+ * import { image } from 'upp';
768
+ * import { openai } from 'upp/providers/openai';
769
+ *
770
+ * const dalle = image({
771
+ * model: openai('dall-e-3'),
772
+ * params: { size: '1024x1024', quality: 'hd' }
773
+ * });
774
+ *
775
+ * const result = await dalle.generate('A sunset over mountains');
776
+ * console.log(result.images.length);
777
+ * ```
778
+ */
779
+ declare function image<TParams = unknown>(options: ImageOptions<TParams>): ImageInstance<TParams>;
780
+
2033
781
  /**
2034
782
  * @fileoverview Base provider interface and factory for the Universal Provider Protocol.
2035
783
  *
@@ -2104,159 +852,6 @@ interface CreateProviderOptions {
2104
852
  */
2105
853
  declare function createProvider<TOptions = unknown>(options: CreateProviderOptions): Provider<TOptions>;
2106
854
 
2107
- /**
2108
- * @fileoverview Image content handling for the Universal Provider Protocol.
2109
- *
2110
- * Provides a unified Image class for working with images across different sources
2111
- * (file paths, URLs, raw bytes, base64). Supports conversion between formats and
2112
- * integration with UPP message content blocks.
2113
- *
2114
- * @module core/image
2115
- */
2116
-
2117
- /**
2118
- * Represents an image that can be used in UPP messages.
2119
- *
2120
- * Images can be created from various sources (files, URLs, bytes, base64) and
2121
- * converted to different formats as needed by providers. The class provides
2122
- * a unified interface regardless of the underlying source type.
2123
- *
2124
- * @example
2125
- * ```typescript
2126
- * // Load from file
2127
- * const fileImage = await Image.fromPath('./photo.jpg');
2128
- *
2129
- * // Reference by URL
2130
- * const urlImage = Image.fromUrl('https://example.com/image.png');
2131
- *
2132
- * // From raw bytes
2133
- * const bytesImage = Image.fromBytes(uint8Array, 'image/png');
2134
- *
2135
- * // Use in a message
2136
- * const message = new UserMessage([image.toBlock()]);
2137
- * ```
2138
- */
2139
- declare class Image {
2140
- /** The underlying image source (bytes, base64, or URL) */
2141
- readonly source: ImageSource;
2142
- /** MIME type of the image (e.g., 'image/jpeg', 'image/png') */
2143
- readonly mimeType: string;
2144
- /** Image width in pixels, if known */
2145
- readonly width?: number;
2146
- /** Image height in pixels, if known */
2147
- readonly height?: number;
2148
- private constructor();
2149
- /**
2150
- * Whether this image has data loaded in memory.
2151
- *
2152
- * Returns `false` for URL-sourced images that reference external resources.
2153
- * These must be fetched before their data can be accessed.
2154
- */
2155
- get hasData(): boolean;
2156
- /**
2157
- * Converts the image to a base64-encoded string.
2158
- *
2159
- * @returns The image data as a base64 string
2160
- * @throws {Error} When the source is a URL (data must be fetched first)
2161
- */
2162
- toBase64(): string;
2163
- /**
2164
- * Converts the image to a data URL suitable for embedding in HTML or CSS.
2165
- *
2166
- * @returns A data URL in the format `data:{mimeType};base64,{data}`
2167
- * @throws {Error} When the source is a URL (data must be fetched first)
2168
- */
2169
- toDataUrl(): string;
2170
- /**
2171
- * Gets the image data as raw bytes.
2172
- *
2173
- * @returns The image data as a Uint8Array
2174
- * @throws {Error} When the source is a URL (data must be fetched first)
2175
- */
2176
- toBytes(): Uint8Array;
2177
- /**
2178
- * Gets the URL for URL-sourced images.
2179
- *
2180
- * @returns The image URL
2181
- * @throws {Error} When the source is not a URL
2182
- */
2183
- toUrl(): string;
2184
- /**
2185
- * Converts this Image to an ImageBlock for use in UPP messages.
2186
- *
2187
- * @returns An ImageBlock that can be included in message content arrays
2188
- */
2189
- toBlock(): ImageBlock;
2190
- /**
2191
- * Creates an Image by reading a file from disk.
2192
- *
2193
- * The file is read into memory as bytes. MIME type is automatically
2194
- * detected from the file extension.
2195
- *
2196
- * @param path - Path to the image file
2197
- * @returns Promise resolving to an Image with the file contents
2198
- *
2199
- * @example
2200
- * ```typescript
2201
- * const image = await Image.fromPath('./photos/vacation.jpg');
2202
- * ```
2203
- */
2204
- static fromPath(path: string): Promise<Image>;
2205
- /**
2206
- * Creates an Image from a URL reference.
2207
- *
2208
- * The URL is stored as a reference and not fetched. Providers will handle
2209
- * URL-to-data conversion if needed. MIME type is detected from the URL
2210
- * path if not provided.
2211
- *
2212
- * @param url - URL pointing to the image
2213
- * @param mimeType - Optional MIME type override
2214
- * @returns An Image referencing the URL
2215
- *
2216
- * @example
2217
- * ```typescript
2218
- * const image = Image.fromUrl('https://example.com/logo.png');
2219
- * ```
2220
- */
2221
- static fromUrl(url: string, mimeType?: string): Image;
2222
- /**
2223
- * Creates an Image from raw byte data.
2224
- *
2225
- * @param data - The image data as a Uint8Array
2226
- * @param mimeType - The MIME type of the image
2227
- * @returns An Image containing the byte data
2228
- *
2229
- * @example
2230
- * ```typescript
2231
- * const image = Image.fromBytes(pngData, 'image/png');
2232
- * ```
2233
- */
2234
- static fromBytes(data: Uint8Array, mimeType: string): Image;
2235
- /**
2236
- * Creates an Image from a base64-encoded string.
2237
- *
2238
- * @param base64 - The base64-encoded image data (without data URL prefix)
2239
- * @param mimeType - The MIME type of the image
2240
- * @returns An Image containing the base64 data
2241
- *
2242
- * @example
2243
- * ```typescript
2244
- * const image = Image.fromBase64(base64String, 'image/jpeg');
2245
- * ```
2246
- */
2247
- static fromBase64(base64: string, mimeType: string): Image;
2248
- /**
2249
- * Creates an Image from an existing ImageBlock.
2250
- *
2251
- * Useful for converting content blocks received from providers back
2252
- * into Image instances for further processing.
2253
- *
2254
- * @param block - An ImageBlock from message content
2255
- * @returns An Image with the block's source and metadata
2256
- */
2257
- static fromBlock(block: ImageBlock): Image;
2258
- }
2259
-
2260
855
  /**
2261
856
  * @fileoverview Unified Provider Protocol (UPP) - A unified interface for AI model inference
2262
857
  *
@@ -2264,6 +859,12 @@ declare class Image {
2264
859
  * Anthropic, OpenAI, Google, Ollama, OpenRouter, and xAI. The library handles provider-specific
2265
860
  * transformations, streaming, tool execution, and error handling.
2266
861
  *
862
+ * @module @providerprotocol/ai
863
+ * @packageDocumentation
864
+ */
865
+ /**
866
+ * LLM instance factory for creating model-bound inference functions.
867
+ *
2267
868
  * @example Basic usage
2268
869
  * ```typescript
2269
870
  * import { llm, anthropic } from '@providerprotocol/ai';
@@ -2285,11 +886,7 @@ declare class Image {
2285
886
  * }
2286
887
  * }
2287
888
  * ```
2288
- *
2289
- * @module @providerprotocol/ai
2290
- * @packageDocumentation
2291
889
  */
2292
- /** LLM instance factory for creating model-bound inference functions */
2293
890
 
2294
891
  /**
2295
892
  * UPP namespace object providing alternative import style.
@@ -2309,6 +906,8 @@ declare const ai: {
2309
906
  llm: typeof llm;
2310
907
  /** Embedding instance factory */
2311
908
  embedding: typeof embedding;
909
+ /** Image generation instance factory */
910
+ image: typeof image;
2312
911
  };
2313
912
 
2314
- export { type AfterCallResult, type AssistantContent, AssistantMessage, type AudioBlock, type BeforeCallResult, type BinaryBlock, BoundEmbeddingModel, type BoundLLMModel, type ContentBlock, type EmbedOptions, type Embedding, EmbeddingHandler, EmbeddingInput, type EmbeddingInstance, type EmbeddingModelInput, type EmbeddingOptions, type EmbeddingProgress, type EmbeddingResult, type EmbeddingStream, EmbeddingUsage, type EventDelta, Image, type ImageBlock, ImageHandler, type ImageSource, type InferenceInput, type JSONSchema, type JSONSchemaProperty, type JSONSchemaPropertyType, type LLMCapabilities, type LLMHandler, type LLMInstance, type LLMOptions, LLMProvider, type LLMRequest, type LLMResponse, type LLMStreamResult, Message, type MessageJSON, type MessageMetadata, type MessageOptions, type MessageType, Provider, ProviderConfig, type StreamEvent, type StreamEventType, type StreamResult, type TextBlock, Thread, type ThreadJSON, type TokenUsage, type Tool, type ToolCall, type ToolExecution, type ToolMetadata, type ToolResult, ToolResultMessage, type ToolUseStrategy, type Turn, type UserContent, UserMessage, type VideoBlock, aggregateUsage, ai, contentBlockStart, contentBlockStop, createProvider, createStreamResult, createTurn, embedding, emptyUsage, isAssistantMessage, isAudioBlock, isBinaryBlock, isImageBlock, isTextBlock, isToolResultMessage, isUserMessage, isVideoBlock, llm, messageStart, messageStop, text, textDelta, toolCallDelta };
913
+ export { AssistantContent, AssistantMessage, BoundEmbeddingModel, type BoundLLMModel, ContentBlock, type EmbedOptions, type Embedding, EmbeddingHandler, EmbeddingInput, type EmbeddingInstance, type EmbeddingModelInput, type EmbeddingOptions, type EmbeddingProgress, type EmbeddingResult, type EmbeddingStream, EmbeddingUsage, ImageInstance, ImageOptions, type InferenceInput, JSONSchema, type LLMCapabilities, type LLMHandler, type LLMInstance, type LLMOptions, LLMProvider, type LLMRequest, type LLMResponse, type LLMStreamResult, Message, MessageJSON, MessageType, Provider, ProviderConfig, StreamEvent, StreamResult, Thread, type ThreadJSON, TokenUsage, Tool, ToolUseStrategy, Turn, UserContent, ai, createProvider, embedding, image, llm };
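One other externally visible change in the hunks above is ThreadJSON, which moves from a standalone interface to Pick<Thread, 'id'> intersected with the serialized fields, while MessageJSON now lives in stream-DRHy6q1a.d.ts. A minimal sketch of a value satisfying the new shape, with ids, text, and timestamps that are placeholders only:

```typescript
import type { ThreadJSON } from '@providerprotocol/ai';

// Sketch: a literal satisfying the 0.0.19 ThreadJSON shape
// (Pick<Thread, 'id'> & { messages; createdAt; updatedAt }).
// Ids, text, and timestamps are placeholder values.
const snapshot: ThreadJSON = {
  id: 'thread_123',
  messages: [
    {
      id: 'msg_1',
      type: 'user',
      content: [{ type: 'text', text: 'Hello, world!' }],
      timestamp: new Date().toISOString(),
    },
  ],
  createdAt: new Date().toISOString(),
  updatedAt: new Date().toISOString(),
};

console.log(snapshot.messages.length);
```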