@providerprotocol/ai 0.0.17 → 0.0.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1286 @@
1
+ /**
2
+ * @fileoverview Content block types for multimodal messages.
3
+ *
4
+ * Defines the various content block types that can be included in
5
+ * user and assistant messages, supporting text, images, audio, video,
6
+ * and arbitrary binary data.
7
+ *
8
+ * @module types/content
9
+ */
10
+ /**
11
+ * Image source variants for ImageBlock.
12
+ *
13
+ * Images can be provided as base64-encoded strings, URLs, or raw bytes.
14
+ *
15
+ * @example
16
+ * ```typescript
17
+ * // Base64 encoded image
18
+ * const base64Source: ImageSource = {
19
+ * type: 'base64',
20
+ * data: 'iVBORw0KGgo...'
21
+ * };
22
+ *
23
+ * // URL reference
24
+ * const urlSource: ImageSource = {
25
+ * type: 'url',
26
+ * url: 'https://example.com/image.png'
27
+ * };
28
+ *
29
+ * // Raw bytes
30
+ * const bytesSource: ImageSource = {
31
+ * type: 'bytes',
32
+ * data: new Uint8Array([...])
33
+ * };
34
+ * ```
35
+ */
36
+ type ImageSource = {
37
+ type: 'base64';
38
+ data: string;
39
+ } | {
40
+ type: 'url';
41
+ url: string;
42
+ } | {
43
+ type: 'bytes';
44
+ data: Uint8Array;
45
+ };
46
+ /**
47
+ * Text content block.
48
+ *
49
+ * The most common content block type, containing plain text content.
50
+ *
51
+ * @example
52
+ * ```typescript
53
+ * const textBlock: TextBlock = {
54
+ * type: 'text',
55
+ * text: 'Hello, world!'
56
+ * };
57
+ * ```
58
+ */
59
+ interface TextBlock {
60
+ /** Discriminator for text blocks */
61
+ type: 'text';
62
+ /** The text content */
63
+ text: string;
64
+ }
65
+ /**
66
+ * Image content block.
67
+ *
68
+ * Contains an image with its source data and metadata.
69
+ *
70
+ * @example
71
+ * ```typescript
72
+ * const imageBlock: ImageBlock = {
73
+ * type: 'image',
74
+ * source: { type: 'url', url: 'https://example.com/photo.jpg' },
75
+ * mimeType: 'image/jpeg',
76
+ * width: 1920,
77
+ * height: 1080
78
+ * };
79
+ * ```
80
+ */
81
+ interface ImageBlock {
82
+ /** Discriminator for image blocks */
83
+ type: 'image';
84
+ /** The image data source */
85
+ source: ImageSource;
86
+ /** MIME type of the image (e.g., 'image/png', 'image/jpeg') */
87
+ mimeType: string;
88
+ /** Image width in pixels */
89
+ width?: number;
90
+ /** Image height in pixels */
91
+ height?: number;
92
+ }
93
+ /**
94
+ * Audio content block.
95
+ *
96
+ * Contains audio data with its metadata.
97
+ *
98
+ * @example
99
+ * ```typescript
100
+ * const audioBlock: AudioBlock = {
101
+ * type: 'audio',
102
+ * data: audioBytes,
103
+ * mimeType: 'audio/mp3',
104
+ * duration: 120.5
105
+ * };
106
+ * ```
107
+ */
108
+ interface AudioBlock {
109
+ /** Discriminator for audio blocks */
110
+ type: 'audio';
111
+ /** Raw audio data */
112
+ data: Uint8Array;
113
+ /** MIME type of the audio (e.g., 'audio/mp3', 'audio/wav') */
114
+ mimeType: string;
115
+ /** Duration in seconds */
116
+ duration?: number;
117
+ }
118
+ /**
119
+ * Video content block.
120
+ *
121
+ * Contains video data with its metadata.
122
+ *
123
+ * @example
124
+ * ```typescript
125
+ * const videoBlock: VideoBlock = {
126
+ * type: 'video',
127
+ * data: videoBytes,
128
+ * mimeType: 'video/mp4',
129
+ * duration: 30,
130
+ * width: 1920,
131
+ * height: 1080
132
+ * };
133
+ * ```
134
+ */
135
+ interface VideoBlock {
136
+ /** Discriminator for video blocks */
137
+ type: 'video';
138
+ /** Raw video data */
139
+ data: Uint8Array;
140
+ /** MIME type of the video (e.g., 'video/mp4', 'video/webm') */
141
+ mimeType: string;
142
+ /** Duration in seconds */
143
+ duration?: number;
144
+ /** Video width in pixels */
145
+ width?: number;
146
+ /** Video height in pixels */
147
+ height?: number;
148
+ }
149
+ /**
150
+ * Binary content block for arbitrary data.
151
+ *
152
+ * A generic block type for data that doesn't fit other categories.
153
+ *
154
+ * @example
155
+ * ```typescript
156
+ * const binaryBlock: BinaryBlock = {
157
+ * type: 'binary',
158
+ * data: pdfBytes,
159
+ * mimeType: 'application/pdf',
160
+ * metadata: { filename: 'document.pdf', pages: 10 }
161
+ * };
162
+ * ```
163
+ */
164
+ interface BinaryBlock {
165
+ /** Discriminator for binary blocks */
166
+ type: 'binary';
167
+ /** Raw binary data */
168
+ data: Uint8Array;
169
+ /** MIME type of the data */
170
+ mimeType: string;
171
+ /** Additional metadata about the binary content */
172
+ metadata?: Record<string, unknown>;
173
+ }
174
+ /**
175
+ * Union of all content block types.
176
+ *
177
+ * Used when a function or property can accept any type of content block.
178
+ */
179
+ type ContentBlock = TextBlock | ImageBlock | AudioBlock | VideoBlock | BinaryBlock;
180
+ /**
181
+ * Content types allowed in user messages.
182
+ *
183
+ * Users can send any type of content block including binary data.
184
+ */
185
+ type UserContent = TextBlock | ImageBlock | AudioBlock | VideoBlock | BinaryBlock;
186
+ /**
187
+ * Content types allowed in assistant messages.
188
+ *
189
+ * Assistants can generate text and media but not arbitrary binary data.
190
+ */
191
+ type AssistantContent = TextBlock | ImageBlock | AudioBlock | VideoBlock;
192
+ /**
193
+ * Creates a text content block from a string.
194
+ *
195
+ * @param content - The text content
196
+ * @returns A TextBlock containing the provided text
197
+ *
198
+ * @example
199
+ * ```typescript
200
+ * const block = text('Hello, world!');
201
+ * // { type: 'text', text: 'Hello, world!' }
202
+ * ```
203
+ */
204
+ declare function text(content: string): TextBlock;
205
+ /**
206
+ * Type guard for TextBlock.
207
+ *
208
+ * @param block - The content block to check
209
+ * @returns True if the block is a TextBlock
210
+ *
211
+ * @example
212
+ * ```typescript
213
+ * if (isTextBlock(block)) {
214
+ * console.log(block.text);
215
+ * }
216
+ * ```
217
+ */
218
+ declare function isTextBlock(block: ContentBlock): block is TextBlock;
219
+ /**
220
+ * Type guard for ImageBlock.
221
+ *
222
+ * @param block - The content block to check
223
+ * @returns True if the block is an ImageBlock
224
+ *
225
+ * @example
226
+ * ```typescript
227
+ * if (isImageBlock(block)) {
228
+ * console.log(block.mimeType, block.width, block.height);
229
+ * }
230
+ * ```
231
+ */
232
+ declare function isImageBlock(block: ContentBlock): block is ImageBlock;
233
+ /**
234
+ * Type guard for AudioBlock.
235
+ *
236
+ * @param block - The content block to check
237
+ * @returns True if the block is an AudioBlock
238
+ *
239
+ * @example
240
+ * ```typescript
241
+ * if (isAudioBlock(block)) {
242
+ * console.log(block.mimeType, block.duration);
243
+ * }
244
+ * ```
245
+ */
246
+ declare function isAudioBlock(block: ContentBlock): block is AudioBlock;
247
+ /**
248
+ * Type guard for VideoBlock.
249
+ *
250
+ * @param block - The content block to check
251
+ * @returns True if the block is a VideoBlock
252
+ *
253
+ * @example
254
+ * ```typescript
255
+ * if (isVideoBlock(block)) {
256
+ * console.log(block.mimeType, block.duration);
257
+ * }
258
+ * ```
259
+ */
260
+ declare function isVideoBlock(block: ContentBlock): block is VideoBlock;
261
+ /**
262
+ * Type guard for BinaryBlock.
263
+ *
264
+ * @param block - The content block to check
265
+ * @returns True if the block is a BinaryBlock
266
+ *
267
+ * @example
268
+ * ```typescript
269
+ * if (isBinaryBlock(block)) {
270
+ * console.log(block.mimeType, block.metadata);
271
+ * }
272
+ * ```
273
+ */
274
+ declare function isBinaryBlock(block: ContentBlock): block is BinaryBlock;
275
+
276
+ /**
277
+ * @fileoverview JSON Schema types for tool parameters and structured outputs.
278
+ *
279
+ * Provides TypeScript interfaces for defining JSON Schema objects used in
280
+ * LLM tool definitions and structured output specifications.
281
+ *
282
+ * @module types/schema
283
+ */
284
+ /**
285
+ * Primitive and composite JSON Schema property types.
286
+ *
287
+ * These types correspond to the JSON Schema specification's allowed type values.
288
+ */
289
+ type JSONSchemaPropertyType =
290
+ /** String values */
291
+ 'string'
292
+ /** Floating point numbers */
293
+ | 'number'
294
+ /** Whole numbers */
295
+ | 'integer'
296
+ /** Boolean true/false values */
297
+ | 'boolean'
298
+ /** Ordered lists of values */
299
+ | 'array'
300
+ /** Key-value mappings */
301
+ | 'object'
302
+ /** Explicit null value */
303
+ | 'null';
304
+ /**
305
+ * JSON Schema property definition.
306
+ *
307
+ * Describes a single property within a JSON Schema object, including
308
+ * type constraints, validation rules, and nested structure definitions.
309
+ *
310
+ * @example
311
+ * ```typescript
312
+ * const nameProperty: JSONSchemaProperty = {
313
+ * type: 'string',
314
+ * description: 'User name',
315
+ * minLength: 1,
316
+ * maxLength: 100
317
+ * };
318
+ * ```
319
+ *
320
+ * @example
321
+ * ```typescript
322
+ * const tagsProperty: JSONSchemaProperty = {
323
+ * type: 'array',
324
+ * description: 'List of tags',
325
+ * items: { type: 'string' },
326
+ * minItems: 1,
327
+ * uniqueItems: true
328
+ * };
329
+ * ```
330
+ */
331
+ interface JSONSchemaProperty {
332
+ /** The JSON type of this property */
333
+ type: JSONSchemaPropertyType;
334
+ /** Human-readable description for the LLM */
335
+ description?: string;
336
+ /** Allowed values (enumeration) */
337
+ enum?: unknown[];
338
+ /** Constant value this property must equal */
339
+ const?: unknown;
340
+ /** Default value if not provided */
341
+ default?: unknown;
342
+ /** Minimum string length (string type only) */
343
+ minLength?: number;
344
+ /** Maximum string length (string type only) */
345
+ maxLength?: number;
346
+ /** Regular expression pattern for validation (string type only) */
347
+ pattern?: string;
348
+ /** Semantic format hint (string type only) */
349
+ format?: 'email' | 'uri' | 'date' | 'date-time' | 'uuid';
350
+ /** Minimum value inclusive (number/integer types only) */
351
+ minimum?: number;
352
+ /** Maximum value inclusive (number/integer types only) */
353
+ maximum?: number;
354
+ /** Minimum value exclusive (number/integer types only) */
355
+ exclusiveMinimum?: number;
356
+ /** Maximum value exclusive (number/integer types only) */
357
+ exclusiveMaximum?: number;
358
+ /** Value must be divisible by this (number/integer types only) */
359
+ multipleOf?: number;
360
+ /** Schema for array elements (array type only) */
361
+ items?: JSONSchemaProperty;
362
+ /** Minimum array length (array type only) */
363
+ minItems?: number;
364
+ /** Maximum array length (array type only) */
365
+ maxItems?: number;
366
+ /** Whether array elements must be unique (array type only) */
367
+ uniqueItems?: boolean;
368
+ /** Nested property definitions (object type only) */
369
+ properties?: Record<string, JSONSchemaProperty>;
370
+ /** List of required property names (object type only) */
371
+ required?: string[];
372
+ /** Whether additional properties are allowed (object type only) */
373
+ additionalProperties?: boolean;
374
+ }
375
+ /**
376
+ * Root JSON Schema for tool parameters or structured outputs.
377
+ *
378
+ * This is the top-level schema definition used when defining tool
379
+ * parameters or requesting structured output from an LLM.
380
+ *
381
+ * @example
382
+ * ```typescript
383
+ * const weatherToolSchema: JSONSchema = {
384
+ * type: 'object',
385
+ * description: 'Parameters for getting weather information',
386
+ * properties: {
387
+ * location: {
388
+ * type: 'string',
389
+ * description: 'City name or coordinates'
390
+ * },
391
+ * units: {
392
+ * type: 'string',
393
+ * enum: ['celsius', 'fahrenheit'],
394
+ * description: 'Temperature units'
395
+ * }
396
+ * },
397
+ * required: ['location']
398
+ * };
399
+ * ```
400
+ */
401
+ interface JSONSchema {
402
+ /** Root schemas are always objects */
403
+ type: 'object';
404
+ /** Property definitions for the object */
405
+ properties: Record<string, JSONSchemaProperty>;
406
+ /** List of required property names */
407
+ required?: string[];
408
+ /** Whether additional properties are allowed beyond those defined */
409
+ additionalProperties?: boolean;
410
+ /** Human-readable description of the schema's purpose */
411
+ description?: string;
412
+ }
413
+
414
+ /**
415
+ * @fileoverview Tool types for LLM function calling.
416
+ *
417
+ * Defines the interfaces for registering tools with LLMs, handling
418
+ * tool calls from the model, and managing tool execution strategies.
419
+ *
420
+ * @module types/tool
421
+ */
422
+
423
+ /**
424
+ * Provider-namespaced metadata for tools.
425
+ *
426
+ * Each provider can attach its own metadata under its namespace,
427
+ * enabling provider-specific features like caching, strict mode, etc.
428
+ *
429
+ * @example
430
+ * ```typescript
431
+ * const metadata: ToolMetadata = {
432
+ * anthropic: { cache_control: { type: 'ephemeral' } },
433
+ * openrouter: { cache_control: { type: 'ephemeral', ttl: '1h' } }
434
+ * };
435
+ * ```
436
+ */
437
+ interface ToolMetadata {
438
+ [provider: string]: Record<string, unknown> | undefined;
439
+ }
440
+ /**
441
+ * Tool call requested by the model.
442
+ *
443
+ * Represents a single function call request from the LLM, including
444
+ * the tool name and parsed arguments.
445
+ *
446
+ * @example
447
+ * ```typescript
448
+ * const toolCall: ToolCall = {
449
+ * toolCallId: 'call_abc123',
450
+ * toolName: 'get_weather',
451
+ * arguments: { location: 'San Francisco', units: 'celsius' }
452
+ * };
453
+ * ```
454
+ */
455
+ interface ToolCall {
456
+ /** Unique identifier for this tool call, used to match results */
457
+ toolCallId: string;
458
+ /** Name of the tool being called */
459
+ toolName: string;
460
+ /** Parsed arguments for the tool call */
461
+ arguments: Record<string, unknown>;
462
+ }
463
+ /**
464
+ * Result of tool execution.
465
+ *
466
+ * Returned after executing a tool, containing the result data
467
+ * and whether an error occurred.
468
+ *
469
+ * @example
470
+ * ```typescript
471
+ * const result: ToolResult = {
472
+ * toolCallId: 'call_abc123',
473
+ * result: { temperature: 72, conditions: 'sunny' }
474
+ * };
475
+ *
476
+ * // Error result
477
+ * const errorResult: ToolResult = {
478
+ * toolCallId: 'call_abc123',
479
+ * result: 'Location not found',
480
+ * isError: true
481
+ * };
482
+ * ```
483
+ */
484
+ interface ToolResult {
485
+ /** The tool call ID this result corresponds to */
486
+ toolCallId: string;
487
+ /** The result data (can be any serializable value) */
488
+ result: unknown;
489
+ /** Whether the tool execution resulted in an error */
490
+ isError?: boolean;
491
+ }
492
+ /**
493
+ * Tool definition for LLM function calling.
494
+ *
495
+ * Defines a tool that can be called by the LLM, including its
496
+ * name, description, parameter schema, and execution function.
497
+ *
498
+ * @typeParam TParams - The type of parameters the tool accepts
499
+ * @typeParam TResult - The type of result the tool returns
500
+ *
501
+ * @example
502
+ * ```typescript
503
+ * const weatherTool: Tool<{ location: string }, WeatherData> = {
504
+ * name: 'get_weather',
505
+ * description: 'Get current weather for a location',
506
+ * parameters: {
507
+ * type: 'object',
508
+ * properties: {
509
+ * location: { type: 'string', description: 'City name' }
510
+ * },
511
+ * required: ['location']
512
+ * },
513
+ * run: async (params) => {
514
+ * return fetchWeather(params.location);
515
+ * }
516
+ * };
517
+ * ```
518
+ */
519
+ interface Tool<TParams = unknown, TResult = unknown> {
520
+ /** Tool name (must be unique within an llm() instance) */
521
+ name: string;
522
+ /** Human-readable description for the model to understand when to use this tool */
523
+ description: string;
524
+ /** JSON Schema defining the tool's parameters */
525
+ parameters: JSONSchema;
526
+ /**
527
+ * Provider-specific metadata, namespaced by provider name.
528
+ *
529
+ * Used for provider-specific features like prompt caching:
530
+ * @example
531
+ * ```typescript
532
+ * const tool: Tool = {
533
+ * name: 'search_docs',
534
+ * description: 'Search documentation',
535
+ * parameters: {...},
536
+ * run: async (params) => {...},
537
+ * metadata: {
538
+ * anthropic: { cache_control: { type: 'ephemeral' } }
539
+ * }
540
+ * };
541
+ * ```
542
+ */
543
+ metadata?: ToolMetadata;
544
+ /**
545
+ * Executes the tool with the provided parameters.
546
+ *
547
+ * @param params - The parameters passed by the model
548
+ * @returns The tool result, synchronously or as a Promise
549
+ */
550
+ run(params: TParams): TResult | Promise<TResult>;
551
+ /**
552
+ * Optional approval handler for sensitive operations.
553
+ *
554
+ * If provided, this function is called before the tool executes.
555
+ * Return false to prevent execution.
556
+ *
557
+ * @param params - The parameters the tool would be called with
558
+ * @returns Whether to approve the execution
559
+ */
560
+ approval?(params: TParams): boolean | Promise<boolean>;
561
+ }
562
+ /**
563
+ * Result from onBeforeCall hook indicating whether to proceed and optionally transformed params.
564
+ */
565
+ interface BeforeCallResult {
566
+ /** Whether to proceed with tool execution */
567
+ proceed: boolean;
568
+ /** Transformed parameters to use instead of the original (optional) */
569
+ params?: unknown;
570
+ }
571
+ /**
572
+ * Result from onAfterCall hook optionally containing a transformed result.
573
+ */
574
+ interface AfterCallResult {
575
+ /** Transformed result to use instead of the original */
576
+ result: unknown;
577
+ }
578
+ /**
579
+ * Strategy for controlling tool execution behavior.
580
+ *
581
+ * Provides hooks for monitoring, controlling, and transforming the tool execution
582
+ * loop during LLM inference.
583
+ *
584
+ * @example
585
+ * ```typescript
586
+ * const strategy: ToolUseStrategy = {
587
+ * maxIterations: 5,
588
+ * onToolCall: (tool, params) => {
589
+ * console.log(`Calling ${tool.name} with`, params);
590
+ * },
591
+ * // Transform input parameters
592
+ * onBeforeCall: (tool, params) => {
593
+ * if (tool.name === 'search') {
594
+ * return { proceed: true, params: { ...params, limit: 10 } };
595
+ * }
596
+ * return true;
597
+ * },
598
+ * // Transform output results
599
+ * onAfterCall: (tool, params, result) => {
600
+ * if (tool.name === 'fetch_data') {
601
+ * return { result: sanitize(result) };
602
+ * }
603
+ * },
604
+ * onMaxIterations: (iterations) => {
605
+ * console.warn(`Reached max iterations: ${iterations}`);
606
+ * }
607
+ * };
608
+ * ```
609
+ */
610
+ interface ToolUseStrategy {
611
+ /** Maximum number of tool execution rounds (default: 10) */
612
+ maxIterations?: number;
613
+ /**
614
+ * Called when the model requests a tool call.
615
+ *
616
+ * @param tool - The tool being called
617
+ * @param params - The parameters for the call
618
+ */
619
+ onToolCall?(tool: Tool, params: unknown): void | Promise<void>;
620
+ /**
621
+ * Called before tool execution. Can skip execution or transform parameters.
622
+ *
623
+ * @param tool - The tool about to be executed
624
+ * @param params - The parameters for the call
625
+ * @returns One of:
626
+ * - `false` to skip execution
627
+ * - `true` to proceed with original params
628
+ * - `BeforeCallResult` object to control execution and optionally transform params
629
+ */
630
+ onBeforeCall?(tool: Tool, params: unknown): boolean | BeforeCallResult | Promise<boolean | BeforeCallResult>;
631
+ /**
632
+ * Called after tool execution completes. Can transform the result.
633
+ *
634
+ * @param tool - The tool that was executed
635
+ * @param params - The parameters that were used
636
+ * @param result - The result from the tool
637
+ * @returns Void to use original result, or `AfterCallResult` to transform it
638
+ */
639
+ onAfterCall?(tool: Tool, params: unknown, result: unknown): void | AfterCallResult | Promise<void | AfterCallResult>;
640
+ /**
641
+ * Called when a tool execution throws an error.
642
+ *
643
+ * @param tool - The tool that failed
644
+ * @param params - The parameters that were used
645
+ * @param error - The error that was thrown
646
+ */
647
+ onError?(tool: Tool, params: unknown, error: Error): void | Promise<void>;
648
+ /**
649
+ * Called when the maximum iteration limit is reached.
650
+ *
651
+ * @param iterations - The number of iterations that were performed
652
+ */
653
+ onMaxIterations?(iterations: number): void | Promise<void>;
654
+ }
655
+ /**
656
+ * Record of a completed tool execution.
657
+ *
658
+ * Contains all information about a tool call that was executed,
659
+ * including timing and result data.
660
+ *
661
+ * @example
662
+ * ```typescript
663
+ * const execution: ToolExecution = {
664
+ * toolName: 'get_weather',
665
+ * toolCallId: 'call_abc123',
666
+ * arguments: { location: 'San Francisco' },
667
+ * result: { temperature: 72 },
668
+ * isError: false,
669
+ * duration: 150,
670
+ * approved: true
671
+ * };
672
+ * ```
673
+ */
674
+ interface ToolExecution {
675
+ /** Name of the tool that was called */
676
+ toolName: string;
677
+ /** Unique identifier for this tool call */
678
+ toolCallId: string;
679
+ /** Arguments that were passed to the tool */
680
+ arguments: Record<string, unknown>;
681
+ /** Result returned by the tool */
682
+ result: unknown;
683
+ /** Whether the tool execution resulted in an error */
684
+ isError: boolean;
685
+ /** Execution duration in milliseconds */
686
+ duration: number;
687
+ /** Whether approval was required and granted (undefined if no approval handler) */
688
+ approved?: boolean;
689
+ }
690
+
691
+ /**
692
+ * @fileoverview Message types for conversation history.
693
+ *
694
+ * Defines the message classes used to represent conversation turns
695
+ * between users and assistants, including support for multimodal
696
+ * content and tool calls.
697
+ *
698
+ * @module types/messages
699
+ */
700
+
701
+ /**
702
+ * Message serialized to JSON format.
703
+ * Picks common fields from Message, converts timestamp to string.
704
+ */
705
+ type MessageJSON = Pick<Message, 'id' | 'type' | 'metadata'> & {
706
+ timestamp: string;
707
+ content: ContentBlock[];
708
+ toolCalls?: ToolCall[];
709
+ results?: ToolResult[];
710
+ };
711
+ /**
712
+ * Message type discriminator.
713
+ *
714
+ * Used to distinguish between different message types in a conversation.
715
+ */
716
+ type MessageType = 'user' | 'assistant' | 'tool_result';
717
+ /**
718
+ * Provider-namespaced metadata for messages.
719
+ *
720
+ * Each provider can attach its own metadata under its namespace,
721
+ * preventing conflicts between different providers.
722
+ *
723
+ * @example
724
+ * ```typescript
725
+ * const metadata: MessageMetadata = {
726
+ * openai: { model: 'gpt-4', finishReason: 'stop' },
727
+ * anthropic: { model: 'claude-3', stopReason: 'end_turn' }
728
+ * };
729
+ * ```
730
+ */
731
+ interface MessageMetadata {
732
+ [provider: string]: Record<string, unknown> | undefined;
733
+ }
734
+ /**
735
+ * Options for constructing messages.
736
+ */
737
+ interface MessageOptions {
738
+ /** Custom message ID (auto-generated if not provided) */
739
+ id?: string;
740
+ /** Provider-specific metadata */
741
+ metadata?: MessageMetadata;
742
+ }
743
+ /**
744
+ * Abstract base class for all message types.
745
+ *
746
+ * Provides common functionality for user, assistant, and tool result
747
+ * messages, including content accessors and metadata handling.
748
+ *
749
+ * @example
750
+ * ```typescript
751
+ * // Access text content from any message
752
+ * const text = message.text;
753
+ *
754
+ * // Access images
755
+ * const images = message.images;
756
+ * ```
757
+ */
758
+ declare abstract class Message {
759
+ /** Unique message identifier */
760
+ readonly id: string;
761
+ /** Timestamp when the message was created */
762
+ readonly timestamp: Date;
763
+ /** Provider-specific metadata, namespaced by provider name */
764
+ readonly metadata?: MessageMetadata;
765
+ /** Message type discriminator (implemented by subclasses) */
766
+ abstract readonly type: MessageType;
767
+ /**
768
+ * Returns the content blocks for this message.
769
+ * Implemented by subclasses to provide type-specific content.
770
+ */
771
+ protected abstract getContent(): ContentBlock[];
772
+ /**
773
+ * Creates a new message instance.
774
+ *
775
+ * @param options - Optional message ID and metadata
776
+ */
777
+ constructor(options?: MessageOptions);
778
+ /**
779
+ * Concatenated text content from all text blocks.
780
+ * Blocks are joined with double newlines.
781
+ */
782
+ get text(): string;
783
+ /**
784
+ * All image content blocks in this message.
785
+ */
786
+ get images(): ImageBlock[];
787
+ /**
788
+ * All audio content blocks in this message.
789
+ */
790
+ get audio(): AudioBlock[];
791
+ /**
792
+ * All video content blocks in this message.
793
+ */
794
+ get video(): VideoBlock[];
795
+ }
796
+ /**
797
+ * User input message.
798
+ *
799
+ * Represents a message from the user, which can contain text and/or
800
+ * multimodal content like images, audio, or video.
801
+ *
802
+ * @example
803
+ * ```typescript
804
+ * // Simple text message
805
+ * const msg = new UserMessage('Hello, world!');
806
+ *
807
+ * // Multimodal message
808
+ * const msg = new UserMessage([
809
+ * { type: 'text', text: 'What is in this image?' },
810
+ * { type: 'image', source: { type: 'url', url: '...' }, mimeType: 'image/png' }
811
+ * ]);
812
+ * ```
813
+ */
814
+ declare class UserMessage extends Message {
815
+ /** Message type discriminator */
816
+ readonly type: "user";
817
+ /** Content blocks in this message */
818
+ readonly content: UserContent[];
819
+ /**
820
+ * Creates a new user message.
821
+ *
822
+ * @param content - String (converted to TextBlock) or array of content blocks
823
+ * @param options - Optional message ID and metadata
824
+ */
825
+ constructor(content: string | UserContent[], options?: MessageOptions);
826
+ protected getContent(): ContentBlock[];
827
+ }
828
+ /**
829
+ * Assistant response message.
830
+ *
831
+ * Represents a response from the AI assistant, which may contain
832
+ * text, media content, and/or tool call requests.
833
+ *
834
+ * @example
835
+ * ```typescript
836
+ * // Simple text response
837
+ * const msg = new AssistantMessage('Hello! How can I help?');
838
+ *
839
+ * // Response with tool calls
840
+ * const msg = new AssistantMessage(
841
+ * 'Let me check the weather...',
842
+ * [{ toolCallId: 'call_1', toolName: 'get_weather', arguments: { location: 'NYC' } }]
843
+ * );
844
+ * ```
845
+ */
846
+ declare class AssistantMessage extends Message {
847
+ /** Message type discriminator */
848
+ readonly type: "assistant";
849
+ /** Content blocks in this message */
850
+ readonly content: AssistantContent[];
851
+ /** Tool calls requested by the model (if any) */
852
+ readonly toolCalls?: ToolCall[];
853
+ /**
854
+ * Creates a new assistant message.
855
+ *
856
+ * @param content - String (converted to TextBlock) or array of content blocks
857
+ * @param toolCalls - Tool calls requested by the model
858
+ * @param options - Optional message ID and metadata
859
+ */
860
+ constructor(content: string | AssistantContent[], toolCalls?: ToolCall[], options?: MessageOptions);
861
+ protected getContent(): ContentBlock[];
862
+ /**
863
+ * Whether this message contains tool call requests.
864
+ */
865
+ get hasToolCalls(): boolean;
866
+ }
867
+ /**
868
+ * Tool execution result message.
869
+ *
870
+ * Contains the results of executing one or more tool calls,
871
+ * sent back to the model for further processing.
872
+ *
873
+ * @example
874
+ * ```typescript
875
+ * const msg = new ToolResultMessage([
876
+ * { toolCallId: 'call_1', result: { temperature: 72, conditions: 'sunny' } },
877
+ * { toolCallId: 'call_2', result: 'File not found', isError: true }
878
+ * ]);
879
+ * ```
880
+ */
881
+ declare class ToolResultMessage extends Message {
882
+ /** Message type discriminator */
883
+ readonly type: "tool_result";
884
+ /** Results from tool executions */
885
+ readonly results: ToolResult[];
886
+ /**
887
+ * Creates a new tool result message.
888
+ *
889
+ * @param results - Array of tool execution results
890
+ * @param options - Optional message ID and metadata
891
+ */
892
+ constructor(results: ToolResult[], options?: MessageOptions);
893
+ protected getContent(): ContentBlock[];
894
+ }
895
+ /**
896
+ * Type guard for UserMessage.
897
+ *
898
+ * @param msg - The message to check
899
+ * @returns True if the message is a UserMessage
900
+ *
901
+ * @example
902
+ * ```typescript
903
+ * if (isUserMessage(msg)) {
904
+ * console.log('User said:', msg.text);
905
+ * }
906
+ * ```
907
+ */
908
+ declare function isUserMessage(msg: Message): msg is UserMessage;
909
+ /**
910
+ * Type guard for AssistantMessage.
911
+ *
912
+ * @param msg - The message to check
913
+ * @returns True if the message is an AssistantMessage
914
+ *
915
+ * @example
916
+ * ```typescript
917
+ * if (isAssistantMessage(msg)) {
918
+ * console.log('Assistant said:', msg.text);
919
+ * if (msg.hasToolCalls) {
920
+ * console.log('Tool calls:', msg.toolCalls);
921
+ * }
922
+ * }
923
+ * ```
924
+ */
925
+ declare function isAssistantMessage(msg: Message): msg is AssistantMessage;
926
+ /**
927
+ * Type guard for ToolResultMessage.
928
+ *
929
+ * @param msg - The message to check
930
+ * @returns True if the message is a ToolResultMessage
931
+ *
932
+ * @example
933
+ * ```typescript
934
+ * if (isToolResultMessage(msg)) {
935
+ * for (const result of msg.results) {
936
+ * console.log(`Tool ${result.toolCallId}:`, result.result);
937
+ * }
938
+ * }
939
+ * ```
940
+ */
941
+ declare function isToolResultMessage(msg: Message): msg is ToolResultMessage;
942
+
943
+ /**
944
+ * @fileoverview Turn types for inference results.
945
+ *
946
+ * A Turn represents the complete result of one inference call, including
947
+ * all messages produced during tool execution loops, token usage, and
948
+ * optional structured output data.
949
+ *
950
+ * @module types/turn
951
+ */
952
+
953
+ /**
954
+ * Token usage information for an inference request.
955
+ *
956
+ * Tracks input and output tokens across all inference cycles,
957
+ * with optional per-cycle breakdown and cache metrics.
958
+ *
959
+ * @example
960
+ * ```typescript
961
+ * const usage: TokenUsage = {
962
+ * inputTokens: 150,
963
+ * outputTokens: 50,
964
+ * totalTokens: 200,
965
+ * cacheReadTokens: 100,
966
+ * cacheWriteTokens: 50,
967
+ * cycles: [
968
+ * { inputTokens: 100, outputTokens: 30, cacheReadTokens: 0, cacheWriteTokens: 50 },
969
+ * { inputTokens: 50, outputTokens: 20, cacheReadTokens: 100, cacheWriteTokens: 0 }
970
+ * ]
971
+ * };
972
+ * ```
973
+ */
974
+ interface TokenUsage {
975
+ /** Total input tokens across all cycles */
976
+ inputTokens: number;
977
+ /** Total output tokens across all cycles */
978
+ outputTokens: number;
979
+ /** Sum of input and output tokens */
980
+ totalTokens: number;
981
+ /**
982
+ * Tokens read from cache (cache hits).
983
+ * Returns 0 for providers that don't support or report cache metrics.
984
+ */
985
+ cacheReadTokens: number;
986
+ /**
987
+ * Tokens written to cache (cache misses that were cached).
988
+ * Only Anthropic reports this metric; returns 0 for other providers.
989
+ */
990
+ cacheWriteTokens: number;
991
+ /** Per-cycle token breakdown (if multiple cycles occurred) */
992
+ cycles?: Array<{
993
+ inputTokens: number;
994
+ outputTokens: number;
995
+ cacheReadTokens: number;
996
+ cacheWriteTokens: number;
997
+ }>;
998
+ }
999
/**
 * A Turn represents the complete result of one inference call.
 *
 * Includes all messages produced during tool execution loops,
 * the final assistant response, token usage, and optional
 * structured output data.
 *
 * @typeParam TData - Type of the structured output data
 *
 * @example
 * ```typescript
 * const turn = await instance.generate('Hello');
 * console.log(turn.response.text);
 * console.log(`Used ${turn.usage.totalTokens} tokens in ${turn.cycles} cycles`);
 *
 * // With structured output
 * interface WeatherData { temperature: number; conditions: string; }
 * const turn = await instance.generate<WeatherData>('Get weather');
 * console.log(turn.data?.temperature);
 * ```
 */
interface Turn<TData = unknown> {
    /**
     * All messages produced during this inference, in chronological order.
     * Includes UserMessage, AssistantMessage (may include toolCalls), and ToolResultMessage.
     */
    readonly messages: Message[];
    /** The final assistant response (last AssistantMessage in the turn) */
    readonly response: AssistantMessage;
    /** Tool executions that occurred during this turn */
    readonly toolExecutions: ToolExecution[];
    /** Aggregate token usage for the entire turn */
    readonly usage: TokenUsage;
    /** Total number of inference cycles (1 + number of tool rounds) */
    readonly cycles: number;
    /**
     * Structured output data (present only when a structure schema was provided).
     * Type is inferred from the schema when using TypeScript.
     */
    readonly data?: TData;
}
1040
+ /**
1041
+ * Turn serialized to JSON format.
1042
+ * Messages are converted to MessageJSON, response is omitted (computed from messages).
1043
+ */
1044
+ type TurnJSON = Omit<Turn, 'messages' | 'response'> & {
1045
+ messages: MessageJSON[];
1046
+ };
1047
/**
 * Creates a Turn from accumulated inference data.
 *
 * Ambient declaration — the implementation lives in the compiled bundle.
 *
 * @typeParam TData - Type of the structured output data
 * @param messages - All messages produced during the inference, in order
 * @param toolExecutions - Record of all tool executions
 * @param usage - Aggregate token usage
 * @param cycles - Number of inference cycles
 * @param data - Optional structured output data
 * @returns A complete Turn object
 * @throws Error if no assistant message is found in the messages
 *
 * @example
 * ```typescript
 * const turn = createTurn(
 *     [userMsg, assistantMsg],
 *     [],
 *     { inputTokens: 100, outputTokens: 50, totalTokens: 150 },
 *     1
 * );
 * ```
 */
declare function createTurn<TData = unknown>(messages: Message[], toolExecutions: ToolExecution[], usage: TokenUsage, cycles: number, data?: TData): Turn<TData>;
1070
/**
 * Creates an empty TokenUsage object.
 *
 * @returns A TokenUsage with all counters set to zero
 *
 * @example
 * ```typescript
 * const usage = emptyUsage();
 * // { inputTokens: 0, outputTokens: 0, totalTokens: 0, cacheReadTokens: 0, cacheWriteTokens: 0, cycles: [] }
 * // NOTE(review): example shows cycles as [] — the field is optional on
 * // TokenUsage, so confirm against the implementation if you rely on it.
 * ```
 */
declare function emptyUsage(): TokenUsage;
1082
/**
 * Aggregates token usage from multiple inference cycles.
 *
 * @param usages - Array of TokenUsage objects, one per cycle
 * @returns Combined TokenUsage with a per-cycle breakdown in `cycles`
 *
 * @example
 * ```typescript
 * const cycle1 = { inputTokens: 100, outputTokens: 30, totalTokens: 130, cacheReadTokens: 50, cacheWriteTokens: 0 };
 * const cycle2 = { inputTokens: 150, outputTokens: 40, totalTokens: 190, cacheReadTokens: 100, cacheWriteTokens: 0 };
 * const total = aggregateUsage([cycle1, cycle2]);
 * // { inputTokens: 250, outputTokens: 70, totalTokens: 320, cacheReadTokens: 150, cacheWriteTokens: 0, cycles: [...] }
 * ```
 */
declare function aggregateUsage(usages: TokenUsage[]): TokenUsage;
1097
+
1098
+ /**
1099
+ * @fileoverview Streaming types for real-time LLM responses.
1100
+ *
1101
+ * Defines the event types and interfaces for streaming LLM inference,
1102
+ * including text deltas, tool call deltas, and control events.
1103
+ *
1104
+ * @module types/stream
1105
+ */
1106
+
1107
/**
 * Stream event type discriminators.
 *
 * Each member represents a different kind of streaming update
 * from the LLM provider; see StreamEvent for the payload shape.
 */
type StreamEventType =
/** Incremental text output */
'text_delta'
/** Incremental reasoning/thinking output */
 | 'reasoning_delta'
/** Incremental image data */
 | 'image_delta'
/** Incremental audio data */
 | 'audio_delta'
/** Incremental video data */
 | 'video_delta'
/** Incremental tool call data (arguments being streamed) */
 | 'tool_call_delta'
/** Tool execution has started */
 | 'tool_execution_start'
/** Tool execution has completed */
 | 'tool_execution_end'
/** Beginning of a message */
 | 'message_start'
/** End of a message */
 | 'message_stop'
/** Beginning of a content block */
 | 'content_block_start'
/** End of a content block */
 | 'content_block_stop';
1138
/**
 * Event delta data payload.
 *
 * Contains the type-specific data for a streaming event. All fields are
 * optional; which ones are populated depends on the event's
 * StreamEventType, as noted per field.
 */
interface EventDelta {
    /** Incremental text content (for text_delta, reasoning_delta) */
    text?: string;
    /** Incremental binary data (for image_delta, audio_delta, video_delta) */
    data?: Uint8Array;
    /** Tool call identifier (for tool_call_delta, tool_execution_start/end) */
    toolCallId?: string;
    /** Tool name (for tool_call_delta, tool_execution_start/end) */
    toolName?: string;
    /** Incremental JSON arguments string (for tool_call_delta) */
    argumentsJson?: string;
    /** Tool execution result (for tool_execution_end) */
    result?: unknown;
    /** Whether tool execution resulted in an error (for tool_execution_end) */
    isError?: boolean;
    /** Timestamp in milliseconds (for tool_execution_start/end) */
    timestamp?: number;
}
1162
/**
 * A single streaming event from the LLM.
 *
 * Events are emitted in order as the model generates output,
 * allowing real-time display of responses. Discriminate on `type`
 * to know which EventDelta fields are populated.
 *
 * @example
 * ```typescript
 * for await (const event of stream) {
 *     if (event.type === 'text_delta') {
 *         process.stdout.write(event.delta.text ?? '');
 *     } else if (event.type === 'tool_call_delta') {
 *         console.log('Tool:', event.delta.toolName);
 *     }
 * }
 * ```
 */
interface StreamEvent {
    /** Event type discriminator */
    type: StreamEventType;
    /** Index of the content block this event belongs to */
    index: number;
    /** Event-specific data payload */
    delta: EventDelta;
}
1187
/**
 * Stream result - an async iterable that also provides the final turn.
 *
 * Allows consuming streaming events while also awaiting the complete
 * Turn result after streaming finishes.
 *
 * @typeParam TData - Type of the structured output data
 *
 * @example
 * ```typescript
 * const stream = instance.stream('Tell me a story');
 *
 * // Consume streaming events
 * for await (const event of stream) {
 *     if (event.type === 'text_delta') {
 *         process.stdout.write(event.delta.text ?? '');
 *     }
 * }
 *
 * // Get the complete turn after streaming
 * const turn = await stream.turn;
 * console.log('\n\nTokens used:', turn.usage.totalTokens);
 * ```
 */
interface StreamResult<TData = unknown> extends AsyncIterable<StreamEvent> {
    /**
     * Promise that resolves to the complete Turn after streaming finishes.
     */
    readonly turn: Promise<Turn<TData>>;
    /**
     * Aborts the stream, stopping further events and cancelling the request.
     */
    abort(): void;
}
1221
/**
 * Creates a StreamResult from an async generator and completion promise.
 *
 * Ambient declaration — the implementation lives in the compiled bundle.
 *
 * @typeParam TData - Type of the structured output data
 * @param generator - Async generator that yields stream events
 * @param turnPromise - Promise that resolves to the complete Turn
 * @param abortController - Controller used by `abort()` to cancel the stream
 * @returns A StreamResult that can be iterated and awaited
 *
 * @example
 * ```typescript
 * const abortController = new AbortController();
 * const stream = createStreamResult(
 *     eventGenerator(),
 *     turnPromise,
 *     abortController
 * );
 * ```
 */
declare function createStreamResult<TData = unknown>(generator: AsyncGenerator<StreamEvent, void, unknown>, turnPromise: Promise<Turn<TData>>, abortController: AbortController): StreamResult<TData>;
1241
/**
 * Creates a text delta stream event.
 *
 * @param text - The incremental text content
 * @param index - Content block index (default: 0)
 * @returns A text_delta StreamEvent with `delta.text` populated
 */
declare function textDelta(text: string, index?: number): StreamEvent;
1249
/**
 * Creates a tool call delta stream event.
 *
 * @param toolCallId - Unique identifier for the tool call
 * @param toolName - Name of the tool being called
 * @param argumentsJson - Incremental JSON arguments string (may be a fragment)
 * @param index - Content block index (default: 0)
 * @returns A tool_call_delta StreamEvent
 */
declare function toolCallDelta(toolCallId: string, toolName: string, argumentsJson: string, index?: number): StreamEvent;
1259
/**
 * Creates a message start stream event.
 *
 * @returns A message_start StreamEvent
 */
declare function messageStart(): StreamEvent;
1265
/**
 * Creates a message stop stream event.
 *
 * @returns A message_stop StreamEvent
 */
declare function messageStop(): StreamEvent;
1271
/**
 * Creates a content block start stream event.
 *
 * @param index - Index of the content block that is starting
 * @returns A content_block_start StreamEvent
 */
declare function contentBlockStart(index: number): StreamEvent;
1278
/**
 * Creates a content block stop stream event.
 *
 * @param index - Index of the content block that is stopping
 * @returns A content_block_stop StreamEvent
 */
declare function contentBlockStop(index: number): StreamEvent;
1285
+
1286
// Single-letter alias export map — presumably emitted by the bundler's
// declaration rollup (TODO confirm tooling); consumers import the real
// names via the package entry point, not these aliases.
export { type TurnJSON as $, type AssistantContent as A, type BinaryBlock as B, type ContentBlock as C, isUserMessage as D, isAssistantMessage as E, isToolResultMessage as F, type MessageMetadata as G, type MessageOptions as H, type ImageSource as I, type JSONSchema as J, createTurn as K, emptyUsage as L, Message as M, aggregateUsage as N, type StreamEventType as O, type EventDelta as P, createStreamResult as Q, textDelta as R, type StreamResult as S, type Turn as T, type UserContent as U, type VideoBlock as V, toolCallDelta as W, messageStart as X, messageStop as Y, contentBlockStart as Z, contentBlockStop as _, type MessageType as a, type MessageJSON as b, type Tool as c, type ToolUseStrategy as d, AssistantMessage as e, type TokenUsage as f, type StreamEvent as g, type ImageBlock as h, type JSONSchemaProperty as i, type JSONSchemaPropertyType as j, type TextBlock as k, type AudioBlock as l, isTextBlock as m, isImageBlock as n, isAudioBlock as o, isVideoBlock as p, isBinaryBlock as q, type ToolCall as r, type ToolResult as s, text as t, type ToolMetadata as u, type BeforeCallResult as v, type AfterCallResult as w, type ToolExecution as x, UserMessage as y, ToolResultMessage as z };