@providerprotocol/ai 0.0.27 → 0.0.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. package/dist/anthropic/index.d.ts +1 -1
  2. package/dist/anthropic/index.js +38 -1
  3. package/dist/anthropic/index.js.map +1 -1
  4. package/dist/{chunk-6AZVUI6H.js → chunk-ILR2D5PN.js} +7 -1
  5. package/dist/chunk-ILR2D5PN.js.map +1 -0
  6. package/dist/{chunk-MKDLXV4O.js → chunk-NSE7QN3P.js} +1 -1
  7. package/dist/chunk-NSE7QN3P.js.map +1 -0
  8. package/dist/embedding-DtyOFIsS.d.ts +158 -0
  9. package/dist/google/index.d.ts +1 -1
  10. package/dist/google/index.js +41 -4
  11. package/dist/google/index.js.map +1 -1
  12. package/dist/http/index.d.ts +2 -2
  13. package/dist/index.d.ts +430 -514
  14. package/dist/index.js +627 -3
  15. package/dist/index.js.map +1 -1
  16. package/dist/llm-DgDEy9il.d.ts +3118 -0
  17. package/dist/ollama/index.d.ts +1 -1
  18. package/dist/ollama/index.js +2 -1
  19. package/dist/ollama/index.js.map +1 -1
  20. package/dist/openai/index.d.ts +1 -1
  21. package/dist/openai/index.js +70 -3
  22. package/dist/openai/index.js.map +1 -1
  23. package/dist/openrouter/index.d.ts +20 -2
  24. package/dist/openrouter/index.js +134 -13
  25. package/dist/openrouter/index.js.map +1 -1
  26. package/dist/proxy/index.d.ts +2 -2
  27. package/dist/proxy/index.js +3 -2
  28. package/dist/proxy/index.js.map +1 -1
  29. package/dist/{retry-BhX8mIrL.d.ts → retry-DXLQnTuU.d.ts} +1 -1
  30. package/dist/xai/index.d.ts +1 -1
  31. package/dist/xai/index.js +7 -3
  32. package/dist/xai/index.js.map +1 -1
  33. package/package.json +1 -1
  34. package/dist/chunk-6AZVUI6H.js.map +0 -1
  35. package/dist/chunk-MKDLXV4O.js.map +0 -1
  36. package/dist/embedding-CK5oa38O.d.ts +0 -1235
  37. package/dist/provider-6-mJYOOl.d.ts +0 -1474
package/dist/index.d.ts CHANGED
@@ -1,514 +1,8 @@
1
- import { M as Message, T as Turn, a as MessageType, b as MessageJSON, c as Tool, d as ToolUseStrategy, J as JSONSchema, S as StreamResult, A as AssistantMessage, e as TokenUsage, f as StreamEvent, E as EmbeddingOptions, g as EmbeddingInstance } from './embedding-CK5oa38O.js';
2
- export { m as AfterCallResult, B as BeforeCallResult, L as EmbedOptions, N as Embedding, R as EmbeddingModelInput, P as EmbeddingProgress, O as EmbeddingResult, Q as EmbeddingStream, y as EventDelta, h as JSONSchemaProperty, i as JSONSchemaPropertyType, t as MessageMetadata, u as MessageOptions, p as MessageRole, z as StreamEventType, j as ToolCall, n as ToolExecution, l as ToolMetadata, k as ToolResult, o as ToolResultMessage, U as UserMessage, x as aggregateUsage, I as contentBlockStart, K as contentBlockStop, C as createStreamResult, v as createTurn, w as emptyUsage, r as isAssistantMessage, s as isToolResultMessage, q as isUserMessage, G as messageStart, H as messageStop, D as textDelta, F as toolCallDelta } from './embedding-CK5oa38O.js';
3
- import { U as UserContent, A as AssistantContent, P as ProviderIdentity, a as ProviderConfig, C as ContentBlock, L as LLMProvider, I as ImageOptions, b as ImageInstance, c as LLMHandler$1, E as EmbeddingHandler, d as ImageHandler, e as Provider, M as ModelReference } from './provider-6-mJYOOl.js';
4
- export { l as AudioBlock, B as BinaryBlock, D as BoundEmbeddingModel, a4 as BoundImageModel, n as ContentBlockType, N as EmbeddingInput, y as EmbeddingProvider, F as EmbeddingRequest, G as EmbeddingResponse, J as EmbeddingUsage, H as EmbeddingVector, h as ErrorCode, W as GeneratedImage, f as Image, k as ImageBlock, $ as ImageCapabilities, Q as ImageEditInput, a1 as ImageEditRequest, S as ImageGenerateOptions, a5 as ImageHandler, O as ImageInput, a6 as ImageModelInput, z as ImageProvider, a3 as ImageProviderStreamResult, a0 as ImageRequest, a2 as ImageResponse, Y as ImageResult, m as ImageSource, o as ImageSourceType, Z as ImageStreamEvent, _ as ImageStreamResult, X as ImageUsage, K as KeyStrategy, j as Modality, i as ModalityType, R as ReasoningBlock, x as RetryStrategy, T as TextBlock, g as UPPError, V as VideoBlock, u as isAudioBlock, w as isBinaryBlock, s as isImageBlock, q as isReasoningBlock, p as isTextBlock, v as isVideoBlock, r as reasoning, t as text } from './provider-6-mJYOOl.js';
5
- export { D as DynamicKey, E as ExponentialBackoff, L as LinearBackoff, N as NoRetry, a as RetryAfterStrategy, R as RoundRobinKeys, T as TokenBucket, W as WeightedKeys } from './retry-BhX8mIrL.js';
6
-
7
- /**
8
- * @fileoverview Thread class for managing conversation history.
9
- *
10
- * Provides a utility class for building and manipulating conversation
11
- * message sequences, with support for serialization and deserialization.
12
- *
13
- * @module types/thread
14
- */
15
-
16
- /**
17
- * Thread serialized to JSON format.
18
- * Picks id from Thread, converts dates to strings.
19
- */
20
- type ThreadJSON = Pick<Thread, 'id'> & {
21
- messages: MessageJSON[];
22
- createdAt: string;
23
- updatedAt: string;
24
- };
25
- /**
26
- * Thread - A utility class for managing conversation history.
27
- *
28
- * Provides methods for building, manipulating, and persisting
29
- * conversation message sequences. This class is optional; users
30
- * can also manage their own `Message[]` arrays directly.
31
- *
32
- * @example
33
- * ```typescript
34
- * // Create a new thread and add messages
35
- * const thread = new Thread();
36
- * thread.user('Hello!');
37
- * thread.assistant('Hi there! How can I help?');
38
- *
39
- * // Use with LLM inference
40
- * const turn = await instance.generate(thread, 'What is 2+2?');
41
- * thread.append(turn);
42
- *
43
- * // Serialize for storage
44
- * const json = thread.toJSON();
45
- * localStorage.setItem('chat', JSON.stringify(json));
46
- *
47
- * // Restore from storage
48
- * const restored = Thread.fromJSON(JSON.parse(localStorage.getItem('chat')));
49
- * ```
50
- */
51
- declare class Thread {
52
- /** Unique thread identifier */
53
- readonly id: string;
54
- /** Internal message storage */
55
- private _messages;
56
- /** Creation timestamp */
57
- private _createdAt;
58
- /** Last update timestamp */
59
- private _updatedAt;
60
- /**
61
- * Creates a new thread instance.
62
- *
63
- * @param messages - Optional initial messages to populate the thread
64
- */
65
- constructor(messages?: Message[]);
66
- /**
67
- * All messages in the thread (readonly).
68
- */
69
- get messages(): readonly Message[];
70
- /**
71
- * Number of messages in the thread.
72
- */
73
- get length(): number;
74
- /**
75
- * Appends all messages from a Turn to the thread.
76
- *
77
- * @param turn - The Turn containing messages to append
78
- * @returns This thread instance for chaining
79
- */
80
- append(turn: Turn): this;
81
- /**
82
- * Adds raw messages to the thread.
83
- *
84
- * @param messages - Messages to add
85
- * @returns This thread instance for chaining
86
- */
87
- push(...messages: Message[]): this;
88
- /**
89
- * Adds a user message to the thread.
90
- *
91
- * @param content - String or array of content blocks
92
- * @returns This thread instance for chaining
93
- *
94
- * @example
95
- * ```typescript
96
- * thread.user('Hello, world!');
97
- * thread.user([
98
- * { type: 'text', text: 'Describe this image:' },
99
- * { type: 'image', source: { type: 'url', url: '...' }, mimeType: 'image/png' }
100
- * ]);
101
- * ```
102
- */
103
- user(content: string | UserContent[]): this;
104
- /**
105
- * Adds an assistant message to the thread.
106
- *
107
- * @param content - String or array of content blocks
108
- * @returns This thread instance for chaining
109
- *
110
- * @example
111
- * ```typescript
112
- * thread.assistant('I can help with that!');
113
- * ```
114
- */
115
- assistant(content: string | AssistantContent[]): this;
116
- /**
117
- * Filters messages by type.
118
- *
119
- * @param type - The message type to filter by
120
- * @returns Array of messages matching the type
121
- *
122
- * @example
123
- * ```typescript
124
- * const userMessages = thread.filter('user');
125
- * const assistantMessages = thread.filter('assistant');
126
- * ```
127
- */
128
- filter(type: MessageType): Message[];
129
- /**
130
- * Returns the last N messages from the thread.
131
- *
132
- * @param count - Number of messages to return
133
- * @returns Array of the last N messages
134
- *
135
- * @example
136
- * ```typescript
137
- * const recent = thread.tail(5);
138
- * ```
139
- */
140
- tail(count: number): Message[];
141
- /**
142
- * Creates a new thread with a subset of messages.
143
- *
144
- * @param start - Start index (inclusive)
145
- * @param end - End index (exclusive)
146
- * @returns New Thread containing the sliced messages
147
- *
148
- * @example
149
- * ```typescript
150
- * const subset = thread.slice(0, 10);
151
- * ```
152
- */
153
- slice(start?: number, end?: number): Thread;
154
- /**
155
- * Removes all messages from the thread.
156
- *
157
- * @returns This thread instance for chaining
158
- */
159
- clear(): this;
160
- /**
161
- * Converts the thread to a plain message array.
162
- *
163
- * @returns Copy of the internal message array
164
- */
165
- toMessages(): Message[];
166
- /**
167
- * Serializes the thread to JSON format.
168
- *
169
- * @returns JSON-serializable representation of the thread
170
- *
171
- * @example
172
- * ```typescript
173
- * const json = thread.toJSON();
174
- * localStorage.setItem('thread', JSON.stringify(json));
175
- * ```
176
- */
177
- toJSON(): ThreadJSON;
178
- /**
179
- * Deserializes a thread from JSON format.
180
- *
181
- * @param json - The JSON representation to deserialize
182
- * @returns Reconstructed Thread instance
183
- *
184
- * @example
185
- * ```typescript
186
- * const json = JSON.parse(localStorage.getItem('thread'));
187
- * const thread = Thread.fromJSON(json);
188
- * ```
189
- */
190
- static fromJSON(json: ThreadJSON): Thread;
191
- /**
192
- * Enables iteration over messages with for...of loops.
193
- *
194
- * @returns Iterator over the thread's messages
195
- *
196
- * @example
197
- * ```typescript
198
- * for (const message of thread) {
199
- * console.log(message.text);
200
- * }
201
- * ```
202
- */
203
- [Symbol.iterator](): Iterator<Message>;
204
- /**
205
- * Converts a message to JSON format.
206
- */
207
- private messageToJSON;
208
- /**
209
- * Reconstructs a message from JSON format.
210
- */
211
- private static messageFromJSON;
212
- }
213
-
214
- /**
215
- * @fileoverview LLM types for language model inference.
216
- *
217
- * Defines the interfaces for configuring and executing LLM inference,
218
- * including options, instances, requests, responses, and capabilities.
219
- *
220
- * @module types/llm
221
- */
222
-
223
- /**
224
- * Structural type for model input that accepts any ModelReference.
225
- * Uses structural typing to avoid generic variance issues with Provider generics.
226
- * The nested types use `unknown` to accept any provider parameter types.
227
- *
228
- * @remarks
229
- * This type mirrors {@link ModelReference} while keeping provider options
230
- * structurally compatible across providers.
231
- *
232
- * @see ModelReference
233
- */
234
- type ModelInput = {
235
- readonly modelId: string;
236
- readonly provider: ProviderIdentity;
237
- /**
238
- * Optional provider-specific configuration that gets merged into request config.
239
- * Set when creating a model reference with provider-specific options.
240
- */
241
- readonly providerConfig?: Partial<ProviderConfig>;
242
- /**
243
- * The original options passed when creating this model reference.
244
- * Used by providers with multiple LLM handlers to resolve the correct handler.
245
- */
246
- readonly options?: unknown;
247
- };
248
- /**
249
- * LLM capabilities declare what a provider's API supports.
250
- *
251
- * These are API-level capabilities, not individual model capabilities.
252
- * If a user attempts to use a feature with a model that doesn't support it,
253
- * the provider's API will return an error.
254
- *
255
- * Capabilities are static and do not vary per-request or per-model.
256
- *
257
- * @example
258
- * ```typescript
259
- * const capabilities: LLMCapabilities = {
260
- * streaming: true,
261
- * tools: true,
262
- * structuredOutput: true,
263
- * imageInput: true,
264
- * videoInput: false,
265
- * audioInput: false
266
- * };
267
- * ```
268
- */
269
- interface LLMCapabilities {
270
- /** Provider API supports streaming responses */
271
- streaming: boolean;
272
- /** Provider API supports tool/function calling */
273
- tools: boolean;
274
- /** Provider API supports native structured output (JSON schema) */
275
- structuredOutput: boolean;
276
- /** Provider API supports image input in messages */
277
- imageInput: boolean;
278
- /** Provider API supports video input in messages */
279
- videoInput: boolean;
280
- /** Provider API supports audio input in messages */
281
- audioInput: boolean;
282
- /** Provider API supports image generation output (via image() or built-in tools) */
283
- imageOutput?: boolean;
284
- }
285
- /**
286
- * Valid input types for inference.
287
- *
288
- * Inference input can be a simple string, a Message object, or
289
- * a raw ContentBlock for multimodal input.
290
- */
291
- type InferenceInput = string | Message | ContentBlock;
292
- /**
293
- * Options for creating an LLM instance with the llm() function.
294
- *
295
- * @typeParam TParams - Provider-specific parameter type
296
- *
297
- * @example
298
- * ```typescript
299
- * const options: LLMOptions = {
300
- * model: openai('gpt-4'),
301
- * system: 'You are a helpful assistant.',
302
- * params: { temperature: 0.7, max_tokens: 1000 },
303
- * tools: [weatherTool, searchTool],
304
- * toolStrategy: { maxIterations: 5 }
305
- * };
306
- *
307
- * const instance = llm(options);
308
- * ```
309
- */
310
- interface LLMOptions<TParams = unknown> {
311
- /** A model reference from a provider factory */
312
- model: ModelInput;
313
- /** Provider infrastructure configuration (optional - uses env vars if omitted) */
314
- config?: ProviderConfig;
315
- /** Model-specific parameters (temperature, max_tokens, etc.) */
316
- params?: TParams;
317
- /**
318
- * System prompt for all inferences.
319
- *
320
- * Can be a simple string or a provider-specific array format:
321
- * - Anthropic: `[{type: 'text', text: '...', cache_control?: {...}}]`
322
- * - Google: `[{text: '...'}, {text: '...'}]` (parts array)
323
- *
324
- * Array formats are passed through directly to the provider.
325
- */
326
- system?: string | unknown[];
327
- /** Tools available to the model */
328
- tools?: Tool[];
329
- /** Tool execution strategy */
330
- toolStrategy?: ToolUseStrategy;
331
- /** Structured output schema (JSON Schema) */
332
- structure?: JSONSchema;
333
- }
334
- /**
335
- * LLM instance returned by the llm() function.
336
- *
337
- * Provides methods for generating responses and streaming output,
338
- * with access to the bound model and capabilities.
339
- *
340
- * @typeParam TParams - Provider-specific parameter type
341
- *
342
- * @example
343
- * ```typescript
344
- * import { llm, openai, StreamEventType } from 'provider-protocol';
345
- *
346
- * const instance = llm({ model: openai('gpt-4') });
347
- *
348
- * // Simple generation
349
- * const turn = await instance.generate('Hello!');
350
- * console.log(turn.response.text);
351
- *
352
- * // Streaming
353
- * const stream = instance.stream('Tell me a story');
354
- * for await (const event of stream) {
355
- * if (event.type === StreamEventType.TextDelta) {
356
- * process.stdout.write(event.delta.text ?? '');
357
- * }
358
- * }
359
- * const finalTurn = await stream.turn;
360
- * ```
361
- */
362
- interface LLMInstance<TParams = unknown> {
363
- /**
364
- * Executes inference and returns the complete Turn.
365
- *
366
- * Supports multiple calling patterns:
367
- * - Single input: `generate('Hello')`
368
- * - Multiple inputs: `generate('Context...', 'Question?')`
369
- * - With history: `generate(messages, 'Follow-up?')`
370
- * - With thread: `generate(thread, 'Next message')`
371
- *
372
- * @param historyOrInput - History (Message[] or Thread) or first input
373
- * @param input - Additional inputs to include in the request
374
- * @returns Promise resolving to the complete Turn
375
- */
376
- generate(historyOrInput: Message[] | Thread | InferenceInput, ...input: InferenceInput[]): Promise<Turn>;
377
- /**
378
- * Executes streaming inference.
379
- *
380
- * Returns an async iterable of stream events that can also
381
- * be awaited for the final Turn.
382
- *
383
- * @param historyOrInput - History (Message[] or Thread) or first input
384
- * @param input - Additional inputs to include in the request
385
- * @returns StreamResult that yields events and resolves to Turn
386
- */
387
- stream(historyOrInput: Message[] | Thread | InferenceInput, ...input: InferenceInput[]): StreamResult;
388
- /** The bound model instance */
389
- readonly model: BoundLLMModel<TParams>;
390
- /** Current system prompt (string or provider-specific array format) */
391
- readonly system: string | unknown[] | undefined;
392
- /** Current model parameters */
393
- readonly params: TParams | undefined;
394
- /** Provider API capabilities */
395
- readonly capabilities: LLMCapabilities;
396
- }
397
- /**
398
- * Request passed from the llm() core to providers.
399
- *
400
- * Contains all information needed by a provider to execute inference.
401
- * The config is required here because llm() resolves defaults before
402
- * passing to providers.
403
- *
404
- * @typeParam TParams - Provider-specific parameter type
405
- * @internal
406
- */
407
- interface LLMRequest<TParams = unknown> {
408
- /** All messages for this request (history + new input) */
409
- messages: Message[];
410
- /**
411
- * System prompt - string or provider-specific array format.
412
- * Arrays are passed through directly to the provider.
413
- */
414
- system?: string | unknown[];
415
- /** Model-specific parameters (passed through unchanged) */
416
- params?: TParams;
417
- /** Tools available for this request */
418
- tools?: Tool[];
419
- /** Structured output schema (if requested) */
420
- structure?: JSONSchema;
421
- /** Provider infrastructure config (resolved by llm() core) */
422
- config: ProviderConfig;
423
- /** Abort signal for cancellation */
424
- signal?: AbortSignal;
425
- }
426
- /**
427
- * Raw provider response from a single inference cycle.
428
- *
429
- * Does not include tool loop handling - that's managed by llm() core.
430
- *
431
- * @internal
432
- */
433
- interface LLMResponse {
434
- /** The assistant's response message */
435
- message: AssistantMessage;
436
- /** Token usage for this cycle */
437
- usage: TokenUsage;
438
- /** Stop reason from the provider */
439
- stopReason: string;
440
- /**
441
- * Structured output data extracted by the provider.
442
- * Present when a structure schema was requested and successfully extracted.
443
- */
444
- data?: unknown;
445
- }
446
- /**
447
- * Raw provider stream result.
448
- *
449
- * An async iterable of stream events with a Promise that resolves
450
- * to the complete response after streaming finishes.
451
- *
452
- * @internal
453
- */
454
- interface LLMStreamResult extends AsyncIterable<StreamEvent> {
455
- /** Promise resolving to the complete response */
456
- readonly response: Promise<LLMResponse>;
457
- }
458
- /**
459
- * Bound LLM model - full definition.
460
- *
461
- * Represents a model bound to a specific provider and model ID,
462
- * ready to execute inference requests.
463
- *
464
- * @typeParam TParams - Provider-specific parameter type
465
- */
466
- interface BoundLLMModel<TParams = unknown> {
467
- /** The model identifier */
468
- readonly modelId: string;
469
- /** Reference to the parent provider */
470
- readonly provider: LLMProvider<TParams>;
471
- /** Provider API capabilities */
472
- readonly capabilities: LLMCapabilities;
473
- /**
474
- * Executes a single non-streaming inference request.
475
- *
476
- * @param request - The inference request
477
- * @returns Promise resolving to the response
478
- */
479
- complete(request: LLMRequest<TParams>): Promise<LLMResponse>;
480
- /**
481
- * Executes a single streaming inference request.
482
- *
483
- * @param request - The inference request
484
- * @returns Stream result with events and final response
485
- */
486
- stream(request: LLMRequest<TParams>): LLMStreamResult;
487
- }
488
- /**
489
- * LLM Handler interface for providers.
490
- *
491
- * Implemented by providers to enable language model capabilities.
492
- *
493
- * @typeParam TParams - Provider-specific parameter type
494
- */
495
- interface LLMHandler<TParams = unknown> {
496
- /**
497
- * Binds a model ID to create an executable model instance.
498
- *
499
- * @param modelId - The model identifier to bind
500
- * @returns A bound LLM model ready for inference
501
- */
502
- bind(modelId: string): BoundLLMModel<TParams>;
503
- /**
504
- * Sets the parent provider reference.
505
- * Called by createProvider() after the provider is constructed.
506
- *
507
- * @param provider - The parent provider
508
- * @internal
509
- */
510
- _setProvider?(provider: LLMProvider<TParams>): void;
511
- }
1
+ import { L as LLMOptions, a as LLMInstance, I as ImageOptions, b as ImageInstance, c as LLMHandler, E as EmbeddingHandler, d as ImageHandler, P as Provider, M as ModelReference, D as DocumentSource, e as DocumentBlock, A as AudioBlock, V as VideoBlock } from './llm-DgDEy9il.js';
2
+ export { S as AfterCallResult, o as AssistantContent, Z as AssistantMessage, Q as BeforeCallResult, B as BinaryBlock, aw as BoundEmbeddingModel, aV as BoundImageModel, aG as BoundLLMModel, C as ContentBlock, p as ContentBlockType, r as DocumentSourceType, aB as EmbeddingInput, au as EmbeddingProvider, ax as EmbeddingRequest, ay as EmbeddingResponse, aA as EmbeddingUsage, az as EmbeddingVector, g as ErrorCode, af as EventDelta, aL as GeneratedImage, f as Image, l as ImageBlock, aQ as ImageCapabilities, aJ as ImageEditInput, aS as ImageEditRequest, aK as ImageGenerateOptions, aI as ImageInput, aW as ImageModelInput, av as ImageProvider, aU as ImageProviderStreamResult, aR as ImageRequest, aT as ImageResponse, aN as ImageResult, m as ImageSource, q as ImageSourceType, aO as ImageStreamEvent, aP as ImageStreamResult, aM as ImageUsage, aH as InferenceInput, J as JSONSchema, j as JSONSchemaProperty, k as JSONSchemaPropertyType, ar as KeyStrategy, aC as LLMCapabilities, at as LLMProvider, aD as LLMRequest, aE as LLMResponse, aF as LLMStreamResult, X as Message, ad as MessageJSON, a4 as MessageMetadata, a5 as MessageOptions, $ as MessageRole, a3 as MessageType, i as Modality, h as ModalityType, aq as ProviderConfig, ap as ProviderIdentity, R as ReasoningBlock, as as RetryStrategy, ae as StreamEvent, ah as StreamEventType, ag as StreamResult, T as TextBlock, ab as Thread, ac as ThreadJSON, a7 as TokenUsage, G as Tool, H as ToolCall, W as ToolExecution, N as ToolMetadata, K as ToolResult, _ as ToolResultMessage, O as ToolUseStrategy, a6 as Turn, U as UPPError, n as UserContent, Y as UserMessage, aa as aggregateUsage, an as contentBlockStart, ao as contentBlockStop, ai as createStreamResult, a8 as createTurn, a9 as emptyUsage, a1 as isAssistantMessage, y as isAudioBlock, F as isBinaryBlock, x as isDocumentBlock, w as isImageBlock, v as isReasoningBlock, u as isTextBlock, a2 as isToolResultMessage, a0 as isUserMessage, z as isVideoBlock, al as messageStart, am as 
messageStop, s as reasoning, t as text, aj as textDelta, ak as toolCallDelta } from './llm-DgDEy9il.js';
3
+ import { E as EmbeddingOptions, a as EmbeddingInstance } from './embedding-DtyOFIsS.js';
4
+ export { b as EmbedOptions, c as Embedding, g as EmbeddingModelInput, e as EmbeddingProgress, d as EmbeddingResult, f as EmbeddingStream } from './embedding-DtyOFIsS.js';
5
+ export { D as DynamicKey, E as ExponentialBackoff, L as LinearBackoff, N as NoRetry, a as RetryAfterStrategy, R as RoundRobinKeys, T as TokenBucket, W as WeightedKeys } from './retry-DXLQnTuU.js';
512
6
 
513
7
  /**
514
8
  * @fileoverview LLM instance factory and streaming logic for the Universal Provider Protocol.
@@ -650,7 +144,7 @@ declare function image<TParams = unknown>(options: ImageOptions<TParams>): Image
650
144
  */
651
145
  interface LLMHandlerResolver<TOptions = unknown> {
652
146
  /** Map of mode identifiers to their corresponding LLM handlers */
653
- handlers: Record<string, LLMHandler$1>;
147
+ handlers: Record<string, LLMHandler>;
654
148
  /** The default mode when options don't specify one */
655
149
  defaultMode: string;
656
150
  /** Function to extract the mode from provider options */
@@ -706,7 +200,7 @@ interface CreateProviderOptions<TOptions = unknown> {
706
200
  /** Handlers for supported modalities (LLM, embedding, image generation) */
707
201
  handlers: {
708
202
  /** Handler for language model completions, or resolver for multi-handler providers */
709
- llm?: LLMHandler$1 | LLMHandlerResolver<TOptions>;
203
+ llm?: LLMHandler | LLMHandlerResolver<TOptions>;
710
204
  /** Handler for text embeddings */
711
205
  embedding?: EmbeddingHandler;
712
206
  /** Handler for image generation */
@@ -764,6 +258,428 @@ interface CreateProviderOptions<TOptions = unknown> {
764
258
  */
765
259
  declare function createProvider<TOptions = unknown>(options: CreateProviderOptions<TOptions>): Provider<TOptions>;
766
260
 
261
+ /**
262
+ * @fileoverview Document content handling for the Universal Provider Protocol.
263
+ *
264
+ * Provides a unified Document class for working with documents across different sources
265
+ * (file paths, URLs, raw text, base64). Supports PDF and plain text documents with
266
+ * integration into UPP message content blocks.
267
+ *
268
+ * @module core/media/Document
269
+ */
270
+
271
+ /**
272
+ * Represents a document that can be used in UPP messages.
273
+ *
274
+ * Documents can be created from various sources (files, URLs, text, base64) and
275
+ * converted to content blocks for provider APIs. The class provides a unified
276
+ * interface regardless of the underlying source type.
277
+ *
278
+ * @example
279
+ * ```typescript
280
+ * // Load PDF from file
281
+ * const pdfDoc = await Document.fromPath('./report.pdf');
282
+ *
283
+ * // Reference PDF by URL
284
+ * const urlDoc = Document.fromUrl('https://example.com/document.pdf');
285
+ *
286
+ * // From plain text
287
+ * const textDoc = Document.fromText('Document content here...');
288
+ *
289
+ * // Use in a message
290
+ * const message = new UserMessage([document.toBlock()]);
291
+ * ```
292
+ */
293
+ declare class Document {
294
+ /** The underlying document source (base64, url, or text) */
295
+ readonly source: DocumentSource;
296
+ /** MIME type of the document ('application/pdf' or 'text/plain') */
297
+ readonly mimeType: string;
298
+ /** Optional document title (used for citations) */
299
+ readonly title?: string;
300
+ private constructor();
301
+ /**
302
+ * Whether this document has data loaded in memory.
303
+ *
304
+ * Returns `false` for URL-sourced documents that reference external resources.
305
+ */
306
+ get hasData(): boolean;
307
+ /**
308
+ * Whether this document is a PDF.
309
+ */
310
+ get isPdf(): boolean;
311
+ /**
312
+ * Whether this document is plain text.
313
+ */
314
+ get isText(): boolean;
315
+ /**
316
+ * Converts the document to a base64-encoded string.
317
+ *
318
+ * @returns The document data as a base64 string
319
+ * @throws {Error} When the source is a URL or plain text
320
+ */
321
+ toBase64(): string;
322
+ /**
323
+ * Gets the plain text content for text documents.
324
+ *
325
+ * @returns The document text content
326
+ * @throws {Error} When the source is not plain text
327
+ */
328
+ toText(): string;
329
+ /**
330
+ * Gets the URL for URL-sourced documents.
331
+ *
332
+ * @returns The document URL
333
+ * @throws {Error} When the source is not a URL
334
+ */
335
+ toUrl(): string;
336
+ /**
337
+ * Converts this Document to a DocumentBlock for use in UPP messages.
338
+ *
339
+ * @returns A DocumentBlock that can be included in message content arrays
340
+ */
341
+ toBlock(): DocumentBlock;
342
+ /**
343
+ * Creates a Document by reading a file from disk.
344
+ *
345
+ * The file is read into memory and base64-encoded. MIME type is automatically
346
+ * detected from the file extension.
347
+ *
348
+ * @param path - Path to the document file
349
+ * @param title - Optional document title
350
+ * @returns Promise resolving to a Document with the file contents
351
+ *
352
+ * @example
353
+ * ```typescript
354
+ * const doc = await Document.fromPath('./reports/annual.pdf');
355
+ * const docWithTitle = await Document.fromPath('./report.pdf', 'Annual Report 2024');
356
+ * ```
357
+ */
358
+ static fromPath(path: string, title?: string): Promise<Document>;
359
+ /**
360
+ * Creates a Document from a URL reference.
361
+ *
362
+ * The URL is stored as a reference and not fetched. Providers will handle
363
+ * URL fetching if needed. Only PDF URLs are supported.
364
+ * URLs must use the http or https protocol.
365
+ *
366
+ * @param url - URL pointing to the PDF document
367
+ * @param title - Optional document title
368
+ * @returns A Document referencing the URL
369
+ *
370
+ * @example
371
+ * ```typescript
372
+ * const doc = Document.fromUrl('https://example.com/report.pdf');
373
+ * ```
374
+ */
375
+ static fromUrl(url: string, title?: string): Document;
376
+ /**
377
+ * Creates a Document from base64-encoded data.
378
+ *
379
+ * @param base64 - The base64-encoded document data
380
+ * @param mimeType - The MIME type ('application/pdf' or 'text/plain')
381
+ * @param title - Optional document title
382
+ * @returns A Document containing the base64 data
383
+ *
384
+ * @example
385
+ * ```typescript
386
+ * const doc = Document.fromBase64(pdfBase64, 'application/pdf', 'Contract');
387
+ * ```
388
+ */
389
+ static fromBase64(base64: string, mimeType: string, title?: string): Document;
390
+ /**
391
+ * Creates a Document from plain text content.
392
+ *
393
+ * @param text - The document text content
394
+ * @param title - Optional document title
395
+ * @returns A Document containing the text
396
+ *
397
+ * @example
398
+ * ```typescript
399
+ * const doc = Document.fromText('This is the document content.', 'Notes');
400
+ * ```
401
+ */
402
+ static fromText(text: string, title?: string): Document;
403
+ /**
404
+ * Creates a Document from an existing DocumentBlock.
405
+ *
406
+ * Useful for converting content blocks received from providers back
407
+ * into Document instances for further processing.
408
+ *
409
+ * @param block - A DocumentBlock from message content
410
+ * @returns A Document with the block's source and metadata
411
+ */
412
+ static fromBlock(block: DocumentBlock): Document;
413
+ }
414
+
415
/**
 * @fileoverview Audio content handling for the Universal Provider Protocol.
 *
 * Provides a unified Audio class for working with audio across different sources
 * (file paths, raw bytes, base64). Supports conversion between formats and
 * integration with UPP message content blocks.
 *
 * @module core/media/Audio
 */

/**
 * Represents an audio file that can be used in UPP messages.
 *
 * Audio can be created from various sources (files, bytes, base64) and
 * converted to different formats as needed by providers. The class provides
 * a unified interface regardless of the underlying source type.
 *
 * Note: Providers have size limits for inline audio data. Google Gemini
 * limits inline data to 20MB per request. For larger files, consider using
 * provider-specific file upload APIs.
 *
 * @example
 * ```typescript
 * // Load from file
 * const fileAudio = await Audio.fromPath('./recording.mp3');
 *
 * // From raw bytes
 * const bytesAudio = Audio.fromBytes(uint8Array, 'audio/wav');
 *
 * // Use in a message
 * const message = new UserMessage([audio.toBlock()]);
 * ```
 */
declare class Audio {
    /** The audio data as raw bytes */
    readonly data: Uint8Array;
    /** MIME type of the audio (e.g., 'audio/mp3', 'audio/wav') */
    readonly mimeType: string;
    /** Duration in seconds, if known */
    readonly duration?: number;
    /** Not directly constructible — use the static factory methods (fromPath, fromBytes, fromBase64, fromBlock). */
    private constructor();
    /**
     * Gets the size of the audio data in bytes.
     */
    get size(): number;
    /**
     * Converts the audio to a base64-encoded string.
     *
     * @returns The audio data as a base64 string
     */
    toBase64(): string;
    /**
     * Converts the audio to a data URL suitable for embedding.
     *
     * @returns A data URL in the format `data:{mimeType};base64,{data}`
     */
    toDataUrl(): string;
    /**
     * Gets the audio data as raw bytes.
     *
     * @returns The audio data as a Uint8Array
     */
    toBytes(): Uint8Array;
    /**
     * Converts this Audio to an AudioBlock for use in UPP messages.
     *
     * @returns An AudioBlock that can be included in message content arrays
     */
    toBlock(): AudioBlock;
    /**
     * Creates an Audio by reading a file from disk.
     *
     * The file is read into memory as bytes. MIME type is automatically
     * detected from the file extension.
     *
     * @param path - Path to the audio file
     * @param duration - Optional duration in seconds
     * @returns Promise resolving to an Audio with the file contents
     *
     * @example
     * ```typescript
     * const audio = await Audio.fromPath('./recordings/interview.mp3');
     * ```
     */
    static fromPath(path: string, duration?: number): Promise<Audio>;
    /**
     * Creates an Audio from raw byte data.
     *
     * @param data - The audio data as a Uint8Array
     * @param mimeType - The MIME type of the audio
     * @param duration - Optional duration in seconds
     * @returns An Audio containing the byte data
     *
     * @example
     * ```typescript
     * const audio = Audio.fromBytes(wavData, 'audio/wav');
     * ```
     */
    static fromBytes(data: Uint8Array, mimeType: string, duration?: number): Audio;
    /**
     * Creates an Audio from a base64-encoded string.
     *
     * @param base64 - The base64-encoded audio data (without data URL prefix)
     * @param mimeType - The MIME type of the audio
     * @param duration - Optional duration in seconds
     * @returns An Audio containing the decoded data
     *
     * @example
     * ```typescript
     * const audio = Audio.fromBase64(base64String, 'audio/mp3');
     * ```
     */
    static fromBase64(base64: string, mimeType: string, duration?: number): Audio;
    /**
     * Creates an Audio from an existing AudioBlock.
     *
     * Useful for converting content blocks received from providers back
     * into Audio instances for further processing.
     *
     * @param block - An AudioBlock from message content
     * @returns An Audio with the block's data and metadata
     */
    static fromBlock(block: AudioBlock): Audio;
}

540
/**
 * @fileoverview Video content handling for the Universal Provider Protocol.
 *
 * Provides a unified Video class for working with video across different sources
 * (file paths, raw bytes, base64). Supports conversion between formats and
 * integration with UPP message content blocks.
 *
 * @module core/media/Video
 */

/**
 * Represents a video file that can be used in UPP messages.
 *
 * Video can be created from various sources (files, bytes, base64) and
 * converted to different formats as needed by providers. The class provides
 * a unified interface regardless of the underlying source type.
 *
 * Note: Providers have size limits for inline video data. Google Gemini
 * limits inline data to 20MB per request. For larger files, consider using
 * provider-specific file upload APIs.
 *
 * @example
 * ```typescript
 * // Load from file
 * const fileVideo = await Video.fromPath('./clip.mp4');
 *
 * // From raw bytes
 * const bytesVideo = Video.fromBytes(uint8Array, 'video/webm');
 *
 * // Use in a message
 * const message = new UserMessage([video.toBlock()]);
 * ```
 */
declare class Video {
    /** The video data as raw bytes */
    readonly data: Uint8Array;
    /** MIME type of the video (e.g., 'video/mp4', 'video/webm') */
    readonly mimeType: string;
    /** Duration in seconds, if known */
    readonly duration?: number;
    /** Video width in pixels, if known */
    readonly width?: number;
    /** Video height in pixels, if known */
    readonly height?: number;
    /** Not directly constructible — use the static factory methods (fromPath, fromBytes, fromBase64, fromBlock). */
    private constructor();
    /**
     * Gets the size of the video data in bytes.
     */
    get size(): number;
    /**
     * Converts the video to a base64-encoded string.
     *
     * @returns The video data as a base64 string
     */
    toBase64(): string;
    /**
     * Converts the video to a data URL suitable for embedding.
     *
     * @returns A data URL in the format `data:{mimeType};base64,{data}`
     */
    toDataUrl(): string;
    /**
     * Gets the video data as raw bytes.
     *
     * @returns The video data as a Uint8Array
     */
    toBytes(): Uint8Array;
    /**
     * Converts this Video to a VideoBlock for use in UPP messages.
     *
     * @returns A VideoBlock that can be included in message content arrays
     */
    toBlock(): VideoBlock;
    /**
     * Creates a Video by reading a file from disk.
     *
     * The file is read into memory as bytes. MIME type is automatically
     * detected from the file extension.
     *
     * @param path - Path to the video file
     * @param options - Optional metadata (duration, width, height)
     * @returns Promise resolving to a Video with the file contents
     *
     * @example
     * ```typescript
     * const video = await Video.fromPath('./clips/demo.mp4');
     * const videoWithMeta = await Video.fromPath('./clip.mp4', { duration: 30, width: 1920, height: 1080 });
     * ```
     */
    static fromPath(path: string, options?: {
        duration?: number;
        width?: number;
        height?: number;
    }): Promise<Video>;
    /**
     * Creates a Video from raw byte data.
     *
     * @param data - The video data as a Uint8Array
     * @param mimeType - The MIME type of the video
     * @param options - Optional metadata (duration, width, height)
     * @returns A Video containing the byte data
     *
     * @example
     * ```typescript
     * const video = Video.fromBytes(mp4Data, 'video/mp4');
     * const videoWithMeta = Video.fromBytes(data, 'video/mp4', { duration: 60 });
     * ```
     */
    static fromBytes(data: Uint8Array, mimeType: string, options?: {
        duration?: number;
        width?: number;
        height?: number;
    }): Video;
    /**
     * Creates a Video from a base64-encoded string.
     *
     * @param base64 - The base64-encoded video data (without data URL prefix)
     * @param mimeType - The MIME type of the video
     * @param options - Optional metadata (duration, width, height)
     * @returns A Video containing the decoded data
     *
     * @example
     * ```typescript
     * const video = Video.fromBase64(base64String, 'video/mp4');
     * ```
     */
    static fromBase64(base64: string, mimeType: string, options?: {
        duration?: number;
        width?: number;
        height?: number;
    }): Video;
    /**
     * Creates a Video from an existing VideoBlock.
     *
     * Useful for converting content blocks received from providers back
     * into Video instances for further processing.
     *
     * @param block - A VideoBlock from message content
     * @returns A Video with the block's data and metadata
     */
    static fromBlock(block: VideoBlock): Video;
}

767
683
  /**
768
684
  * @fileoverview Unified Provider Protocol (UPP) - A unified interface for AI model inference
769
685
  *
@@ -822,4 +738,4 @@ declare const ai: {
822
738
  image: typeof image;
823
739
  };
824
740
 
825
- export { AssistantContent, AssistantMessage, type BoundLLMModel, ContentBlock, EmbeddingHandler, EmbeddingInstance, EmbeddingOptions, ImageInstance, ImageOptions, type InferenceInput, JSONSchema, type LLMCapabilities, type LLMHandler, type LLMInstance, type LLMOptions, LLMProvider, type LLMRequest, type LLMResponse, type LLMStreamResult, Message, MessageJSON, MessageType, ModelReference, Provider, ProviderConfig, ProviderIdentity, StreamEvent, StreamResult, Thread, type ThreadJSON, TokenUsage, Tool, ToolUseStrategy, Turn, UserContent, ai, createProvider, embedding, image, llm };
741
+ export { Audio, AudioBlock, Document, DocumentBlock, DocumentSource, EmbeddingHandler, EmbeddingInstance, EmbeddingOptions, ImageHandler, ImageInstance, ImageOptions, LLMHandler, LLMInstance, LLMOptions, ModelReference, Provider, Video, VideoBlock, ai, createProvider, embedding, image, llm };