ai 5.0.0-canary.9 → 5.0.0

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
@@ -1,573 +1,14 @@
- import { SharedV2ProviderOptions, LanguageModelV2Usage, LanguageModelV2Source, JSONValue as JSONValue$1, JSONObject, LanguageModelV2FunctionTool, LanguageModelV2ProviderDefinedTool, LanguageModelV2ToolChoice, LanguageModelV2Prompt } from '@ai-sdk/provider';
- import { z } from 'zod';
- import { ToolCall, ToolResult, Validator } from '@ai-sdk/provider-utils';
- import { JSONSchema7 } from 'json-schema';
+ import { ModelMessage, Tool } from '@ai-sdk/provider-utils';
+ export { convertAsyncIteratorToReadableStream } from '@ai-sdk/provider-utils';
+ import { LanguageModelV2Prompt, LanguageModelV2FunctionTool, LanguageModelV2ProviderDefinedTool, LanguageModelV2ToolChoice } from '@ai-sdk/provider';
 
- type ToolResultContent = Array<{
- type: 'text';
- text: string;
- } | {
- type: 'image';
- data: string;
- mediaType?: string;
- /**
- * @deprecated Use `mediaType` instead.
- */
- mimeType?: string;
+ declare function download({ url }: {
+ url: URL;
+ }): Promise<{
+ data: Uint8Array;
+ mediaType: string | undefined;
  }>;
 
- /**
- Additional provider-specific options.
-
- They are passed through to the provider from the AI SDK and enable
- provider-specific functionality that can be fully encapsulated in the provider.
- */
- type ProviderOptions = SharedV2ProviderOptions;
-
- /**
- Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
- */
- type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
-
- /**
- Text content part of a prompt. It contains a string of text.
- */
- interface TextPart {
- type: 'text';
- /**
- The text content.
- */
- text: string;
- /**
- Additional provider-specific metadata. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- }
- /**
- Image content part of a prompt. It contains an image.
- */
- interface ImagePart {
- type: 'image';
- /**
- Image data. Can either be:
-
- - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
- - URL: a URL that points to the image
- */
- image: DataContent | URL;
- /**
- Optional IANA media type of the image.
-
- @see https://www.iana.org/assignments/media-types/media-types.xhtml
- */
- mediaType?: string;
- /**
- @deprecated Use `mediaType` instead.
- */
- mimeType?: string;
- /**
- Additional provider-specific metadata. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- }
- /**
- File content part of a prompt. It contains a file.
- */
- interface FilePart {
- type: 'file';
- /**
- File data. Can either be:
-
- - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
- - URL: a URL that points to the image
- */
- data: DataContent | URL;
- /**
- Optional filename of the file.
- */
- filename?: string;
- /**
- IANA media type of the file.
-
- @see https://www.iana.org/assignments/media-types/media-types.xhtml
- */
- mediaType: string;
- /**
- @deprecated Use `mediaType` instead.
- */
- mimeType?: string;
- /**
- Additional provider-specific metadata. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- }
- /**
- * Reasoning content part of a prompt. It contains a reasoning.
- */
- interface ReasoningPart {
- type: 'reasoning';
- /**
- The reasoning text.
- */
- text: string;
- /**
- An optional signature for verifying that the reasoning originated from the model.
- */
- signature?: string;
- /**
- Additional provider-specific metadata. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- }
- /**
- Redacted reasoning content part of a prompt.
- */
- interface RedactedReasoningPart {
- type: 'redacted-reasoning';
- /**
- Redacted reasoning data.
- */
- data: string;
- /**
- Additional provider-specific metadata. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- }
- /**
- Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
- */
- interface ToolCallPart {
- type: 'tool-call';
- /**
- ID of the tool call. This ID is used to match the tool call with the tool result.
- */
- toolCallId: string;
- /**
- Name of the tool that is being called.
- */
- toolName: string;
- /**
- Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
- */
- args: unknown;
- /**
- Additional provider-specific metadata. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- }
- /**
- Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
- */
- interface ToolResultPart {
- type: 'tool-result';
- /**
- ID of the tool call that this result is associated with.
- */
- toolCallId: string;
- /**
- Name of the tool that generated this result.
- */
- toolName: string;
- /**
- Result of the tool call. This is a JSON-serializable object.
- */
- result: unknown;
- /**
- Multi-part content of the tool result. Only for tools that support multipart results.
- */
- experimental_content?: ToolResultContent;
- /**
- Optional flag if the result is an error or an error message.
- */
- isError?: boolean;
- /**
- Additional provider-specific metadata. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- }
-
- /**
- A system message. It can contain system information.
-
- Note: using the "system" part of the prompt is strongly preferred
- to increase the resilience against prompt injection attacks,
- and because not all providers support several system messages.
- */
- type CoreSystemMessage = {
- role: 'system';
- content: string;
- /**
- Additional provider-specific metadata. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- };
- /**
- A user message. It can contain text or a combination of text and images.
- */
- type CoreUserMessage = {
- role: 'user';
- content: UserContent;
- /**
- Additional provider-specific metadata. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- };
- /**
- Content of a user message. It can be a string or an array of text and image parts.
- */
- type UserContent = string | Array<TextPart | ImagePart | FilePart>;
- /**
- An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
- */
- type CoreAssistantMessage = {
- role: 'assistant';
- content: AssistantContent;
- /**
- Additional provider-specific metadata. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- };
- /**
- Content of an assistant message.
- It can be a string or an array of text, image, reasoning, redacted reasoning, and tool call parts.
- */
- type AssistantContent = string | Array<TextPart | FilePart | ReasoningPart | RedactedReasoningPart | ToolCallPart>;
- /**
- A tool message. It contains the result of one or more tool calls.
- */
- type CoreToolMessage = {
- role: 'tool';
- content: ToolContent;
- /**
- Additional provider-specific metadata. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
- };
- /**
- Content of a tool message. It is an array of tool result parts.
- */
- type ToolContent = Array<ToolResultPart>;
- /**
- A message that can be used in the `messages` field of a prompt.
- It can be a user message, an assistant message, or a tool message.
- */
- type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
-
- /**
- Tool choice for the generation. It supports the following settings:
-
- - `auto` (default): the model can choose whether and which tools to call.
- - `required`: the model must call a tool. It can choose which tool to call.
- - `none`: the model must not call tools
- - `{ type: 'tool', toolName: string (typed) }`: the model must call the specified tool
- */
- type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
- type: 'tool';
- toolName: Extract<keyof TOOLS, string>;
- };
-
- /**
- Represents the number of tokens used in a prompt and completion.
- */
- type LanguageModelUsage = {
- /**
- The number of tokens used in the prompt.
- */
- promptTokens: number;
- /**
- The number of tokens used in the completion.
- */
- completionTokens: number;
- /**
- The total number of tokens used (promptTokens + completionTokens).
- */
- totalTokens: number;
- };
- declare function calculateLanguageModelUsage({ inputTokens, outputTokens, }: LanguageModelV2Usage): LanguageModelUsage;
-
- /**
- Tool invocations are either tool calls or tool results. For each assistant tool call,
- there is one tool invocation. While the call is in progress, the invocation is a tool call.
- Once the call is complete, the invocation is a tool result.
-
- The step is used to track how to map an assistant UI message with many tool invocations
- back to a sequence of LLM assistant/tool result message pairs.
- It is optional for backwards compatibility.
- */
- type ToolInvocation = ({
- state: 'partial-call';
- step?: number;
- } & ToolCall<string, any>) | ({
- state: 'call';
- step?: number;
- } & ToolCall<string, any>) | ({
- state: 'result';
- step?: number;
- } & ToolResult<string, any, any>);
- /**
- * An attachment that can be sent along with a message.
- */
- interface Attachment {
- /**
- * The name of the attachment, usually the file name.
- */
- name?: string;
- /**
- * A string indicating the [media type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type).
- * By default, it's extracted from the pathname's extension.
- */
- contentType?: string;
- /**
- * The URL of the attachment. It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs).
- */
- url: string;
- }
- /**
- * AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
- */
- interface Message {
- /**
- A unique identifier for the message.
- */
- id: string;
- /**
- The timestamp of the message.
- */
- createdAt?: Date;
- /**
- Text content of the message. Use parts when possible.
- */
- content: string;
- /**
- Reasoning for the message.
-
- @deprecated Use `parts` instead.
- */
- reasoning?: string;
- /**
- * Additional attachments to be sent along with the message.
- */
- experimental_attachments?: Attachment[];
- /**
- The 'data' role is deprecated.
- */
- role: 'system' | 'user' | 'assistant' | 'data';
- /**
- For data messages.
-
- @deprecated Data messages will be removed.
- */
- data?: JSONValue;
- /**
- * Additional message-specific information added on the server via StreamData
- */
- annotations?: JSONValue[] | undefined;
- /**
- Tool invocations (that can be tool calls or tool results, depending on whether or not the invocation has finished)
- that the assistant made as part of this message.
-
- @deprecated Use `parts` instead.
- */
- toolInvocations?: Array<ToolInvocation>;
- /**
- * The parts of the message. Use this for rendering the message in the UI.
- *
- * Assistant messages can have text, reasoning and tool invocation parts.
- * User messages can have text parts.
- */
- parts?: Array<TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUIPart | FileUIPart | StepStartUIPart>;
- }
- /**
- * A text part of a message.
- */
- type TextUIPart = {
- type: 'text';
- /**
- * The text content.
- */
- text: string;
- };
- /**
- * A reasoning part of a message.
- */
- type ReasoningUIPart = {
- type: 'reasoning';
- /**
- * The reasoning text.
- */
- reasoning: string;
- details: Array<{
- type: 'text';
- text: string;
- signature?: string;
- } | {
- type: 'redacted';
- data: string;
- }>;
- };
- /**
- * A tool invocation part of a message.
- */
- type ToolInvocationUIPart = {
- type: 'tool-invocation';
- /**
- * The tool invocation.
- */
- toolInvocation: ToolInvocation;
- };
- /**
- * A source part of a message.
- */
- type SourceUIPart = {
- type: 'source';
- /**
- * The source.
- */
- source: LanguageModelV2Source;
- };
- /**
- * A file part of a message.
- */
- type FileUIPart = {
- type: 'file';
- /**
- * IANA media type of the file.
- *
- * @see https://www.iana.org/assignments/media-types/media-types.xhtml
- */
- mediaType: string;
- /**
- * The base64 encoded data.
- */
- data: string;
- };
- /**
- * A step boundary part of a message.
- */
- type StepStartUIPart = {
- type: 'step-start';
- };
- /**
- A JSON value can be a string, number, boolean, object, array, or null.
- JSON values can be serialized and deserialized by the JSON.stringify and JSON.parse methods.
- */
- type JSONValue = null | string | number | boolean | {
- [value: string]: JSONValue;
- } | Array<JSONValue>;
-
- /**
- * Used to mark schemas so we can support both Zod and custom schemas.
- */
- declare const schemaSymbol: unique symbol;
- type Schema<OBJECT = unknown> = Validator<OBJECT> & {
- /**
- * Used to mark schemas so we can support both Zod and custom schemas.
- */
- [schemaSymbol]: true;
- /**
- * Schema type for inference.
- */
- _type: OBJECT;
- /**
- * The JSON Schema for the schema. It is passed to the providers.
- */
- readonly jsonSchema: JSONSchema7;
- };
-
- type ToolParameters<T = JSONObject> = z.Schema<T> | Schema<T>;
- interface ToolExecutionOptions {
- /**
- * The ID of the tool call. You can use it e.g. when sending tool-call related information with stream data.
- */
- toolCallId: string;
- /**
- * Messages that were sent to the language model to initiate the response that contained the tool call.
- * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
- */
- messages: CoreMessage[];
- /**
- * An optional abort signal that indicates that the overall operation should be aborted.
- */
- abortSignal?: AbortSignal;
- }
- type NeverOptional<N, T> = 0 extends 1 & N ? Partial<T> : [N] extends [never] ? Partial<Record<keyof T, undefined>> : T;
- /**
- A tool contains the description and the schema of the input that the tool expects.
- This enables the language model to generate the input.
-
- The tool can also contain an optional execute function for the actual execution function of the tool.
- */
- type Tool<PARAMETERS extends JSONValue$1 | unknown | never = any, RESULT = any> = {
- /**
- An optional description of what the tool does.
- Will be used by the language model to decide whether to use the tool.
- Not used for provider-defined tools.
- */
- description?: string;
- } & NeverOptional<PARAMETERS, {
- /**
- The schema of the input that the tool expects. The language model will use this to generate the input.
- It is also used to validate the output of the language model.
- Use descriptions to make the input understandable for the language model.
- */
- parameters: ToolParameters<PARAMETERS>;
- }> & NeverOptional<RESULT, {
- /**
- An async function that is called with the arguments from the tool call and produces a result.
- If not provided, the tool will not be executed automatically.
-
- @args is the input of the tool call.
- @options.abortSignal is a signal that can be used to abort the tool call.
- */
- execute: (args: [PARAMETERS] extends [never] ? undefined : PARAMETERS, options: ToolExecutionOptions) => PromiseLike<RESULT>;
- /**
- Optional conversion function that maps the tool result to multi-part tool content for LLMs.
- */
- experimental_toToolResultContent?: (result: RESULT) => ToolResultContent;
- }> & ({
- /**
- Function tool.
- */
- type?: undefined | 'function';
- } | {
- /**
- Provider-defined tool.
- */
- type: 'provider-defined';
- /**
- The ID of the tool. Should follow the format `<provider-name>.<tool-name>`.
- */
- id: `${string}.${string}`;
- /**
- The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
- */
- args: Record<string, unknown>;
- });
-
- type ToolSet = Record<string, (Tool<never, never> | Tool<any, any> | Tool<any, never> | Tool<never, any>) & Pick<Tool<any, any>, 'execute'>>;
-
  /**
  Prompt part of the AI function options.
  It contains a system message, a simple text prompt, or a list of messages.
@@ -578,21 +19,20 @@ type Prompt = {
  */
  system?: string;
  /**
- A simple text prompt. You can either use `prompt` or `messages` but not both.
- */
- prompt?: string;
+ A prompt. It can be either a text prompt or a list of messages.
+
+ You can either use `prompt` or `messages` but not both.
+ */
+ prompt?: string | Array<ModelMessage>;
  /**
- A list of messages. You can either use `prompt` or `messages` but not both.
+ A list of messages.
+
+ You can either use `prompt` or `messages` but not both.
  */
- messages?: Array<CoreMessage> | Array<Omit<Message, 'id'>>;
+ messages?: Array<ModelMessage>;
  };
 
  type StandardizedPrompt = {
- /**
- * Original prompt type. This is forwarded to the providers and can be used
- * to write send raw text to providers that support it.
- */
- type: 'prompt' | 'messages';
  /**
  * System message.
  */
@@ -600,12 +40,15 @@ type StandardizedPrompt = {
  /**
  * Messages.
  */
- messages: CoreMessage[];
+ messages: ModelMessage[];
  };
- declare function standardizePrompt<TOOLS extends ToolSet>({ prompt, tools, }: {
- prompt: Prompt;
- tools: undefined | TOOLS;
- }): StandardizedPrompt;
+ declare function standardizePrompt(prompt: Prompt): Promise<StandardizedPrompt>;
+
+ declare function convertToLanguageModelPrompt({ prompt, supportedUrls, downloadImplementation, }: {
+ prompt: StandardizedPrompt;
+ supportedUrls: Record<string, RegExp[]>;
+ downloadImplementation?: typeof download;
+ }): Promise<LanguageModelV2Prompt>;
 
  type CallSettings = {
  /**
@@ -613,12 +56,9 @@ type CallSettings = {
  */
  maxOutputTokens?: number;
  /**
- Temperature setting. This is a number between 0 (almost no randomness) and
- 1 (very random).
+ Temperature setting. The range depends on the provider and model.
 
  It is recommended to set either `temperature` or `topP`, but not both.
-
- @default 0
  */
  temperature?: number;
  /**
@@ -681,6 +121,21 @@ type CallSettings = {
  headers?: Record<string, string | undefined>;
  };
 
+ /**
+ Tool choice for the generation. It supports the following settings:
+
+ - `auto` (default): the model can choose whether and which tools to call.
+ - `required`: the model must call a tool. It can choose which tool to call.
+ - `none`: the model must not call tools
+ - `{ type: 'tool', toolName: string (typed) }`: the model must call the specified tool
+ */
+ type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
+ type: 'tool';
+ toolName: Extract<keyof TOOLS, string>;
+ };
+
+ type ToolSet = Record<string, (Tool<never, never> | Tool<any, any> | Tool<any, never> | Tool<never, any>) & Pick<Tool<any, any>, 'execute' | 'onInputAvailable' | 'onInputStart' | 'onInputDelta'>>;
+
  declare function prepareToolsAndToolChoice<TOOLS extends ToolSet>({ tools, toolChoice, activeTools, }: {
  tools: TOOLS | undefined;
  toolChoice: ToolChoice<TOOLS> | undefined;
@@ -690,41 +145,22 @@ declare function prepareToolsAndToolChoice<TOOLS extends ToolSet>({ tools, toolC
  toolChoice: LanguageModelV2ToolChoice | undefined;
  };
 
+ /**
+ * Validates call settings and returns a new object with limited values.
+ */
+ declare function prepareCallSettings({ maxOutputTokens, temperature, topP, topK, presencePenalty, frequencyPenalty, seed, stopSequences, }: Omit<CallSettings, 'abortSignal' | 'headers' | 'maxRetries'>): Omit<CallSettings, 'abortSignal' | 'headers' | 'maxRetries'>;
+
  type RetryFunction = <OUTPUT>(fn: () => PromiseLike<OUTPUT>) => PromiseLike<OUTPUT>;
 
  /**
  * Validate and prepare retries.
  */
- declare function prepareRetries({ maxRetries, }: {
+ declare function prepareRetries({ maxRetries, abortSignal, }: {
  maxRetries: number | undefined;
+ abortSignal: AbortSignal | undefined;
  }): {
  maxRetries: number;
  retry: RetryFunction;
  };
 
- /**
- * Validates call settings and sets default values.
- */
- declare function prepareCallSettings({ maxOutputTokens, temperature, topP, topK, presencePenalty, frequencyPenalty, stopSequences, seed, }: Omit<CallSettings, 'abortSignal' | 'headers' | 'maxRetries'>): Omit<CallSettings, 'abortSignal' | 'headers' | 'maxRetries'>;
-
- declare function download({ url }: {
- url: URL;
- }): Promise<{
- data: Uint8Array;
- mediaType: string | undefined;
- }>;
-
- declare function convertToLanguageModelPrompt({ prompt, modelSupportsImageUrls, modelSupportsUrl, downloadImplementation, }: {
- prompt: StandardizedPrompt;
- modelSupportsImageUrls: boolean | undefined;
- modelSupportsUrl: undefined | ((url: URL) => boolean);
- downloadImplementation?: typeof download;
- }): Promise<LanguageModelV2Prompt>;
-
- /**
- * Warning time for notifying developers that a stream is hanging in dev mode
- * using a console.warn.
- */
- declare const HANGING_STREAM_WARNING_TIME_MS: number;
-
- export { HANGING_STREAM_WARNING_TIME_MS, calculateLanguageModelUsage, convertToLanguageModelPrompt, prepareCallSettings, prepareRetries, prepareToolsAndToolChoice, standardizePrompt };
+ export { convertToLanguageModelPrompt, prepareCallSettings, prepareRetries, prepareToolsAndToolChoice, standardizePrompt };
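
For orientation, the most visible change in this diff is the prompt pipeline: Prompt.prompt now accepts either a string or an Array<ModelMessage>, standardizePrompt is async and no longer takes a tools argument, and convertToLanguageModelPrompt replaces the old modelSupportsImageUrls / modelSupportsUrl flags with a supportedUrls map. The TypeScript sketch below is illustrative only and not part of the diff: the 'ai/internal' import path, the 'image/*' key, and the example values are assumptions, since these declarations describe an internal module of the package.

import type { ModelMessage } from '@ai-sdk/provider-utils';
// Assumed import path; these are internal helpers and may not be publicly exported.
import { standardizePrompt, convertToLanguageModelPrompt } from 'ai/internal';

async function buildLanguageModelPrompt() {
  const messages: Array<ModelMessage> = [
    { role: 'user', content: 'Summarize this page.' },
  ];

  // standardizePrompt(prompt) is now async and returns { system?, messages }
  // without the old `type: 'prompt' | 'messages'` discriminator.
  const standardized = await standardizePrompt({
    system: 'You are a terse assistant.',
    messages,
  });

  // URL support is declared per key via `supportedUrls` (Record<string, RegExp[]>);
  // `downloadImplementation` (typeof download) can optionally override fetching.
  return convertToLanguageModelPrompt({
    prompt: standardized,
    supportedUrls: { 'image/*': [/^https:\/\//] },
  });
}

Compare this with the removed 5.0.0-canary.9 signatures above, where the prompt type and URL-support flags were threaded through separately.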
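Similarly, prepareRetries now receives the abort signal alongside maxRetries. A minimal sketch of how the returned retry function (a RetryFunction) might be used, again with an assumed import path and example values:

import { prepareRetries } from 'ai/internal'; // assumed path

async function fetchWithRetries(url: string) {
  const controller = new AbortController();

  // Passing the signal lets retrying stop when the caller aborts the overall operation.
  const { maxRetries, retry } = prepareRetries({
    maxRetries: 2,
    abortSignal: controller.signal,
  });

  console.log(`retrying up to ${maxRetries} times`);

  // RetryFunction wraps an async operation and may re-invoke it when it fails.
  return retry(() =>
    fetch(url, { signal: controller.signal }).then(res => res.json()),
  );
}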