ai 5.0.0-canary.2 → 5.0.0-canary.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +30 -0
- package/dist/index.d.mts +154 -10
- package/dist/index.d.ts +154 -10
- package/dist/index.js +270 -147
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +226 -104
- package/dist/index.mjs.map +1 -1
- package/{rsc/dist/rsc-server.d.mts → dist/internal/index.d.mts} +212 -368
- package/dist/internal/index.d.ts +592 -0
- package/dist/internal/index.js +1429 -0
- package/dist/internal/index.js.map +1 -0
- package/{rsc/dist/rsc-server.mjs → dist/internal/index.mjs} +1034 -1777
- package/dist/internal/index.mjs.map +1 -0
- package/mcp-stdio/dist/index.js +1 -1
- package/mcp-stdio/dist/index.js.map +1 -1
- package/mcp-stdio/dist/index.mjs +1 -1
- package/mcp-stdio/dist/index.mjs.map +1 -1
- package/mcp-stdio/get-environment.test.ts +13 -0
- package/mcp-stdio/get-environment.ts +1 -1
- package/package.json +12 -27
- package/rsc/dist/index.d.ts +0 -813
- package/rsc/dist/index.mjs +0 -18
- package/rsc/dist/rsc-client.d.mts +0 -1
- package/rsc/dist/rsc-client.mjs +0 -18
- package/rsc/dist/rsc-client.mjs.map +0 -1
- package/rsc/dist/rsc-server.mjs.map +0 -1
- package/rsc/dist/rsc-shared.d.mts +0 -101
- package/rsc/dist/rsc-shared.mjs +0 -308
- package/rsc/dist/rsc-shared.mjs.map +0 -1
@@ -1,194 +1,20 @@
-import {
-import { ReactNode } from 'react';
+import { Schema, Message } from '@ai-sdk/ui-utils';
 import { z } from 'zod';
-import {
+import { LanguageModelV2ProviderMetadata, LanguageModelV2FunctionTool, LanguageModelV2ProviderDefinedTool, LanguageModelV2ToolChoice, LanguageModelV2Prompt } from '@ai-sdk/provider';
 
-type
-type
-
-
-
-
-
-    $ActionTypes?: Actions;
-};
-type AIProvider<AIState = any, UIState = any, Actions = any> = (props: AIProviderProps<AIState, UIState, Actions>) => Promise<React.ReactElement>;
-type InferAIState<T, Fallback> = T extends AIProvider<infer AIState, any, any> ? AIState : Fallback;
-type OnSetAIState<S> = ({ key, state, done, }: {
-    key: string | number | symbol | undefined;
-    state: S;
-    done: boolean;
-}) => void | Promise<void>;
-type OnGetUIState<S> = AIAction<void, S | undefined>;
-type ValueOrUpdater<T> = T | ((current: T) => T);
-type MutableAIState<AIState> = {
-    get: () => AIState;
-    update: (newState: ValueOrUpdater<AIState>) => void;
-    done: ((newState: AIState) => void) | (() => void);
-};
-
-/**
- * Get the current AI state.
- * If `key` is provided, it will return the value of the specified key in the
- * AI state, if it's an object. If it's not an object, it will throw an error.
- *
- * @example const state = getAIState() // Get the entire AI state
- * @example const field = getAIState('key') // Get the value of the key
- */
-declare function getAIState<AI extends AIProvider = any>(): Readonly<InferAIState<AI, any>>;
-declare function getAIState<AI extends AIProvider = any>(key: keyof InferAIState<AI, any>): Readonly<InferAIState<AI, any>[typeof key]>;
-/**
- * Get the mutable AI state. Note that you must call `.done()` when finishing
- * updating the AI state.
- *
- * @example
- * ```tsx
- * const state = getMutableAIState()
- * state.update({ ...state.get(), key: 'value' })
- * state.update((currentState) => ({ ...currentState, key: 'value' }))
- * state.done()
- * ```
- *
- * @example
- * ```tsx
- * const state = getMutableAIState()
- * state.done({ ...state.get(), key: 'value' }) // Done with a new state
- * ```
- */
-declare function getMutableAIState<AI extends AIProvider = any>(): MutableAIState<InferAIState<AI, any>>;
-declare function getMutableAIState<AI extends AIProvider = any>(key: keyof InferAIState<AI, any>): MutableAIState<InferAIState<AI, any>[typeof key]>;
-
-declare function createAI<AIState = any, UIState = any, Actions extends AIActions = {}>({ actions, initialAIState, initialUIState, onSetAIState, onGetUIState, }: {
-    actions: Actions;
-    initialAIState?: AIState;
-    initialUIState?: UIState;
-    /**
-     * This function is called whenever the AI state is updated by an Action.
-     * You can use this to persist the AI state to a database, or to send it to a
-     * logging service.
-     */
-    onSetAIState?: OnSetAIState<AIState>;
-    /**
-     * This function is used to retrieve the UI state based on the AI state.
-     * For example, to render the initial UI state based on a given AI state, or
-     * to sync the UI state when the application is already loaded.
-     *
-     * If returning `undefined`, the client side UI state will not be updated.
-     *
-     * This function must be annotated with the `"use server"` directive.
-     *
-     * @example
-     * ```tsx
-     * onGetUIState: async () => {
-     *   'use server';
-     *
-     *   const currentAIState = getAIState();
-     *   const externalAIState = await loadAIStateFromDatabase();
-     *
-     *   if (currentAIState === externalAIState) return undefined;
-     *
-     *   // Update current AI state and return the new UI state
-     *   const state = getMutableAIState()
-     *   state.done(externalAIState)
-     *
-     *   return <div>...</div>;
-     * }
-     * ```
-     */
-    onGetUIState?: OnGetUIState<UIState>;
-}): AIProvider<AIState, UIState, Actions>;
-
-type CallSettings = {
-    /**
-    Maximum number of tokens to generate.
-    */
-    maxTokens?: number;
-    /**
-    Temperature setting. This is a number between 0 (almost no randomness) and
-    1 (very random).
-
-    It is recommended to set either `temperature` or `topP`, but not both.
-
-    @default 0
-    */
-    temperature?: number;
-    /**
-    Nucleus sampling. This is a number between 0 and 1.
-
-    E.g. 0.1 would mean that only tokens with the top 10% probability mass
-    are considered.
-
-    It is recommended to set either `temperature` or `topP`, but not both.
-    */
-    topP?: number;
-    /**
-    Only sample from the top K options for each subsequent token.
-
-    Used to remove "long tail" low probability responses.
-    Recommended for advanced use cases only. You usually only need to use temperature.
-    */
-    topK?: number;
-    /**
-    Presence penalty setting. It affects the likelihood of the model to
-    repeat information that is already in the prompt.
-
-    The presence penalty is a number between -1 (increase repetition)
-    and 1 (maximum penalty, decrease repetition). 0 means no penalty.
-    */
-    presencePenalty?: number;
-    /**
-    Frequency penalty setting. It affects the likelihood of the model
-    to repeatedly use the same words or phrases.
-
-    The frequency penalty is a number between -1 (increase repetition)
-    and 1 (maximum penalty, decrease repetition). 0 means no penalty.
-    */
-    frequencyPenalty?: number;
-    /**
-    Stop sequences.
-    If set, the model will stop generating text when one of the stop sequences is generated.
-    Providers may have limits on the number of stop sequences.
-    */
-    stopSequences?: string[];
-    /**
-    The seed (integer) to use for random sampling. If set and supported
-    by the model, calls will generate deterministic results.
-    */
-    seed?: number;
-    /**
-    Maximum number of retries. Set to 0 to disable retries.
-
-    @default 2
-    */
-    maxRetries?: number;
-    /**
-    Abort signal.
-    */
-    abortSignal?: AbortSignal;
+type ToolResultContent = Array<{
+    type: 'text';
+    text: string;
+} | {
+    type: 'image';
+    data: string;
+    mediaType?: string;
     /**
-
-    Only applicable for HTTP-based providers.
+     * @deprecated Use `mediaType` instead.
     */
-
-}
-
-/**
-Reason why a language model finished generating a response.
+    mimeType?: string;
+}>;
 
-Can be one of the following:
-- `stop`: model generated stop sequence
-- `length`: model generated maximum number of tokens
-- `content-filter`: content filter violation stopped the model
-- `tool-calls`: model triggered tool calls
-- `error`: model stopped because of an error
-- `other`: model stopped for other reasons
-*/
-type FinishReason = LanguageModelV2FinishReason;
-/**
-Warning from the model provider for this call. The call will proceed, but e.g.
-some settings might not be supported, which can lead to suboptimal results.
-*/
-type CallWarning = LanguageModelV2CallWarning;
 /**
 Tool choice for the generation. It supports the following settings:
 
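The hunk above removes the RSC provider surface (`createAI`, `getAIState`, `getMutableAIState`) from this entry point and replaces the old top-of-file `CallSettings` block with a hoisted `ToolResultContent` type, whose image variant now carries `mediaType` alongside a deprecated `mimeType`. A minimal sketch of the new shape, assuming the declaration above is importable from the new internal entry point (the import specifier and base64 payload are placeholders):

```ts
import type { ToolResultContent } from 'ai/internal'; // hypothetical specifier

const content: ToolResultContent = [
  { type: 'text', text: 'Here is the chart:' },
  {
    type: 'image',
    data: 'iVBORw0KGgo...', // base64-encoded image bytes (placeholder)
    mediaType: 'image/png', // preferred; `mimeType` is deprecated as of canary.4
  },
];
```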
@@ -234,21 +60,16 @@ type LanguageModelUsage = {
     */
     totalTokens: number;
 };
+declare function calculateLanguageModelUsage({ promptTokens, completionTokens, }: {
+    promptTokens: number;
+    completionTokens: number;
+}): LanguageModelUsage;
 
 /**
 Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
 */
 type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
 
-type ToolResultContent = Array<{
-    type: 'text';
-    text: string;
-} | {
-    type: 'image';
-    data: string;
-    mimeType?: string;
-}>;
-
 /**
 Text content part of a prompt. It contains a string of text.
 */
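This hunk also surfaces `calculateLanguageModelUsage`. A sketch of its presumable use, based only on the signature above (that `totalTokens` is the sum of the two inputs is an assumption, not something this diff confirms):

```ts
const usage: LanguageModelUsage = calculateLanguageModelUsage({
  promptTokens: 120,
  completionTokens: 48,
});
// Presumably: { promptTokens: 120, completionTokens: 48, totalTokens: 168 }
```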
@@ -282,7 +103,13 @@ interface ImagePart {
     */
     image: DataContent | URL;
     /**
-    Optional
+    Optional IANA media type of the image.
+
+    @see https://www.iana.org/assignments/media-types/media-types.xhtml
+    */
+    mediaType?: string;
+    /**
+    @deprecated Use `mediaType` instead.
     */
     mimeType?: string;
     /**
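`ImagePart` gains an optional `mediaType` while keeping the deprecated `mimeType`. A migration sketch against the declaration above (the `type: 'image'` discriminator follows the published content-part interfaces, which this hunk only shows in part):

```ts
const imagePart: ImagePart = {
  type: 'image',
  image: new URL('https://example.com/photo.jpg'),
  mediaType: 'image/jpeg', // new field; use instead of the deprecated `mimeType`
};
```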
@@ -313,9 +140,15 @@ interface FilePart {
     */
     filename?: string;
     /**
-
+    IANA media type of the file.
+
+    @see https://www.iana.org/assignments/media-types/media-types.xhtml
     */
-
+    mediaType: string;
+    /**
+    @deprecated Use `mediaType` instead.
+    */
+    mimeType?: string;
     /**
     Additional provider-specific metadata. They are passed through
     to the provider from the AI SDK and enable provider-specific
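`FilePart` follows the same pattern, except `mediaType` is required there. A sketch (the `type: 'file'` discriminator and `data` field mirror the published `FilePart` interface, which this hunk only shows in part):

```ts
const filePart: FilePart = {
  type: 'file',
  data: new Uint8Array([0x25, 0x50, 0x44, 0x46]), // '%PDF' header bytes as a stand-in
  filename: 'report.pdf',
  mediaType: 'application/pdf', // required on FilePart as of canary.4
};
```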
@@ -527,6 +360,76 @@ It can be a user message, an assistant message, or a tool message.
 */
 type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
 
+type ToolParameters = z.ZodTypeAny | Schema<any>;
+type inferParameters<PARAMETERS extends ToolParameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
+interface ToolExecutionOptions {
+    /**
+     * The ID of the tool call. You can use it e.g. when sending tool-call related information with stream data.
+     */
+    toolCallId: string;
+    /**
+     * Messages that were sent to the language model to initiate the response that contained the tool call.
+     * The messages **do not** include the system prompt nor the assistant response that contained the tool call.
+     */
+    messages: CoreMessage[];
+    /**
+     * An optional abort signal that indicates that the overall operation should be aborted.
+     */
+    abortSignal?: AbortSignal;
+}
+/**
+A tool contains the description and the schema of the input that the tool expects.
+This enables the language model to generate the input.
+
+The tool can also contain an optional execute function for the actual execution function of the tool.
+*/
+type Tool<PARAMETERS extends ToolParameters = any, RESULT = any> = {
+    /**
+    The schema of the input that the tool expects. The language model will use this to generate the input.
+    It is also used to validate the output of the language model.
+    Use descriptions to make the input understandable for the language model.
+    */
+    parameters: PARAMETERS;
+    /**
+    An optional description of what the tool does.
+    Will be used by the language model to decide whether to use the tool.
+    Not used for provider-defined tools.
+    */
+    description?: string;
+    /**
+    Optional conversion function that maps the tool result to multi-part tool content for LLMs.
+    */
+    experimental_toToolResultContent?: (result: RESULT) => ToolResultContent;
+    /**
+    An async function that is called with the arguments from the tool call and produces a result.
+    If not provided, the tool will not be executed automatically.
+
+    @args is the input of the tool call.
+    @options.abortSignal is a signal that can be used to abort the tool call.
+    */
+    execute?: (args: inferParameters<PARAMETERS>, options: ToolExecutionOptions) => PromiseLike<RESULT>;
+} & ({
+    /**
+    Function tool.
+    */
+    type?: undefined | 'function';
+} | {
+    /**
+    Provider-defined tool.
+    */
+    type: 'provider-defined';
+    /**
+    The ID of the tool. Should follow the format `<provider-name>.<tool-name>`.
+    */
+    id: `${string}.${string}`;
+    /**
+    The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
+    */
+    args: Record<string, unknown>;
+});
+
+type ToolSet = Record<string, Tool>;
+
 /**
 Prompt part of the AI function options.
 It contains a system message, a simple text prompt, or a list of messages.
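The `Tool`, `ToolExecutionOptions`, and `ToolSet` declarations now live in this internal module. A minimal function-tool sketch against those declarations (the import specifier and the weather logic are illustrative only):

```ts
import { z } from 'zod';
import type { Tool, ToolSet } from 'ai/internal'; // hypothetical specifier

const weather: Tool<z.ZodObject<{ city: z.ZodString }>, string> = {
  description: 'Get the current weather for a city.',
  parameters: z.object({ city: z.string().describe('City name') }),
  // `options` carries toolCallId, messages, and an optional abortSignal
  // per ToolExecutionOptions above.
  execute: async ({ city }, options) => `Sunny in ${city} (call ${options.toolCallId})`,
};

const tools: ToolSet = { weather };
```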
@@ -546,203 +449,144 @@ type Prompt = {
     messages?: Array<CoreMessage> | Array<Omit<Message, 'id'>>;
 };
 
-type
-
-
-
-
-
-
-
-
-
-
+type StandardizedPrompt = {
+    /**
+     * Original prompt type. This is forwarded to the providers and can be used
+     * to write send raw text to providers that support it.
+     */
+    type: 'prompt' | 'messages';
+    /**
+     * System message.
+     */
+    system?: string;
+    /**
+     * Messages.
+     */
+    messages: CoreMessage[];
 };
-
-
-
-
-
-
-/**
- * The new appended text content from the model since the last `text` call.
- */
-delta: string;
-/**
- * Whether the model is done generating text.
- * If `true`, the `content` will be the final output and this call will be the last.
- */
-done: boolean;
-}
-]>;
-type RenderResult = {
-    value: ReactNode;
-} & Awaited<ReturnType<LanguageModelV2['doStream']>>;
-/**
- * `streamUI` is a helper function to create a streamable UI from LLMs.
- */
-declare function streamUI<TOOLS extends {
-    [name: string]: z.ZodTypeAny;
-} = {}>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, initial, text, experimental_providerMetadata, providerOptions, onFinish, ...settings }: CallSettings & Prompt & {
+declare function standardizePrompt<TOOLS extends ToolSet>({ prompt, tools, }: {
+    prompt: Prompt;
+    tools: undefined | TOOLS;
+}): StandardizedPrompt;
+
+type CallSettings = {
     /**
-
+    Maximum number of tokens to generate.
     */
-
+    maxTokens?: number;
     /**
-
+    Temperature setting. This is a number between 0 (almost no randomness) and
+    1 (very random).
+
+    It is recommended to set either `temperature` or `topP`, but not both.
+
+    @default 0
     */
-
-    [name in keyof TOOLS]: RenderTool<TOOLS[name]>;
-};
+    temperature?: number;
     /**
-
+    Nucleus sampling. This is a number between 0 and 1.
+
+    E.g. 0.1 would mean that only tokens with the top 10% probability mass
+    are considered.
+
+    It is recommended to set either `temperature` or `topP`, but not both.
     */
-
-    text?: RenderText;
-    initial?: ReactNode;
+    topP?: number;
     /**
-
-
-
-
-
+    Only sample from the top K options for each subsequent token.
+
+    Used to remove "long tail" low probability responses.
+    Recommended for advanced use cases only. You usually only need to use temperature.
+    */
+    topK?: number;
     /**
-
-
-
+    Presence penalty setting. It affects the likelihood of the model to
+    repeat information that is already in the prompt.
+
+    The presence penalty is a number between -1 (increase repetition)
+    and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+    */
+    presencePenalty?: number;
     /**
-
-
-
-
-
-
-
-    /**
-     * The token usage of the generated response.
-     */
-    usage: LanguageModelUsage;
-    /**
-     * The final ui node that was generated.
-     */
-    value: ReactNode;
-    /**
-     * Warnings from the model provider (e.g. unsupported settings)
-     */
-    warnings?: CallWarning[];
-    /**
-     * Optional raw response data.
-     */
-    rawResponse?: {
-        /**
-         * Response headers.
-         */
-        headers?: Record<string, string>;
-    };
-}) => Promise<void> | void;
-}): Promise<RenderResult>;
-
-type StreamableUIWrapper = {
+    Frequency penalty setting. It affects the likelihood of the model
+    to repeatedly use the same words or phrases.
+
+    The frequency penalty is a number between -1 (increase repetition)
+    and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+    */
+    frequencyPenalty?: number;
     /**
-
+    Stop sequences.
+    If set, the model will stop generating text when one of the stop sequences is generated.
+    Providers may have limits on the number of stop sequences.
     */
-
+    stopSequences?: string[];
     /**
-
+    The seed (integer) to use for random sampling. If set and supported
+    by the model, calls will generate deterministic results.
     */
-
+    seed?: number;
     /**
-
-
-
-     * @example
-     * ```jsx
-     * const ui = createStreamableUI(<div>hello</div>)
-     * ui.append(<div>world</div>)
-     *
-     * // The UI node will be:
-     * // <>
-     * //   <div>hello</div>
-     * //   <div>world</div>
-     * // </>
-     * ```
+    Maximum number of retries. Set to 0 to disable retries.
+
+    @default 2
     */
-
+    maxRetries?: number;
     /**
-
-     * It will be thrown on the client side and caught by the nearest error boundary component.
+    Abort signal.
     */
-
+    abortSignal?: AbortSignal;
     /**
-
-
-     *
-     * This method is always **required** to be called, otherwise the response will be stuck in a loading state.
+    Additional HTTP headers to be sent with the request.
+    Only applicable for HTTP-based providers.
     */
-
+    headers?: Record<string, string | undefined>;
+};
+
+declare function prepareToolsAndToolChoice<TOOLS extends ToolSet>({ tools, toolChoice, activeTools, }: {
+    tools: TOOLS | undefined;
+    toolChoice: ToolChoice<TOOLS> | undefined;
+    activeTools: Array<keyof TOOLS> | undefined;
+}): {
+    tools: Array<LanguageModelV2FunctionTool | LanguageModelV2ProviderDefinedTool> | undefined;
+    toolChoice: LanguageModelV2ToolChoice | undefined;
 };
+
+type RetryFunction = <OUTPUT>(fn: () => PromiseLike<OUTPUT>) => PromiseLike<OUTPUT>;
+
 /**
- *
- * On the client side, it can be rendered as a normal React node.
+ * Validate and prepare retries.
 */
-declare function
+declare function prepareRetries({ maxRetries, }: {
+    maxRetries: number | undefined;
+}): {
+    maxRetries: number;
+    retry: RetryFunction;
+};
 
-declare const __internal_curr: unique symbol;
-declare const __internal_error: unique symbol;
 /**
- *
- * To read the streamed values, use the `readStreamableValue` or `useStreamableValue` APIs.
+ * Validates call settings and sets default values.
 */
-
-
-
-
+declare function prepareCallSettings({ maxTokens, temperature, topP, topK, presencePenalty, frequencyPenalty, stopSequences, seed, }: Omit<CallSettings, 'abortSignal' | 'headers' | 'maxRetries'>): Omit<CallSettings, 'abortSignal' | 'headers' | 'maxRetries'>;
+
+declare function download({ url }: {
+    url: URL;
+}): Promise<{
+    data: Uint8Array;
+    mediaType: string | undefined;
+}>;
+
+declare function convertToLanguageModelPrompt({ prompt, modelSupportsImageUrls, modelSupportsUrl, downloadImplementation, }: {
+    prompt: StandardizedPrompt;
+    modelSupportsImageUrls: boolean | undefined;
+    modelSupportsUrl: undefined | ((url: URL) => boolean);
+    downloadImplementation?: typeof download;
+}): Promise<LanguageModelV2Prompt>;
 
 /**
- *
- *
+ * Warning time for notifying developers that a stream is hanging in dev mode
+ * using a console.warn.
 */
-declare
-type StreamableValueWrapper<T, E> = {
-    /**
-     * The value of the streamable. This can be returned from a Server Action and
-     * received by the client. To read the streamed values, use the
-     * `readStreamableValue` or `useStreamableValue` APIs.
-     */
-    readonly value: StreamableValue<T, E>;
-    /**
-     * This method updates the current value with a new one.
-     */
-    update(value: T): StreamableValueWrapper<T, E>;
-    /**
-     * This method is used to append a delta string to the current value. It
-     * requires the current value of the streamable to be a string.
-     *
-     * @example
-     * ```jsx
-     * const streamable = createStreamableValue('hello');
-     * streamable.append(' world');
-     *
-     * // The value will be 'hello world'
-     * ```
-     */
-    append(value: T): StreamableValueWrapper<T, E>;
-    /**
-     * This method is used to signal that there is an error in the value stream.
-     * It will be thrown on the client side when consumed via
-     * `readStreamableValue` or `useStreamableValue`.
-     */
-    error(error: any): StreamableValueWrapper<T, E>;
-    /**
-     * This method marks the value as finalized. You can either call it without
-     * any parameters or with a new value as the final state.
-     * Once called, the value cannot be updated or appended anymore.
-     *
-     * This method is always **required** to be called, otherwise the response
-     * will be stuck in a loading state.
-     */
-    done(...args: [T] | []): StreamableValueWrapper<T, E>;
-};
+declare const HANGING_STREAM_WARNING_TIME_MS: number;
 
-export {
+export { HANGING_STREAM_WARNING_TIME_MS, calculateLanguageModelUsage, convertToLanguageModelPrompt, prepareCallSettings, prepareRetries, prepareToolsAndToolChoice, standardizePrompt };
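Taken together, the new exports sketch out a prompt-preparation pipeline: standardize the prompt, validate settings, set up retries, then convert to a `LanguageModelV2Prompt`. How they chain is an assumption based only on the signatures above; argument values are illustrative:

```ts
async function prepareCall() {
  const standardized = standardizePrompt({
    prompt: { prompt: 'Summarize this document.' }, // Prompt also accepts system/messages
    tools: undefined,
  });

  const settings = prepareCallSettings({ temperature: 0, maxTokens: 1024 });
  const { maxRetries, retry } = prepareRetries({ maxRetries: 2 });

  const lmPrompt = await convertToLanguageModelPrompt({
    prompt: standardized,
    modelSupportsImageUrls: true,
    modelSupportsUrl: undefined, // undefined is allowed per the signature above
  });

  return { lmPrompt, settings, maxRetries, retry };
}
```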