ai 3.2.33 → 3.2.34
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +473 -418
- package/dist/index.d.ts +473 -418
- package/dist/index.js +329 -409
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +329 -403
- package/dist/index.mjs.map +1 -1
- package/package.json +9 -9
- package/rsc/dist/index.d.ts +13 -0
- package/rsc/dist/rsc-server.d.mts +13 -0
- package/rsc/dist/rsc-server.mjs +2 -0
- package/rsc/dist/rsc-server.mjs.map +1 -1
package/dist/index.d.mts
CHANGED
@@ -3,8 +3,8 @@ export { AssistantMessage, AssistantStatus, ChatRequest, ChatRequestOptions, Cre
 import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1StreamPart } from '@ai-sdk/provider';
 export { APICallError, EmptyResponseBodyError, InvalidArgumentError, InvalidDataContentError, InvalidPromptError, InvalidResponseDataError, InvalidToolArgumentsError, JSONParseError, LoadAPIKeyError, NoObjectGeneratedError, NoSuchToolError, RetryError, ToolCallParseError, TypeValidationError, UnsupportedFunctionalityError, UnsupportedJSONSchemaError } from '@ai-sdk/provider';
 import { z } from 'zod';
-import { ServerResponse } from 'http';
 import { AttributeValue, Span } from '@opentelemetry/api';
+import { ServerResponse } from 'http';
 import { ServerResponse as ServerResponse$1 } from 'node:http';
 import { AssistantStream } from 'openai/lib/AssistantStream';
 import { Run } from 'openai/resources/beta/threads/runs/runs';
@@ -89,6 +89,34 @@ type CoreToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | '
 type TokenUsage = CompletionTokenUsage$1;
 type CompletionTokenUsage = CompletionTokenUsage$1;
 
+/**
+The result of a `embed` call.
+It contains the embedding, the value, and additional information.
+ */
+interface EmbedResult<VALUE> {
+    /**
+    The value that was embedded.
+     */
+    readonly value: VALUE;
+    /**
+    The embedding of the value.
+     */
+    readonly embedding: Embedding;
+    /**
+    The embedding token usage.
+     */
+    readonly usage: EmbeddingTokenUsage;
+    /**
+    Optional raw response data.
+     */
+    readonly rawResponse?: {
+        /**
+        Response headers.
+         */
+        headers?: Record<string, string>;
+    };
+}
+
 /**
 Embed a value using an embedding model. The type of the value is defined by the embedding model.
 
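
The hunk above converts `EmbedResult` from a class into a plain interface. A minimal consumption sketch (assuming ai@3.2.34 together with a provider package such as @ai-sdk/openai; the model id and input value are illustrative, not part of this diff):

```ts
// Sketch only: assumes ai@3.2.34 and @ai-sdk/openai are installed.
import { embed } from 'ai';
import { openai } from '@ai-sdk/openai';

async function main() {
  const result = await embed({
    model: openai.embedding('text-embedding-3-small'), // hypothetical model choice
    value: 'sunny day at the beach',
  });
  console.log(result.value);            // the original input
  console.log(result.embedding.length); // the embedding vector
  console.log(result.usage);            // EmbeddingTokenUsage
  console.log(result.rawResponse?.headers);
}
main();
```
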
@@ -126,40 +154,24 @@ declare function embed<VALUE>({ model, value, maxRetries, abortSignal, headers,
      */
     headers?: Record<string, string>;
 }): Promise<EmbedResult<VALUE>>;
+
 /**
-The result of a `embed` call.
-It contains the embedding, the value, and additional information.
+The result of a `embedMany` call.
+It contains the embeddings, the values, and additional information.
  */
-declare class EmbedResult<VALUE> {
+interface EmbedManyResult<VALUE> {
     /**
-    The value that was embedded.
-     */
-    readonly value: VALUE;
+    The values that were embedded.
+     */
+    readonly values: Array<VALUE>;
     /**
-    The embedding of the value.
-     */
-    readonly embedding: Embedding;
+    The embeddings. They are in the same order as the values.
+     */
+    readonly embeddings: Array<Embedding>;
     /**
-    The embedding token usage.
-     */
+    The embedding token usage.
+     */
     readonly usage: EmbeddingTokenUsage;
-    /**
-    Optional raw response data.
-     */
-    readonly rawResponse?: {
-        /**
-        Response headers.
-         */
-        headers?: Record<string, string>;
-    };
-    constructor(options: {
-        value: VALUE;
-        embedding: Embedding;
-        usage: EmbeddingTokenUsage;
-        rawResponse?: {
-            headers?: Record<string, string>;
-        };
-    });
 }
 
 /**
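
`EmbedManyResult` gets the same treatment: a plain object shape without the old constructor. A minimal sketch under the same assumptions as above:

```ts
// Sketch only: embedMany now resolves to a plain EmbedManyResult object.
import { embedMany } from 'ai';
import { openai } from '@ai-sdk/openai';

async function main() {
  const { values, embeddings, usage } = await embedMany({
    model: openai.embedding('text-embedding-3-small'), // hypothetical model choice
    values: ['sunny day at the beach', 'rainy day in the city'],
  });
  // embeddings[i] corresponds to values[i]
  console.log(values.length === embeddings.length, usage);
}
main();
```
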
@@ -203,29 +215,6 @@ declare function embedMany<VALUE>({ model, values, maxRetries, abortSignal, head
      */
     headers?: Record<string, string>;
 }): Promise<EmbedManyResult<VALUE>>;
-/**
-The result of a `embedMany` call.
-It contains the embeddings, the values, and additional information.
- */
-declare class EmbedManyResult<VALUE> {
-    /**
-    The values that were embedded.
-     */
-    readonly values: Array<VALUE>;
-    /**
-    The embeddings. They are in the same order as the values.
-     */
-    readonly embeddings: Array<Embedding>;
-    /**
-    The embedding token usage.
-     */
-    readonly usage: EmbeddingTokenUsage;
-    constructor(options: {
-        values: Array<VALUE>;
-        embeddings: Array<Embedding>;
-        usage: EmbeddingTokenUsage;
-    });
-}
 
 type CallSettings = {
     /**
@@ -251,6 +240,13 @@ type CallSettings = {
      */
     topP?: number;
     /**
+    Only sample from the top K options for each subsequent token.
+
+    Used to remove "long tail" low probability responses.
+    Recommended for advanced use cases only. You usually only need to use temperature.
+     */
+    topK?: number;
+    /**
     Presence penalty setting. It affects the likelihood of the model to
     repeat information that is already in the prompt.
 
@@ -271,6 +267,12 @@ type CallSettings = {
      */
     frequencyPenalty?: number;
     /**
+    Stop sequences.
+    If set, the model will stop generating text when one of the stop sequences is generated.
+    Providers may have limits on the number of stop sequences.
+     */
+    stopSequences?: string[];
+    /**
     The seed (integer) to use for random sampling. If set and supported
     by the model, calls will generate deterministic results.
      */
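
These two hunks add `topK` and `stopSequences` to `CallSettings`, making them available on the call-settings-based functions. A minimal sketch (provider, model, and values are placeholder assumptions, and `topK` support varies by provider):

```ts
// Sketch only: exercising the two CallSettings introduced in 3.2.34.
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

async function main() {
  const { text } = await generateText({
    model: openai('gpt-4o'),   // hypothetical model choice
    prompt: 'List three colors, then stop.',
    topK: 40,                  // advanced: sample only from the top 40 tokens
    stopSequences: ['\n\n'],   // generation stops when a blank line is produced
  });
  console.log(text);
}
main();
```
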
@@ -470,6 +472,65 @@ type Prompt = {
     messages?: Array<CoreMessage>;
 };
 
+/**
+ * Telemetry configuration.
+ */
+type TelemetrySettings = {
+    /**
+     * Enable or disable telemetry. Disabled by default while experimental.
+     */
+    isEnabled?: boolean;
+    /**
+     * Identifier for this function. Used to group telemetry data by function.
+     */
+    functionId?: string;
+    /**
+     * Additional information to include in the telemetry data.
+     */
+    metadata?: Record<string, AttributeValue>;
+};
+
+/**
+The result of a `generateObject` call.
+ */
+interface GenerateObjectResult<T> {
+    /**
+    The generated object (typed according to the schema).
+     */
+    readonly object: T;
+    /**
+    The reason why the generation finished.
+     */
+    readonly finishReason: FinishReason;
+    /**
+    The token usage of the generated text.
+     */
+    readonly usage: CompletionTokenUsage$1;
+    /**
+    Warnings from the model provider (e.g. unsupported settings)
+     */
+    readonly warnings: CallWarning[] | undefined;
+    /**
+    Optional raw response data.
+     */
+    readonly rawResponse?: {
+        /**
+        Response headers.
+         */
+        headers?: Record<string, string>;
+    };
+    /**
+    Logprobs for the completion.
+    `undefined` if the mode does not support logprobs or if was not enabled
+     */
+    readonly logprobs: LogProbs | undefined;
+    /**
+    Converts the object to a JSON response.
+    The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
+     */
+    toJsonResponse(init?: ResponseInit): Response;
+}
+
 /**
 Generate a structured, typed object for a given prompt and schema using a language model.
 
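
This hunk introduces `TelemetrySettings` (later removed from its old position further down) and the `GenerateObjectResult` interface. A minimal sketch of the telemetry option as typed here (the function id and metadata values are invented for illustration, and an OpenTelemetry setup is assumed to exist elsewhere):

```ts
// Sketch only: passing TelemetrySettings via experimental_telemetry.
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

async function main() {
  await generateText({
    model: openai('gpt-4o'), // hypothetical model choice
    prompt: 'Hello!',
    experimental_telemetry: {
      isEnabled: true,               // off by default while experimental
      functionId: 'greeting',        // groups telemetry data by function
      metadata: { userTier: 'pro' }, // extra span attributes (AttributeValue)
    },
  });
}
main();
```
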
@@ -491,6 +552,9 @@ It is recommended to set either `temperature` or `topP`, but not both.
 @param topP - Nucleus sampling.
 The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
+@param topK - Only sample from the top K options for each subsequent token.
+Used to remove "long tail" low probability responses.
+Recommended for advanced use cases only. You usually only need to use temperature.
 @param presencePenalty - Presence penalty setting.
 It affects the likelihood of the model to repeat information that is already in the prompt.
 The value is passed through to the provider. The range depends on the provider and model.
@@ -507,7 +571,7 @@ If set and supported by the model, calls will generate deterministic results.
 @returns
 A result object that contains the generated object, the finish reason, the token usage, and additional information.
  */
-declare function generateObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, headers, ...settings }: CallSettings & Prompt & {
+declare function generateObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, ...settings }: Omit<CallSettings, 'stopSequences'> & Prompt & {
     /**
     The language model to use.
      */
@@ -523,63 +587,33 @@ The Zod schema is converted in a JSON schema and used in one of the following wa
 
 - 'auto': The provider will choose the best mode for the model.
 - 'tool': A tool with the JSON schema as parameters is is provided and the provider is instructed to use it.
-- 'json': The JSON schema and
-- 'grammar': The provider is instructed to converted the JSON schema into a provider specific grammar and use it to select the output tokens.
+- 'json': The JSON schema and an instruction is injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
 
 Please note that most providers do not support all modes.
 
 Default and recommended: 'auto' (best mode for the model).
  */
-    mode?: 'auto' | 'json' | 'tool'
-}): Promise<GenerateObjectResult<T>>;
-/**
-The result of a `generateObject` call.
- */
-declare class GenerateObjectResult<T> {
-    /**
-    The generated object (typed according to the schema).
-     */
-    readonly object: T;
-    /**
-    The reason why the generation finished.
-     */
-    readonly finishReason: FinishReason;
-    /**
-    The token usage of the generated text.
-     */
-    readonly usage: CompletionTokenUsage$1;
-    /**
-    Warnings from the model provider (e.g. unsupported settings)
-     */
-    readonly warnings: CallWarning[] | undefined;
-    /**
-    Optional raw response data.
-     */
-    rawResponse?: {
-        /**
-        Response headers.
-         */
-        headers?: Record<string, string>;
-    };
+    mode?: 'auto' | 'json' | 'tool';
     /**
-    Logprobs for the completion.
-    `undefined` if the mode does not support logprobs or if was not enabled
+     * Optional telemetry configuration (experimental).
      */
-    readonly logprobs: LogProbs | undefined;
+    experimental_telemetry?: TelemetrySettings;
+}): Promise<DefaultGenerateObjectResult<T>>;
+declare class DefaultGenerateObjectResult<T> implements GenerateObjectResult<T> {
+    readonly object: GenerateObjectResult<T>['object'];
+    readonly finishReason: GenerateObjectResult<T>['finishReason'];
+    readonly usage: GenerateObjectResult<T>['usage'];
+    readonly warnings: GenerateObjectResult<T>['warnings'];
+    readonly rawResponse: GenerateObjectResult<T>['rawResponse'];
+    readonly logprobs: GenerateObjectResult<T>['logprobs'];
     constructor(options: {
-        object: T;
-        finishReason: FinishReason;
-        usage: CompletionTokenUsage$1;
-        warnings: CallWarning[] | undefined;
-        rawResponse?: {
-            headers?: Record<string, string>;
-        };
+        object: GenerateObjectResult<T>['object'];
+        finishReason: GenerateObjectResult<T>['finishReason'];
+        usage: GenerateObjectResult<T>['usage'];
+        warnings: GenerateObjectResult<T>['warnings'];
+        rawResponse: GenerateObjectResult<T>['rawResponse'];
+        logprobs: GenerateObjectResult<T>['logprobs'];
     });
-    /**
-    Converts the object to a JSON response.
-    The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
-     */
     toJsonResponse(init?: ResponseInit): Response;
 }
 /**
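
With the hunk above, `generateObject` returns an internal `DefaultGenerateObjectResult` implementing the public `GenerateObjectResult` interface, drops the `'grammar'` mode, accepts `experimental_telemetry`, and excludes `stopSequences` from its settings. A minimal call sketch (model choice is illustrative):

```ts
// Sketch only: the 3.2.34 generateObject surface.
import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

async function main() {
  const result = await generateObject({
    model: openai('gpt-4o'), // hypothetical model choice
    schema: z.object({ name: z.string(), age: z.number() }),
    prompt: 'Generate a fictional person.',
    mode: 'auto', // 'auto' | 'json' | 'tool' — no 'grammar' anymore
  });
  console.log(result.object.name, result.finishReason, result.usage);
  // In a route handler, the result can be returned directly:
  // return result.toJsonResponse();
}
main();
```
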
@@ -589,6 +623,91 @@ declare const experimental_generateObject: typeof generateObject;
 
 type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
 
+/**
+The result of a `streamObject` call that contains the partial object stream and additional information.
+ */
+interface StreamObjectResult<T> {
+    /**
+    Warnings from the model provider (e.g. unsupported settings)
+     */
+    readonly warnings: CallWarning[] | undefined;
+    /**
+    The token usage of the generated response. Resolved when the response is finished.
+     */
+    readonly usage: Promise<CompletionTokenUsage$1>;
+    /**
+    Optional raw response data.
+     */
+    readonly rawResponse?: {
+        /**
+        Response headers.
+         */
+        headers?: Record<string, string>;
+    };
+    /**
+    The generated object (typed according to the schema). Resolved when the response is finished.
+     */
+    readonly object: Promise<T>;
+    /**
+    Stream of partial objects. It gets more complete as the stream progresses.
+
+    Note that the partial object is not validated.
+    If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
+     */
+    readonly partialObjectStream: AsyncIterableStream<DeepPartial<T>>;
+    /**
+    Text stream of the JSON representation of the generated object. It contains text chunks.
+    When the stream is finished, the object is valid JSON that can be parsed.
+     */
+    readonly textStream: AsyncIterableStream<string>;
+    /**
+    Stream of different types of events, including partial objects, errors, and finish events.
+    Only errors that stop the stream, such as network errors, are thrown.
+     */
+    readonly fullStream: AsyncIterableStream<ObjectStreamPart<T>>;
+    /**
+    Writes text delta output to a Node.js response-like object.
+    It sets a `Content-Type` header to `text/plain; charset=utf-8` and
+    writes each text delta as a separate chunk.
+
+    @param response A Node.js response-like object (ServerResponse).
+    @param init Optional headers and status code.
+     */
+    pipeTextStreamToResponse(response: ServerResponse, init?: {
+        headers?: Record<string, string>;
+        status?: number;
+    }): void;
+    /**
+    Creates a simple text stream response.
+    The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
+    Each text delta is encoded as UTF-8 and sent as a separate chunk.
+    Non-text-delta events are ignored.
+
+    @param init Optional headers and status code.
+     */
+    toTextStreamResponse(init?: ResponseInit): Response;
+}
+type ObjectStreamInputPart = {
+    type: 'error';
+    error: unknown;
+} | {
+    type: 'finish';
+    finishReason: FinishReason;
+    logprobs?: LogProbs;
+    usage: {
+        promptTokens: number;
+        completionTokens: number;
+        totalTokens: number;
+    };
+};
+type ObjectStreamPart<T> = ObjectStreamInputPart | {
+    type: 'object';
+    object: DeepPartial<T>;
+} | {
+    type: 'text-delta';
+    textDelta: string;
+};
+
 /**
 Generate a structured, typed object for a given prompt and schema using a language model.
 
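
The new `StreamObjectResult` interface is consumed roughly as follows; a minimal sketch (model and prompt are illustrative):

```ts
// Sketch only: consuming partialObjectStream; partials are not schema-validated.
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

async function main() {
  const result = await streamObject({
    model: openai('gpt-4o'), // hypothetical model choice
    schema: z.object({ title: z.string(), tags: z.array(z.string()) }),
    prompt: 'Suggest a blog post about TypeScript.',
  });
  for await (const partial of result.partialObjectStream) {
    console.log(partial); // DeepPartial<T>, grows as the stream progresses
  }
  console.log(await result.object); // final object, validated when finished
}
main();
```
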
@@ -610,6 +729,9 @@ It is recommended to set either `temperature` or `topP`, but not both.
 @param topP - Nucleus sampling.
 The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
+@param topK - Only sample from the top K options for each subsequent token.
+Used to remove "long tail" low probability responses.
+Recommended for advanced use cases only. You usually only need to use temperature.
 @param presencePenalty - Presence penalty setting.
 It affects the likelihood of the model to repeat information that is already in the prompt.
 The value is passed through to the provider. The range depends on the provider and model.
@@ -626,7 +748,7 @@ If set and supported by the model, calls will generate deterministic results.
 @return
 A result object for accessing the partial object stream and additional information.
  */
-declare function streamObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, headers, onFinish, ...settings }: CallSettings & Prompt & {
+declare function streamObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, headers, onFinish, ...settings }: Omit<CallSettings, 'stopSequences'> & Prompt & {
     /**
     The language model to use.
      */
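
Because the settings type is now `Omit<CallSettings, 'stopSequences'>`, `stopSequences` no longer type-checks for object generation. A minimal sketch:

```ts
// Sketch only: stopSequences is rejected by the compiler for streamObject in 3.2.34.
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

async function main() {
  await streamObject({
    model: openai('gpt-4o'), // hypothetical model choice
    schema: z.object({ answer: z.string() }),
    prompt: 'Answer briefly.',
    // stopSequences: ['\n'], // type error: removed via Omit<CallSettings, 'stopSequences'>
  });
}
main();
```
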
@@ -642,14 +764,13 @@ The Zod schema is converted in a JSON schema and used in one of the following wa
 
 - 'auto': The provider will choose the best mode for the model.
 - 'tool': A tool with the JSON schema as parameters is is provided and the provider is instructed to use it.
-- 'json': The JSON schema and
-- 'grammar': The provider is instructed to converted the JSON schema into a provider specific grammar and use it to select the output tokens.
+- 'json': The JSON schema and an instruction is injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
 
 Please note that most providers do not support all modes.
 
 Default and recommended: 'auto' (best mode for the model).
  */
-    mode?: 'auto' | 'json' | 'tool'
+    mode?: 'auto' | 'json' | 'tool';
     /**
     Callback that is called when the LLM response and the final object validation are finished.
      */
@@ -680,99 +801,28 @@ Callback that is called when the LLM response and the final object validation ar
      */
     warnings?: CallWarning[];
 }) => Promise<void> | void;
-}): Promise<StreamObjectResult<T>>;
-type ObjectStreamInputPart = {
-    type: 'error';
-    error: unknown;
-} | {
-    type: 'finish';
-    finishReason: FinishReason;
-    logprobs?: LogProbs;
-    usage: {
-        promptTokens: number;
-        completionTokens: number;
-        totalTokens: number;
-    };
-};
-type ObjectStreamPart<T> = ObjectStreamInputPart | {
-    type: 'object';
-    object: DeepPartial<T>;
-} | {
-    type: 'text-delta';
-    textDelta: string;
-};
-/**
-The result of a `streamObject` call that contains the partial object stream and additional information.
- */
-declare class StreamObjectResult<T> {
+}): Promise<DefaultStreamObjectResult<T>>;
+declare class DefaultStreamObjectResult<T> implements StreamObjectResult<T> {
     private readonly originalStream;
     private readonly objectPromise;
-    /**
-    Warnings from the model provider (e.g. unsupported settings)
-     */
-    readonly warnings: CallWarning[] | undefined;
-    /**
-    The token usage of the generated response. Resolved when the response is finished.
-     */
-    readonly usage: Promise<CompletionTokenUsage$1>;
-    /**
-    Optional raw response data.
-     */
-    rawResponse?: {
-        /**
-        Response headers.
-         */
-        headers?: Record<string, string>;
-    };
+    readonly warnings: StreamObjectResult<T>['warnings'];
+    readonly usage: StreamObjectResult<T>['usage'];
+    readonly rawResponse: StreamObjectResult<T>['rawResponse'];
     constructor({ stream, warnings, rawResponse, schema, onFinish, }: {
         stream: ReadableStream<string | Omit<LanguageModelV1StreamPart, 'text-delta'>>;
-        warnings: CallWarning[] | undefined;
-        rawResponse?: {
-            headers?: Record<string, string>;
-        };
+        warnings: StreamObjectResult<T>['warnings'];
+        rawResponse?: StreamObjectResult<T>['rawResponse'];
         schema: z.Schema<T>;
         onFinish: Parameters<typeof streamObject<T>>[0]['onFinish'];
     });
-    /**
-    The generated object (typed according to the schema). Resolved when the response is finished.
-     */
     get object(): Promise<T>;
-    /**
-    Stream of partial objects. It gets more complete as the stream progresses.
-
-    Note that the partial object is not validated.
-    If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
-     */
    get partialObjectStream(): AsyncIterableStream<DeepPartial<T>>;
-    /**
-    Text stream of the JSON representation of the generated object. It contains text chunks.
-    When the stream is finished, the object is valid JSON that can be parsed.
-     */
     get textStream(): AsyncIterableStream<string>;
-    /**
-    Stream of different types of events, including partial objects, errors, and finish events.
-     */
     get fullStream(): AsyncIterableStream<ObjectStreamPart<T>>;
-    /**
-    Writes text delta output to a Node.js response-like object.
-    It sets a `Content-Type` header to `text/plain; charset=utf-8` and
-    writes each text delta as a separate chunk.
-
-    @param response A Node.js response-like object (ServerResponse).
-    @param init Optional headers and status code.
-     */
     pipeTextStreamToResponse(response: ServerResponse, init?: {
         headers?: Record<string, string>;
         status?: number;
     }): void;
-    /**
-    Creates a simple text stream response.
-    The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
-    Each text delta is encoded as UTF-8 and sent as a separate chunk.
-    Non-text-delta events are ignored.
-
-    @param init Optional headers and status code.
-     */
     toTextStreamResponse(init?: ResponseInit): Response;
 }
 /**
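
The pattern in this hunk (public interface plus internal `Default*` class) suggests typing downstream code against the interface rather than the class. A minimal sketch, assuming `StreamObjectResult` is exported from 'ai':

```ts
// Sketch only: depend on the public interface, not the internal Default* class.
import type { StreamObjectResult } from 'ai';

export async function logUsage<T>(result: StreamObjectResult<T>): Promise<void> {
  const usage = await result.usage; // resolves when the response is finished
  console.log(usage.promptTokens, usage.completionTokens, usage.totalTokens);
}
```
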
@@ -931,24 +981,6 @@ declare class InvalidMessageRoleError extends Error {
     };
 }
 
-/**
- * Telemetry configuration.
- */
-type TelemetrySettings = {
-    /**
-     * Enable or disable telemetry. Disabled by default while experimental.
-     */
-    isEnabled?: boolean;
-    /**
-     * Identifier for this function. Used to group telemetry data by function.
-     */
-    functionId?: string;
-    /**
-     * Additional information to include in the telemetry data.
-     */
-    metadata?: Record<string, AttributeValue>;
-};
-
 type ToToolCall<TOOLS extends Record<string, CoreTool>> = ValueOf<{
     [NAME in keyof TOOLS]: {
         type: 'tool-call';
@@ -959,6 +991,103 @@ type ToToolCall<TOOLS extends Record<string, CoreTool>> = ValueOf<{
 }>;
 type ToToolCallArray<TOOLS extends Record<string, CoreTool>> = Array<ToToolCall<TOOLS>>;
 
+/**
+The result of a `generateText` call.
+It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
+ */
+interface GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
+    /**
+    The generated text.
+     */
+    readonly text: string;
+    /**
+    The tool calls that were made during the generation.
+     */
+    readonly toolCalls: ToToolCallArray<TOOLS>;
+    /**
+    The results of the tool calls.
+     */
+    readonly toolResults: ToToolResultArray<TOOLS>;
+    /**
+    The reason why the generation finished.
+     */
+    readonly finishReason: FinishReason;
+    /**
+    The token usage of the generated text.
+     */
+    readonly usage: CompletionTokenUsage$1;
+    /**
+    Warnings from the model provider (e.g. unsupported settings)
+     */
+    readonly warnings: CallWarning[] | undefined;
+    /**
+    The response messages that were generated during the call. It consists of an assistant message,
+    potentially containing tool calls.
+    When there are tool results, there is an additional tool message with the tool results that are available.
+    If there are tools that do not have execute functions, they are not included in the tool results and
+    need to be added separately.
+     */
+    readonly responseMessages: Array<CoreAssistantMessage | CoreToolMessage>;
+    /**
+    Response information for every roundtrip.
+    You can use this to get information about intermediate steps, such as the tool calls or the response headers.
+     */
+    readonly roundtrips: Array<{
+        /**
+        The generated text.
+         */
+        readonly text: string;
+        /**
+        The tool calls that were made during the generation.
+         */
+        readonly toolCalls: ToToolCallArray<TOOLS>;
+        /**
+        The results of the tool calls.
+         */
+        readonly toolResults: ToToolResultArray<TOOLS>;
+        /**
+        The reason why the generation finished.
+         */
+        readonly finishReason: FinishReason;
+        /**
+        The token usage of the generated text.
+         */
+        readonly usage: CompletionTokenUsage$1;
+        /**
+        Warnings from the model provider (e.g. unsupported settings)
+         */
+        readonly warnings: CallWarning[] | undefined;
+        /**
+        Logprobs for the completion.
+        `undefined` if the mode does not support logprobs or if was not enabled.
+         */
+        readonly logprobs: LogProbs | undefined;
+        /**
+        Optional raw response data.
+         */
+        readonly rawResponse?: {
+            /**
+            Response headers.
+             */
+            readonly headers?: Record<string, string>;
+        };
+    }>;
+    /**
+    Optional raw response data.
+     */
+    readonly rawResponse?: {
+        /**
+        Response headers.
+         */
+        readonly headers?: Record<string, string>;
+    };
+    /**
+    Logprobs for the completion.
+    `undefined` if the mode does not support logprobs or if was not enabled.
+     */
+    readonly logprobs: LogProbs | undefined;
+}
+
 /**
 Generate a text and call tools for a given prompt using a language model.
 
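
A minimal sketch of reading the new `roundtrips` field on `GenerateTextResult` (the `maxToolRoundtrips` option is assumed from the surrounding API rather than shown in this hunk, and tool definitions are omitted):

```ts
// Sketch only: inspecting per-roundtrip information after a tool-using call.
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

async function main() {
  const result = await generateText({
    model: openai('gpt-4o'), // hypothetical model choice
    prompt: 'What is the weather in Berlin?',
    maxToolRoundtrips: 2,    // assumed option; tools omitted in this sketch
  });
  for (const roundtrip of result.roundtrips) {
    console.log(roundtrip.finishReason, roundtrip.usage, roundtrip.toolCalls.length);
  }
  console.log(result.text, result.responseMessages.length);
}
main();
```
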
@@ -980,12 +1109,17 @@ It is recommended to set either `temperature` or `topP`, but not both.
 @param topP - Nucleus sampling.
 The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
+@param topK - Only sample from the top K options for each subsequent token.
+Used to remove "long tail" low probability responses.
+Recommended for advanced use cases only. You usually only need to use temperature.
 @param presencePenalty - Presence penalty setting.
 It affects the likelihood of the model to repeat information that is already in the prompt.
 The value is passed through to the provider. The range depends on the provider and model.
 @param frequencyPenalty - Frequency penalty setting.
 It affects the likelihood of the model to repeatedly use the same words or phrases.
 The value is passed through to the provider. The range depends on the provider and model.
+@param stopSequences - Stop sequences.
+If set, the model will stop generating text when one of the stop sequences is generated.
 @param seed - The seed (integer) to use for random sampling.
 If set and supported by the model, calls will generate deterministic results.
 
@@ -1034,117 +1168,145 @@ By default, it's set to 0, which will disable the feature.
     experimental_telemetry?: TelemetrySettings;
 }): Promise<GenerateTextResult<TOOLS>>;
 /**
-The result of a `generateText` call.
-It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
+ * @deprecated Use `generateText` instead.
  */
-declare class GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
-    /**
-    The generated text.
-     */
-    readonly text: string;
+declare const experimental_generateText: typeof generateText;
+
+/**
+A result object for accessing different stream types and additional information.
+ */
+interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
     /**
-    The tool calls that were made during the generation.
-     */
-    readonly toolCalls: ToToolCallArray<TOOLS>;
+    Warnings from the model provider (e.g. unsupported settings).
+     */
+    readonly warnings: CallWarning[] | undefined;
     /**
-    The results of the tool calls.
-     */
-    readonly toolResults: ToToolResultArray<TOOLS>;
+    The token usage of the generated response. Resolved when the response is finished.
+     */
+    readonly usage: Promise<CompletionTokenUsage$1>;
     /**
-    The reason why the generation finished.
-     */
-    readonly finishReason: FinishReason;
+    The reason why the generation finished. Resolved when the response is finished.
+     */
+    readonly finishReason: Promise<FinishReason>;
     /**
-    The token usage of the generated text.
-     */
-    readonly usage: CompletionTokenUsage$1;
+    The full text that has been generated. Resolved when the response is finished.
+     */
+    readonly text: Promise<string>;
     /**
-    Warnings from the model provider (e.g. unsupported settings)
-     */
-    readonly warnings: CallWarning[] | undefined;
+    The tool calls that have been executed. Resolved when the response is finished.
+     */
+    readonly toolCalls: Promise<ToToolCall<TOOLS>[]>;
     /**
-    The response messages that were generated during the call. It consists of an assistant message,
-    potentially containing tool calls.
-    When there are tool results, there is an additional tool message with the tool results that are available.
-    If there are tools that do not have execute functions, they are not included in the tool results and
-    need to be added separately.
-     */
-    readonly responseMessages: Array<CoreAssistantMessage | CoreToolMessage>;
+    The tool results that have been generated. Resolved when the all tool executions are finished.
+     */
+    readonly toolResults: Promise<ToToolResult<TOOLS>[]>;
     /**
-    Response information for every roundtrip.
-    You can use this to get information about intermediate steps, such as the tool calls or the response headers.
-     */
-    readonly roundtrips: Array<{
-        /**
-        The generated text.
+    Optional raw response data.
      */
-        readonly text: string;
-        /**
-        The tool calls that were made during the generation.
-         */
-        readonly toolCalls: ToToolCallArray<TOOLS>;
-        /**
-        The results of the tool calls.
-         */
-        readonly toolResults: ToToolResultArray<TOOLS>;
+    readonly rawResponse?: {
         /**
-        The reason why the generation finished.
+        Response headers.
+         */
+        headers?: Record<string, string>;
+    };
+    /**
+    A text stream that returns only the generated text deltas. You can use it
+    as either an AsyncIterable or a ReadableStream. When an error occurs, the
+    stream will throw the error.
     */
-        readonly finishReason: FinishReason;
-        /**
-        The token usage of the generated text.
-         */
-        readonly usage: CompletionTokenUsage$1;
-        /**
-        Warnings from the model provider (e.g. unsupported settings)
+    readonly textStream: AsyncIterableStream<string>;
+    /**
+    A stream with all events, including text deltas, tool calls, tool results, and
+    errors.
+    You can use it as either an AsyncIterable or a ReadableStream.
+    Only errors that stop the stream, such as network errors, are thrown.
     */
-        readonly warnings: CallWarning[] | undefined;
-        /**
-        Logprobs for the completion.
-        `undefined` if the mode does not support logprobs or if was not enabled.
+    readonly fullStream: AsyncIterableStream<TextStreamPart<TOOLS>>;
+    /**
+    Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
+    It can be used with the `useChat` and `useCompletion` hooks.
+
+    @param callbacks
+    Stream callbacks that will be called when the stream emits events.
+
+    @returns an `AIStream` object.
     */
-        readonly logprobs: LogProbs | undefined;
-        /**
-        Optional raw response data.
-         */
-        readonly rawResponse?: {
-            /**
-            Response headers.
-             */
-            readonly headers?: Record<string, string>;
-        };
-    }>;
+    toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<Uint8Array>;
    /**
-    Optional raw response data.
-     */
-    readonly rawResponse?: {
-        /**
-        Response headers.
+    Writes stream data output to a Node.js response-like object.
+    It sets a `Content-Type` header to `text/plain; charset=utf-8` and
+    writes each stream data part as a separate chunk.
+
+    @param response A Node.js response-like object (ServerResponse).
+    @param init Optional headers and status code.
     */
-        readonly headers?: Record<string, string>;
-    };
+    pipeAIStreamToResponse(response: ServerResponse$1, init?: {
+        headers?: Record<string, string>;
+        status?: number;
+    }): void;
    /**
-    Logprobs for the completion.
-    `undefined` if the mode does not support logprobs or if was not enabled.
-     */
-    readonly logprobs: LogProbs | undefined;
-    constructor(options: {
-        text: string;
-        toolCalls: ToToolCallArray<TOOLS>;
-        toolResults: ToToolResultArray<TOOLS>;
-        finishReason: FinishReason;
-        usage: CompletionTokenUsage$1;
-        warnings: CallWarning[] | undefined;
-        rawResponse?: {
-            headers?: Record<string, string>;
-        };
-        logprobs: LogProbs | undefined;
-    });
+    Writes text delta output to a Node.js response-like object.
+    It sets a `Content-Type` header to `text/plain; charset=utf-8` and
+    writes each text delta as a separate chunk.
+
+    @param response A Node.js response-like object (ServerResponse).
+    @param init Optional headers and status code.
+     */
+    pipeTextStreamToResponse(response: ServerResponse$1, init?: {
+        headers?: Record<string, string>;
+        status?: number;
+    }): void;
+    /**
+    Converts the result to a streamed response object with a stream data part stream.
+    It can be used with the `useChat` and `useCompletion` hooks.
+
+    @param options An object with an init property (ResponseInit) and a data property.
+    You can also pass in a ResponseInit directly (deprecated).
+
+    @return A response object.
+     */
+    toAIStreamResponse(options?: ResponseInit | {
+        init?: ResponseInit;
+        data?: StreamData;
+    }): Response;
+    /**
+    Creates a simple text stream response.
+    Each text delta is encoded as UTF-8 and sent as a separate chunk.
+    Non-text-delta events are ignored.
+
+    @param init Optional headers and status code.
+     */
+    toTextStreamResponse(init?: ResponseInit): Response;
 }
-/**
- * @deprecated Use `generateText` instead.
- */
-declare const experimental_generateText: typeof generateText;
+type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
+    type: 'text-delta';
+    textDelta: string;
+} | ({
+    type: 'tool-call';
+} & ToToolCall<TOOLS>) | {
+    type: 'tool-call-streaming-start';
+    toolCallId: string;
+    toolName: string;
+} | {
+    type: 'tool-call-delta';
+    toolCallId: string;
+    toolName: string;
+    argsTextDelta: string;
+} | ({
+    type: 'tool-result';
+} & ToToolResult<TOOLS>) | {
+    type: 'finish';
+    finishReason: FinishReason;
+    logprobs?: LogProbs;
+    usage: {
+        promptTokens: number;
+        completionTokens: number;
+        totalTokens: number;
+    };
+} | {
+    type: 'error';
+    error: unknown;
+};
 
 /**
 Generate a text and call tools for a given prompt using a language model.
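
A minimal sketch of the new `StreamTextResult` shape, with promise-valued fields and a `fullStream` of typed `TextStreamPart` events (model and prompt are illustrative):

```ts
// Sketch only: consuming the typed full stream, then the resolved text.
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

async function main() {
  const result = await streamText({
    model: openai('gpt-4o'), // hypothetical model choice
    prompt: 'Write a haiku about code review.',
  });
  for await (const part of result.fullStream) {
    if (part.type === 'text-delta') process.stdout.write(part.textDelta);
    if (part.type === 'finish') console.log('\n', part.finishReason, part.usage);
  }
  console.log(await result.text); // also available as a resolved promise
  // In a route handler: return result.toAIStreamResponse();
}
main();
```
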
@@ -1165,12 +1327,17 @@ It is recommended to set either `temperature` or `topP`, but not both.
 @param topP - Nucleus sampling.
 The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
+@param topK - Only sample from the top K options for each subsequent token.
+Used to remove "long tail" low probability responses.
+Recommended for advanced use cases only. You usually only need to use temperature.
 @param presencePenalty - Presence penalty setting.
 It affects the likelihood of the model to repeat information that is already in the prompt.
 The value is passed through to the provider. The range depends on the provider and model.
 @param frequencyPenalty - Frequency penalty setting.
 It affects the likelihood of the model to repeatedly use the same words or phrases.
 The value is passed through to the provider. The range depends on the provider and model.
+@param stopSequences - Stop sequences.
+If set, the model will stop generating text when one of the stop sequences is generated.
 @param seed - The seed (integer) to use for random sampling.
 If set and supported by the model, calls will generate deterministic results.
 
@@ -1244,81 +1411,21 @@ Callback that is called when the LLM response and all request tool executions
      */
     warnings?: CallWarning[];
 }) => Promise<void> | void;
-}): Promise<StreamTextResult<TOOLS>>;
-type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
-    type: 'text-delta';
-    textDelta: string;
-} | ({
-    type: 'tool-call';
-} & ToToolCall<TOOLS>) | {
-    type: 'tool-call-streaming-start';
-    toolCallId: string;
-    toolName: string;
-} | {
-    type: 'tool-call-delta';
-    toolCallId: string;
-    toolName: string;
-    argsTextDelta: string;
-} | ({
-    type: 'tool-result';
-} & ToToolResult<TOOLS>) | {
-    type: 'finish';
-    finishReason: FinishReason;
-    logprobs?: LogProbs;
-    usage: {
-        promptTokens: number;
-        completionTokens: number;
-        totalTokens: number;
-    };
-} | {
-    type: 'error';
-    error: unknown;
-};
-/**
-A result object for accessing different stream types and additional information.
- */
-declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
+}): Promise<DefaultStreamTextResult<TOOLS>>;
+declare class DefaultStreamTextResult<TOOLS extends Record<string, CoreTool>> implements StreamTextResult<TOOLS> {
     private originalStream;
     private onFinish?;
-    /**
-    Warnings from the model provider (e.g. unsupported settings)
-     */
-    readonly warnings: CallWarning[] | undefined;
-    /**
-    The token usage of the generated response. Resolved when the response is finished.
-     */
-    readonly usage: Promise<CompletionTokenUsage$1>;
-    /**
-    The reason why the generation finished. Resolved when the response is finished.
-     */
-    readonly finishReason: Promise<FinishReason>;
-    /**
-    The full text that has been generated. Resolved when the response is finished.
-     */
-    readonly text: Promise<string>;
-    /**
-    The tool calls that have been executed. Resolved when the response is finished.
-     */
-    readonly toolCalls: Promise<ToToolCall<TOOLS>[]>;
-    /**
-    The tool results that have been generated. Resolved when the all tool executions are finished.
-     */
-    readonly toolResults: Promise<ToToolResult<TOOLS>[]>;
-    /**
-    Optional raw response data.
-     */
-    readonly rawResponse?: {
-        /**
-        Response headers.
-         */
-        headers?: Record<string, string>;
-    };
+    readonly warnings: StreamTextResult<TOOLS>['warnings'];
+    readonly usage: StreamTextResult<TOOLS>['usage'];
+    readonly finishReason: StreamTextResult<TOOLS>['finishReason'];
+    readonly text: StreamTextResult<TOOLS>['text'];
+    readonly toolCalls: StreamTextResult<TOOLS>['toolCalls'];
+    readonly toolResults: StreamTextResult<TOOLS>['toolResults'];
+    readonly rawResponse: StreamTextResult<TOOLS>['rawResponse'];
     constructor({ stream, warnings, rawResponse, onFinish, rootSpan, doStreamSpan, }: {
         stream: ReadableStream<TextStreamPart<TOOLS>>;
-        warnings: CallWarning[] | undefined;
-        rawResponse?: {
-            headers?: Record<string, string>;
-        };
+        warnings: StreamTextResult<TOOLS>['warnings'];
+        rawResponse: StreamTextResult<TOOLS>['rawResponse'];
         onFinish?: Parameters<typeof streamText>[0]['onFinish'];
         rootSpan: Span;
         doStreamSpan: Span;
@@ -1332,73 +1439,21 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
     However, the LLM results are expected to be small enough to not cause issues.
      */
     private teeStream;
-    /**
-    A text stream that returns only the generated text deltas. You can use it
-    as either an AsyncIterable or a ReadableStream. When an error occurs, the
-    stream will throw the error.
-     */
     get textStream(): AsyncIterableStream<string>;
-    /**
-    A stream with all events, including text deltas, tool calls, tool results, and
-    errors.
-    You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the
-    stream will throw the error.
-     */
     get fullStream(): AsyncIterableStream<TextStreamPart<TOOLS>>;
-    /**
-    Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
-    It can be used with the `useChat` and `useCompletion` hooks.
-
-    @param callbacks
-    Stream callbacks that will be called when the stream emits events.
-
-    @returns an `AIStream` object.
-     */
     toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<Uint8Array>;
-    /**
-    Writes stream data output to a Node.js response-like object.
-    It sets a `Content-Type` header to `text/plain; charset=utf-8` and
-    writes each stream data part as a separate chunk.
-
-    @param response A Node.js response-like object (ServerResponse).
-    @param init Optional headers and status code.
-     */
     pipeAIStreamToResponse(response: ServerResponse$1, init?: {
         headers?: Record<string, string>;
         status?: number;
     }): void;
-    /**
-    Writes text delta output to a Node.js response-like object.
-    It sets a `Content-Type` header to `text/plain; charset=utf-8` and
-    writes each text delta as a separate chunk.
-
-    @param response A Node.js response-like object (ServerResponse).
-    @param init Optional headers and status code.
-     */
     pipeTextStreamToResponse(response: ServerResponse$1, init?: {
         headers?: Record<string, string>;
         status?: number;
     }): void;
-    /**
-    Converts the result to a streamed response object with a stream data part stream.
-    It can be used with the `useChat` and `useCompletion` hooks.
-
-    @param options An object with an init property (ResponseInit) and a data property.
-    You can also pass in a ResponseInit directly (deprecated).
-
-    @return A response object.
-     */
     toAIStreamResponse(options?: ResponseInit | {
         init?: ResponseInit;
         data?: StreamData;
     }): Response;
-    /**
-    Creates a simple text stream response.
-    Each text delta is encoded as UTF-8 and sent as a separate chunk.
-    Non-text-delta events are ignored.
-
-    @param init Optional headers and status code.
-     */
     toTextStreamResponse(init?: ResponseInit): Response;
 }
 /**