ai 3.2.33 → 3.2.35

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -1,14 +1,34 @@
  import { DeepPartial, Attachment, JSONValue, CreateMessage, FunctionCall as FunctionCall$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
  export { AssistantMessage, AssistantStatus, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, Function, FunctionCall, FunctionCallHandler, IdGenerator, JSONValue, Message, RequestOptions, StreamPart, Tool, ToolCall, ToolCallHandler, ToolChoice, ToolInvocation, UseAssistantOptions, formatStreamPart, parseComplexResponse, parseStreamPart, readDataStream } from '@ai-sdk/ui-utils';
+ import { AttributeValue, Span } from '@opentelemetry/api';
  import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1StreamPart } from '@ai-sdk/provider';
  export { APICallError, EmptyResponseBodyError, InvalidArgumentError, InvalidDataContentError, InvalidPromptError, InvalidResponseDataError, InvalidToolArgumentsError, JSONParseError, LoadAPIKeyError, NoObjectGeneratedError, NoSuchToolError, RetryError, ToolCallParseError, TypeValidationError, UnsupportedFunctionalityError, UnsupportedJSONSchemaError } from '@ai-sdk/provider';
  import { z } from 'zod';
+ import { Validator } from '@ai-sdk/provider-utils';
+ import { JSONSchema7 } from 'json-schema';
  import { ServerResponse } from 'http';
- import { AttributeValue, Span } from '@opentelemetry/api';
  import { ServerResponse as ServerResponse$1 } from 'node:http';
  import { AssistantStream } from 'openai/lib/AssistantStream';
  import { Run } from 'openai/resources/beta/threads/runs/runs';
 
+ /**
+ * Telemetry configuration.
+ */
+ type TelemetrySettings = {
+ /**
+ * Enable or disable telemetry. Disabled by default while experimental.
+ */
+ isEnabled?: boolean;
+ /**
+ * Identifier for this function. Used to group telemetry data by function.
+ */
+ functionId?: string;
+ /**
+ * Additional information to include in the telemetry data.
+ */
+ metadata?: Record<string, AttributeValue>;
+ };
+
  /**
  Represents the number of tokens used in a prompt and completion.
  */
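
The `TelemetrySettings` type moved to the top of the file and is threaded through `embed`, `generateObject`, and `generateText` below as the `experimental_telemetry` option. A minimal sketch of the shape it accepts (the identifiers are illustrative; the type itself is not in this build's export list, so the object is typically written inline at call sites):

```ts
// Sketch of a telemetry options object for `experimental_telemetry`.
// All fields are optional; telemetry stays disabled unless isEnabled is true.
const telemetry = {
  isEnabled: true,               // opt in while the feature is experimental
  functionId: 'my-function',     // groups telemetry data by function (illustrative id)
  metadata: { userId: 'u-123' }, // extra OpenTelemetry attribute values (illustrative)
};
```
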
@@ -89,6 +109,34 @@ type CoreToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | '
  type TokenUsage = CompletionTokenUsage$1;
  type CompletionTokenUsage = CompletionTokenUsage$1;
 
+ /**
+ The result of a `embed` call.
+ It contains the embedding, the value, and additional information.
+ */
+ interface EmbedResult<VALUE> {
+ /**
+ The value that was embedded.
+ */
+ readonly value: VALUE;
+ /**
+ The embedding of the value.
+ */
+ readonly embedding: Embedding;
+ /**
+ The embedding token usage.
+ */
+ readonly usage: EmbeddingTokenUsage;
+ /**
+ Optional raw response data.
+ */
+ readonly rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ }
+
  /**
  Embed a value using an embedding model. The type of the value is defined by the embedding model.
 
@@ -101,7 +149,7 @@ Embed a value using the embedding model. The type of the value is defined by the
 
  @returns A result object that contains the embedding, the value, and additional information.
  */
- declare function embed<VALUE>({ model, value, maxRetries, abortSignal, headers, }: {
+ declare function embed<VALUE>({ model, value, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, }: {
  /**
  The embedding model to use.
  */
@@ -125,41 +173,29 @@ declare function embed<VALUE>({ model, value, maxRetries, abortSignal, headers,
  Only applicable for HTTP-based providers.
  */
  headers?: Record<string, string>;
+ /**
+ * Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
  }): Promise<EmbedResult<VALUE>>;
+
  /**
- The result of a `embed` call.
- It contains the embedding, the value, and additional information.
+ The result of a `embedMany` call.
+ It contains the embeddings, the values, and additional information.
  */
- declare class EmbedResult<VALUE> {
+ interface EmbedManyResult<VALUE> {
  /**
- The value that was embedded.
- */
- readonly value: VALUE;
+ The values that were embedded.
+ */
+ readonly values: Array<VALUE>;
  /**
- The embedding of the value.
- */
- readonly embedding: Embedding;
+ The embeddings. They are in the same order as the values.
+ */
+ readonly embeddings: Array<Embedding>;
  /**
- The embedding token usage.
- */
+ The embedding token usage.
+ */
  readonly usage: EmbeddingTokenUsage;
- /**
- Optional raw response data.
- */
- readonly rawResponse?: {
- /**
- Response headers.
- */
- headers?: Record<string, string>;
- };
- constructor(options: {
- value: VALUE;
- embedding: Embedding;
- usage: EmbeddingTokenUsage;
- rawResponse?: {
- headers?: Record<string, string>;
- };
- });
  }
 
  /**
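
With `EmbedResult` now an interface and `embed` accepting `experimental_telemetry`, a call looks like the following sketch. The `@ai-sdk/openai` provider and the model id are assumptions for illustration:

```ts
import { embed } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const { embedding, usage, rawResponse } = await embed({
  model: openai.embedding('text-embedding-3-small'), // illustrative model id
  value: 'sunny day at the beach',
  experimental_telemetry: { isEnabled: true, functionId: 'embed-demo' },
});
console.log(embedding.length, usage, rawResponse?.headers);
```
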
@@ -203,29 +239,6 @@ declare function embedMany<VALUE>({ model, values, maxRetries, abortSignal, head
  */
  headers?: Record<string, string>;
  }): Promise<EmbedManyResult<VALUE>>;
- /**
- The result of a `embedMany` call.
- It contains the embeddings, the values, and additional information.
- */
- declare class EmbedManyResult<VALUE> {
- /**
- The values that were embedded.
- */
- readonly values: Array<VALUE>;
- /**
- The embeddings. They are in the same order as the values.
- */
- readonly embeddings: Array<Embedding>;
- /**
- The embedding token usage.
- */
- readonly usage: EmbeddingTokenUsage;
- constructor(options: {
- values: Array<VALUE>;
- embeddings: Array<Embedding>;
- usage: EmbeddingTokenUsage;
- });
- }
 
  type CallSettings = {
  /**
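
`embedMany` keeps its signature; only the result type changed from a class to the `EmbedManyResult` interface above, so destructuring works as before (same assumed provider):

```ts
import { embedMany } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const { embeddings, values, usage } = await embedMany({
  model: openai.embedding('text-embedding-3-small'), // illustrative model id
  values: ['sunny day at the beach', 'rainy afternoon in the city'],
});
// embeddings[i] is the embedding of values[i]
console.log(embeddings.length === values.length, usage);
```
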
@@ -251,6 +264,13 @@ type CallSettings = {
  */
  topP?: number;
  /**
+ Only sample from the top K options for each subsequent token.
+
+ Used to remove "long tail" low probability responses.
+ Recommended for advanced use cases only. You usually only need to use temperature.
+ */
+ topK?: number;
+ /**
  Presence penalty setting. It affects the likelihood of the model to
  repeat information that is already in the prompt.
 
@@ -271,6 +291,12 @@ type CallSettings = {
  */
  frequencyPenalty?: number;
  /**
+ Stop sequences.
+ If set, the model will stop generating text when one of the stop sequences is generated.
+ Providers may have limits on the number of stop sequences.
+ */
+ stopSequences?: string[];
+ /**
  The seed (integer) to use for random sampling. If set and supported
  by the model, calls will generate deterministic results.
  */
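
Since both new fields live on `CallSettings`, they can be passed to `generateText`/`streamText` directly (the `generateObject`/`streamObject` signatures below exclude `stopSequences` via `Omit`). A hedged sketch:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const { text } = await generateText({
  model: openai('gpt-4o'),   // illustrative model id
  prompt: 'List three beach activities:',
  topK: 40,                  // sample only from the 40 most likely tokens
  stopSequences: ['\n\n'],   // stop at the first blank line
});
console.log(text);
```
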
@@ -470,6 +496,81 @@ type Prompt = {
  messages?: Array<CoreMessage>;
  };
 
+ /**
+ * Used to mark schemas so we can support both Zod and custom schemas.
+ */
+ declare const schemaSymbol: unique symbol;
+ type Schema<OBJECT = unknown> = Validator<OBJECT> & {
+ /**
+ * Used to mark schemas so we can support both Zod and custom schemas.
+ */
+ [schemaSymbol]: true;
+ /**
+ * Schema type for inference.
+ */
+ _type: OBJECT;
+ /**
+ * The JSON Schema for the schema. It is passed to the providers.
+ */
+ readonly jsonSchema: JSONSchema7;
+ };
+ /**
+ * Create a schema using a JSON Schema.
+ *
+ * @param jsonSchema The JSON Schema for the schema.
+ * @param options.validate Optional. A validation function for the schema.
+ */
+ declare function jsonSchema<OBJECT = unknown>(jsonSchema: JSONSchema7, { validate, }?: {
+ validate?: (value: unknown) => {
+ success: true;
+ value: OBJECT;
+ } | {
+ success: false;
+ error: Error;
+ };
+ }): Schema<OBJECT>;
+
+ /**
+ The result of a `generateObject` call.
+ */
+ interface GenerateObjectResult<T> {
+ /**
+ The generated object (typed according to the schema).
+ */
+ readonly object: T;
+ /**
+ The reason why the generation finished.
+ */
+ readonly finishReason: FinishReason;
+ /**
+ The token usage of the generated text.
+ */
+ readonly usage: CompletionTokenUsage$1;
+ /**
+ Warnings from the model provider (e.g. unsupported settings)
+ */
+ readonly warnings: CallWarning[] | undefined;
+ /**
+ Optional raw response data.
+ */
+ readonly rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ /**
+ Logprobs for the completion.
+ `undefined` if the mode does not support logprobs or if was not enabled
+ */
+ readonly logprobs: LogProbs | undefined;
+ /**
+ Converts the object to a JSON response.
+ The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
+ */
+ toJsonResponse(init?: ResponseInit): Response;
+ }
+
  /**
  Generate a structured, typed object for a given prompt and schema using a language model.
 
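
The new `jsonSchema` helper (added to the export list at the end of this file) wraps a raw JSON Schema so it can stand in for a Zod schema in `generateObject`, `streamObject`, and tool parameters. A sketch with the optional custom validator:

```ts
import { jsonSchema } from 'ai';

type Recipe = { name: string; steps: string[] };

const recipeSchema = jsonSchema<Recipe>(
  {
    type: 'object',
    properties: {
      name: { type: 'string' },
      steps: { type: 'array', items: { type: 'string' } },
    },
    required: ['name', 'steps'],
  },
  {
    // Optional validation function for values returned by the model.
    validate: value =>
      typeof value === 'object' && value !== null && 'name' in value && 'steps' in value
        ? { success: true, value: value as Recipe }
        : { success: false, error: new Error('value does not match Recipe') },
  },
);
```
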
@@ -491,6 +592,9 @@ It is recommended to set either `temperature` or `topP`, but not both.
  @param topP - Nucleus sampling.
  The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
+ @param topK - Only sample from the top K options for each subsequent token.
+ Used to remove "long tail" low probability responses.
+ Recommended for advanced use cases only. You usually only need to use temperature.
  @param presencePenalty - Presence penalty setting.
  It affects the likelihood of the model to repeat information that is already in the prompt.
  The value is passed through to the provider. The range depends on the provider and model.
@@ -507,7 +611,7 @@ If set and supported by the model, calls will generate deterministic results.
  @returns
  A result object that contains the generated object, the finish reason, the token usage, and additional information.
  */
- declare function generateObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, headers, ...settings }: CallSettings & Prompt & {
+ declare function generateObject<T>({ model, schema: inputSchema, mode, system, prompt, messages, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, ...settings }: Omit<CallSettings, 'stopSequences'> & Prompt & {
  /**
  The language model to use.
  */
@@ -515,71 +619,41 @@ The language model to use.
  /**
  The schema of the object that the model should generate.
  */
- schema: z.Schema<T>;
+ schema: z.Schema<T> | Schema<T>;
  /**
  The mode to use for object generation.
 
- The Zod schema is converted in a JSON schema and used in one of the following ways
+ The schema is converted in a JSON schema and used in one of the following ways
 
  - 'auto': The provider will choose the best mode for the model.
  - 'tool': A tool with the JSON schema as parameters is is provided and the provider is instructed to use it.
- - 'json': The JSON schema and a instruction is injected into the prompt. If the provider supports JSON mode, it is enabled.
- - 'grammar': The provider is instructed to converted the JSON schema into a provider specific grammar and use it to select the output tokens.
+ - 'json': The JSON schema and an instruction is injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
 
  Please note that most providers do not support all modes.
 
  Default and recommended: 'auto' (best mode for the model).
  */
- mode?: 'auto' | 'json' | 'tool' | 'grammar';
- }): Promise<GenerateObjectResult<T>>;
- /**
- The result of a `generateObject` call.
- */
- declare class GenerateObjectResult<T> {
- /**
- The generated object (typed according to the schema).
- */
- readonly object: T;
- /**
- The reason why the generation finished.
- */
- readonly finishReason: FinishReason;
- /**
- The token usage of the generated text.
- */
- readonly usage: CompletionTokenUsage$1;
- /**
- Warnings from the model provider (e.g. unsupported settings)
- */
- readonly warnings: CallWarning[] | undefined;
- /**
- Optional raw response data.
- */
- rawResponse?: {
- /**
- Response headers.
- */
- headers?: Record<string, string>;
- };
+ mode?: 'auto' | 'json' | 'tool';
  /**
- Logprobs for the completion.
- `undefined` if the mode does not support logprobs or if was not enabled
+ * Optional telemetry configuration (experimental).
  */
- readonly logprobs: LogProbs | undefined;
+ experimental_telemetry?: TelemetrySettings;
+ }): Promise<DefaultGenerateObjectResult<T>>;
+ declare class DefaultGenerateObjectResult<T> implements GenerateObjectResult<T> {
+ readonly object: GenerateObjectResult<T>['object'];
+ readonly finishReason: GenerateObjectResult<T>['finishReason'];
+ readonly usage: GenerateObjectResult<T>['usage'];
+ readonly warnings: GenerateObjectResult<T>['warnings'];
+ readonly rawResponse: GenerateObjectResult<T>['rawResponse'];
+ readonly logprobs: GenerateObjectResult<T>['logprobs'];
  constructor(options: {
- object: T;
- finishReason: FinishReason;
- usage: CompletionTokenUsage$1;
- warnings: CallWarning[] | undefined;
- rawResponse?: {
- headers?: Record<string, string>;
- };
- logprobs: LogProbs | undefined;
+ object: GenerateObjectResult<T>['object'];
+ finishReason: GenerateObjectResult<T>['finishReason'];
+ usage: GenerateObjectResult<T>['usage'];
+ warnings: GenerateObjectResult<T>['warnings'];
+ rawResponse: GenerateObjectResult<T>['rawResponse'];
+ logprobs: GenerateObjectResult<T>['logprobs'];
  });
- /**
- Converts the object to a JSON response.
- The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
- */
  toJsonResponse(init?: ResponseInit): Response;
  }
  /**
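
Taken together, `generateObject` now accepts either schema flavor, drops the `'grammar'` mode, and takes telemetry settings. A sketch (Zod shown; the `recipeSchema` from the `jsonSchema` sketch above would work in its place, provider assumed):

```ts
import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { z } from 'zod';

const result = await generateObject({
  model: openai('gpt-4o'), // illustrative model id
  schema: z.object({ name: z.string(), steps: z.array(z.string()) }),
  mode: 'auto', // 'grammar' is no longer accepted; 'json' covers grammar-capable providers
  prompt: 'Generate a lasagna recipe.',
  experimental_telemetry: { isEnabled: true, functionId: 'recipe-object' },
});

console.log(result.object.steps, result.finishReason, result.usage);
// In a fetch-style route handler you could: return result.toJsonResponse();
```
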
@@ -589,6 +663,91 @@ declare const experimental_generateObject: typeof generateObject;
 
  type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
 
+ /**
+ The result of a `streamObject` call that contains the partial object stream and additional information.
+ */
+ interface StreamObjectResult<T> {
+ /**
+ Warnings from the model provider (e.g. unsupported settings)
+ */
+ readonly warnings: CallWarning[] | undefined;
+ /**
+ The token usage of the generated response. Resolved when the response is finished.
+ */
+ readonly usage: Promise<CompletionTokenUsage$1>;
+ /**
+ Optional raw response data.
+ */
+ readonly rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ /**
+ The generated object (typed according to the schema). Resolved when the response is finished.
+ */
+ readonly object: Promise<T>;
+ /**
+ Stream of partial objects. It gets more complete as the stream progresses.
+
+ Note that the partial object is not validated.
+ If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
+ */
+ readonly partialObjectStream: AsyncIterableStream<DeepPartial<T>>;
+ /**
+ Text stream of the JSON representation of the generated object. It contains text chunks.
+ When the stream is finished, the object is valid JSON that can be parsed.
+ */
+ readonly textStream: AsyncIterableStream<string>;
+ /**
+ Stream of different types of events, including partial objects, errors, and finish events.
+ Only errors that stop the stream, such as network errors, are thrown.
+ */
+ readonly fullStream: AsyncIterableStream<ObjectStreamPart<T>>;
+ /**
+ Writes text delta output to a Node.js response-like object.
+ It sets a `Content-Type` header to `text/plain; charset=utf-8` and
+ writes each text delta as a separate chunk.
+
+ @param response A Node.js response-like object (ServerResponse).
+ @param init Optional headers and status code.
+ */
+ pipeTextStreamToResponse(response: ServerResponse, init?: {
+ headers?: Record<string, string>;
+ status?: number;
+ }): void;
+ /**
+ Creates a simple text stream response.
+ The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
+ Each text delta is encoded as UTF-8 and sent as a separate chunk.
+ Non-text-delta events are ignored.
+
+ @param init Optional headers and status code.
+ */
+ toTextStreamResponse(init?: ResponseInit): Response;
+ }
+ type ObjectStreamInputPart = {
+ type: 'error';
+ error: unknown;
+ } | {
+ type: 'finish';
+ finishReason: FinishReason;
+ logprobs?: LogProbs;
+ usage: {
+ promptTokens: number;
+ completionTokens: number;
+ totalTokens: number;
+ };
+ };
+ type ObjectStreamPart<T> = ObjectStreamInputPart | {
+ type: 'object';
+ object: DeepPartial<T>;
+ } | {
+ type: 'text-delta';
+ textDelta: string;
+ };
+
  /**
  Generate a structured, typed object for a given prompt and schema using a language model.
 
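
Consuming the interface looks the same as it did for the old class; `partialObjectStream` yields unvalidated `DeepPartial` snapshots and `object` resolves at the end. A sketch (provider assumed):

```ts
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { z } from 'zod';

const result = await streamObject({
  model: openai('gpt-4o'), // illustrative model id
  schema: z.object({ headline: z.string(), body: z.string() }),
  prompt: 'Write a short news item about TypeScript.',
});

for await (const partial of result.partialObjectStream) {
  console.log(partial); // DeepPartial<{ headline; body }>, not validated
}
console.log(await result.object); // the final, validated object
```
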
@@ -610,6 +769,9 @@ It is recommended to set either `temperature` or `topP`, but not both.
  @param topP - Nucleus sampling.
  The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
+ @param topK - Only sample from the top K options for each subsequent token.
+ Used to remove "long tail" low probability responses.
+ Recommended for advanced use cases only. You usually only need to use temperature.
  @param presencePenalty - Presence penalty setting.
  It affects the likelihood of the model to repeat information that is already in the prompt.
  The value is passed through to the provider. The range depends on the provider and model.
@@ -626,7 +788,7 @@ If set and supported by the model, calls will generate deterministic results.
  @return
  A result object for accessing the partial object stream and additional information.
  */
- declare function streamObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, headers, onFinish, ...settings }: CallSettings & Prompt & {
+ declare function streamObject<T>({ model, schema: inputSchema, mode, system, prompt, messages, maxRetries, abortSignal, headers, onFinish, ...settings }: Omit<CallSettings, 'stopSequences'> & Prompt & {
  /**
  The language model to use.
  */
@@ -634,22 +796,21 @@ The language model to use.
  /**
  The schema of the object that the model should generate.
  */
- schema: z.Schema<T>;
+ schema: z.Schema<T> | Schema<T>;
  /**
  The mode to use for object generation.
 
- The Zod schema is converted in a JSON schema and used in one of the following ways
+ The schema is converted in a JSON schema and used in one of the following ways
 
  - 'auto': The provider will choose the best mode for the model.
  - 'tool': A tool with the JSON schema as parameters is is provided and the provider is instructed to use it.
- - 'json': The JSON schema and a instruction is injected into the prompt. If the provider supports JSON mode, it is enabled.
- - 'grammar': The provider is instructed to converted the JSON schema into a provider specific grammar and use it to select the output tokens.
+ - 'json': The JSON schema and an instruction is injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
 
  Please note that most providers do not support all modes.
 
  Default and recommended: 'auto' (best mode for the model).
  */
- mode?: 'auto' | 'json' | 'tool' | 'grammar';
+ mode?: 'auto' | 'json' | 'tool';
  /**
  Callback that is called when the LLM response and the final object validation are finished.
  */
@@ -680,99 +841,28 @@ Callback that is called when the LLM response and the final object validation ar
  */
  warnings?: CallWarning[];
  }) => Promise<void> | void;
- }): Promise<StreamObjectResult<T>>;
- type ObjectStreamInputPart = {
- type: 'error';
- error: unknown;
- } | {
- type: 'finish';
- finishReason: FinishReason;
- logprobs?: LogProbs;
- usage: {
- promptTokens: number;
- completionTokens: number;
- totalTokens: number;
- };
- };
- type ObjectStreamPart<T> = ObjectStreamInputPart | {
- type: 'object';
- object: DeepPartial<T>;
- } | {
- type: 'text-delta';
- textDelta: string;
- };
- /**
- The result of a `streamObject` call that contains the partial object stream and additional information.
- */
- declare class StreamObjectResult<T> {
+ }): Promise<DefaultStreamObjectResult<T>>;
+ declare class DefaultStreamObjectResult<T> implements StreamObjectResult<T> {
  private readonly originalStream;
  private readonly objectPromise;
- /**
- Warnings from the model provider (e.g. unsupported settings)
- */
- readonly warnings: CallWarning[] | undefined;
- /**
- The token usage of the generated response. Resolved when the response is finished.
- */
- readonly usage: Promise<CompletionTokenUsage$1>;
- /**
- Optional raw response data.
- */
- rawResponse?: {
- /**
- Response headers.
- */
- headers?: Record<string, string>;
- };
+ readonly warnings: StreamObjectResult<T>['warnings'];
+ readonly usage: StreamObjectResult<T>['usage'];
+ readonly rawResponse: StreamObjectResult<T>['rawResponse'];
  constructor({ stream, warnings, rawResponse, schema, onFinish, }: {
  stream: ReadableStream<string | Omit<LanguageModelV1StreamPart, 'text-delta'>>;
- warnings: CallWarning[] | undefined;
- rawResponse?: {
- headers?: Record<string, string>;
- };
- schema: z.Schema<T>;
+ warnings: StreamObjectResult<T>['warnings'];
+ rawResponse?: StreamObjectResult<T>['rawResponse'];
+ schema: z.Schema<T> | Schema<T>;
  onFinish: Parameters<typeof streamObject<T>>[0]['onFinish'];
  });
- /**
- The generated object (typed according to the schema). Resolved when the response is finished.
- */
  get object(): Promise<T>;
- /**
- Stream of partial objects. It gets more complete as the stream progresses.
-
- Note that the partial object is not validated.
- If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
- */
  get partialObjectStream(): AsyncIterableStream<DeepPartial<T>>;
- /**
- Text stream of the JSON representation of the generated object. It contains text chunks.
- When the stream is finished, the object is valid JSON that can be parsed.
- */
  get textStream(): AsyncIterableStream<string>;
- /**
- Stream of different types of events, including partial objects, errors, and finish events.
- */
  get fullStream(): AsyncIterableStream<ObjectStreamPart<T>>;
- /**
- Writes text delta output to a Node.js response-like object.
- It sets a `Content-Type` header to `text/plain; charset=utf-8` and
- writes each text delta as a separate chunk.
-
- @param response A Node.js response-like object (ServerResponse).
- @param init Optional headers and status code.
- */
  pipeTextStreamToResponse(response: ServerResponse, init?: {
  headers?: Record<string, string>;
  status?: number;
  }): void;
- /**
- Creates a simple text stream response.
- The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
- Each text delta is encoded as UTF-8 and sent as a separate chunk.
- Non-text-delta events are ignored.
-
- @param init Optional headers and status code.
- */
  toTextStreamResponse(init?: ResponseInit): Response;
  }
  /**
@@ -780,13 +870,15 @@ declare class StreamObjectResult<T> {
  */
  declare const experimental_streamObject: typeof streamObject;
 
+ type Parameters$1 = z.ZodTypeAny | Schema<any>;
+ type inferParameters<PARAMETERS extends Parameters$1> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
  /**
  A tool contains the description and the schema of the input that the tool expects.
  This enables the language model to generate the input.
 
  The tool can also contain an optional execute function for the actual execution function of the tool.
  */
- interface CoreTool<PARAMETERS extends z.ZodTypeAny = any, RESULT = any> {
+ interface CoreTool<PARAMETERS extends Parameters$1 = any, RESULT = any> {
  /**
  An optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
  */
@@ -801,17 +893,17 @@ interface CoreTool<PARAMETERS extends z.ZodTypeAny = any, RESULT = any> {
  An async function that is called with the arguments from the tool call and produces a result.
  If not provided, the tool will not be executed automatically.
  */
- execute?: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
+ execute?: (args: inferParameters<PARAMETERS>) => PromiseLike<RESULT>;
  }
  /**
  Helper function for inferring the execute args of a tool.
  */
- declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
- execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
+ declare function tool<PARAMETERS extends Parameters$1, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
+ execute: (args: inferParameters<PARAMETERS>) => PromiseLike<RESULT>;
  }): CoreTool<PARAMETERS, RESULT> & {
- execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
+ execute: (args: inferParameters<PARAMETERS>) => PromiseLike<RESULT>;
  };
- declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
+ declare function tool<PARAMETERS extends Parameters$1, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
  execute?: undefined;
  }): CoreTool<PARAMETERS, RESULT> & {
  execute: undefined;
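
The practical effect of `Parameters$1`/`inferParameters` is that `tool` now accepts JSON Schema parameters as well as Zod, with `execute` arguments inferred either way. A sketch showing both (result values are illustrative):

```ts
import { jsonSchema, tool } from 'ai';
import { z } from 'zod';

// Zod parameters: args typed via z.infer, as before.
export const weatherTool = tool({
  description: 'Get the weather for a city',
  parameters: z.object({ city: z.string() }),
  execute: async ({ city }) => ({ city, tempC: 21 }), // illustrative result
});

// JSON Schema parameters: args typed via the Schema's _type.
export const timeTool = tool({
  description: 'Get the current time in an IANA timezone',
  parameters: jsonSchema<{ timezone: string }>({
    type: 'object',
    properties: { timezone: { type: 'string' } },
    required: ['timezone'],
  }),
  execute: async ({ timezone }) =>
    new Date().toLocaleString('en-US', { timeZone: timezone }),
});
```
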
@@ -898,7 +990,7 @@ type ToToolResultObject<TOOLS extends Record<string, CoreTool>> = ValueOf<{
  type: 'tool-result';
  toolCallId: string;
  toolName: NAME & string;
- args: z.infer<TOOLS[NAME]['parameters']>;
+ args: inferParameters<TOOLS[NAME]['parameters']>;
  result: Awaited<ReturnType<Exclude<TOOLS[NAME]['execute'], undefined>>>;
  };
  }>;
@@ -931,33 +1023,112 @@ declare class InvalidMessageRoleError extends Error {
  };
  }
 
+ type ToToolCall<TOOLS extends Record<string, CoreTool>> = ValueOf<{
+ [NAME in keyof TOOLS]: {
+ type: 'tool-call';
+ toolCallId: string;
+ toolName: NAME & string;
+ args: inferParameters<TOOLS[NAME]['parameters']>;
+ };
+ }>;
+ type ToToolCallArray<TOOLS extends Record<string, CoreTool>> = Array<ToToolCall<TOOLS>>;
+
  /**
- * Telemetry configuration.
+ The result of a `generateText` call.
+ It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
  */
- type TelemetrySettings = {
+ interface GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
  /**
- * Enable or disable telemetry. Disabled by default while experimental.
+ The generated text.
+ */
+ readonly text: string;
+ /**
+ The tool calls that were made during the generation.
  */
- isEnabled?: boolean;
+ readonly toolCalls: ToToolCallArray<TOOLS>;
  /**
- * Identifier for this function. Used to group telemetry data by function.
+ The results of the tool calls.
  */
- functionId?: string;
+ readonly toolResults: ToToolResultArray<TOOLS>;
  /**
- * Additional information to include in the telemetry data.
+ The reason why the generation finished.
  */
- metadata?: Record<string, AttributeValue>;
- };
-
- type ToToolCall<TOOLS extends Record<string, CoreTool>> = ValueOf<{
- [NAME in keyof TOOLS]: {
- type: 'tool-call';
- toolCallId: string;
- toolName: NAME & string;
- args: z.infer<TOOLS[NAME]['parameters']>;
+ readonly finishReason: FinishReason;
+ /**
+ The token usage of the generated text.
+ */
+ readonly usage: CompletionTokenUsage$1;
+ /**
+ Warnings from the model provider (e.g. unsupported settings)
+ */
+ readonly warnings: CallWarning[] | undefined;
+ /**
+ The response messages that were generated during the call. It consists of an assistant message,
+ potentially containing tool calls.
+ When there are tool results, there is an additional tool message with the tool results that are available.
+ If there are tools that do not have execute functions, they are not included in the tool results and
+ need to be added separately.
+ */
+ readonly responseMessages: Array<CoreAssistantMessage | CoreToolMessage>;
+ /**
+ Response information for every roundtrip.
+ You can use this to get information about intermediate steps, such as the tool calls or the response headers.
+ */
+ readonly roundtrips: Array<{
+ /**
+ The generated text.
+ */
+ readonly text: string;
+ /**
+ The tool calls that were made during the generation.
+ */
+ readonly toolCalls: ToToolCallArray<TOOLS>;
+ /**
+ The results of the tool calls.
+ */
+ readonly toolResults: ToToolResultArray<TOOLS>;
+ /**
+ The reason why the generation finished.
+ */
+ readonly finishReason: FinishReason;
+ /**
+ The token usage of the generated text.
+ */
+ readonly usage: CompletionTokenUsage$1;
+ /**
+ Warnings from the model provider (e.g. unsupported settings)
+ */
+ readonly warnings: CallWarning[] | undefined;
+ /**
+ Logprobs for the completion.
+ `undefined` if the mode does not support logprobs or if was not enabled.
+ */
+ readonly logprobs: LogProbs | undefined;
+ /**
+ Optional raw response data.
+ */
+ readonly rawResponse?: {
+ /**
+ Response headers.
+ */
+ readonly headers?: Record<string, string>;
+ };
+ }>;
+ /**
+ Optional raw response data.
+ */
+ readonly rawResponse?: {
+ /**
+ Response headers.
+ */
+ readonly headers?: Record<string, string>;
  };
- }>;
- type ToToolCallArray<TOOLS extends Record<string, CoreTool>> = Array<ToToolCall<TOOLS>>;
+ /**
+ Logprobs for the completion.
+ `undefined` if the mode does not support logprobs or if was not enabled.
+ */
+ readonly logprobs: LogProbs | undefined;
+ }
 
  /**
  Generate a text and call tools for a given prompt using a language model.
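
`GenerateTextResult` reads the same as the removed class; `roundtrips` exposes per-step tool calls and usage. A sketch wiring in the `weatherTool` from the tool sketch above (provider, import path, and model id are assumptions):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { weatherTool } from './tools';   // hypothetical module holding the tool above

const result = await generateText({
  model: openai('gpt-4o'), // illustrative model id
  tools: { weather: weatherTool },
  maxToolRoundtrips: 2,    // allow tool execution plus a final answer
  prompt: 'What is the weather in Berlin?',
});

console.log(result.text, result.finishReason);
for (const step of result.roundtrips) {
  console.log(step.toolCalls, step.toolResults, step.usage);
}
```
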
@@ -980,12 +1151,17 @@ It is recommended to set either `temperature` or `topP`, but not both.
  @param topP - Nucleus sampling.
  The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
+ @param topK - Only sample from the top K options for each subsequent token.
+ Used to remove "long tail" low probability responses.
+ Recommended for advanced use cases only. You usually only need to use temperature.
  @param presencePenalty - Presence penalty setting.
  It affects the likelihood of the model to repeat information that is already in the prompt.
  The value is passed through to the provider. The range depends on the provider and model.
  @param frequencyPenalty - Frequency penalty setting.
  It affects the likelihood of the model to repeatedly use the same words or phrases.
  The value is passed through to the provider. The range depends on the provider and model.
+ @param stopSequences - Stop sequences.
+ If set, the model will stop generating text when one of the stop sequences is generated.
  @param seed - The seed (integer) to use for random sampling.
  If set and supported by the model, calls will generate deterministic results.
 
@@ -1034,117 +1210,145 @@ By default, it's set to 0, which will disable the feature.
  experimental_telemetry?: TelemetrySettings;
  }): Promise<GenerateTextResult<TOOLS>>;
  /**
- The result of a `generateText` call.
- It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
+ * @deprecated Use `generateText` instead.
  */
- declare class GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
- /**
- The generated text.
- */
- readonly text: string;
+ declare const experimental_generateText: typeof generateText;
+
+ /**
+ A result object for accessing different stream types and additional information.
+ */
+ interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
  /**
- The tool calls that were made during the generation.
- */
- readonly toolCalls: ToToolCallArray<TOOLS>;
+ Warnings from the model provider (e.g. unsupported settings).
+ */
+ readonly warnings: CallWarning[] | undefined;
  /**
- The results of the tool calls.
- */
- readonly toolResults: ToToolResultArray<TOOLS>;
+ The token usage of the generated response. Resolved when the response is finished.
+ */
+ readonly usage: Promise<CompletionTokenUsage$1>;
  /**
- The reason why the generation finished.
- */
- readonly finishReason: FinishReason;
+ The reason why the generation finished. Resolved when the response is finished.
+ */
+ readonly finishReason: Promise<FinishReason>;
  /**
- The token usage of the generated text.
- */
- readonly usage: CompletionTokenUsage$1;
+ The full text that has been generated. Resolved when the response is finished.
+ */
+ readonly text: Promise<string>;
  /**
- Warnings from the model provider (e.g. unsupported settings)
- */
- readonly warnings: CallWarning[] | undefined;
+ The tool calls that have been executed. Resolved when the response is finished.
+ */
+ readonly toolCalls: Promise<ToToolCall<TOOLS>[]>;
  /**
- The response messages that were generated during the call. It consists of an assistant message,
- potentially containing tool calls.
- When there are tool results, there is an additional tool message with the tool results that are available.
- If there are tools that do not have execute functions, they are not included in the tool results and
- need to be added separately.
- */
- readonly responseMessages: Array<CoreAssistantMessage | CoreToolMessage>;
+ The tool results that have been generated. Resolved when the all tool executions are finished.
+ */
+ readonly toolResults: Promise<ToToolResult<TOOLS>[]>;
  /**
- Response information for every roundtrip.
- You can use this to get information about intermediate steps, such as the tool calls or the response headers.
- */
- readonly roundtrips: Array<{
- /**
- The generated text.
+ Optional raw response data.
  */
- readonly text: string;
- /**
- The tool calls that were made during the generation.
- */
- readonly toolCalls: ToToolCallArray<TOOLS>;
- /**
- The results of the tool calls.
- */
- readonly toolResults: ToToolResultArray<TOOLS>;
+ readonly rawResponse?: {
  /**
- The reason why the generation finished.
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ /**
+ A text stream that returns only the generated text deltas. You can use it
+ as either an AsyncIterable or a ReadableStream. When an error occurs, the
+ stream will throw the error.
  */
- readonly finishReason: FinishReason;
- /**
- The token usage of the generated text.
- */
- readonly usage: CompletionTokenUsage$1;
- /**
- Warnings from the model provider (e.g. unsupported settings
+ readonly textStream: AsyncIterableStream<string>;
+ /**
+ A stream with all events, including text deltas, tool calls, tool results, and
+ errors.
+ You can use it as either an AsyncIterable or a ReadableStream.
+ Only errors that stop the stream, such as network errors, are thrown.
  */
- readonly warnings: CallWarning[] | undefined;
- /**
- Logprobs for the completion.
- `undefined` if the mode does not support logprobs or if was not enabled.
+ readonly fullStream: AsyncIterableStream<TextStreamPart<TOOLS>>;
+ /**
+ Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
+ It can be used with the `useChat` and `useCompletion` hooks.
+
+ @param callbacks
+ Stream callbacks that will be called when the stream emits events.
+
+ @returns an `AIStream` object.
  */
- readonly logprobs: LogProbs | undefined;
- /**
- Optional raw response data.
- */
- readonly rawResponse?: {
- /**
- Response headers.
- */
- readonly headers?: Record<string, string>;
- };
- }>;
+ toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<Uint8Array>;
  /**
- Optional raw response data.
- */
- readonly rawResponse?: {
- /**
- Response headers.
+ Writes stream data output to a Node.js response-like object.
+ It sets a `Content-Type` header to `text/plain; charset=utf-8` and
+ writes each stream data part as a separate chunk.
+
+ @param response A Node.js response-like object (ServerResponse).
+ @param init Optional headers and status code.
  */
- readonly headers?: Record<string, string>;
- };
+ pipeAIStreamToResponse(response: ServerResponse$1, init?: {
+ headers?: Record<string, string>;
+ status?: number;
+ }): void;
  /**
- Logprobs for the completion.
- `undefined` if the mode does not support logprobs or if was not enabled.
- */
- readonly logprobs: LogProbs | undefined;
- constructor(options: {
- text: GenerateTextResult<TOOLS>['text'];
- toolCalls: GenerateTextResult<TOOLS>['toolCalls'];
- toolResults: GenerateTextResult<TOOLS>['toolResults'];
- finishReason: GenerateTextResult<TOOLS>['finishReason'];
- usage: GenerateTextResult<TOOLS>['usage'];
- warnings: GenerateTextResult<TOOLS>['warnings'];
- rawResponse?: GenerateTextResult<TOOLS>['rawResponse'];
- logprobs: GenerateTextResult<TOOLS>['logprobs'];
- responseMessages: GenerateTextResult<TOOLS>['responseMessages'];
- roundtrips: GenerateTextResult<TOOLS>['roundtrips'];
- });
+ Writes text delta output to a Node.js response-like object.
+ It sets a `Content-Type` header to `text/plain; charset=utf-8` and
+ writes each text delta as a separate chunk.
+
+ @param response A Node.js response-like object (ServerResponse).
+ @param init Optional headers and status code.
+ */
+ pipeTextStreamToResponse(response: ServerResponse$1, init?: {
+ headers?: Record<string, string>;
+ status?: number;
+ }): void;
+ /**
+ Converts the result to a streamed response object with a stream data part stream.
+ It can be used with the `useChat` and `useCompletion` hooks.
+
+ @param options An object with an init property (ResponseInit) and a data property.
+ You can also pass in a ResponseInit directly (deprecated).
+
+ @return A response object.
+ */
+ toAIStreamResponse(options?: ResponseInit | {
+ init?: ResponseInit;
+ data?: StreamData;
+ }): Response;
+ /**
+ Creates a simple text stream response.
+ Each text delta is encoded as UTF-8 and sent as a separate chunk.
+ Non-text-delta events are ignored.
+
+ @param init Optional headers and status code.
+ */
+ toTextStreamResponse(init?: ResponseInit): Response;
  }
- /**
- * @deprecated Use `generateText` instead.
- */
- declare const experimental_generateText: typeof generateText;
+ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
+ type: 'text-delta';
+ textDelta: string;
+ } | ({
+ type: 'tool-call';
+ } & ToToolCall<TOOLS>) | {
+ type: 'tool-call-streaming-start';
+ toolCallId: string;
+ toolName: string;
+ } | {
+ type: 'tool-call-delta';
+ toolCallId: string;
+ toolName: string;
+ argsTextDelta: string;
+ } | ({
+ type: 'tool-result';
+ } & ToToolResult<TOOLS>) | {
+ type: 'finish';
+ finishReason: FinishReason;
+ logprobs?: LogProbs;
+ usage: {
+ promptTokens: number;
+ completionTokens: number;
+ totalTokens: number;
+ };
+ } | {
+ type: 'error';
+ error: unknown;
+ };
 
  /**
  Generate a text and call tools for a given prompt using a language model.
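
On the streaming side, the promised properties (`text`, `usage`, `finishReason`, ...) resolve once the response finishes, while `textStream`/`fullStream` are consumed incrementally. A sketch (provider assumed):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const result = await streamText({
  model: openai('gpt-4o'), // illustrative model id
  prompt: 'Tell a one-paragraph story.',
});

for await (const part of result.fullStream) {
  if (part.type === 'text-delta') process.stdout.write(part.textDelta);
  if (part.type === 'finish') console.log('\n', part.finishReason, part.usage);
}
console.log(await result.text); // full text, resolved after the stream ends
```
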
@@ -1165,12 +1369,17 @@ It is recommended to set either `temperature` or `topP`, but not both.
  @param topP - Nucleus sampling.
  The value is passed through to the provider. The range depends on the provider and model.
  It is recommended to set either `temperature` or `topP`, but not both.
+ @param topK - Only sample from the top K options for each subsequent token.
+ Used to remove "long tail" low probability responses.
+ Recommended for advanced use cases only. You usually only need to use temperature.
  @param presencePenalty - Presence penalty setting.
  It affects the likelihood of the model to repeat information that is already in the prompt.
  The value is passed through to the provider. The range depends on the provider and model.
  @param frequencyPenalty - Frequency penalty setting.
  It affects the likelihood of the model to repeatedly use the same words or phrases.
  The value is passed through to the provider. The range depends on the provider and model.
+ @param stopSequences - Stop sequences.
+ If set, the model will stop generating text when one of the stop sequences is generated.
  @param seed - The seed (integer) to use for random sampling.
  If set and supported by the model, calls will generate deterministic results.
 
@@ -1244,81 +1453,21 @@ Callback that is called when the LLM response and all request tool executions
  */
  warnings?: CallWarning[];
  }) => Promise<void> | void;
- }): Promise<StreamTextResult<TOOLS>>;
- type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
- type: 'text-delta';
- textDelta: string;
- } | ({
- type: 'tool-call';
- } & ToToolCall<TOOLS>) | {
- type: 'tool-call-streaming-start';
- toolCallId: string;
- toolName: string;
- } | {
- type: 'tool-call-delta';
- toolCallId: string;
- toolName: string;
- argsTextDelta: string;
- } | ({
- type: 'tool-result';
- } & ToToolResult<TOOLS>) | {
- type: 'finish';
- finishReason: FinishReason;
- logprobs?: LogProbs;
- usage: {
- promptTokens: number;
- completionTokens: number;
- totalTokens: number;
- };
- } | {
- type: 'error';
- error: unknown;
- };
- /**
- A result object for accessing different stream types and additional information.
- */
- declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
+ }): Promise<DefaultStreamTextResult<TOOLS>>;
+ declare class DefaultStreamTextResult<TOOLS extends Record<string, CoreTool>> implements StreamTextResult<TOOLS> {
  private originalStream;
  private onFinish?;
- /**
- Warnings from the model provider (e.g. unsupported settings).
- */
- readonly warnings: CallWarning[] | undefined;
- /**
- The token usage of the generated response. Resolved when the response is finished.
- */
- readonly usage: Promise<CompletionTokenUsage$1>;
- /**
- The reason why the generation finished. Resolved when the response is finished.
- */
- readonly finishReason: Promise<FinishReason>;
- /**
- The full text that has been generated. Resolved when the response is finished.
- */
- readonly text: Promise<string>;
- /**
- The tool calls that have been executed. Resolved when the response is finished.
- */
- readonly toolCalls: Promise<ToToolCall<TOOLS>[]>;
- /**
- The tool results that have been generated. Resolved when the all tool executions are finished.
- */
- readonly toolResults: Promise<ToToolResult<TOOLS>[]>;
- /**
- Optional raw response data.
- */
- readonly rawResponse?: {
- /**
- Response headers.
- */
- headers?: Record<string, string>;
- };
+ readonly warnings: StreamTextResult<TOOLS>['warnings'];
+ readonly usage: StreamTextResult<TOOLS>['usage'];
+ readonly finishReason: StreamTextResult<TOOLS>['finishReason'];
+ readonly text: StreamTextResult<TOOLS>['text'];
+ readonly toolCalls: StreamTextResult<TOOLS>['toolCalls'];
+ readonly toolResults: StreamTextResult<TOOLS>['toolResults'];
+ readonly rawResponse: StreamTextResult<TOOLS>['rawResponse'];
  constructor({ stream, warnings, rawResponse, onFinish, rootSpan, doStreamSpan, }: {
  stream: ReadableStream<TextStreamPart<TOOLS>>;
- warnings: CallWarning[] | undefined;
- rawResponse?: {
- headers?: Record<string, string>;
- };
+ warnings: StreamTextResult<TOOLS>['warnings'];
+ rawResponse: StreamTextResult<TOOLS>['rawResponse'];
  onFinish?: Parameters<typeof streamText>[0]['onFinish'];
  rootSpan: Span;
  doStreamSpan: Span;
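
The response helpers keep their signatures on `DefaultStreamTextResult`, so existing route handlers continue to compile. A sketch of a minimal fetch-style handler (framework wiring, provider, and model id are assumptions):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

// e.g. a Next.js App Router route: app/api/chat/route.ts (assumed setup)
export async function POST(req: Request) {
  const { messages } = await req.json();
  const result = await streamText({
    model: openai('gpt-4o'), // illustrative model id
    messages,
  });
  return result.toAIStreamResponse(); // works with the useChat/useCompletion hooks
}
```
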
@@ -1332,73 +1481,21 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
  However, the LLM results are expected to be small enough to not cause issues.
  */
  private teeStream;
- /**
- A text stream that returns only the generated text deltas. You can use it
- as either an AsyncIterable or a ReadableStream. When an error occurs, the
- stream will throw the error.
- */
  get textStream(): AsyncIterableStream<string>;
- /**
- A stream with all events, including text deltas, tool calls, tool results, and
- errors.
- You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the
- stream will throw the error.
- */
  get fullStream(): AsyncIterableStream<TextStreamPart<TOOLS>>;
- /**
- Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
- It can be used with the `useChat` and `useCompletion` hooks.
-
- @param callbacks
- Stream callbacks that will be called when the stream emits events.
-
- @returns an `AIStream` object.
- */
  toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<Uint8Array>;
- /**
- Writes stream data output to a Node.js response-like object.
- It sets a `Content-Type` header to `text/plain; charset=utf-8` and
- writes each stream data part as a separate chunk.
-
- @param response A Node.js response-like object (ServerResponse).
- @param init Optional headers and status code.
- */
  pipeAIStreamToResponse(response: ServerResponse$1, init?: {
  headers?: Record<string, string>;
  status?: number;
  }): void;
- /**
- Writes text delta output to a Node.js response-like object.
- It sets a `Content-Type` header to `text/plain; charset=utf-8` and
- writes each text delta as a separate chunk.
-
- @param response A Node.js response-like object (ServerResponse).
- @param init Optional headers and status code.
- */
  pipeTextStreamToResponse(response: ServerResponse$1, init?: {
  headers?: Record<string, string>;
  status?: number;
  }): void;
- /**
- Converts the result to a streamed response object with a stream data part stream.
- It can be used with the `useChat` and `useCompletion` hooks.
-
- @param options An object with an init property (ResponseInit) and a data property.
- You can also pass in a ResponseInit directly (deprecated).
-
- @return A response object.
- */
  toAIStreamResponse(options?: ResponseInit | {
  init?: ResponseInit;
  data?: StreamData;
  }): Response;
- /**
- Creates a simple text stream response.
- Each text delta is encoded as UTF-8 and sent as a separate chunk.
- Non-text-delta events are ignored.
-
- @param init Optional headers and status code.
- */
  toTextStreamResponse(init?: ResponseInit): Response;
  }
  /**
@@ -2257,4 +2354,4 @@ declare const generateId: (size?: number | undefined) => string;
  */
  declare const nanoid: (size?: number | undefined) => string;
 
- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantResponse, CallWarning, CohereStream, CompletionTokenUsage, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, DataContent, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingTokenUsage, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, InvalidMessageRoleError, InvalidModelIdError, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, MistralStream, NoSuchModelError, NoSuchProviderError, ObjectStreamInputPart, ObjectStreamPart, OpenAIStream, OpenAIStreamCallbacks, ReplicateStream, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, convertUint8ArrayToText, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, generateId, generateObject, generateText, nanoid, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantResponse, CallWarning, CohereStream, CompletionTokenUsage, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, DataContent, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingTokenUsage, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, InvalidMessageRoleError, InvalidModelIdError, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, MistralStream, NoSuchModelError, NoSuchProviderError, ObjectStreamInputPart, ObjectStreamPart, OpenAIStream, OpenAIStreamCallbacks, ReplicateStream, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, convertUint8ArrayToText, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, generateId, generateObject, generateText, jsonSchema, nanoid, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };