ai 3.2.34 → 3.2.36

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -1,14 +1,34 @@
  import { DeepPartial, Attachment, JSONValue, CreateMessage, FunctionCall as FunctionCall$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
  export { AssistantMessage, AssistantStatus, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, Function, FunctionCall, FunctionCallHandler, IdGenerator, JSONValue, Message, RequestOptions, StreamPart, Tool, ToolCall, ToolCallHandler, ToolChoice, ToolInvocation, UseAssistantOptions, formatStreamPart, parseComplexResponse, parseStreamPart, readDataStream } from '@ai-sdk/ui-utils';
+ import { AttributeValue, Span } from '@opentelemetry/api';
  import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1StreamPart } from '@ai-sdk/provider';
  export { APICallError, EmptyResponseBodyError, InvalidArgumentError, InvalidDataContentError, InvalidPromptError, InvalidResponseDataError, InvalidToolArgumentsError, JSONParseError, LoadAPIKeyError, NoObjectGeneratedError, NoSuchToolError, RetryError, ToolCallParseError, TypeValidationError, UnsupportedFunctionalityError, UnsupportedJSONSchemaError } from '@ai-sdk/provider';
  import { z } from 'zod';
- import { AttributeValue, Span } from '@opentelemetry/api';
+ import { Validator } from '@ai-sdk/provider-utils';
+ import { JSONSchema7 } from 'json-schema';
  import { ServerResponse } from 'http';
  import { ServerResponse as ServerResponse$1 } from 'node:http';
  import { AssistantStream } from 'openai/lib/AssistantStream';
  import { Run } from 'openai/resources/beta/threads/runs/runs';
 
+ /**
+ * Telemetry configuration.
+ */
+ type TelemetrySettings = {
+ /**
+ * Enable or disable telemetry. Disabled by default while experimental.
+ */
+ isEnabled?: boolean;
+ /**
+ * Identifier for this function. Used to group telemetry data by function.
+ */
+ functionId?: string;
+ /**
+ * Additional information to include in the telemetry data.
+ */
+ metadata?: Record<string, AttributeValue>;
+ };
+
  /**
  Represents the number of tokens used in a prompt and completion.
  */
@@ -129,7 +149,7 @@ Embed a value using an embedding model. The type of the value is defined by the
 
  @returns A result object that contains the embedding, the value, and additional information.
  */
- declare function embed<VALUE>({ model, value, maxRetries, abortSignal, headers, }: {
+ declare function embed<VALUE>({ model, value, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, }: {
  /**
  The embedding model to use.
  */
@@ -153,6 +173,10 @@ declare function embed<VALUE>({ model, value, maxRetries, abortSignal, headers,
  Only applicable for HTTP-based providers.
  */
  headers?: Record<string, string>;
+ /**
+ * Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
  }): Promise<EmbedResult<VALUE>>;
 
  /**
@@ -190,7 +214,7 @@ has a limit on how many embeddings can be generated in a single call.
 
  @returns A result object that contains the embeddings, the value, and additional information.
  */
- declare function embedMany<VALUE>({ model, values, maxRetries, abortSignal, headers, }: {
+ declare function embedMany<VALUE>({ model, values, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, }: {
  /**
  The embedding model to use.
  */
@@ -214,6 +238,10 @@ declare function embedMany<VALUE>({ model, values, maxRetries, abortSignal, head
  Only applicable for HTTP-based providers.
  */
  headers?: Record<string, string>;
+ /**
+ * Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
  }): Promise<EmbedManyResult<VALUE>>;
 
  type CallSettings = {
@@ -473,22 +501,38 @@ type Prompt = {
  };
 
  /**
- * Telemetry configuration.
+ * Used to mark schemas so we can support both Zod and custom schemas.
  */
- type TelemetrySettings = {
+ declare const schemaSymbol: unique symbol;
+ type Schema<OBJECT = unknown> = Validator<OBJECT> & {
  /**
- * Enable or disable telemetry. Disabled by default while experimental.
+ * Used to mark schemas so we can support both Zod and custom schemas.
  */
- isEnabled?: boolean;
+ [schemaSymbol]: true;
  /**
- * Identifier for this function. Used to group telemetry data by function.
+ * Schema type for inference.
  */
- functionId?: string;
+ _type: OBJECT;
  /**
- * Additional information to include in the telemetry data.
+ * The JSON Schema for the schema. It is passed to the providers.
  */
- metadata?: Record<string, AttributeValue>;
+ readonly jsonSchema: JSONSchema7;
  };
+ /**
+ * Create a schema using a JSON Schema.
+ *
+ * @param jsonSchema The JSON Schema for the schema.
+ * @param options.validate Optional. A validation function for the schema.
+ */
+ declare function jsonSchema<OBJECT = unknown>(jsonSchema: JSONSchema7, { validate, }?: {
+ validate?: (value: unknown) => {
+ success: true;
+ value: OBJECT;
+ } | {
+ success: false;
+ error: Error;
+ };
+ }): Schema<OBJECT>;
 
  /**
  The result of a `generateObject` call.
@@ -571,7 +615,7 @@ If set and supported by the model, calls will generate deterministic results.
  @returns
  A result object that contains the generated object, the finish reason, the token usage, and additional information.
  */
- declare function generateObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, ...settings }: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ declare function generateObject<T>({ model, schema: inputSchema, mode, system, prompt, messages, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, ...settings }: Omit<CallSettings, 'stopSequences'> & Prompt & {
  /**
  The language model to use.
  */
@@ -579,11 +623,11 @@ The language model to use.
  /**
  The schema of the object that the model should generate.
  */
- schema: z.Schema<T>;
+ schema: z.Schema<T> | Schema<T>;
  /**
  The mode to use for object generation.
 
- The Zod schema is converted in a JSON schema and used in one of the following ways
+ The schema is converted in a JSON schema and used in one of the following ways
 
  - 'auto': The provider will choose the best mode for the model.
  - 'tool': A tool with the JSON schema as parameters is is provided and the provider is instructed to use it.
@@ -748,7 +792,7 @@ If set and supported by the model, calls will generate deterministic results.
  @return
  A result object for accessing the partial object stream and additional information.
  */
- declare function streamObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, headers, onFinish, ...settings }: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ declare function streamObject<T>({ model, schema: inputSchema, mode, system, prompt, messages, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, onFinish, ...settings }: Omit<CallSettings, 'stopSequences'> & Prompt & {
  /**
  The language model to use.
  */
@@ -756,11 +800,11 @@ The language model to use.
  /**
  The schema of the object that the model should generate.
  */
- schema: z.Schema<T>;
+ schema: z.Schema<T> | Schema<T>;
  /**
  The mode to use for object generation.
 
- The Zod schema is converted in a JSON schema and used in one of the following ways
+ The schema is converted in a JSON schema and used in one of the following ways
 
  - 'auto': The provider will choose the best mode for the model.
  - 'tool': A tool with the JSON schema as parameters is is provided and the provider is instructed to use it.
@@ -772,6 +816,10 @@ Default and recommended: 'auto' (best mode for the model).
  */
  mode?: 'auto' | 'json' | 'tool';
  /**
+ Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
+ /**
  Callback that is called when the LLM response and the final object validation are finished.
  */
  onFinish?: (event: {
@@ -808,12 +856,14 @@ declare class DefaultStreamObjectResult<T> implements StreamObjectResult<T> {
  readonly warnings: StreamObjectResult<T>['warnings'];
  readonly usage: StreamObjectResult<T>['usage'];
  readonly rawResponse: StreamObjectResult<T>['rawResponse'];
- constructor({ stream, warnings, rawResponse, schema, onFinish, }: {
+ constructor({ stream, warnings, rawResponse, schema, onFinish, rootSpan, doStreamSpan, }: {
  stream: ReadableStream<string | Omit<LanguageModelV1StreamPart, 'text-delta'>>;
  warnings: StreamObjectResult<T>['warnings'];
  rawResponse?: StreamObjectResult<T>['rawResponse'];
- schema: z.Schema<T>;
+ schema: z.Schema<T> | Schema<T>;
  onFinish: Parameters<typeof streamObject<T>>[0]['onFinish'];
+ rootSpan: Span;
+ doStreamSpan: Span;
  });
  get object(): Promise<T>;
  get partialObjectStream(): AsyncIterableStream<DeepPartial<T>>;
@@ -830,13 +880,15 @@ declare class DefaultStreamObjectResult<T> implements StreamObjectResult<T> {
  */
  declare const experimental_streamObject: typeof streamObject;
 
+ type Parameters$1 = z.ZodTypeAny | Schema<any>;
+ type inferParameters<PARAMETERS extends Parameters$1> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
  /**
  A tool contains the description and the schema of the input that the tool expects.
  This enables the language model to generate the input.
 
  The tool can also contain an optional execute function for the actual execution function of the tool.
  */
- interface CoreTool<PARAMETERS extends z.ZodTypeAny = any, RESULT = any> {
+ interface CoreTool<PARAMETERS extends Parameters$1 = any, RESULT = any> {
  /**
  An optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
  */
@@ -851,17 +903,17 @@ interface CoreTool<PARAMETERS extends z.ZodTypeAny = any, RESULT = any> {
  An async function that is called with the arguments from the tool call and produces a result.
  If not provided, the tool will not be executed automatically.
  */
- execute?: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
+ execute?: (args: inferParameters<PARAMETERS>) => PromiseLike<RESULT>;
  }
  /**
  Helper function for inferring the execute args of a tool.
  */
- declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
- execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
+ declare function tool<PARAMETERS extends Parameters$1, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
+ execute: (args: inferParameters<PARAMETERS>) => PromiseLike<RESULT>;
  }): CoreTool<PARAMETERS, RESULT> & {
- execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
+ execute: (args: inferParameters<PARAMETERS>) => PromiseLike<RESULT>;
  };
- declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
+ declare function tool<PARAMETERS extends Parameters$1, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
  execute?: undefined;
  }): CoreTool<PARAMETERS, RESULT> & {
  execute: undefined;
@@ -948,7 +1000,7 @@ type ToToolResultObject<TOOLS extends Record<string, CoreTool>> = ValueOf<{
  type: 'tool-result';
  toolCallId: string;
  toolName: NAME & string;
- args: z.infer<TOOLS[NAME]['parameters']>;
+ args: inferParameters<TOOLS[NAME]['parameters']>;
  result: Awaited<ReturnType<Exclude<TOOLS[NAME]['execute'], undefined>>>;
  };
  }>;
@@ -986,7 +1038,7 @@ type ToToolCall<TOOLS extends Record<string, CoreTool>> = ValueOf<{
  type: 'tool-call';
  toolCallId: string;
  toolName: NAME & string;
- args: z.infer<TOOLS[NAME]['parameters']>;
+ args: inferParameters<TOOLS[NAME]['parameters']>;
  };
  }>;
  type ToToolCallArray<TOOLS extends Record<string, CoreTool>> = Array<ToToolCall<TOOLS>>;
@@ -2312,4 +2364,4 @@ declare const generateId: (size?: number | undefined) => string;
  */
  declare const nanoid: (size?: number | undefined) => string;
 
- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantResponse, CallWarning, CohereStream, CompletionTokenUsage, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, DataContent, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingTokenUsage, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, InvalidMessageRoleError, InvalidModelIdError, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, MistralStream, NoSuchModelError, NoSuchProviderError, ObjectStreamInputPart, ObjectStreamPart, OpenAIStream, OpenAIStreamCallbacks, ReplicateStream, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, convertUint8ArrayToText, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, generateId, generateObject, generateText, nanoid, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantResponse, CallWarning, CohereStream, CompletionTokenUsage, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, DataContent, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingTokenUsage, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, InvalidMessageRoleError, InvalidModelIdError, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, MistralStream, NoSuchModelError, NoSuchProviderError, ObjectStreamInputPart, ObjectStreamPart, OpenAIStream, OpenAIStreamCallbacks, ReplicateStream, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, convertUint8ArrayToText, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, generateId, generateObject, generateText, jsonSchema, nanoid, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
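
The diff above adds an experimental OpenTelemetry integration: a new TelemetrySettings type plus an experimental_telemetry option on embed, embedMany, generateObject, and streamObject. A minimal sketch of how the option could be passed to embed, assuming an OpenTelemetry tracer provider is already registered in the application; the provider package and model id are placeholders, not part of this diff:

```ts
import { embed } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const { embedding } = await embed({
  model: openai.embedding('text-embedding-3-small'), // placeholder model id
  value: 'sunny day at the beach',
  experimental_telemetry: {
    isEnabled: true,                  // telemetry is off by default while experimental
    functionId: 'embed-beach-query',  // groups the emitted spans by function
    metadata: { userId: 'user-123' }, // extra AttributeValue entries on the spans
  },
});
```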
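The diff also introduces a Schema type and a jsonSchema() helper, and widens the schema option of generateObject and streamObject to z.Schema<T> | Schema<T>, so objects can be described with plain JSON Schema instead of Zod. A minimal sketch under those declarations, using a hypothetical recipe schema and a placeholder model:

```ts
import { generateObject, jsonSchema } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

// Describe the expected object shape with JSON Schema; the type argument
// gives the resulting Schema its TypeScript type for inference.
const recipeSchema = jsonSchema<{ name: string; ingredients: string[] }>({
  type: 'object',
  properties: {
    name: { type: 'string' },
    ingredients: { type: 'array', items: { type: 'string' } },
  },
  required: ['name', 'ingredients'],
});

const { object } = await generateObject({
  model: openai('gpt-4o'),  // placeholder model id
  schema: recipeSchema,     // schema now accepts z.Schema<T> | Schema<T>
  prompt: 'Generate a simple pancake recipe.',
});
```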
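Likewise, CoreTool and the tool() helper now accept either a Zod type or a Schema for their parameters (via the new Parameters$1 and inferParameters types), so execute still receives typed arguments when a JSON Schema is used. A sketch with a hypothetical weather tool:

```ts
import { jsonSchema, tool } from 'ai';

// A tool whose parameters are described with jsonSchema() rather than Zod;
// inferParameters gives execute() the argument type { city: string }.
const weatherTool = tool({
  description: 'Get the current weather for a city',
  parameters: jsonSchema<{ city: string }>({
    type: 'object',
    properties: { city: { type: 'string' } },
    required: ['city'],
  }),
  execute: async ({ city }) => {
    // Placeholder implementation; a real tool would call a weather API here.
    return { city, temperatureC: 21 };
  },
});
```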
package/dist/index.d.ts CHANGED
The changes to this file are identical, hunk for hunk, to those shown above for package/dist/index.d.mts.