ai 3.3.15 → 3.3.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +120 -105
- package/dist/index.d.ts +120 -105
- package/dist/index.js +108 -22
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +108 -22
- package/dist/index.mjs.map +1 -1
- package/package.json +7 -7
package/dist/index.d.ts
CHANGED
@@ -1,7 +1,7 @@
-import { Schema, DeepPartial, Attachment, JSONValue, CreateMessage, FunctionCall as FunctionCall$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
+import { Schema, DeepPartial, Attachment, JSONValue as JSONValue$1, CreateMessage, FunctionCall as FunctionCall$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
 export { AssistantMessage, AssistantStatus, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, Function, FunctionCall, FunctionCallHandler, IdGenerator, JSONValue, Message, RequestOptions, Schema, StreamPart, Tool, ToolCall, ToolCallHandler, ToolChoice, ToolInvocation, UseAssistantOptions, formatStreamPart, jsonSchema, parseComplexResponse, parseStreamPart, readDataStream } from '@ai-sdk/ui-utils';
 import { AttributeValue, Span } from '@opentelemetry/api';
-import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1StreamPart, AISDKError } from '@ai-sdk/provider';
+import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONValue, LanguageModelV1StreamPart, AISDKError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LoadAPIKeyError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { z } from 'zod';
 import { ServerResponse } from 'http';
@@ -592,63 +592,30 @@ Generate a structured, typed object for a given prompt and schema using a langua
 
 This function does not stream the output. If you want to stream the output, use `streamObject` instead.
 
-@param model - The language model to use.
-
-@param schema - The schema of the object that the model should generate.
-@param schemaName - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
-@param schemaDescription - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
-@param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
-
-@param system - A system message that will be part of the prompt.
-@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
-@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
-
-@param maxTokens - Maximum number of tokens to generate.
-@param temperature - Temperature setting.
-The value is passed through to the provider. The range depends on the provider and model.
-It is recommended to set either `temperature` or `topP`, but not both.
-@param topP - Nucleus sampling.
-The value is passed through to the provider. The range depends on the provider and model.
-It is recommended to set either `temperature` or `topP`, but not both.
-@param topK - Only sample from the top K options for each subsequent token.
-Used to remove "long tail" low probability responses.
-Recommended for advanced use cases only. You usually only need to use temperature.
-@param presencePenalty - Presence penalty setting.
-It affects the likelihood of the model to repeat information that is already in the prompt.
-The value is passed through to the provider. The range depends on the provider and model.
-@param frequencyPenalty - Frequency penalty setting.
-It affects the likelihood of the model to repeatedly use the same words or phrases.
-The value is passed through to the provider. The range depends on the provider and model.
-@param seed - The seed (integer) to use for random sampling.
-If set and supported by the model, calls will generate deterministic results.
-
-@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
-@param abortSignal - An optional abort signal that can be used to cancel the call.
-@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
-
 @returns
 A result object that contains the generated object, the finish reason, the token usage, and additional information.
 */
-declare function generateObject<T>(
+declare function generateObject<T>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+output?: 'object' | undefined;
 /**
 The language model to use.
-
+*/
 model: LanguageModel;
 /**
 The schema of the object that the model should generate.
-
+*/
 schema: z.Schema<T, z.ZodTypeDef, any> | Schema<T>;
 /**
 Optional name of the output that should be generated.
 Used by some providers for additional LLM guidance, e.g.
 via tool or schema name.
-
+*/
 schemaName?: string;
 /**
 Optional description of the output that should be generated.
 Used by some providers for additional LLM guidance, e.g.
 via tool or schema description.
-
+*/
 schemaDescription?: string;
 /**
 The mode to use for object generation.
@@ -662,13 +629,36 @@ The schema is converted in a JSON schema and used in one of the following ways
 Please note that most providers do not support all modes.
 
 Default and recommended: 'auto' (best mode for the model).
-
+*/
 mode?: 'auto' | 'json' | 'tool';
 /**
-
+Optional telemetry configuration (experimental).
 */
 experimental_telemetry?: TelemetrySettings;
 }): Promise<DefaultGenerateObjectResult<T>>;
+/**
+Generate JSON with any schema for a given prompt using a language model.
+
+This function does not stream the output. If you want to stream the output, use `streamObject` instead.
+
+@returns
+A result object that contains the generated object, the finish reason, the token usage, and additional information.
+*/
+declare function generateObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+output: 'no-schema';
+/**
+The language model to use.
+*/
+model: LanguageModel;
+/**
+The mode to use for object generation. Must be "json" for no-schema output.
+*/
+mode?: 'json';
+/**
+Optional telemetry configuration (experimental).
+*/
+experimental_telemetry?: TelemetrySettings;
+}): Promise<DefaultGenerateObjectResult<JSONValue>>;
 declare class DefaultGenerateObjectResult<T> implements GenerateObjectResult<T> {
 readonly object: GenerateObjectResult<T>['object'];
 readonly finishReason: GenerateObjectResult<T>['finishReason'];
@@ -792,63 +782,29 @@ Generate a structured, typed object for a given prompt and schema using a langua
 
 This function streams the output. If you do not want to stream the output, use `generateObject` instead.
 
-@param model - The language model to use.
-
-@param schema - The schema of the object that the model should generate.
-@param schemaName - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
-@param schemaDescription - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
-@param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
-
-@param system - A system message that will be part of the prompt.
-@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
-@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
-
-@param maxTokens - Maximum number of tokens to generate.
-@param temperature - Temperature setting.
-The value is passed through to the provider. The range depends on the provider and model.
-It is recommended to set either `temperature` or `topP`, but not both.
-@param topP - Nucleus sampling.
-The value is passed through to the provider. The range depends on the provider and model.
-It is recommended to set either `temperature` or `topP`, but not both.
-@param topK - Only sample from the top K options for each subsequent token.
-Used to remove "long tail" low probability responses.
-Recommended for advanced use cases only. You usually only need to use temperature.
-@param presencePenalty - Presence penalty setting.
-It affects the likelihood of the model to repeat information that is already in the prompt.
-The value is passed through to the provider. The range depends on the provider and model.
-@param frequencyPenalty - Frequency penalty setting.
-It affects the likelihood of the model to repeatedly use the same words or phrases.
-The value is passed through to the provider. The range depends on the provider and model.
-@param seed - The seed (integer) to use for random sampling.
-If set and supported by the model, calls will generate deterministic results.
-
-@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
-@param abortSignal - An optional abort signal that can be used to cancel the call.
-@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
-
 @return
 A result object for accessing the partial object stream and additional information.
 */
-declare function streamObject<T>(
+declare function streamObject<T>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
 /**
 The language model to use.
-
+*/
 model: LanguageModel;
 /**
 The schema of the object that the model should generate.
-
+*/
 schema: z.Schema<T, z.ZodTypeDef, any> | Schema<T>;
 /**
 Optional name of the output that should be generated.
 Used by some providers for additional LLM guidance, e.g.
 via tool or schema name.
-
+*/
 schemaName?: string;
 /**
 Optional description of the output that should be generated.
 Used by some providers for additional LLM guidance, e.g.
 via tool or schema description.
-
+*/
 schemaDescription?: string;
 /**
 The mode to use for object generation.
@@ -862,49 +818,108 @@ The schema is converted in a JSON schema and used in one of the following ways
 Please note that most providers do not support all modes.
 
 Default and recommended: 'auto' (best mode for the model).
-
+*/
 mode?: 'auto' | 'json' | 'tool';
 /**
 Optional telemetry configuration (experimental).
-
+*/
 experimental_telemetry?: TelemetrySettings;
 /**
 Callback that is called when the LLM response and the final object validation are finished.
-
+*/
 onFinish?: (event: {
 /**
-
-
+The token usage of the generated response.
+*/
 usage: CompletionTokenUsage$1;
 /**
-
-
+The generated object (typed according to the schema). Can be undefined if the final object does not match the schema.
+*/
 object: T | undefined;
 /**
-
-
+Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema.
+*/
 error: unknown | undefined;
 /**
-
-
+Optional raw response data.
+*/
 rawResponse?: {
 /**
-
-
+Response headers.
+*/
 headers?: Record<string, string>;
 };
 /**
-
-
+Warnings from the model provider (e.g. unsupported settings).
+*/
 warnings?: CallWarning[];
 /**
-
-
-
-
+Additional provider-specific metadata. They are passed through
+from the provider to the AI SDK and enable provider-specific
+results that can be fully encapsulated in the provider.
+*/
 experimental_providerMetadata: ProviderMetadata | undefined;
 }) => Promise<void> | void;
 }): Promise<DefaultStreamObjectResult<T>>;
+/**
+Generate JSON with any schema for a given prompt using a language model.
+
+This function streams the output. If you do not want to stream the output, use `generateObject` instead.
+
+@return
+A result object for accessing the partial object stream and additional information.
+*/
+declare function streamObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+output: 'no-schema';
+/**
+The language model to use.
+*/
+model: LanguageModel;
+/**
+The mode to use for object generation. Must be "json" for no-schema output.
+*/
+mode?: 'json';
+/**
+Optional telemetry configuration (experimental).
+*/
+experimental_telemetry?: TelemetrySettings;
+/**
+Callback that is called when the LLM response and the final object validation are finished.
+*/
+onFinish?: (event: {
+/**
+The token usage of the generated response.
+*/
+usage: CompletionTokenUsage$1;
+/**
+The generated object (typed according to the schema). Can be undefined if the final object does not match the schema.
+*/
+object: JSONValue | undefined;
+/**
+Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema.
+*/
+error: unknown | undefined;
+/**
+Optional raw response data.
+*/
+rawResponse?: {
+/**
+Response headers.
+*/
+headers?: Record<string, string>;
+};
+/**
+Warnings from the model provider (e.g. unsupported settings).
+*/
+warnings?: CallWarning[];
+/**
+Additional provider-specific metadata. They are passed through
+from the provider to the AI SDK and enable provider-specific
+results that can be fully encapsulated in the provider.
+*/
+experimental_providerMetadata: ProviderMetadata | undefined;
+}) => Promise<void> | void;
+}): Promise<DefaultStreamObjectResult<JSONValue>>;
 declare class DefaultStreamObjectResult<T> implements StreamObjectResult<T> {
 private readonly originalStream;
 private readonly objectPromise;
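
As with `generateObject`, the new `streamObject` overload emits JSON that is not constrained by a schema. A minimal sketch (provider import and model id are assumptions):

import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

async function main() {
  const { partialObjectStream } = await streamObject({
    model: openai('gpt-4-turbo'), // assumed model id
    output: 'no-schema',
    prompt: 'Generate a JSON object describing the weather in Berlin.',
    onFinish({ object, usage }) {
      // `object` is JSONValue | undefined, matching the overload above.
      console.log('final object:', object, 'usage:', usage);
    },
  });
  for await (const partial of partialObjectStream) {
    console.log(partial); // progressively larger partial JSON values
  }
}
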
@@ -2083,7 +2098,7 @@ type OpenAIStreamCallbacks = AIStreamCallbacksAndOptions & {
 * })
 * ```
 */
-experimental_onFunctionCall?: (functionCallPayload: FunctionCallPayload, createFunctionCallMessages: (functionCallResult: JSONValue) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
+experimental_onFunctionCall?: (functionCallPayload: FunctionCallPayload, createFunctionCallMessages: (functionCallResult: JSONValue$1) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
 /**
 * @example
 * ```js
@@ -2122,7 +2137,7 @@ type OpenAIStreamCallbacks = AIStreamCallbacksAndOptions & {
 experimental_onToolCall?: (toolCallPayload: ToolCallPayload, appendToolCallMessage: (result?: {
 tool_call_id: string;
 function_name: string;
-tool_call_result: JSONValue;
+tool_call_result: JSONValue$1;
 }) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
 };
 interface ChatCompletionChunk {
@@ -2583,8 +2598,8 @@ declare class StreamData {
 private warningTimeout;
 constructor();
 close(): Promise<void>;
-append(value: JSONValue): void;
-appendMessageAnnotation(value: JSONValue): void;
+append(value: JSONValue$1): void;
+appendMessageAnnotation(value: JSONValue$1): void;
 }
 /**
 * A TransformStream for LLMs that do not have their own transform stream handlers managing encoding (e.g. OpenAIStream has one for function call handling).
package/dist/index.js
CHANGED
@@ -1411,21 +1411,22 @@ function prepareResponseHeaders(init, {
 return headers;
 }
 
-// core/generate-object/inject-json-
+// core/generate-object/inject-json-instruction.ts
 var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
 var DEFAULT_SCHEMA_SUFFIX = "You MUST answer with a JSON object that matches the JSON schema above.";
-
-
+var DEFAULT_GENERIC_SUFFIX = "You MUST answer with JSON.";
+function injectJsonInstruction({
+prompt,
 schema,
-schemaPrefix = DEFAULT_SCHEMA_PREFIX,
-schemaSuffix = DEFAULT_SCHEMA_SUFFIX
+schemaPrefix = schema != null ? DEFAULT_SCHEMA_PREFIX : void 0,
+schemaSuffix = schema != null ? DEFAULT_SCHEMA_SUFFIX : DEFAULT_GENERIC_SUFFIX
 }) {
 return [
-
-
-// add a newline if
+prompt != null && prompt.length > 0 ? prompt : void 0,
+prompt != null && prompt.length > 0 ? "" : void 0,
+// add a newline if prompt is not null
 schemaPrefix,
-JSON.stringify(schema),
+schema != null ? JSON.stringify(schema) : void 0,
 schemaSuffix
 ].filter((line) => line != null).join("\n");
 }
@@ -1465,6 +1466,62 @@ var NoObjectGeneratedError = class extends import_provider8.AISDKError {
 };
 _a6 = symbol6;
 
+// core/generate-object/validate-object-generation-input.ts
+function validateObjectGenerationInput({
+output,
+mode,
+schema,
+schemaName,
+schemaDescription
+}) {
+if (output != null && output !== "object" && output !== "no-schema") {
+throw new InvalidArgumentError({
+parameter: "output",
+value: output,
+message: "Invalid output type."
+});
+}
+if (output === "no-schema") {
+if (mode === "auto" || mode === "tool") {
+throw new InvalidArgumentError({
+parameter: "mode",
+value: mode,
+message: 'Mode must be "json" for no-schema output.'
+});
+}
+if (schema != null) {
+throw new InvalidArgumentError({
+parameter: "schema",
+value: schema,
+message: "Schema is not supported for no-schema output."
+});
+}
+if (schemaDescription != null) {
+throw new InvalidArgumentError({
+parameter: "schemaDescription",
+value: schemaDescription,
+message: "Schema description is not supported for no-schema output."
+});
+}
+if (schemaName != null) {
+throw new InvalidArgumentError({
+parameter: "schemaName",
+value: schemaName,
+message: "Schema name is not supported for no-schema output."
+});
+}
+}
+if (output === "object") {
+if (schema == null) {
+throw new InvalidArgumentError({
+parameter: "schema",
+value: schema,
+message: "Schema is required for object output."
+});
+}
+}
+}
+
 // core/generate-object/generate-object.ts
 async function generateObject({
 model,
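
The TypeScript overloads should already rule most of these combinations out at compile time; this runtime check protects plain-JavaScript callers. A sketch of a call the new validation rejects (provider import and model id are assumptions):

import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package
import { z } from 'zod';

// Throws InvalidArgumentError: "Schema is not supported for no-schema output."
await generateObject({
  model: openai('gpt-4-turbo'), // assumed model id
  output: 'no-schema',
  // @ts-expect-error -- the 'no-schema' overload has no `schema` property
  schema: z.object({ name: z.string() }),
  prompt: 'Generate a person.',
});
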
@@ -1472,6 +1529,7 @@ async function generateObject({
 schemaName,
 schemaDescription,
 mode,
+output = "object",
 system,
 prompt,
 messages,
@@ -1482,13 +1540,23 @@ async function generateObject({
 ...settings
 }) {
 var _a12;
+validateObjectGenerationInput({
+output,
+mode,
+schema: inputSchema,
+schemaName,
+schemaDescription
+});
+if (output === "no-schema" && mode === void 0) {
+mode = "json";
+}
 const baseTelemetryAttributes = getBaseTelemetryAttributes({
 model,
 telemetry,
 headers,
 settings: { ...settings, maxRetries }
 });
-const schema = (0, import_ui_utils.asSchema)(inputSchema);
+const schema = inputSchema != null ? (0, import_ui_utils.asSchema)(inputSchema) : void 0;
 const tracer = getTracer({ isEnabled: (_a12 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a12 : false });
 return recordSpan({
 name: "ai.generateObject",
@@ -1504,11 +1572,10 @@ async function generateObject({
 "ai.prompt": {
 input: () => JSON.stringify({ system, prompt, messages })
 },
-"ai.schema": {
-input: () => JSON.stringify(schema.jsonSchema)
-},
+"ai.schema": schema != null ? { input: () => JSON.stringify(schema.jsonSchema) } : void 0,
 "ai.schema.name": schemaName,
 "ai.schema.description": schemaDescription,
+"ai.settings.output": output,
 "ai.settings.mode": mode
 }
 }),
@@ -1528,8 +1595,8 @@ async function generateObject({
 switch (mode) {
 case "json": {
 const validatedPrompt = validatePrompt({
-system: model.supportsStructuredOutputs ? system :
-system,
+system: schema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs && schema != null ? system : injectJsonInstruction({
+prompt: system,
 schema: schema.jsonSchema
 }),
 prompt,
|
 const result2 = await model.doGenerate({
 mode: {
 type: "object-json",
-schema: schema.jsonSchema,
+schema: schema == null ? void 0 : schema.jsonSchema,
 name: schemaName,
 description: schemaDescription
 },
@@ -1710,7 +1777,12 @@ async function generateObject({
 throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
 }
 }
-const parseResult = (0, import_provider_utils5.safeParseJSON)({
+const parseResult = (0, import_provider_utils5.safeParseJSON)({
+text: result,
+// type casting required for `undefined` schema (no-schema mode),
+// in which case <T> is <JSONValue> as desired.
+schema
+});
 if (!parseResult.success) {
 throw parseResult.error;
 }
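
For context, `safeParseJSON` from `@ai-sdk/provider-utils` accepts an optional schema: with one it validates the parsed value against it, without one it only checks that the text parses as JSON, which is why an `undefined` schema yields a `JSONValue` result here. A hedged sketch (treat the exact result shape as an assumption):

import { safeParseJSON } from '@ai-sdk/provider-utils';
import { z } from 'zod';

const withSchema = safeParseJSON({
  text: '{"a":1}',
  schema: z.object({ a: z.number() }),
}); // on success, `value` is typed as { a: number }

const withoutSchema = safeParseJSON({
  text: '{"a":1}',
}); // on success, `value` is typed as JSONValue -- the no-schema path above
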
@@ -1842,6 +1914,7 @@ async function streamObject({
 schemaName,
 schemaDescription,
 mode,
+output = "object",
 system,
 prompt,
 messages,
@@ -1853,6 +1926,16 @@ async function streamObject({
 ...settings
 }) {
 var _a12;
+validateObjectGenerationInput({
+output,
+mode,
+schema: inputSchema,
+schemaName,
+schemaDescription
+});
+if (output === "no-schema" && mode === void 0) {
+mode = "json";
+}
 const baseTelemetryAttributes = getBaseTelemetryAttributes({
 model,
 telemetry,
@@ -1861,7 +1944,7 @@ async function streamObject({
 });
 const tracer = getTracer({ isEnabled: (_a12 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a12 : false });
 const retry = retryWithExponentialBackoff({ maxRetries });
-const schema = (0, import_ui_utils2.asSchema)(inputSchema);
+const schema = inputSchema != null ? (0, import_ui_utils2.asSchema)(inputSchema) : void 0;
 return recordSpan({
 name: "ai.streamObject",
 attributes: selectTelemetryAttributes({
@@ -1876,9 +1959,10 @@ async function streamObject({
 "ai.prompt": {
 input: () => JSON.stringify({ system, prompt, messages })
 },
-"ai.schema": { input: () => JSON.stringify(schema.jsonSchema) },
+"ai.schema": schema != null ? { input: () => JSON.stringify(schema.jsonSchema) } : void 0,
 "ai.schema.name": schemaName,
 "ai.schema.description": schemaDescription,
+"ai.settings.output": output,
 "ai.settings.mode": mode
 }
 }),
@@ -1893,8 +1977,8 @@ async function streamObject({
 switch (mode) {
 case "json": {
 const validatedPrompt = validatePrompt({
-system: model.supportsStructuredOutputs ? system :
-system,
+system: schema == null ? injectJsonInstruction({ prompt: system }) : model.supportsStructuredOutputs && schema != null ? system : injectJsonInstruction({
+prompt: system,
 schema: schema.jsonSchema
 }),
 prompt,
@@ -1903,7 +1987,7 @@ async function streamObject({
 callOptions = {
 mode: {
 type: "object-json",
-schema: schema.jsonSchema,
+schema: schema == null ? void 0 : schema.jsonSchema,
 name: schemaName,
 description: schemaDescription
 },
@@ -2025,6 +2109,8 @@ async function streamObject({
 stream: stream.pipeThrough(new TransformStream(transformer)),
 warnings,
 rawResponse,
+// type casting required for `undefined` schema (no-schema mode),
+// in which case <T> is <JSONValue> as desired.
 schema,
 onFinish,
 rootSpan,