ai 4.1.41 → 4.1.43

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
1
1
  # ai
2
2
 
3
+ ## 4.1.43
4
+
5
+ ### Patch Changes
6
+
7
+ - ef2e23b: feat (ai/core): add experimental repairText function to generateObject
8
+
9
+ ## 4.1.42
10
+
11
+ ### Patch Changes
12
+
13
+ - Updated dependencies [2761f06]
14
+ - @ai-sdk/provider@1.0.8
15
+ - @ai-sdk/provider-utils@2.1.9
16
+ - @ai-sdk/ui-utils@1.1.15
17
+ - @ai-sdk/react@1.1.17
18
+
3
19
  ## 4.1.41
4
20
 
5
21
  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -2,7 +2,7 @@ import { IDGenerator } from '@ai-sdk/provider-utils';
2
2
  export { CoreToolCall, CoreToolResult, IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
3
3
  import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
4
4
  export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UIMessage, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
5
- import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1Source, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
5
+ import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1Source, LanguageModelV1ProviderMetadata, JSONParseError, TypeValidationError, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
6
6
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
7
7
  import { ServerResponse } from 'node:http';
8
8
  import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -913,6 +913,16 @@ interface GenerateObjectResult<OBJECT> {
913
913
  toJsonResponse(init?: ResponseInit): Response;
914
914
  }
915
915
 
916
+ /**
917
+ A function that attempts to repair the raw output of the model
918
+ to enable JSON parsing.
919
+
920
+ Should return the repaired text or null if the text cannot be repaired.
921
+ */
922
+ type RepairTextFunction = (options: {
923
+ text: string;
924
+ error: JSONParseError | TypeValidationError;
925
+ }) => Promise<string | null>;
916
926
  /**
917
927
  Generate a structured, typed object for a given prompt and schema using a language model.
918
928
 
@@ -958,6 +968,11 @@ Default and recommended: 'auto' (best mode for the model).
958
968
  */
959
969
  mode?: 'auto' | 'json' | 'tool';
960
970
  /**
971
+ A function that attempts to repair the raw output of the model
972
+ to enable JSON parsing.
973
+ */
974
+ experimental_repairText?: RepairTextFunction;
975
+ /**
961
976
  Optional telemetry configuration (experimental).
962
977
  */
963
978
  experimental_telemetry?: TelemetrySettings;
@@ -1024,6 +1039,11 @@ Default and recommended: 'auto' (best mode for the model).
1024
1039
  */
1025
1040
  mode?: 'auto' | 'json' | 'tool';
1026
1041
  /**
1042
+ A function that attempts to repair the raw output of the model
1043
+ to enable JSON parsing.
1044
+ */
1045
+ experimental_repairText?: RepairTextFunction;
1046
+ /**
1027
1047
  Optional telemetry configuration (experimental).
1028
1048
  */
1029
1049
  experimental_telemetry?: TelemetrySettings;
@@ -1078,6 +1098,11 @@ Default and recommended: 'auto' (best mode for the model).
1078
1098
  */
1079
1099
  mode?: 'auto' | 'json' | 'tool';
1080
1100
  /**
1101
+ A function that attempts to repair the raw output of the model
1102
+ to enable JSON parsing.
1103
+ */
1104
+ experimental_repairText?: RepairTextFunction;
1105
+ /**
1081
1106
  Optional telemetry configuration (experimental).
1082
1107
  */
1083
1108
  experimental_telemetry?: TelemetrySettings;
@@ -1118,6 +1143,11 @@ The mode to use for object generation. Must be "json" for no-schema output.
1118
1143
  */
1119
1144
  mode?: 'json';
1120
1145
  /**
1146
+ A function that attempts to repair the raw output of the model
1147
+ to enable JSON parsing.
1148
+ */
1149
+ experimental_repairText?: RepairTextFunction;
1150
+ /**
1121
1151
  Optional telemetry configuration (experimental).
1122
1152
  */
1123
1153
  experimental_telemetry?: TelemetrySettings;
@@ -3061,4 +3091,4 @@ declare namespace llamaindexAdapter {
3061
3091
  };
3062
3092
  }
3063
3093
 
3064
- export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LanguageModelV1Middleware, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, customProvider, embed, embedMany, 
experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, smoothStream, streamObject, streamText, tool, wrapLanguageModel };
3094
+ export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LanguageModelV1Middleware, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RepairTextFunction, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, customProvider, embed, embedMany, 
experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, smoothStream, streamObject, streamText, tool, wrapLanguageModel };
package/dist/index.d.ts CHANGED
@@ -2,7 +2,7 @@ import { IDGenerator } from '@ai-sdk/provider-utils';
2
2
  export { CoreToolCall, CoreToolResult, IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
3
3
  import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
4
4
  export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UIMessage, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
5
- import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1Source, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
5
+ import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1Source, LanguageModelV1ProviderMetadata, JSONParseError, TypeValidationError, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
6
6
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
7
7
  import { ServerResponse } from 'node:http';
8
8
  import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -913,6 +913,16 @@ interface GenerateObjectResult<OBJECT> {
913
913
  toJsonResponse(init?: ResponseInit): Response;
914
914
  }
915
915
 
916
+ /**
917
+ A function that attempts to repair the raw output of the model
918
+ to enable JSON parsing.
919
+
920
+ Should return the repaired text or null if the text cannot be repaired.
921
+ */
922
+ type RepairTextFunction = (options: {
923
+ text: string;
924
+ error: JSONParseError | TypeValidationError;
925
+ }) => Promise<string | null>;
916
926
  /**
917
927
  Generate a structured, typed object for a given prompt and schema using a language model.
918
928
 
@@ -958,6 +968,11 @@ Default and recommended: 'auto' (best mode for the model).
958
968
  */
959
969
  mode?: 'auto' | 'json' | 'tool';
960
970
  /**
971
+ A function that attempts to repair the raw output of the model
972
+ to enable JSON parsing.
973
+ */
974
+ experimental_repairText?: RepairTextFunction;
975
+ /**
961
976
  Optional telemetry configuration (experimental).
962
977
  */
963
978
  experimental_telemetry?: TelemetrySettings;
@@ -1024,6 +1039,11 @@ Default and recommended: 'auto' (best mode for the model).
1024
1039
  */
1025
1040
  mode?: 'auto' | 'json' | 'tool';
1026
1041
  /**
1042
+ A function that attempts to repair the raw output of the model
1043
+ to enable JSON parsing.
1044
+ */
1045
+ experimental_repairText?: RepairTextFunction;
1046
+ /**
1027
1047
  Optional telemetry configuration (experimental).
1028
1048
  */
1029
1049
  experimental_telemetry?: TelemetrySettings;
@@ -1078,6 +1098,11 @@ Default and recommended: 'auto' (best mode for the model).
1078
1098
  */
1079
1099
  mode?: 'auto' | 'json' | 'tool';
1080
1100
  /**
1101
+ A function that attempts to repair the raw output of the model
1102
+ to enable JSON parsing.
1103
+ */
1104
+ experimental_repairText?: RepairTextFunction;
1105
+ /**
1081
1106
  Optional telemetry configuration (experimental).
1082
1107
  */
1083
1108
  experimental_telemetry?: TelemetrySettings;
@@ -1118,6 +1143,11 @@ The mode to use for object generation. Must be "json" for no-schema output.
1118
1143
  */
1119
1144
  mode?: 'json';
1120
1145
  /**
1146
+ A function that attempts to repair the raw output of the model
1147
+ to enable JSON parsing.
1148
+ */
1149
+ experimental_repairText?: RepairTextFunction;
1150
+ /**
1121
1151
  Optional telemetry configuration (experimental).
1122
1152
  */
1123
1153
  experimental_telemetry?: TelemetrySettings;
@@ -3061,4 +3091,4 @@ declare namespace llamaindexAdapter {
3061
3091
  };
3062
3092
  }
3063
3093
 
3064
- export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LanguageModelV1Middleware, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, customProvider, embed, embedMany, 
experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, smoothStream, streamObject, streamText, tool, wrapLanguageModel };
3094
+ export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LanguageModelV1Middleware, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RepairTextFunction, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, customProvider, embed, embedMany, 
experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, smoothStream, streamObject, streamText, tool, wrapLanguageModel };
package/dist/index.js CHANGED
@@ -20,27 +20,27 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
20
20
  // streams/index.ts
21
21
  var streams_exports = {};
22
22
  __export(streams_exports, {
23
- AISDKError: () => import_provider17.AISDKError,
24
- APICallError: () => import_provider17.APICallError,
23
+ AISDKError: () => import_provider18.AISDKError,
24
+ APICallError: () => import_provider18.APICallError,
25
25
  AssistantResponse: () => AssistantResponse,
26
26
  DownloadError: () => DownloadError,
27
- EmptyResponseBodyError: () => import_provider17.EmptyResponseBodyError,
27
+ EmptyResponseBodyError: () => import_provider18.EmptyResponseBodyError,
28
28
  InvalidArgumentError: () => InvalidArgumentError,
29
29
  InvalidDataContentError: () => InvalidDataContentError,
30
30
  InvalidMessageRoleError: () => InvalidMessageRoleError,
31
- InvalidPromptError: () => import_provider17.InvalidPromptError,
32
- InvalidResponseDataError: () => import_provider17.InvalidResponseDataError,
31
+ InvalidPromptError: () => import_provider18.InvalidPromptError,
32
+ InvalidResponseDataError: () => import_provider18.InvalidResponseDataError,
33
33
  InvalidToolArgumentsError: () => InvalidToolArgumentsError,
34
- JSONParseError: () => import_provider17.JSONParseError,
34
+ JSONParseError: () => import_provider18.JSONParseError,
35
35
  LangChainAdapter: () => langchain_adapter_exports,
36
36
  LlamaIndexAdapter: () => llamaindex_adapter_exports,
37
- LoadAPIKeyError: () => import_provider17.LoadAPIKeyError,
37
+ LoadAPIKeyError: () => import_provider18.LoadAPIKeyError,
38
38
  MessageConversionError: () => MessageConversionError,
39
- NoContentGeneratedError: () => import_provider17.NoContentGeneratedError,
39
+ NoContentGeneratedError: () => import_provider18.NoContentGeneratedError,
40
40
  NoImageGeneratedError: () => NoImageGeneratedError,
41
41
  NoObjectGeneratedError: () => NoObjectGeneratedError,
42
42
  NoOutputSpecifiedError: () => NoOutputSpecifiedError,
43
- NoSuchModelError: () => import_provider17.NoSuchModelError,
43
+ NoSuchModelError: () => import_provider18.NoSuchModelError,
44
44
  NoSuchProviderError: () => NoSuchProviderError,
45
45
  NoSuchToolError: () => NoSuchToolError,
46
46
  Output: () => output_exports,
@@ -48,8 +48,8 @@ __export(streams_exports, {
48
48
  StreamData: () => StreamData,
49
49
  ToolCallRepairError: () => ToolCallRepairError,
50
50
  ToolExecutionError: () => ToolExecutionError,
51
- TypeValidationError: () => import_provider17.TypeValidationError,
52
- UnsupportedFunctionalityError: () => import_provider17.UnsupportedFunctionalityError,
51
+ TypeValidationError: () => import_provider18.TypeValidationError,
52
+ UnsupportedFunctionalityError: () => import_provider18.UnsupportedFunctionalityError,
53
53
  appendClientMessage: () => appendClientMessage,
54
54
  appendResponseMessages: () => appendResponseMessages,
55
55
  convertToCoreMessages: () => convertToCoreMessages,
@@ -993,6 +993,7 @@ var DefaultGeneratedImage = class {
993
993
  };
994
994
 
995
995
  // core/generate-object/generate-object.ts
996
+ var import_provider12 = require("@ai-sdk/provider");
996
997
  var import_provider_utils6 = require("@ai-sdk/provider-utils");
997
998
 
998
999
  // errors/no-object-generated-error.ts
@@ -2437,6 +2438,7 @@ async function generateObject({
2437
2438
  maxRetries: maxRetriesArg,
2438
2439
  abortSignal,
2439
2440
  headers,
2441
+ experimental_repairText: repairText,
2440
2442
  experimental_telemetry: telemetry,
2441
2443
  experimental_providerMetadata,
2442
2444
  providerOptions = experimental_providerMetadata,
@@ -2737,32 +2739,52 @@ async function generateObject({
2737
2739
  throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
2738
2740
  }
2739
2741
  }
2740
- const parseResult = (0, import_provider_utils6.safeParseJSON)({ text: result });
2741
- if (!parseResult.success) {
2742
- throw new NoObjectGeneratedError({
2743
- message: "No object generated: could not parse the response.",
2744
- cause: parseResult.error,
2745
- text: result,
2746
- response,
2747
- usage: calculateLanguageModelUsage(usage)
2748
- });
2742
+ function processResult(result2) {
2743
+ const parseResult = (0, import_provider_utils6.safeParseJSON)({ text: result2 });
2744
+ if (!parseResult.success) {
2745
+ throw new NoObjectGeneratedError({
2746
+ message: "No object generated: could not parse the response.",
2747
+ cause: parseResult.error,
2748
+ text: result2,
2749
+ response,
2750
+ usage: calculateLanguageModelUsage(usage)
2751
+ });
2752
+ }
2753
+ const validationResult = outputStrategy.validateFinalResult(
2754
+ parseResult.value,
2755
+ {
2756
+ text: result2,
2757
+ response,
2758
+ usage: calculateLanguageModelUsage(usage)
2759
+ }
2760
+ );
2761
+ if (!validationResult.success) {
2762
+ throw new NoObjectGeneratedError({
2763
+ message: "No object generated: response did not match schema.",
2764
+ cause: validationResult.error,
2765
+ text: result2,
2766
+ response,
2767
+ usage: calculateLanguageModelUsage(usage)
2768
+ });
2769
+ }
2770
+ return validationResult.value;
2749
2771
  }
2750
- const validationResult = outputStrategy.validateFinalResult(
2751
- parseResult.value,
2752
- {
2753
- text: result,
2754
- response,
2755
- usage: calculateLanguageModelUsage(usage)
2772
+ let object2;
2773
+ try {
2774
+ object2 = processResult(result);
2775
+ } catch (error) {
2776
+ if (repairText != null && NoObjectGeneratedError.isInstance(error) && (import_provider12.JSONParseError.isInstance(error.cause) || import_provider12.TypeValidationError.isInstance(error.cause))) {
2777
+ const repairedText = await repairText({
2778
+ text: result,
2779
+ error: error.cause
2780
+ });
2781
+ if (repairedText === null) {
2782
+ throw error;
2783
+ }
2784
+ object2 = processResult(repairedText);
2785
+ } else {
2786
+ throw error;
2756
2787
  }
2757
- );
2758
- if (!validationResult.success) {
2759
- throw new NoObjectGeneratedError({
2760
- message: "No object generated: response did not match schema.",
2761
- cause: validationResult.error,
2762
- text: result,
2763
- response,
2764
- usage: calculateLanguageModelUsage(usage)
2765
- });
2766
2788
  }
2767
2789
  span.setAttributes(
2768
2790
  selectTelemetryAttributes({
@@ -2770,7 +2792,7 @@ async function generateObject({
2770
2792
  attributes: {
2771
2793
  "ai.response.finishReason": finishReason,
2772
2794
  "ai.response.object": {
2773
- output: () => JSON.stringify(validationResult.value)
2795
+ output: () => JSON.stringify(object2)
2774
2796
  },
2775
2797
  "ai.usage.promptTokens": usage.promptTokens,
2776
2798
  "ai.usage.completionTokens": usage.completionTokens
@@ -2778,7 +2800,7 @@ async function generateObject({
2778
2800
  })
2779
2801
  );
2780
2802
  return new DefaultGenerateObjectResult({
2781
- object: validationResult.value,
2803
+ object: object2,
2782
2804
  finishReason,
2783
2805
  usage: calculateLanguageModelUsage(usage),
2784
2806
  warnings,
@@ -3536,36 +3558,36 @@ var DefaultStreamObjectResult = class {
3536
3558
  var import_provider_utils9 = require("@ai-sdk/provider-utils");
3537
3559
 
3538
3560
  // errors/no-output-specified-error.ts
3539
- var import_provider12 = require("@ai-sdk/provider");
3561
+ var import_provider13 = require("@ai-sdk/provider");
3540
3562
  var name9 = "AI_NoOutputSpecifiedError";
3541
3563
  var marker9 = `vercel.ai.error.${name9}`;
3542
3564
  var symbol9 = Symbol.for(marker9);
3543
3565
  var _a9;
3544
- var NoOutputSpecifiedError = class extends import_provider12.AISDKError {
3566
+ var NoOutputSpecifiedError = class extends import_provider13.AISDKError {
3545
3567
  // used in isInstance
3546
3568
  constructor({ message = "No output specified." } = {}) {
3547
3569
  super({ name: name9, message });
3548
3570
  this[_a9] = true;
3549
3571
  }
3550
3572
  static isInstance(error) {
3551
- return import_provider12.AISDKError.hasMarker(error, marker9);
3573
+ return import_provider13.AISDKError.hasMarker(error, marker9);
3552
3574
  }
3553
3575
  };
3554
3576
  _a9 = symbol9;
3555
3577
 
3556
3578
  // errors/tool-execution-error.ts
3557
- var import_provider13 = require("@ai-sdk/provider");
3579
+ var import_provider14 = require("@ai-sdk/provider");
3558
3580
  var name10 = "AI_ToolExecutionError";
3559
3581
  var marker10 = `vercel.ai.error.${name10}`;
3560
3582
  var symbol10 = Symbol.for(marker10);
3561
3583
  var _a10;
3562
- var ToolExecutionError = class extends import_provider13.AISDKError {
3584
+ var ToolExecutionError = class extends import_provider14.AISDKError {
3563
3585
  constructor({
3564
3586
  toolArgs,
3565
3587
  toolName,
3566
3588
  toolCallId,
3567
3589
  cause,
3568
- message = `Error executing tool ${toolName}: ${(0, import_provider13.getErrorMessage)(cause)}`
3590
+ message = `Error executing tool ${toolName}: ${(0, import_provider14.getErrorMessage)(cause)}`
3569
3591
  }) {
3570
3592
  super({ name: name10, message, cause });
3571
3593
  this[_a10] = true;
@@ -3574,7 +3596,7 @@ var ToolExecutionError = class extends import_provider13.AISDKError {
3574
3596
  this.toolCallId = toolCallId;
3575
3597
  }
3576
3598
  static isInstance(error) {
3577
- return import_provider13.AISDKError.hasMarker(error, marker10);
3599
+ return import_provider14.AISDKError.hasMarker(error, marker10);
3578
3600
  }
3579
3601
  };
3580
3602
  _a10 = symbol10;
@@ -3649,17 +3671,17 @@ var import_provider_utils8 = require("@ai-sdk/provider-utils");
3649
3671
  var import_ui_utils5 = require("@ai-sdk/ui-utils");
3650
3672
 
3651
3673
  // errors/invalid-tool-arguments-error.ts
3652
- var import_provider14 = require("@ai-sdk/provider");
3674
+ var import_provider15 = require("@ai-sdk/provider");
3653
3675
  var name11 = "AI_InvalidToolArgumentsError";
3654
3676
  var marker11 = `vercel.ai.error.${name11}`;
3655
3677
  var symbol11 = Symbol.for(marker11);
3656
3678
  var _a11;
3657
- var InvalidToolArgumentsError = class extends import_provider14.AISDKError {
3679
+ var InvalidToolArgumentsError = class extends import_provider15.AISDKError {
3658
3680
  constructor({
3659
3681
  toolArgs,
3660
3682
  toolName,
3661
3683
  cause,
3662
- message = `Invalid arguments for tool ${toolName}: ${(0, import_provider14.getErrorMessage)(
3684
+ message = `Invalid arguments for tool ${toolName}: ${(0, import_provider15.getErrorMessage)(
3663
3685
  cause
3664
3686
  )}`
3665
3687
  }) {
@@ -3669,18 +3691,18 @@ var InvalidToolArgumentsError = class extends import_provider14.AISDKError {
3669
3691
  this.toolName = toolName;
3670
3692
  }
3671
3693
  static isInstance(error) {
3672
- return import_provider14.AISDKError.hasMarker(error, marker11);
3694
+ return import_provider15.AISDKError.hasMarker(error, marker11);
3673
3695
  }
3674
3696
  };
3675
3697
  _a11 = symbol11;
3676
3698
 
3677
3699
  // errors/no-such-tool-error.ts
3678
- var import_provider15 = require("@ai-sdk/provider");
3700
+ var import_provider16 = require("@ai-sdk/provider");
3679
3701
  var name12 = "AI_NoSuchToolError";
3680
3702
  var marker12 = `vercel.ai.error.${name12}`;
3681
3703
  var symbol12 = Symbol.for(marker12);
3682
3704
  var _a12;
3683
- var NoSuchToolError = class extends import_provider15.AISDKError {
3705
+ var NoSuchToolError = class extends import_provider16.AISDKError {
3684
3706
  constructor({
3685
3707
  toolName,
3686
3708
  availableTools = void 0,
@@ -3692,29 +3714,29 @@ var NoSuchToolError = class extends import_provider15.AISDKError {
3692
3714
  this.availableTools = availableTools;
3693
3715
  }
3694
3716
  static isInstance(error) {
3695
- return import_provider15.AISDKError.hasMarker(error, marker12);
3717
+ return import_provider16.AISDKError.hasMarker(error, marker12);
3696
3718
  }
3697
3719
  };
3698
3720
  _a12 = symbol12;
3699
3721
 
3700
3722
  // errors/tool-call-repair-error.ts
3701
- var import_provider16 = require("@ai-sdk/provider");
3723
+ var import_provider17 = require("@ai-sdk/provider");
3702
3724
  var name13 = "AI_ToolCallRepairError";
3703
3725
  var marker13 = `vercel.ai.error.${name13}`;
3704
3726
  var symbol13 = Symbol.for(marker13);
3705
3727
  var _a13;
3706
- var ToolCallRepairError = class extends import_provider16.AISDKError {
3728
+ var ToolCallRepairError = class extends import_provider17.AISDKError {
3707
3729
  constructor({
3708
3730
  cause,
3709
3731
  originalError,
3710
- message = `Error repairing tool call: ${(0, import_provider16.getErrorMessage)(cause)}`
3732
+ message = `Error repairing tool call: ${(0, import_provider17.getErrorMessage)(cause)}`
3711
3733
  }) {
3712
3734
  super({ name: name13, message, cause });
3713
3735
  this[_a13] = true;
3714
3736
  this.originalError = originalError;
3715
3737
  }
3716
3738
  static isInstance(error) {
3717
- return import_provider16.AISDKError.hasMarker(error, marker13);
3739
+ return import_provider17.AISDKError.hasMarker(error, marker13);
3718
3740
  }
3719
3741
  };
3720
3742
  _a13 = symbol13;
@@ -4271,7 +4293,7 @@ var import_provider_utils10 = require("@ai-sdk/provider-utils");
4271
4293
  var import_ui_utils6 = require("@ai-sdk/ui-utils");
4272
4294
 
4273
4295
  // errors/index.ts
4274
- var import_provider17 = require("@ai-sdk/provider");
4296
+ var import_provider18 = require("@ai-sdk/provider");
4275
4297
 
4276
4298
  // core/generate-text/output.ts
4277
4299
  var text = () => ({
@@ -4351,7 +4373,7 @@ var object = ({
4351
4373
  };
4352
4374
 
4353
4375
  // core/generate-text/smooth-stream.ts
4354
- var import_provider18 = require("@ai-sdk/provider");
4376
+ var import_provider19 = require("@ai-sdk/provider");
4355
4377
  var import_provider_utils11 = require("@ai-sdk/provider-utils");
4356
4378
  var CHUNKING_REGEXPS = {
4357
4379
  word: /\s*\S+\s+/m,
@@ -4364,7 +4386,7 @@ function smoothStream({
4364
4386
  } = {}) {
4365
4387
  const chunkingRegexp = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking;
4366
4388
  if (chunkingRegexp == null) {
4367
- throw new import_provider18.InvalidArgumentError({
4389
+ throw new import_provider19.InvalidArgumentError({
4368
4390
  argument: "chunking",
4369
4391
  message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`
4370
4392
  });
@@ -6069,7 +6091,7 @@ function appendResponseMessages({
6069
6091
  }
6070
6092
 
6071
6093
  // core/registry/custom-provider.ts
6072
- var import_provider19 = require("@ai-sdk/provider");
6094
+ var import_provider20 = require("@ai-sdk/provider");
6073
6095
  function customProvider({
6074
6096
  languageModels,
6075
6097
  textEmbeddingModels,
@@ -6084,7 +6106,7 @@ function customProvider({
6084
6106
  if (fallbackProvider) {
6085
6107
  return fallbackProvider.languageModel(modelId);
6086
6108
  }
6087
- throw new import_provider19.NoSuchModelError({ modelId, modelType: "languageModel" });
6109
+ throw new import_provider20.NoSuchModelError({ modelId, modelType: "languageModel" });
6088
6110
  },
6089
6111
  textEmbeddingModel(modelId) {
6090
6112
  if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -6093,7 +6115,7 @@ function customProvider({
6093
6115
  if (fallbackProvider) {
6094
6116
  return fallbackProvider.textEmbeddingModel(modelId);
6095
6117
  }
6096
- throw new import_provider19.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
6118
+ throw new import_provider20.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
6097
6119
  },
6098
6120
  imageModel(modelId) {
6099
6121
  if (imageModels != null && modelId in imageModels) {
@@ -6102,19 +6124,19 @@ function customProvider({
6102
6124
  if (fallbackProvider == null ? void 0 : fallbackProvider.imageModel) {
6103
6125
  return fallbackProvider.imageModel(modelId);
6104
6126
  }
6105
- throw new import_provider19.NoSuchModelError({ modelId, modelType: "imageModel" });
6127
+ throw new import_provider20.NoSuchModelError({ modelId, modelType: "imageModel" });
6106
6128
  }
6107
6129
  };
6108
6130
  }
6109
6131
  var experimental_customProvider = customProvider;
6110
6132
 
6111
6133
  // core/registry/no-such-provider-error.ts
6112
- var import_provider20 = require("@ai-sdk/provider");
6134
+ var import_provider21 = require("@ai-sdk/provider");
6113
6135
  var name14 = "AI_NoSuchProviderError";
6114
6136
  var marker14 = `vercel.ai.error.${name14}`;
6115
6137
  var symbol14 = Symbol.for(marker14);
6116
6138
  var _a14;
6117
- var NoSuchProviderError = class extends import_provider20.NoSuchModelError {
6139
+ var NoSuchProviderError = class extends import_provider21.NoSuchModelError {
6118
6140
  constructor({
6119
6141
  modelId,
6120
6142
  modelType,
@@ -6128,13 +6150,13 @@ var NoSuchProviderError = class extends import_provider20.NoSuchModelError {
6128
6150
  this.availableProviders = availableProviders;
6129
6151
  }
6130
6152
  static isInstance(error) {
6131
- return import_provider20.AISDKError.hasMarker(error, marker14);
6153
+ return import_provider21.AISDKError.hasMarker(error, marker14);
6132
6154
  }
6133
6155
  };
6134
6156
  _a14 = symbol14;
6135
6157
 
6136
6158
  // core/registry/provider-registry.ts
6137
- var import_provider21 = require("@ai-sdk/provider");
6159
+ var import_provider22 = require("@ai-sdk/provider");
6138
6160
  function experimental_createProviderRegistry(providers) {
6139
6161
  const registry = new DefaultProviderRegistry();
6140
6162
  for (const [id, provider] of Object.entries(providers)) {
@@ -6167,7 +6189,7 @@ var DefaultProviderRegistry = class {
6167
6189
  splitId(id, modelType) {
6168
6190
  const index = id.indexOf(":");
6169
6191
  if (index === -1) {
6170
- throw new import_provider21.NoSuchModelError({
6192
+ throw new import_provider22.NoSuchModelError({
6171
6193
  modelId: id,
6172
6194
  modelType,
6173
6195
  message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId:modelId")`
@@ -6180,7 +6202,7 @@ var DefaultProviderRegistry = class {
6180
6202
  const [providerId, modelId] = this.splitId(id, "languageModel");
6181
6203
  const model = (_b = (_a15 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a15, modelId);
6182
6204
  if (model == null) {
6183
- throw new import_provider21.NoSuchModelError({ modelId: id, modelType: "languageModel" });
6205
+ throw new import_provider22.NoSuchModelError({ modelId: id, modelType: "languageModel" });
6184
6206
  }
6185
6207
  return model;
6186
6208
  }
@@ -6190,7 +6212,7 @@ var DefaultProviderRegistry = class {
6190
6212
  const provider = this.getProvider(providerId);
6191
6213
  const model = (_a15 = provider.textEmbeddingModel) == null ? void 0 : _a15.call(provider, modelId);
6192
6214
  if (model == null) {
6193
- throw new import_provider21.NoSuchModelError({
6215
+ throw new import_provider22.NoSuchModelError({
6194
6216
  modelId: id,
6195
6217
  modelType: "textEmbeddingModel"
6196
6218
  });
@@ -6203,7 +6225,7 @@ var DefaultProviderRegistry = class {
6203
6225
  const provider = this.getProvider(providerId);
6204
6226
  const model = (_a15 = provider.imageModel) == null ? void 0 : _a15.call(provider, modelId);
6205
6227
  if (model == null) {
6206
- throw new import_provider21.NoSuchModelError({ modelId: id, modelType: "imageModel" });
6228
+ throw new import_provider22.NoSuchModelError({ modelId: id, modelType: "imageModel" });
6207
6229
  }
6208
6230
  return model;
6209
6231
  }