ai 5.0.0-canary.4 → 5.0.0-canary.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/CHANGELOG.md +18 -0
  2. package/dist/index.d.mts +861 -145
  3. package/dist/index.d.ts +861 -145
  4. package/dist/index.js +1653 -166
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +1568 -119
  7. package/dist/index.mjs.map +1 -1
  8. package/dist/internal/index.d.mts +205 -15
  9. package/dist/internal/index.d.ts +205 -15
  10. package/dist/internal/index.js +63 -9
  11. package/dist/internal/index.js.map +1 -1
  12. package/dist/internal/index.mjs +46 -2
  13. package/dist/internal/index.mjs.map +1 -1
  14. package/dist/mcp-stdio/index.js.map +1 -0
  15. package/dist/mcp-stdio/index.mjs.map +1 -0
  16. package/dist/test/index.js.map +1 -0
  17. package/dist/test/index.mjs.map +1 -0
  18. package/package.json +18 -19
  19. package/mcp-stdio/create-child-process.test.ts +0 -92
  20. package/mcp-stdio/create-child-process.ts +0 -21
  21. package/mcp-stdio/dist/index.js.map +0 -1
  22. package/mcp-stdio/dist/index.mjs.map +0 -1
  23. package/mcp-stdio/get-environment.test.ts +0 -13
  24. package/mcp-stdio/get-environment.ts +0 -43
  25. package/mcp-stdio/index.ts +0 -4
  26. package/mcp-stdio/mcp-stdio-transport.test.ts +0 -262
  27. package/mcp-stdio/mcp-stdio-transport.ts +0 -157
  28. package/test/dist/index.js.map +0 -1
  29. package/test/dist/index.mjs.map +0 -1
  30. package/{mcp-stdio/dist → dist/mcp-stdio}/index.d.mts +6 -6
  31. package/{mcp-stdio/dist → dist/mcp-stdio}/index.d.ts +6 -6
  32. /package/{mcp-stdio/dist → dist/mcp-stdio}/index.js +0 -0
  33. /package/{mcp-stdio/dist → dist/mcp-stdio}/index.mjs +0 -0
  34. /package/{test/dist → dist/test}/index.d.mts +0 -0
  35. /package/{test/dist → dist/test}/index.d.ts +0 -0
  36. /package/{test/dist → dist/test}/index.js +0 -0
  37. /package/{test/dist → dist/test}/index.mjs +0 -0
@@ -1,19 +1,7 @@
- import { Schema, Message } from '@ai-sdk/ui-utils';
  import { z } from 'zod';
- import { LanguageModelV2ProviderMetadata, LanguageModelV2FunctionTool, LanguageModelV2ProviderDefinedTool, LanguageModelV2ToolChoice, LanguageModelV2Prompt } from '@ai-sdk/provider';
-
- type ToolResultContent = Array<{
- type: 'text';
- text: string;
- } | {
- type: 'image';
- data: string;
- mediaType?: string;
- /**
- * @deprecated Use `mediaType` instead.
- */
- mimeType?: string;
- }>;
+ import { ToolCall, ToolResult, Validator } from '@ai-sdk/provider-utils';
+ import { JSONSchema7 } from 'json-schema';
+ import { LanguageModelV2ProviderMetadata, LanguageModelV2Source, LanguageModelV2FunctionTool, LanguageModelV2ProviderDefinedTool, LanguageModelV2ToolChoice, LanguageModelV2Prompt } from '@ai-sdk/provider';

  /**
  Tool choice for the generation. It supports the following settings:
@@ -65,6 +53,208 @@ declare function calculateLanguageModelUsage({ promptTokens, completionTokens, }
  completionTokens: number;
  }): LanguageModelUsage;

+ /**
+ Tool invocations are either tool calls or tool results. For each assistant tool call,
+ there is one tool invocation. While the call is in progress, the invocation is a tool call.
+ Once the call is complete, the invocation is a tool result.
+
+ The step is used to track how to map an assistant UI message with many tool invocations
+ back to a sequence of LLM assistant/tool result message pairs.
+ It is optional for backwards compatibility.
+ */
+ type ToolInvocation = ({
+ state: 'partial-call';
+ step?: number;
+ } & ToolCall<string, any>) | ({
+ state: 'call';
+ step?: number;
+ } & ToolCall<string, any>) | ({
+ state: 'result';
+ step?: number;
+ } & ToolResult<string, any, any>);
+ /**
+ * An attachment that can be sent along with a message.
+ */
+ interface Attachment {
+ /**
+ * The name of the attachment, usually the file name.
+ */
+ name?: string;
+ /**
+ * A string indicating the [media type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type).
+ * By default, it's extracted from the pathname's extension.
+ */
+ contentType?: string;
+ /**
+ * The URL of the attachment. It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs).
+ */
+ url: string;
+ }
+ /**
+ * AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
+ */
+ interface Message {
+ /**
+ A unique identifier for the message.
+ */
+ id: string;
+ /**
+ The timestamp of the message.
+ */
+ createdAt?: Date;
+ /**
+ Text content of the message. Use parts when possible.
+ */
+ content: string;
+ /**
+ Reasoning for the message.
+
+ @deprecated Use `parts` instead.
+ */
+ reasoning?: string;
+ /**
+ * Additional attachments to be sent along with the message.
+ */
+ experimental_attachments?: Attachment[];
+ /**
+ The 'data' role is deprecated.
+ */
+ role: 'system' | 'user' | 'assistant' | 'data';
+ /**
+ For data messages.
+
+ @deprecated Data messages will be removed.
+ */
+ data?: JSONValue;
+ /**
+ * Additional message-specific information added on the server via StreamData
+ */
+ annotations?: JSONValue[] | undefined;
+ /**
+ Tool invocations (that can be tool calls or tool results, depending on whether or not the invocation has finished)
+ that the assistant made as part of this message.
+
+ @deprecated Use `parts` instead.
+ */
+ toolInvocations?: Array<ToolInvocation>;
+ /**
+ * The parts of the message. Use this for rendering the message in the UI.
+ *
+ * Assistant messages can have text, reasoning and tool invocation parts.
+ * User messages can have text parts.
+ */
+ parts?: Array<TextUIPart | ReasoningUIPart | ToolInvocationUIPart | SourceUIPart | FileUIPart | StepStartUIPart>;
+ }
+ /**
+ * A text part of a message.
+ */
+ type TextUIPart = {
+ type: 'text';
+ /**
+ * The text content.
+ */
+ text: string;
+ };
+ /**
+ * A reasoning part of a message.
+ */
+ type ReasoningUIPart = {
+ type: 'reasoning';
+ /**
+ * The reasoning text.
+ */
+ reasoning: string;
+ details: Array<{
+ type: 'text';
+ text: string;
+ signature?: string;
+ } | {
+ type: 'redacted';
+ data: string;
+ }>;
+ };
+ /**
+ * A tool invocation part of a message.
+ */
+ type ToolInvocationUIPart = {
+ type: 'tool-invocation';
+ /**
+ * The tool invocation.
+ */
+ toolInvocation: ToolInvocation;
+ };
+ /**
+ * A source part of a message.
+ */
+ type SourceUIPart = {
+ type: 'source';
+ /**
+ * The source.
+ */
+ source: LanguageModelV2Source;
+ };
+ /**
+ * A file part of a message.
+ */
+ type FileUIPart = {
+ type: 'file';
+ /**
+ * IANA media type of the file.
+ *
+ * @see https://www.iana.org/assignments/media-types/media-types.xhtml
+ */
+ mediaType: string;
+ /**
+ * The base64 encoded data.
+ */
+ data: string;
+ };
+ /**
+ * A step boundary part of a message.
+ */
+ type StepStartUIPart = {
+ type: 'step-start';
+ };
+ /**
+ A JSON value can be a string, number, boolean, object, array, or null.
+ JSON values can be serialized and deserialized by the JSON.stringify and JSON.parse methods.
+ */
+ type JSONValue = null | string | number | boolean | {
+ [value: string]: JSONValue;
+ } | Array<JSONValue>;
+
+ /**
+ * Used to mark schemas so we can support both Zod and custom schemas.
+ */
+ declare const schemaSymbol: unique symbol;
+ type Schema<OBJECT = unknown> = Validator<OBJECT> & {
+ /**
+ * Used to mark schemas so we can support both Zod and custom schemas.
+ */
+ [schemaSymbol]: true;
+ /**
+ * Schema type for inference.
+ */
+ _type: OBJECT;
+ /**
+ * The JSON Schema for the schema. It is passed to the providers.
+ */
+ readonly jsonSchema: JSONSchema7;
+ };
+
+ type ToolResultContent = Array<{
+ type: 'text';
+ text: string;
+ } | {
+ type: 'image';
+ data: string;
+ mediaType?: string;
+ /**
+ * @deprecated Use `mediaType` instead.
+ */
+ mimeType?: string;
+ }>;
+
  /**
  Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
  */
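
Below is a minimal usage sketch of the UI message types added above, assuming Message stays exported from the ai package root; the ids, tool names, and values are illustrative only.

import type { Message } from 'ai';

// Hypothetical example data; field shapes follow the added Message/ToolInvocation types.
const message: Message = {
  id: 'msg_1',
  role: 'assistant',
  content: 'It is sunny in Berlin.',
  parts: [
    { type: 'step-start' },
    { type: 'text', text: 'It is sunny in Berlin.' },
    {
      type: 'tool-invocation',
      toolInvocation: {
        state: 'result', // finished invocation, so it carries the ToolResult fields
        step: 0,
        toolCallId: 'call_1',
        toolName: 'getWeather',
        args: { city: 'Berlin' },
        result: { condition: 'sunny' },
      },
    },
  ],
};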
@@ -1,7 +1,9 @@
  "use strict";
+ var __create = Object.create;
  var __defProp = Object.defineProperty;
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __getProtoOf = Object.getPrototypeOf;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
  var __export = (target, all) => {
  for (var name7 in all)
@@ -15,6 +17,14 @@ var __copyProps = (to, from, except, desc) => {
  }
  return to;
  };
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+ // If the importer is in node compatibility mode or this is not an ESM
+ // file that has been converted to a CommonJS file using a Babel-
+ // compatible transform (i.e. "__esModule" has not been set), then set
+ // "default" to the CommonJS "module.exports" for node compatibility.
+ isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+ mod
+ ));
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

  // internal/index.ts
@@ -708,8 +718,52 @@ function standardizePrompt({
  throw new Error("unreachable");
  }

- // core/prompt/prepare-tools-and-tool-choice.ts
- var import_ui_utils = require("@ai-sdk/ui-utils");
+ // core/util/index.ts
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
+
+ // core/util/schema.ts
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
+
+ // core/util/zod-schema.ts
+ var import_zod_to_json_schema = __toESM(require("zod-to-json-schema"));
+ function zodSchema(zodSchema2, options) {
+ var _a7;
+ const useReferences = (_a7 = options == null ? void 0 : options.useReferences) != null ? _a7 : false;
+ return jsonSchema(
+ (0, import_zod_to_json_schema.default)(zodSchema2, {
+ $refStrategy: useReferences ? "root" : "none",
+ target: "jsonSchema7"
+ // note: openai mode breaks various gemini conversions
+ }),
+ {
+ validate: (value) => {
+ const result = zodSchema2.safeParse(value);
+ return result.success ? { success: true, value: result.data } : { success: false, error: result.error };
+ }
+ }
+ );
+ }
+
+ // core/util/schema.ts
+ var schemaSymbol = Symbol.for("vercel.ai.schema");
+ function jsonSchema(jsonSchema2, {
+ validate
+ } = {}) {
+ return {
+ [schemaSymbol]: true,
+ _type: void 0,
+ // should never be used directly
+ [import_provider_utils3.validatorSymbol]: true,
+ jsonSchema: jsonSchema2,
+ validate
+ };
+ }
+ function isSchema(value) {
+ return typeof value === "object" && value !== null && schemaSymbol in value && value[schemaSymbol] === true && "jsonSchema" in value && "validate" in value;
+ }
+ function asSchema(schema) {
+ return isSchema(schema) ? schema : zodSchema(schema);
+ }

  // core/util/is-non-empty-object.ts
  function isNonEmptyObject(object) {
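
The compiled output above mirrors the Schema type from the declaration diff. A quick sketch of how the two schema helpers compose, assuming jsonSchema and zodSchema remain exported from the ai package root (the parameter shape below is illustrative):

import { z } from 'zod';
import { jsonSchema, zodSchema } from 'ai';

// Zod schemas are converted to JSON Schema via zod-to-json-schema and validated with safeParse.
const cityParams = zodSchema(z.object({ city: z.string() }));

// Hand-written JSON Schema can be wrapped directly; the validate callback is optional.
const cityParamsJson = jsonSchema<{ city: string }>({
  type: 'object',
  properties: { city: { type: 'string' } },
  required: ['city'],
});

// prepareToolsAndToolChoice calls asSchema(tool.parameters).jsonSchema, so both forms
// reach the provider as plain JSON Schema.
console.log(cityParams.jsonSchema, cityParamsJson.jsonSchema);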
@@ -741,7 +795,7 @@ function prepareToolsAndToolChoice({
  type: "function",
  name: name7,
  description: tool.description,
- parameters: (0, import_ui_utils.asSchema)(tool.parameters).jsonSchema
+ parameters: asSchema(tool.parameters).jsonSchema
  };
  case "provider-defined":
  return {
@@ -788,7 +842,7 @@ _a3 = symbol3;

  // util/retry-with-exponential-backoff.ts
  var import_provider6 = require("@ai-sdk/provider");
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");

  // util/retry-error.ts
  var import_provider5 = require("@ai-sdk/provider");
@@ -832,13 +886,13 @@ async function _retryWithExponentialBackoff(f, {
  try {
  return await f();
  } catch (error) {
- if ((0, import_provider_utils3.isAbortError)(error)) {
+ if ((0, import_provider_utils5.isAbortError)(error)) {
  throw error;
  }
  if (maxRetries === 0) {
  throw error;
  }
- const errorMessage = (0, import_provider_utils3.getErrorMessage)(error);
+ const errorMessage = (0, import_provider_utils5.getErrorMessage)(error);
  const newErrors = [...errors, error];
  const tryNumber = newErrors.length;
  if (tryNumber > maxRetries) {
@@ -849,7 +903,7 @@ async function _retryWithExponentialBackoff(f, {
  });
  }
  if (error instanceof Error && import_provider6.APICallError.isInstance(error) && error.isRetryable === true && tryNumber <= maxRetries) {
- await (0, import_provider_utils3.delay)(delayInMs);
+ await (0, import_provider_utils5.delay)(delayInMs);
  return _retryWithExponentialBackoff(
  f,
  { maxRetries, delayInMs: backoffFactor * delayInMs, backoffFactor },
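
For reference, a small sketch of the delay progression implied by the recursion above, where each attempt multiplies delayInMs by backoffFactor; the numbers are illustrative, not package defaults.

// Illustrative only: reproduces the delay schedule of _retryWithExponentialBackoff.
function backoffDelays(maxRetries: number, delayInMs: number, backoffFactor: number): number[] {
  const delays: number[] = [];
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    delays.push(delayInMs);
    delayInMs *= backoffFactor; // same update as { delayInMs: backoffFactor * delayInMs }
  }
  return delays;
}

// backoffDelays(3, 2000, 2) -> [2000, 4000, 8000]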
@@ -989,7 +1043,7 @@ function prepareCallSettings({
  }

  // core/prompt/convert-to-language-model-prompt.ts
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");

  // util/download-error.ts
  var import_provider7 = require("@ai-sdk/provider");
@@ -1383,7 +1437,7 @@ function convertPartToLanguageModelPart(part, downloadedAssets) {
  mediaType: mediaType != null ? mediaType : "image/*",
  // any image
  filename: void 0,
- data: normalizedData instanceof Uint8Array ? (0, import_provider_utils4.convertUint8ArrayToBase64)(normalizedData) : normalizedData,
+ data: normalizedData instanceof Uint8Array ? (0, import_provider_utils6.convertUint8ArrayToBase64)(normalizedData) : normalizedData,
  providerOptions: (_d = part.providerOptions) != null ? _d : part.experimental_providerMetadata
  };
  }