ai 4.0.13 → 4.0.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,16 @@
  # ai
 
+ ## 4.0.14
+
+ ### Patch Changes
+
+ - 09a9cab: feat (ai/core): add experimental generateImage function
+ - Updated dependencies [09a9cab]
+   - @ai-sdk/provider@1.0.2
+   - @ai-sdk/provider-utils@2.0.4
+   - @ai-sdk/ui-utils@1.0.5
+   - @ai-sdk/react@1.0.6
+
  ## 4.0.13
 
  ### Patch Changes
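Note: the new export is `experimental_generateImage`. A minimal usage sketch based on the typings added further down in this diff; the `openai.image('dall-e-3')` helper is an assumption and requires an `@ai-sdk/openai` release that ships image model support:

```ts
// Sketch only: any ImageModelV1-compatible provider model works here.
import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

async function main() {
  const { image, images } = await generateImage({
    model: openai.image('dall-e-3'), // assumed provider helper, not part of this diff
    prompt: 'A watercolor painting of a lighthouse at dawn',
    size: '1024x1024',
  });

  console.log(images.length);             // 1 by default (n defaults to 1)
  console.log(image.base64.slice(0, 20)); // base64-encoded image data
}

main();
```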
package/dist/index.d.mts CHANGED
@@ -1,7 +1,7 @@
  import { DataStreamString, ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
  export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream } from '@ai-sdk/ui-utils';
  export { ToolCall as CoreToolCall, ToolResult as CoreToolResult, generateId } from '@ai-sdk/provider-utils';
- import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, NoSuchModelError } from '@ai-sdk/provider';
+ import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, ImageModelV1, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, NoSuchModelError } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  import { ServerResponse } from 'node:http';
  import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -352,6 +352,95 @@ declare function embedMany<VALUE>({ model, values, maxRetries: maxRetriesArg, ab
  experimental_telemetry?: TelemetrySettings;
  }): Promise<EmbedManyResult<VALUE>>;
 
+ /**
+ The result of a `generateImage` call.
+ It contains the images and additional information.
+ */
+ interface GenerateImageResult {
+ /**
+ The first image that was generated.
+ */
+ readonly image: GeneratedImage;
+ /**
+ The images that were generated.
+ */
+ readonly images: Array<GeneratedImage>;
+ }
+ interface GeneratedImage {
+ /**
+ Image as a base64 encoded string.
+ */
+ readonly base64: string;
+ /**
+ Image as a Uint8Array.
+ */
+ readonly uint8Array: Uint8Array;
+ }
+
+ /**
+ Generates images using an image model.
+
+ @param model - The image model to use.
+ @param prompt - The prompt that should be used to generate the image.
+ @param n - Number of images to generate. Default: 1.
+ @param size - Size of the images to generate. Must have the format `{width}x{height}`.
+ @param providerOptions - Additional provider-specific options that are passed through to the provider
+ as body parameters.
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+ @returns A result object that contains the generated images.
+ */
+ declare function generateImage({ model, prompt, n, size, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
+ /**
+ The image model to use.
+ */
+ model: ImageModelV1;
+ /**
+ The prompt that should be used to generate the image.
+ */
+ prompt: string;
+ /**
+ Number of images to generate.
+ */
+ n?: number;
+ /**
+ Size of the images to generate. Must have the format `{width}x{height}`.
+ */
+ size?: `${number}x${number}`;
+ /**
+ Additional provider-specific options that are passed through to the provider
+ as body parameters.
+
+ The outer record is keyed by the provider name, and the inner
+ record is keyed by the provider-specific metadata key.
+ ```ts
+ {
+ "openai": {
+ "style": "vivid"
+ }
+ }
+ ```
+ */
+ providerOptions?: Record<string, Record<string, JSONValue>>;
+ /**
+ Maximum number of retries per embedding model call. Set to 0 to disable retries.
+
+ @default 2
+ */
+ maxRetries?: number;
+ /**
+ Abort signal.
+ */
+ abortSignal?: AbortSignal;
+ /**
+ Additional headers to include in the request.
+ Only applicable for HTTP-based providers.
+ */
+ headers?: Record<string, string>;
+ }): Promise<GenerateImageResult>;
+
  type CallSettings = {
  /**
  Maximum number of tokens to generate.
@@ -2396,4 +2485,4 @@ declare namespace llamaindexAdapter {
  };
  }
 
- export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, streamObject, streamText, tool };
+ export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, streamObject, streamText, tool };
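Note: the `providerOptions` record in the JSDoc above is keyed by provider name, with provider-specific options nested inside. A hedged sketch of passing it together with `n` and `size`; the `myImageModel` value stands in for any `ImageModelV1` implementation and is an assumption, not something defined in this package:

```ts
import { experimental_generateImage as generateImage } from 'ai';
import type { ImageModelV1 } from '@ai-sdk/provider';

// Placeholder: supplied by a provider package in real code.
declare const myImageModel: ImageModelV1;

async function run() {
  const { images } = await generateImage({
    model: myImageModel,
    prompt: 'Product shots of a ceramic mug',
    n: 2,                         // defaults to 1 when omitted
    size: '1024x1024',            // must match `${number}x${number}`
    providerOptions: {
      openai: { style: 'vivid' }, // outer key = provider name, inner keys = provider-specific options
    },
    maxRetries: 0,                // disable retries (default is 2)
  });
  return images;
}
```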
package/dist/index.d.ts CHANGED
@@ -1,7 +1,7 @@
  import { DataStreamString, ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
  export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream } from '@ai-sdk/ui-utils';
  export { ToolCall as CoreToolCall, ToolResult as CoreToolResult, generateId } from '@ai-sdk/provider-utils';
- import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, NoSuchModelError } from '@ai-sdk/provider';
+ import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, ImageModelV1, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, NoSuchModelError } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  import { ServerResponse } from 'node:http';
  import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -352,6 +352,95 @@ declare function embedMany<VALUE>({ model, values, maxRetries: maxRetriesArg, ab
  experimental_telemetry?: TelemetrySettings;
  }): Promise<EmbedManyResult<VALUE>>;
 
+ /**
+ The result of a `generateImage` call.
+ It contains the images and additional information.
+ */
+ interface GenerateImageResult {
+ /**
+ The first image that was generated.
+ */
+ readonly image: GeneratedImage;
+ /**
+ The images that were generated.
+ */
+ readonly images: Array<GeneratedImage>;
+ }
+ interface GeneratedImage {
+ /**
+ Image as a base64 encoded string.
+ */
+ readonly base64: string;
+ /**
+ Image as a Uint8Array.
+ */
+ readonly uint8Array: Uint8Array;
+ }
+
+ /**
+ Generates images using an image model.
+
+ @param model - The image model to use.
+ @param prompt - The prompt that should be used to generate the image.
+ @param n - Number of images to generate. Default: 1.
+ @param size - Size of the images to generate. Must have the format `{width}x{height}`.
+ @param providerOptions - Additional provider-specific options that are passed through to the provider
+ as body parameters.
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+ @returns A result object that contains the generated images.
+ */
+ declare function generateImage({ model, prompt, n, size, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
+ /**
+ The image model to use.
+ */
+ model: ImageModelV1;
+ /**
+ The prompt that should be used to generate the image.
+ */
+ prompt: string;
+ /**
+ Number of images to generate.
+ */
+ n?: number;
+ /**
+ Size of the images to generate. Must have the format `{width}x{height}`.
+ */
+ size?: `${number}x${number}`;
+ /**
+ Additional provider-specific options that are passed through to the provider
+ as body parameters.
+
+ The outer record is keyed by the provider name, and the inner
+ record is keyed by the provider-specific metadata key.
+ ```ts
+ {
+ "openai": {
+ "style": "vivid"
+ }
+ }
+ ```
+ */
+ providerOptions?: Record<string, Record<string, JSONValue>>;
+ /**
+ Maximum number of retries per embedding model call. Set to 0 to disable retries.
+
+ @default 2
+ */
+ maxRetries?: number;
+ /**
+ Abort signal.
+ */
+ abortSignal?: AbortSignal;
+ /**
+ Additional headers to include in the request.
+ Only applicable for HTTP-based providers.
+ */
+ headers?: Record<string, string>;
+ }): Promise<GenerateImageResult>;
+
  type CallSettings = {
  /**
  Maximum number of tokens to generate.
@@ -2396,4 +2485,4 @@ declare namespace llamaindexAdapter {
  };
  }
 
- export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, streamObject, streamText, tool };
+ export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, streamObject, streamText, tool };
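Note: both declaration files expose the generated data as `base64` and `uint8Array`, so persisting results is straightforward. A minimal Node sketch; the model value, file names, and `.png` extension are placeholders (the actual format depends on the provider):

```ts
import { writeFileSync } from 'node:fs';
import { experimental_generateImage as generateImage } from 'ai';
import type { ImageModelV1 } from '@ai-sdk/provider';

// Placeholder: any ImageModelV1 implementation from a provider package.
declare const imageModel: ImageModelV1;

async function saveImages() {
  const { images } = await generateImage({
    model: imageModel,
    prompt: 'An isometric illustration of a small robot',
  });

  images.forEach((image, i) => {
    // `uint8Array` is decoded lazily from `base64` (see the index.js diff below).
    writeFileSync(`image-${i}.png`, image.uint8Array);
  });
}
```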
package/dist/index.js CHANGED
@@ -56,10 +56,11 @@ __export(streams_exports, {
  embedMany: () => embedMany,
  experimental_createProviderRegistry: () => experimental_createProviderRegistry,
  experimental_customProvider: () => experimental_customProvider,
+ experimental_generateImage: () => generateImage,
  experimental_wrapLanguageModel: () => experimental_wrapLanguageModel,
  formatAssistantStreamPart: () => import_ui_utils14.formatAssistantStreamPart,
  formatDataStreamPart: () => import_ui_utils14.formatDataStreamPart,
- generateId: () => import_provider_utils12.generateId,
+ generateId: () => import_provider_utils13.generateId,
  generateObject: () => generateObject,
  generateText: () => generateText,
  jsonSchema: () => import_ui_utils9.jsonSchema,
@@ -74,7 +75,7 @@ __export(streams_exports, {
  });
  module.exports = __toCommonJS(streams_exports);
  var import_ui_utils14 = require("@ai-sdk/ui-utils");
- var import_provider_utils12 = require("@ai-sdk/provider-utils");
+ var import_provider_utils13 = require("@ai-sdk/provider-utils");
 
  // core/index.ts
  var import_ui_utils9 = require("@ai-sdk/ui-utils");
@@ -856,8 +857,47 @@ var DefaultEmbedManyResult = class {
  }
  };
 
+ // core/generate-image/generate-image.ts
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
+ async function generateImage({
+ model,
+ prompt,
+ n,
+ size,
+ providerOptions,
+ maxRetries: maxRetriesArg,
+ abortSignal,
+ headers
+ }) {
+ const { retry } = prepareRetries({ maxRetries: maxRetriesArg });
+ const { images } = await retry(
+ () => model.doGenerate({
+ prompt,
+ n: n != null ? n : 1,
+ abortSignal,
+ headers,
+ size,
+ providerOptions: providerOptions != null ? providerOptions : {}
+ })
+ );
+ return new DefaultGenerateImageResult({ base64Images: images });
+ }
+ var DefaultGenerateImageResult = class {
+ constructor(options) {
+ this.images = options.base64Images.map((base64) => ({
+ base64,
+ get uint8Array() {
+ return (0, import_provider_utils2.convertBase64ToUint8Array)(this.base64);
+ }
+ }));
+ }
+ get image() {
+ return this.images[0];
+ }
+ };
+
  // core/generate-object/generate-object.ts
- var import_provider_utils5 = require("@ai-sdk/provider-utils");
+ var import_provider_utils6 = require("@ai-sdk/provider-utils");
 
  // util/download-error.ts
  var import_provider4 = require("@ai-sdk/provider");
@@ -930,7 +970,7 @@ function detectImageMimeType(image) {
  }
 
  // core/prompt/data-content.ts
- var import_provider_utils2 = require("@ai-sdk/provider-utils");
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
 
  // core/prompt/invalid-data-content-error.ts
  var import_provider5 = require("@ai-sdk/provider");
@@ -974,9 +1014,9 @@ function convertDataContentToBase64String(content) {
  return content;
  }
  if (content instanceof ArrayBuffer) {
- return (0, import_provider_utils2.convertUint8ArrayToBase64)(new Uint8Array(content));
+ return (0, import_provider_utils3.convertUint8ArrayToBase64)(new Uint8Array(content));
  }
- return (0, import_provider_utils2.convertUint8ArrayToBase64)(content);
+ return (0, import_provider_utils3.convertUint8ArrayToBase64)(content);
  }
  function convertDataContentToUint8Array(content) {
  if (content instanceof Uint8Array) {
@@ -984,7 +1024,7 @@ function convertDataContentToUint8Array(content) {
  }
  if (typeof content === "string") {
  try {
- return (0, import_provider_utils2.convertBase64ToUint8Array)(content);
+ return (0, import_provider_utils3.convertBase64ToUint8Array)(content);
  } catch (error) {
  throw new InvalidDataContentError({
  message: "Invalid data content. Content string is not a base64-encoded media.",
@@ -1323,7 +1363,7 @@ function prepareCallSettings({
 
  // core/prompt/standardize-prompt.ts
  var import_provider8 = require("@ai-sdk/provider");
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
  var import_zod7 = require("zod");
 
  // core/prompt/message.ts
@@ -1706,7 +1746,7 @@ function standardizePrompt({
  const messages = promptType === "ui-messages" ? convertToCoreMessages(prompt.messages, {
  tools
  }) : prompt.messages;
- const validationResult = (0, import_provider_utils3.safeValidateTypes)({
+ const validationResult = (0, import_provider_utils4.safeValidateTypes)({
  value: messages,
  schema: import_zod7.z.array(coreMessageSchema)
  });
@@ -1778,7 +1818,7 @@ _a7 = symbol7;
 
  // core/generate-object/output-strategy.ts
  var import_provider10 = require("@ai-sdk/provider");
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
  var import_ui_utils2 = require("@ai-sdk/ui-utils");
 
  // core/util/async-iterable-stream.ts
@@ -1828,7 +1868,7 @@ var objectOutputStrategy = (schema) => ({
  };
  },
  validateFinalResult(value) {
- return (0, import_provider_utils4.safeValidateTypes)({ value, schema });
+ return (0, import_provider_utils5.safeValidateTypes)({ value, schema });
  },
  createElementStream() {
  throw new import_provider10.UnsupportedFunctionalityError({
@@ -1867,7 +1907,7 @@ var arrayOutputStrategy = (schema) => {
  const resultArray = [];
  for (let i = 0; i < inputArray.length; i++) {
  const element = inputArray[i];
- const result = (0, import_provider_utils4.safeValidateTypes)({ value: element, schema });
+ const result = (0, import_provider_utils5.safeValidateTypes)({ value: element, schema });
  if (i === inputArray.length - 1 && !isFinalDelta) {
  continue;
  }
@@ -1908,7 +1948,7 @@ var arrayOutputStrategy = (schema) => {
  }
  const inputArray = value.elements;
  for (const element of inputArray) {
- const result = (0, import_provider_utils4.safeValidateTypes)({ value: element, schema });
+ const result = (0, import_provider_utils5.safeValidateTypes)({ value: element, schema });
  if (!result.success) {
  return result;
  }
@@ -2137,7 +2177,7 @@ function validateObjectGenerationInput({
  }
 
  // core/generate-object/generate-object.ts
- var originalGenerateId = (0, import_provider_utils5.createIdGenerator)({ prefix: "aiobj", size: 24 });
+ var originalGenerateId = (0, import_provider_utils6.createIdGenerator)({ prefix: "aiobj", size: 24 });
  async function generateObject({
  model,
  enum: enumValues,
@@ -2442,7 +2482,7 @@ async function generateObject({
  throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
  }
  }
- const parseResult = (0, import_provider_utils5.safeParseJSON)({ text: result });
+ const parseResult = (0, import_provider_utils6.safeParseJSON)({ text: result });
  if (!parseResult.success) {
  throw parseResult.error;
  }
@@ -2504,7 +2544,7 @@ var DefaultGenerateObjectResult = class {
  };
 
  // core/generate-object/stream-object.ts
- var import_provider_utils6 = require("@ai-sdk/provider-utils");
+ var import_provider_utils7 = require("@ai-sdk/provider-utils");
  var import_ui_utils3 = require("@ai-sdk/ui-utils");
 
  // util/delayed-promise.ts
@@ -2634,7 +2674,7 @@ function now() {
  }
 
  // core/generate-object/stream-object.ts
- var originalGenerateId2 = (0, import_provider_utils6.createIdGenerator)({ prefix: "aiobj", size: 24 });
+ var originalGenerateId2 = (0, import_provider_utils7.createIdGenerator)({ prefix: "aiobj", size: 24 });
  function streamObject({
  model,
  schema: inputSchema,
@@ -3173,7 +3213,7 @@ var DefaultStreamObjectResult = class {
  };
 
  // core/generate-text/generate-text.ts
- var import_provider_utils8 = require("@ai-sdk/provider-utils");
+ var import_provider_utils9 = require("@ai-sdk/provider-utils");
 
  // errors/index.ts
  var import_provider15 = require("@ai-sdk/provider");
@@ -3339,7 +3379,7 @@ function removeTextAfterLastWhitespace(text2) {
  }
 
  // core/generate-text/parse-tool-call.ts
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
+ var import_provider_utils8 = require("@ai-sdk/provider-utils");
  var import_ui_utils5 = require("@ai-sdk/ui-utils");
  async function parseToolCall({
  toolCall,
@@ -3392,7 +3432,7 @@ async function doParseToolCall({
  });
  }
  const schema = (0, import_ui_utils5.asSchema)(tool2.parameters);
- const parseResult = toolCall.args.trim() === "" ? (0, import_provider_utils7.safeValidateTypes)({ value: {}, schema }) : (0, import_provider_utils7.safeParseJSON)({ text: toolCall.args, schema });
+ const parseResult = toolCall.args.trim() === "" ? (0, import_provider_utils8.safeValidateTypes)({ value: {}, schema }) : (0, import_provider_utils8.safeParseJSON)({ text: toolCall.args, schema });
  if (parseResult.success === false) {
  throw new InvalidToolArgumentsError({
  toolName,
@@ -3446,7 +3486,7 @@ function toResponseMessages({
  }
 
  // core/generate-text/generate-text.ts
- var originalGenerateId3 = (0, import_provider_utils8.createIdGenerator)({ prefix: "aitxt", size: 24 });
+ var originalGenerateId3 = (0, import_provider_utils9.createIdGenerator)({ prefix: "aitxt", size: 24 });
  async function generateText({
  model,
  tools,
@@ -3851,7 +3891,7 @@ __export(output_exports, {
  object: () => object,
  text: () => text
  });
- var import_provider_utils9 = require("@ai-sdk/provider-utils");
+ var import_provider_utils10 = require("@ai-sdk/provider-utils");
  var import_ui_utils6 = require("@ai-sdk/ui-utils");
  var text = () => ({
  type: "text",
@@ -3880,13 +3920,13 @@ var object = ({
  });
  },
  parseOutput({ text: text2 }) {
- return (0, import_provider_utils9.parseJSON)({ text: text2, schema });
+ return (0, import_provider_utils10.parseJSON)({ text: text2, schema });
  }
  };
  };
 
  // core/generate-text/stream-text.ts
- var import_provider_utils10 = require("@ai-sdk/provider-utils");
+ var import_provider_utils11 = require("@ai-sdk/provider-utils");
  var import_ui_utils8 = require("@ai-sdk/ui-utils");
 
  // core/util/merge-streams.ts
@@ -4185,7 +4225,7 @@ function runToolsTransformation({
  }
 
  // core/generate-text/stream-text.ts
- var originalGenerateId4 = (0, import_provider_utils10.createIdGenerator)({ prefix: "aitxt", size: 24 });
+ var originalGenerateId4 = (0, import_provider_utils11.createIdGenerator)({ prefix: "aitxt", size: 24 });
  function streamText({
  model,
  tools,
@@ -5388,11 +5428,11 @@ __export(llamaindex_adapter_exports, {
  toDataStream: () => toDataStream2,
  toDataStreamResponse: () => toDataStreamResponse2
  });
- var import_provider_utils11 = require("@ai-sdk/provider-utils");
+ var import_provider_utils12 = require("@ai-sdk/provider-utils");
  var import_ui_utils12 = require("@ai-sdk/ui-utils");
  function toDataStreamInternal2(stream, callbacks) {
  const trimStart = trimStartOfStream();
- return (0, import_provider_utils11.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
+ return (0, import_provider_utils12.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
  new TransformStream({
  async transform(message, controller) {
  controller.enqueue(trimStart(message.delta));
@@ -5548,6 +5588,7 @@ var StreamData = class {
  embedMany,
  experimental_createProviderRegistry,
  experimental_customProvider,
+ experimental_generateImage,
  experimental_wrapLanguageModel,
  formatAssistantStreamPart,
  formatDataStreamPart,