ai 4.0.26 → 4.0.28
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +24 -0
- package/dist/index.d.mts +51 -9
- package/dist/index.d.ts +51 -9
- package/dist/index.js +110 -50
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +106 -44
- package/dist/index.mjs.map +1 -1
- package/package.json +5 -5
- package/rsc/dist/rsc-server.mjs +1 -1
- package/rsc/dist/rsc-server.mjs.map +1 -1
- package/test/dist/index.d.mts +11 -6
- package/test/dist/index.d.ts +11 -6
- package/test/dist/index.js +6 -3
- package/test/dist/index.js.map +1 -1
- package/test/dist/index.mjs +6 -3
- package/test/dist/index.mjs.map +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,29 @@
 # ai
 
+## 4.0.28
+
+### Patch Changes
+
+- 19a2ce7: feat (ai/core): add aspectRatio and seed options to generateImage
+- 6337688: feat: change image generation errors to warnings
+- 8b422ea: feat (ai/core): add caching to generated images
+- Updated dependencies [19a2ce7]
+- Updated dependencies [19a2ce7]
+- Updated dependencies [6337688]
+  - @ai-sdk/provider@1.0.4
+  - @ai-sdk/provider-utils@2.0.6
+  - @ai-sdk/ui-utils@1.0.7
+  - @ai-sdk/react@1.0.8
+
+## 4.0.27
+
+### Patch Changes
+
+- a56734f: feat (ai/core): export simulateReadableStream in ai package
+- 9589601: feat (ai/core): support null delay in smoothStream
+- e3cc23a: feat (ai/core): support regexp chunking pattern in smoothStream
+- e463e73: feat (ai/core): support skipping delays in simulateReadableStream
+
 ## 4.0.26
 
 ### Patch Changes
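Taken together, the 4.0.28 entries above mean that `generateImage` now accepts `aspectRatio` and `seed`, and that unsupported provider settings surface as warnings instead of thrown errors. A minimal usage sketch based only on those entries; the `imageModel` instance is an assumption and would come from whichever provider package is in use:

```ts
import { experimental_generateImage as generateImage, type ImageModel } from 'ai';

// Assumption: an ImageModelV1 instance obtained from a provider package.
declare const imageModel: ImageModel;

const { images, warnings } = await generateImage({
  model: imageModel,
  prompt: 'A watercolor lighthouse at dawn',
  aspectRatio: '16:9', // new in 4.0.28
  seed: 1234,          // new in 4.0.28
});

// Settings the provider cannot honor now arrive here instead of throwing:
for (const warning of warnings) {
  console.warn(warning);
}
console.log(images.length);
```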
package/dist/index.d.mts
CHANGED
@@ -1,7 +1,7 @@
 import { DataStreamString, ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
 export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream } from '@ai-sdk/ui-utils';
 export { ToolCall as CoreToolCall, ToolResult as CoreToolResult, generateId } from '@ai-sdk/provider-utils';
-import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata,
+import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -89,6 +89,16 @@ Embedding.
  */
 type Embedding = EmbeddingModelV1Embedding;
 
+/**
+Image model that is used by the AI SDK Core functions.
+  */
+type ImageModel = ImageModelV1;
+/**
+Warning from the model provider for this call. The call will proceed, but e.g.
+some settings might not be supported, which can lead to suboptimal results.
+  */
+type ImageGenerationWarning = ImageModelV1CallWarning;
+
 /**
 Language model that is used by the AI SDK Core functions.
  */
@@ -365,6 +375,10 @@ interface GenerateImageResult {
 The images that were generated.
      */
     readonly images: Array<GeneratedImage>;
+    /**
+  Warnings for the call, e.g. unsupported settings.
+     */
+    readonly warnings: Array<ImageGenerationWarning>;
 }
 interface GeneratedImage {
     /**
@@ -384,6 +398,8 @@ Generates images using an image model.
 @param prompt - The prompt that should be used to generate the image.
 @param n - Number of images to generate. Default: 1.
 @param size - Size of the images to generate. Must have the format `{width}x{height}`.
+@param aspectRatio - Aspect ratio of the images to generate. Must have the format `{width}:{height}`.
+@param seed - Seed for the image generation.
 @param providerOptions - Additional provider-specific options that are passed through to the provider
 as body parameters.
 @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
@@ -392,7 +408,7 @@ as body parameters.
 
 @returns A result object that contains the generated images.
  */
-declare function generateImage({ model, prompt, n, size, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
+declare function generateImage({ model, prompt, n, size, aspectRatio, seed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
     /**
 The image model to use.
      */
@@ -406,10 +422,18 @@ declare function generateImage({ model, prompt, n, size, providerOptions, maxRet
      */
     n?: number;
     /**
-Size of the images to generate. Must have the format `{width}x{height}`.
+Size of the images to generate. Must have the format `{width}x{height}`. If not provided, the default size will be used.
      */
     size?: `${number}x${number}`;
     /**
+Aspect ratio of the images to generate. Must have the format `{width}:{height}`. If not provided, the default aspect ratio will be used.
+     */
+    aspectRatio?: `${number}:${number}`;
+    /**
+Seed for the image generation. If not provided, the default seed will be used.
+     */
+    seed?: number;
+    /**
 Additional provider-specific options that are passed through to the provider
 as body parameters.
 
@@ -2151,19 +2175,19 @@ Details for all steps.
 /**
  * Smooths text streaming output.
  *
- * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms.
- * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default),
+ * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
+ * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, or provide a custom RegExp pattern for custom chunking.
  *
 * @returns A transform stream that smooths text streaming output.
  */
 declare function smoothStream<TOOLS extends Record<string, CoreTool>>({ delayInMs, chunking, _internal: { delay }, }?: {
-    delayInMs?: number;
-    chunking?: 'word' | 'line';
+    delayInMs?: number | null;
+    chunking?: 'word' | 'line' | RegExp;
     /**
      * Internal. For test use only. May change without notice.
      */
     _internal?: {
-        delay?: (delayInMs: number) => Promise<void>;
+        delay?: (delayInMs: number | null) => Promise<void>;
     };
 }): (options: {
     tools: TOOLS;
@@ -2284,6 +2308,24 @@ declare function experimental_createProviderRegistry(providers: Record<string, P
  */
 declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
 
+/**
+ * Creates a ReadableStream that emits the provided values with an optional delay between each value.
+ *
+ * @param options - The configuration options
+ * @param options.chunks - Array of values to be emitted by the stream
+ * @param options.initialDelayInMs - Optional initial delay in milliseconds before emitting the first value (default: 0). Can be set to `null` to skip the initial delay. The difference between `initialDelayInMs: null` and `initialDelayInMs: 0` is that `initialDelayInMs: null` will emit the values without any delay, while `initialDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
+ * @param options.chunkDelayInMs - Optional delay in milliseconds between emitting each value (default: 0). Can be set to `null` to skip the delay. The difference between `chunkDelayInMs: null` and `chunkDelayInMs: 0` is that `chunkDelayInMs: null` will emit the values without any delay, while `chunkDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
+ * @returns A ReadableStream that emits the provided values
+ */
+declare function simulateReadableStream<T>({ chunks, initialDelayInMs, chunkDelayInMs, _internal, }: {
+    chunks: T[];
+    initialDelayInMs?: number | null;
+    chunkDelayInMs?: number | null;
+    _internal?: {
+        delay?: (ms: number | null) => Promise<void>;
+    };
+}): ReadableStream<T>;
+
 declare const symbol$9: unique symbol;
 declare class InvalidArgumentError extends AISDKError {
     private readonly [symbol$9];
@@ -2569,4 +2611,4 @@ declare namespace llamaindexAdapter {
     };
 }
 
-export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, smoothStream, streamObject, streamText, tool };
+export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, smoothStream, streamObject, streamText, tool };
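The `smoothStream` declaration above now accepts a RegExp chunking pattern and a `null` delay. A rough sketch of how that might be wired into `streamText`; the `experimental_transform` option and the `model` instance are assumptions layered on top of this diff, not confirmed by it:

```ts
import { smoothStream, streamText, type LanguageModelV1 } from 'ai';

// Assumption: any LanguageModelV1 instance from a provider package.
declare const model: LanguageModelV1;

const result = streamText({
  model,
  prompt: 'Write a short story about a robot.',
  experimental_transform: smoothStream({
    delayInMs: null,              // new: skip the artificial delay entirely
    chunking: /[^.!?]*[.!?]\s*/m, // new: custom RegExp, here roughly sentence-by-sentence
  }),
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
```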
package/dist/index.d.ts
CHANGED
@@ -1,7 +1,7 @@
 import { DataStreamString, ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
 export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream } from '@ai-sdk/ui-utils';
 export { ToolCall as CoreToolCall, ToolResult as CoreToolResult, generateId } from '@ai-sdk/provider-utils';
-import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata,
+import { JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -89,6 +89,16 @@ Embedding.
  */
 type Embedding = EmbeddingModelV1Embedding;
 
+/**
+Image model that is used by the AI SDK Core functions.
+  */
+type ImageModel = ImageModelV1;
+/**
+Warning from the model provider for this call. The call will proceed, but e.g.
+some settings might not be supported, which can lead to suboptimal results.
+  */
+type ImageGenerationWarning = ImageModelV1CallWarning;
+
 /**
 Language model that is used by the AI SDK Core functions.
  */
@@ -365,6 +375,10 @@ interface GenerateImageResult {
 The images that were generated.
      */
     readonly images: Array<GeneratedImage>;
+    /**
+  Warnings for the call, e.g. unsupported settings.
+     */
+    readonly warnings: Array<ImageGenerationWarning>;
 }
 interface GeneratedImage {
     /**
@@ -384,6 +398,8 @@ Generates images using an image model.
 @param prompt - The prompt that should be used to generate the image.
 @param n - Number of images to generate. Default: 1.
 @param size - Size of the images to generate. Must have the format `{width}x{height}`.
+@param aspectRatio - Aspect ratio of the images to generate. Must have the format `{width}:{height}`.
+@param seed - Seed for the image generation.
 @param providerOptions - Additional provider-specific options that are passed through to the provider
 as body parameters.
 @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
@@ -392,7 +408,7 @@ as body parameters.
 
 @returns A result object that contains the generated images.
  */
-declare function generateImage({ model, prompt, n, size, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
+declare function generateImage({ model, prompt, n, size, aspectRatio, seed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
     /**
 The image model to use.
      */
@@ -406,10 +422,18 @@ declare function generateImage({ model, prompt, n, size, providerOptions, maxRet
      */
     n?: number;
     /**
-Size of the images to generate. Must have the format `{width}x{height}`.
+Size of the images to generate. Must have the format `{width}x{height}`. If not provided, the default size will be used.
      */
     size?: `${number}x${number}`;
     /**
+Aspect ratio of the images to generate. Must have the format `{width}:{height}`. If not provided, the default aspect ratio will be used.
+     */
+    aspectRatio?: `${number}:${number}`;
+    /**
+Seed for the image generation. If not provided, the default seed will be used.
+     */
+    seed?: number;
+    /**
 Additional provider-specific options that are passed through to the provider
 as body parameters.
 
@@ -2151,19 +2175,19 @@ Details for all steps.
 /**
  * Smooths text streaming output.
  *
- * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms.
- * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default),
+ * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
+ * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, or provide a custom RegExp pattern for custom chunking.
 *
 * @returns A transform stream that smooths text streaming output.
  */
 declare function smoothStream<TOOLS extends Record<string, CoreTool>>({ delayInMs, chunking, _internal: { delay }, }?: {
-    delayInMs?: number;
-    chunking?: 'word' | 'line';
+    delayInMs?: number | null;
+    chunking?: 'word' | 'line' | RegExp;
     /**
      * Internal. For test use only. May change without notice.
     */
     _internal?: {
-        delay?: (delayInMs: number) => Promise<void>;
+        delay?: (delayInMs: number | null) => Promise<void>;
    };
 }): (options: {
     tools: TOOLS;
@@ -2284,6 +2308,24 @@ declare function experimental_createProviderRegistry(providers: Record<string, P
  */
 declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
 
+/**
+ * Creates a ReadableStream that emits the provided values with an optional delay between each value.
+ *
+ * @param options - The configuration options
+ * @param options.chunks - Array of values to be emitted by the stream
+ * @param options.initialDelayInMs - Optional initial delay in milliseconds before emitting the first value (default: 0). Can be set to `null` to skip the initial delay. The difference between `initialDelayInMs: null` and `initialDelayInMs: 0` is that `initialDelayInMs: null` will emit the values without any delay, while `initialDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
+ * @param options.chunkDelayInMs - Optional delay in milliseconds between emitting each value (default: 0). Can be set to `null` to skip the delay. The difference between `chunkDelayInMs: null` and `chunkDelayInMs: 0` is that `chunkDelayInMs: null` will emit the values without any delay, while `chunkDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
+ * @returns A ReadableStream that emits the provided values
+ */
+declare function simulateReadableStream<T>({ chunks, initialDelayInMs, chunkDelayInMs, _internal, }: {
+    chunks: T[];
+    initialDelayInMs?: number | null;
+    chunkDelayInMs?: number | null;
+    _internal?: {
+        delay?: (ms: number | null) => Promise<void>;
+    };
+}): ReadableStream<T>;
+
 declare const symbol$9: unique symbol;
 declare class InvalidArgumentError extends AISDKError {
     private readonly [symbol$9];
@@ -2569,4 +2611,4 @@ declare namespace llamaindexAdapter {
     };
 }
 
-export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, smoothStream, streamObject, streamText, tool };
+export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, smoothStream, streamObject, streamText, tool };
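The `simulateReadableStream` helper declared above is primarily a testing utility. A small sketch of the delay semantics spelled out in its JSDoc; the chunk values are arbitrary:

```ts
import { simulateReadableStream } from 'ai';

// No timer at all before the first chunk (initialDelayInMs: null),
// then a 50 ms pause before each remaining chunk.
const stream = simulateReadableStream({
  chunks: ['Hello', ' ', 'world', '!'],
  initialDelayInMs: null,
  chunkDelayInMs: 50,
});

const reader = stream.getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  process.stdout.write(value);
}
```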
package/dist/index.js
CHANGED
@@ -70,6 +70,7 @@ __export(streams_exports, {
   pipeDataStreamToResponse: () => pipeDataStreamToResponse,
   processDataStream: () => import_ui_utils14.processDataStream,
   processTextStream: () => import_ui_utils14.processTextStream,
+  simulateReadableStream: () => simulateReadableStream,
   smoothStream: () => smoothStream,
   streamObject: () => streamObject,
   streamText: () => streamText,
@@ -289,7 +290,7 @@ var import_provider_utils = require("@ai-sdk/provider-utils");
 
 // util/delay.ts
 async function delay(delayInMs) {
-  return delayInMs
+  return delayInMs == null ? Promise.resolve() : new Promise((resolve) => setTimeout(resolve, delayInMs));
 }
 
 // util/retry-error.ts
@@ -873,37 +874,61 @@ async function generateImage({
   prompt,
   n,
   size,
+  aspectRatio,
+  seed,
   providerOptions,
   maxRetries: maxRetriesArg,
   abortSignal,
   headers
 }) {
   const { retry } = prepareRetries({ maxRetries: maxRetriesArg });
-
-
-
-
-
-
-
-
-
+  return new DefaultGenerateImageResult(
+    await retry(
+      () => model.doGenerate({
+        prompt,
+        n: n != null ? n : 1,
+        abortSignal,
+        headers,
+        size,
+        aspectRatio,
+        seed,
+        providerOptions: providerOptions != null ? providerOptions : {}
+      })
+    )
   );
-  return new DefaultGenerateImageResult({ base64Images: images });
 }
 var DefaultGenerateImageResult = class {
   constructor(options) {
-    this.images = options.
-
-
-
-    }
-  }));
+    this.images = options.images.map(
+      (image) => new DefaultGeneratedImage({ imageData: image })
+    );
+    this.warnings = options.warnings;
   }
   get image() {
     return this.images[0];
   }
 };
+var DefaultGeneratedImage = class {
+  constructor({ imageData }) {
+    const isUint8Array = imageData instanceof Uint8Array;
+    this.base64Data = isUint8Array ? void 0 : imageData;
+    this.uint8ArrayData = isUint8Array ? imageData : void 0;
+  }
+  // lazy conversion with caching to avoid unnecessary conversion overhead:
+  get base64() {
+    if (this.base64Data == null) {
+      this.base64Data = (0, import_provider_utils2.convertUint8ArrayToBase64)(this.uint8ArrayData);
+    }
+    return this.base64Data;
+  }
+  // lazy conversion with caching to avoid unnecessary conversion overhead:
+  get uint8Array() {
+    if (this.uint8ArrayData == null) {
+      this.uint8ArrayData = (0, import_provider_utils2.convertBase64ToUint8Array)(this.base64Data);
+    }
+    return this.uint8ArrayData;
+  }
+};
 
 // core/generate-object/generate-object.ts
 var import_provider_utils6 = require("@ai-sdk/provider-utils");
@@ -5347,38 +5372,50 @@ var DefaultStreamTextResult = class {
 };
 
 // core/generate-text/smooth-stream.ts
+var import_provider17 = require("@ai-sdk/provider");
+var CHUNKING_REGEXPS = {
+  word: /\s*\S+\s+/m,
+  line: /[^\n]*\n/m
+};
 function smoothStream({
   delayInMs = 10,
   chunking = "word",
   _internal: { delay: delay2 = delay } = {}
 } = {}) {
-
-
-
-
-
-
-
+  const chunkingRegexp = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking;
+  if (chunkingRegexp == null) {
+    throw new import_provider17.InvalidArgumentError({
+      argument: "chunking",
+      message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`
+    });
+  }
+  return () => {
+    let buffer = "";
+    return new TransformStream({
+      async transform(chunk, controller) {
+        if (chunk.type === "step-finish") {
+          if (buffer.length > 0) {
+            controller.enqueue({ type: "text-delta", textDelta: buffer });
+            buffer = "";
+          }
+          controller.enqueue(chunk);
+          return;
         }
-
-
-
-
-
-
-
-
-
-
-          const chunk2 = buffer.match(regexp)[0];
-          controller.enqueue({ type: "text-delta", textDelta: chunk2 });
-          buffer = buffer.slice(chunk2.length);
-          if (delayInMs > 0) {
+        if (chunk.type !== "text-delta") {
+          controller.enqueue(chunk);
+          return;
+        }
+        buffer += chunk.textDelta;
+        let match;
+        while ((match = chunkingRegexp.exec(buffer)) != null) {
+          const chunk2 = match[0];
+          controller.enqueue({ type: "text-delta", textDelta: chunk2 });
+          buffer = buffer.slice(chunk2.length);
          await delay2(delayInMs);
        }
      }
-    }
-  }
+    });
+  };
 }
 
 // core/middleware/wrap-language-model.ts
@@ -5416,7 +5453,7 @@ var experimental_wrapLanguageModel = ({
 };
 
 // core/registry/custom-provider.ts
-var
+var import_provider18 = require("@ai-sdk/provider");
 function experimental_customProvider({
   languageModels,
   textEmbeddingModels,
@@ -5430,7 +5467,7 @@ function experimental_customProvider({
     if (fallbackProvider) {
       return fallbackProvider.languageModel(modelId);
     }
-    throw new
+    throw new import_provider18.NoSuchModelError({ modelId, modelType: "languageModel" });
   },
   textEmbeddingModel(modelId) {
     if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -5439,18 +5476,18 @@ function experimental_customProvider({
     if (fallbackProvider) {
       return fallbackProvider.textEmbeddingModel(modelId);
     }
-    throw new
+    throw new import_provider18.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
     }
   };
 }
 
 // core/registry/no-such-provider-error.ts
-var
+var import_provider19 = require("@ai-sdk/provider");
 var name13 = "AI_NoSuchProviderError";
 var marker13 = `vercel.ai.error.${name13}`;
 var symbol13 = Symbol.for(marker13);
 var _a13;
-var NoSuchProviderError = class extends
+var NoSuchProviderError = class extends import_provider19.NoSuchModelError {
   constructor({
     modelId,
     modelType,
@@ -5464,13 +5501,13 @@ var NoSuchProviderError = class extends import_provider18.NoSuchModelError {
     this.availableProviders = availableProviders;
   }
   static isInstance(error) {
-    return
+    return import_provider19.AISDKError.hasMarker(error, marker13);
   }
 };
 _a13 = symbol13;
 
 // core/registry/provider-registry.ts
-var
+var import_provider20 = require("@ai-sdk/provider");
 function experimental_createProviderRegistry(providers) {
   const registry = new DefaultProviderRegistry();
   for (const [id, provider] of Object.entries(providers)) {
@@ -5500,7 +5537,7 @@ var DefaultProviderRegistry = class {
   splitId(id, modelType) {
     const index = id.indexOf(":");
     if (index === -1) {
-      throw new
+      throw new import_provider20.NoSuchModelError({
         modelId: id,
         modelType,
         message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId:modelId")`
@@ -5513,7 +5550,7 @@ var DefaultProviderRegistry = class {
     const [providerId, modelId] = this.splitId(id, "languageModel");
     const model = (_b = (_a14 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a14, modelId);
     if (model == null) {
-      throw new
+      throw new import_provider20.NoSuchModelError({ modelId: id, modelType: "languageModel" });
     }
     return model;
   }
@@ -5523,7 +5560,7 @@ var DefaultProviderRegistry = class {
     const provider = this.getProvider(providerId);
     const model = (_a14 = provider.textEmbeddingModel) == null ? void 0 : _a14.call(provider, modelId);
     if (model == null) {
-      throw new
+      throw new import_provider20.NoSuchModelError({
         modelId: id,
         modelType: "textEmbeddingModel"
       });
@@ -5562,6 +5599,28 @@ function magnitude(vector) {
   return Math.sqrt(dotProduct(vector, vector));
 }
 
+// core/util/simulate-readable-stream.ts
+function simulateReadableStream({
+  chunks,
+  initialDelayInMs = 0,
+  chunkDelayInMs = 0,
+  _internal
+}) {
+  var _a14;
+  const delay2 = (_a14 = _internal == null ? void 0 : _internal.delay) != null ? _a14 : delay;
+  let index = 0;
+  return new ReadableStream({
+    async pull(controller) {
+      if (index < chunks.length) {
+        await delay2(index === 0 ? initialDelayInMs : chunkDelayInMs);
+        controller.enqueue(chunks[index++]);
+      } else {
+        controller.close();
+      }
+    }
+  });
+}
+
 // streams/assistant-response.ts
 var import_ui_utils10 = require("@ai-sdk/ui-utils");
 function AssistantResponse({ threadId, messageId }, process2) {
@@ -5946,6 +6005,7 @@ var StreamData = class {
   pipeDataStreamToResponse,
   processDataStream,
   processTextStream,
+  simulateReadableStream,
   smoothStream,
   streamObject,
   streamText,
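The new `DefaultGeneratedImage` class above keeps whichever representation the provider returned and converts to the other on first access, caching the result (the 4.0.28 "caching" change). A sketch of what that implies for callers; the `imageModel` instance is again an assumption:

```ts
import { experimental_generateImage as generateImage, type ImageModel } from 'ai';

declare const imageModel: ImageModel; // assumed provider instance, not part of this diff

const { image } = await generateImage({ model: imageModel, prompt: 'a red cube' });

const bytes = image.uint8Array;  // may convert base64 -> Uint8Array once
const again = image.uint8Array;  // second read returns the cached array
const base64 = image.base64;     // no re-encoding if base64 was the original format
console.log(bytes === again, base64.length);
```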