ai 4.0.26 → 4.0.27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
  # ai
 
+ ## 4.0.27
+
+ ### Patch Changes
+
+ - a56734f: feat (ai/core): export simulateReadableStream in ai package
+ - 9589601: feat (ai/core): support null delay in smoothStream
+ - e3cc23a: feat (ai/core): support regexp chunking pattern in smoothStream
+ - e463e73: feat (ai/core): support skipping delays in simulateReadableStream
+
  ## 4.0.26
 
  ### Patch Changes
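
The two smoothStream entries and the new simulateReadableStream export show up together in usage like the following minimal sketch. The `experimental_transform` option on `streamText`, the provider import, and the model id are assumptions for illustration and are not part of this diff:

```ts
import { smoothStream, streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // placeholder provider; any LanguageModel works

const result = streamText({
  model: openai('gpt-4o-mini'), // hypothetical model id
  prompt: 'Summarize the 4.0.27 release notes.',
  // smoothStream() returns a stream transform; wiring it up via experimental_transform
  // is an assumption about the surrounding streamText API, not shown in this diff.
  experimental_transform: smoothStream({
    delayInMs: null,              // new in 4.0.27: null skips the artificial delay entirely
    chunking: /[^.!?]*[.!?]\s*/m, // new in 4.0.27: custom RegExp chunking (roughly one sentence per chunk)
  }),
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
```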
package/dist/index.d.mts CHANGED
@@ -2151,19 +2151,19 @@ Details for all steps.
  /**
  * Smooths text streaming output.
  *
- * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms.
- * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), or "line" to stream line by line.
+ * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
+ * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, or provide a custom RegExp pattern for custom chunking.
  *
  * @returns A transform stream that smooths text streaming output.
  */
  declare function smoothStream<TOOLS extends Record<string, CoreTool>>({ delayInMs, chunking, _internal: { delay }, }?: {
- delayInMs?: number;
- chunking?: 'word' | 'line';
+ delayInMs?: number | null;
+ chunking?: 'word' | 'line' | RegExp;
  /**
  * Internal. For test use only. May change without notice.
  */
  _internal?: {
- delay?: (delayInMs: number) => Promise<void>;
+ delay?: (delayInMs: number | null) => Promise<void>;
  };
  }): (options: {
  tools: TOOLS;
@@ -2284,6 +2284,24 @@ declare function experimental_createProviderRegistry(providers: Record<string, P
  */
  declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
 
+ /**
+ * Creates a ReadableStream that emits the provided values with an optional delay between each value.
+ *
+ * @param options - The configuration options
+ * @param options.chunks - Array of values to be emitted by the stream
+ * @param options.initialDelayInMs - Optional initial delay in milliseconds before emitting the first value (default: 0). Can be set to `null` to skip the initial delay. The difference between `initialDelayInMs: null` and `initialDelayInMs: 0` is that `initialDelayInMs: null` will emit the values without any delay, while `initialDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
+ * @param options.chunkDelayInMs - Optional delay in milliseconds between emitting each value (default: 0). Can be set to `null` to skip the delay. The difference between `chunkDelayInMs: null` and `chunkDelayInMs: 0` is that `chunkDelayInMs: null` will emit the values without any delay, while `chunkDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
+ * @returns A ReadableStream that emits the provided values
+ */
+ declare function simulateReadableStream<T>({ chunks, initialDelayInMs, chunkDelayInMs, _internal, }: {
+ chunks: T[];
+ initialDelayInMs?: number | null;
+ chunkDelayInMs?: number | null;
+ _internal?: {
+ delay?: (ms: number | null) => Promise<void>;
+ };
+ }): ReadableStream<T>;
+
  declare const symbol$9: unique symbol;
  declare class InvalidArgumentError extends AISDKError {
  private readonly [symbol$9];
@@ -2569,4 +2587,4 @@ declare namespace llamaindexAdapter {
  };
  }
 
- export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, smoothStream, streamObject, streamText, tool };
+ export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, smoothStream, streamObject, streamText, tool };
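
Based only on the declaration and doc comment added above, a small usage sketch of `simulateReadableStream`; the chunk values and the surrounding module context (top-level await) are illustrative assumptions:

```ts
import { simulateReadableStream } from 'ai';

// Three chunks with no artificial delay at all: per the doc comment, null bypasses
// the delay entirely, while 0 would still schedule a 0 ms timer per chunk.
const stream = simulateReadableStream({
  chunks: ['Hello', ', ', 'world!'],
  initialDelayInMs: null,
  chunkDelayInMs: null,
});

const reader = stream.getReader();
for (let result = await reader.read(); !result.done; result = await reader.read()) {
  console.log(result.value); // "Hello" / ", " / "world!"
}
```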
package/dist/index.d.ts CHANGED
@@ -2151,19 +2151,19 @@ Details for all steps.
  /**
  * Smooths text streaming output.
  *
- * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms.
- * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), or "line" to stream line by line.
+ * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
+ * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, or provide a custom RegExp pattern for custom chunking.
  *
  * @returns A transform stream that smooths text streaming output.
  */
  declare function smoothStream<TOOLS extends Record<string, CoreTool>>({ delayInMs, chunking, _internal: { delay }, }?: {
- delayInMs?: number;
- chunking?: 'word' | 'line';
+ delayInMs?: number | null;
+ chunking?: 'word' | 'line' | RegExp;
  /**
  * Internal. For test use only. May change without notice.
  */
  _internal?: {
- delay?: (delayInMs: number) => Promise<void>;
+ delay?: (delayInMs: number | null) => Promise<void>;
  };
  }): (options: {
  tools: TOOLS;
@@ -2284,6 +2284,24 @@ declare function experimental_createProviderRegistry(providers: Record<string, P
  */
  declare function cosineSimilarity(vector1: number[], vector2: number[]): number;
 
+ /**
+ * Creates a ReadableStream that emits the provided values with an optional delay between each value.
+ *
+ * @param options - The configuration options
+ * @param options.chunks - Array of values to be emitted by the stream
+ * @param options.initialDelayInMs - Optional initial delay in milliseconds before emitting the first value (default: 0). Can be set to `null` to skip the initial delay. The difference between `initialDelayInMs: null` and `initialDelayInMs: 0` is that `initialDelayInMs: null` will emit the values without any delay, while `initialDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
+ * @param options.chunkDelayInMs - Optional delay in milliseconds between emitting each value (default: 0). Can be set to `null` to skip the delay. The difference between `chunkDelayInMs: null` and `chunkDelayInMs: 0` is that `chunkDelayInMs: null` will emit the values without any delay, while `chunkDelayInMs: 0` will emit the values with a delay of 0 milliseconds.
+ * @returns A ReadableStream that emits the provided values
+ */
+ declare function simulateReadableStream<T>({ chunks, initialDelayInMs, chunkDelayInMs, _internal, }: {
+ chunks: T[];
+ initialDelayInMs?: number | null;
+ chunkDelayInMs?: number | null;
+ _internal?: {
+ delay?: (ms: number | null) => Promise<void>;
+ };
+ }): ReadableStream<T>;
+
  declare const symbol$9: unique symbol;
  declare class InvalidArgumentError extends AISDKError {
  private readonly [symbol$9];
@@ -2569,4 +2587,4 @@ declare namespace llamaindexAdapter {
  };
  }
 
- export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, smoothStream, streamObject, streamText, tool };
+ export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedImage as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createDataStream, createDataStreamResponse, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, smoothStream, streamObject, streamText, tool };
package/dist/index.js CHANGED
@@ -70,6 +70,7 @@ __export(streams_exports, {
  pipeDataStreamToResponse: () => pipeDataStreamToResponse,
  processDataStream: () => import_ui_utils14.processDataStream,
  processTextStream: () => import_ui_utils14.processTextStream,
+ simulateReadableStream: () => simulateReadableStream,
  smoothStream: () => smoothStream,
  streamObject: () => streamObject,
  streamText: () => streamText,
@@ -289,7 +290,7 @@ var import_provider_utils = require("@ai-sdk/provider-utils");
 
  // util/delay.ts
  async function delay(delayInMs) {
- return delayInMs === void 0 ? Promise.resolve() : new Promise((resolve) => setTimeout(resolve, delayInMs));
+ return delayInMs == null ? Promise.resolve() : new Promise((resolve) => setTimeout(resolve, delayInMs));
  }
 
  // util/retry-error.ts
@@ -5347,38 +5348,50 @@ var DefaultStreamTextResult = class {
  };
 
  // core/generate-text/smooth-stream.ts
+ var import_provider17 = require("@ai-sdk/provider");
+ var CHUNKING_REGEXPS = {
+ word: /\s*\S+\s+/m,
+ line: /[^\n]*\n/m
+ };
  function smoothStream({
  delayInMs = 10,
  chunking = "word",
  _internal: { delay: delay2 = delay } = {}
  } = {}) {
- let buffer = "";
- return () => new TransformStream({
- async transform(chunk, controller) {
- if (chunk.type === "step-finish") {
- if (buffer.length > 0) {
- controller.enqueue({ type: "text-delta", textDelta: buffer });
- buffer = "";
+ const chunkingRegexp = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking;
+ if (chunkingRegexp == null) {
+ throw new import_provider17.InvalidArgumentError({
+ argument: "chunking",
+ message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`
+ });
+ }
+ return () => {
+ let buffer = "";
+ return new TransformStream({
+ async transform(chunk, controller) {
+ if (chunk.type === "step-finish") {
+ if (buffer.length > 0) {
+ controller.enqueue({ type: "text-delta", textDelta: buffer });
+ buffer = "";
+ }
+ controller.enqueue(chunk);
+ return;
  }
- controller.enqueue(chunk);
- return;
- }
- if (chunk.type !== "text-delta") {
- controller.enqueue(chunk);
- return;
- }
- buffer += chunk.textDelta;
- const regexp = chunking === "line" ? /[^\n]*\n/m : /\s*\S+\s+/m;
- while (regexp.test(buffer)) {
- const chunk2 = buffer.match(regexp)[0];
- controller.enqueue({ type: "text-delta", textDelta: chunk2 });
- buffer = buffer.slice(chunk2.length);
- if (delayInMs > 0) {
+ if (chunk.type !== "text-delta") {
+ controller.enqueue(chunk);
+ return;
+ }
+ buffer += chunk.textDelta;
+ let match;
+ while ((match = chunkingRegexp.exec(buffer)) != null) {
+ const chunk2 = match[0];
+ controller.enqueue({ type: "text-delta", textDelta: chunk2 });
+ buffer = buffer.slice(chunk2.length);
  await delay2(delayInMs);
  }
  }
- }
- });
+ });
+ };
  }
 
  // core/middleware/wrap-language-model.ts
@@ -5416,7 +5429,7 @@ var experimental_wrapLanguageModel = ({
  };
 
  // core/registry/custom-provider.ts
- var import_provider17 = require("@ai-sdk/provider");
+ var import_provider18 = require("@ai-sdk/provider");
  function experimental_customProvider({
  languageModels,
  textEmbeddingModels,
@@ -5430,7 +5443,7 @@ function experimental_customProvider({
  if (fallbackProvider) {
  return fallbackProvider.languageModel(modelId);
  }
- throw new import_provider17.NoSuchModelError({ modelId, modelType: "languageModel" });
+ throw new import_provider18.NoSuchModelError({ modelId, modelType: "languageModel" });
  },
  textEmbeddingModel(modelId) {
  if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -5439,18 +5452,18 @@ function experimental_customProvider({
  if (fallbackProvider) {
  return fallbackProvider.textEmbeddingModel(modelId);
  }
- throw new import_provider17.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+ throw new import_provider18.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
  }
  };
  }
 
  // core/registry/no-such-provider-error.ts
- var import_provider18 = require("@ai-sdk/provider");
+ var import_provider19 = require("@ai-sdk/provider");
  var name13 = "AI_NoSuchProviderError";
  var marker13 = `vercel.ai.error.${name13}`;
  var symbol13 = Symbol.for(marker13);
  var _a13;
- var NoSuchProviderError = class extends import_provider18.NoSuchModelError {
+ var NoSuchProviderError = class extends import_provider19.NoSuchModelError {
  constructor({
  modelId,
  modelType,
@@ -5464,13 +5477,13 @@ var NoSuchProviderError = class extends import_provider18.NoSuchModelError {
  this.availableProviders = availableProviders;
  }
  static isInstance(error) {
- return import_provider18.AISDKError.hasMarker(error, marker13);
+ return import_provider19.AISDKError.hasMarker(error, marker13);
  }
  };
  _a13 = symbol13;
 
  // core/registry/provider-registry.ts
- var import_provider19 = require("@ai-sdk/provider");
+ var import_provider20 = require("@ai-sdk/provider");
  function experimental_createProviderRegistry(providers) {
  const registry = new DefaultProviderRegistry();
  for (const [id, provider] of Object.entries(providers)) {
@@ -5500,7 +5513,7 @@ var DefaultProviderRegistry = class {
  splitId(id, modelType) {
  const index = id.indexOf(":");
  if (index === -1) {
- throw new import_provider19.NoSuchModelError({
+ throw new import_provider20.NoSuchModelError({
  modelId: id,
  modelType,
  message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId:modelId")`
@@ -5513,7 +5526,7 @@ var DefaultProviderRegistry = class {
  const [providerId, modelId] = this.splitId(id, "languageModel");
  const model = (_b = (_a14 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a14, modelId);
  if (model == null) {
- throw new import_provider19.NoSuchModelError({ modelId: id, modelType: "languageModel" });
+ throw new import_provider20.NoSuchModelError({ modelId: id, modelType: "languageModel" });
  }
  return model;
  }
@@ -5523,7 +5536,7 @@ var DefaultProviderRegistry = class {
  const provider = this.getProvider(providerId);
  const model = (_a14 = provider.textEmbeddingModel) == null ? void 0 : _a14.call(provider, modelId);
  if (model == null) {
- throw new import_provider19.NoSuchModelError({
+ throw new import_provider20.NoSuchModelError({
  modelId: id,
  modelType: "textEmbeddingModel"
  });
@@ -5562,6 +5575,28 @@ function magnitude(vector) {
  return Math.sqrt(dotProduct(vector, vector));
  }
 
+ // core/util/simulate-readable-stream.ts
+ function simulateReadableStream({
+ chunks,
+ initialDelayInMs = 0,
+ chunkDelayInMs = 0,
+ _internal
+ }) {
+ var _a14;
+ const delay2 = (_a14 = _internal == null ? void 0 : _internal.delay) != null ? _a14 : delay;
+ let index = 0;
+ return new ReadableStream({
+ async pull(controller) {
+ if (index < chunks.length) {
+ await delay2(index === 0 ? initialDelayInMs : chunkDelayInMs);
+ controller.enqueue(chunks[index++]);
+ } else {
+ controller.close();
+ }
+ }
+ });
+ }
+
  // streams/assistant-response.ts
  var import_ui_utils10 = require("@ai-sdk/ui-utils");
  function AssistantResponse({ threadId, messageId }, process2) {
@@ -5946,6 +5981,7 @@ var StreamData = class {
  pipeDataStreamToResponse,
  processDataStream,
  processTextStream,
+ simulateReadableStream,
  smoothStream,
  streamObject,
  streamText,
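
The small change to `util/delay.ts` near the top of this file (from `=== void 0` to `== null`) is what lets `null` delays flow through both `smoothStream` and `simulateReadableStream`: `null` and `undefined` now both resolve without scheduling a timer, while `0` still defers to the next macrotask. Restated as a typed sketch (behaviorally equivalent to the shipped helper, not the source verbatim):

```ts
async function delay(delayInMs?: number | null): Promise<void> {
  return delayInMs == null
    ? Promise.resolve() // undefined OR null: resolve immediately, no setTimeout involved
    : new Promise((resolve) => setTimeout(resolve, delayInMs)); // 0 still yields to the timer queue
}
```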