ai 4.2.10 → 5.0.0-canary.0

This diff represents the content of publicly available package versions as released to their public registries. It is provided for informational purposes only and reflects the changes between the two published versions.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,22 @@
  # ai
 
+ ## 5.0.0-canary.0
+
+ ### Major Changes
+
+ - d5f588f: AI SDK 5
+ - 9477ebb: chore (ui): remove useAssistant hook (**breaking change**)
+
+ ### Patch Changes
+
+ - 8026705: fix (core): send buffered text in smooth stream when stream parts change
+ - Updated dependencies [d5f588f]
+ - Updated dependencies [9477ebb]
+ - @ai-sdk/provider-utils@3.0.0-canary.0
+ - @ai-sdk/ui-utils@2.0.0-canary.0
+ - @ai-sdk/react@2.0.0-canary.0
+ - @ai-sdk/provider@2.0.0-canary.0
+
  ## 4.2.10
 
  ### Patch Changes
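Note on the 8026705 patch above: in the compiled output further down (the `smoothStream` hunks in package/dist/index.js), any stream part that is not a `text-delta` now flushes the buffered text before being forwarded, where previously only `step-finish` did. Below is a minimal TypeScript sketch of that flushing behaviour; the part types and the word-chunking rule are illustrative assumptions, not the SDK's own source.

```ts
// Sketch only: mirrors the fixed flushing logic, not the published smoothStream code.
type TextStreamPart =
  | { type: "text-delta"; textDelta: string }
  | { type: "step-finish" }
  | { type: "finish" };

function smoothTextTransform(): TransformStream<TextStreamPart, TextStreamPart> {
  let buffer = "";
  return new TransformStream({
    transform(part, controller) {
      if (part.type !== "text-delta") {
        // The fix: flush pending text for *any* non-text part, not just "step-finish".
        if (buffer.length > 0) {
          controller.enqueue({ type: "text-delta", textDelta: buffer });
          buffer = "";
        }
        controller.enqueue(part);
        return;
      }
      buffer += part.textDelta;
      // Illustrative chunking: emit everything up to the last whitespace,
      // keep the trailing partial word buffered for the next delta.
      const cut = buffer.search(/\S*$/);
      if (cut > 0) {
        controller.enqueue({ type: "text-delta", textDelta: buffer.slice(0, cut) });
        buffer = buffer.slice(cut);
      }
    },
  });
}
```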
package/dist/index.d.mts CHANGED
@@ -1,7 +1,7 @@
  import { IDGenerator } from '@ai-sdk/provider-utils';
  export { CoreToolCall, CoreToolResult, IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
- import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
- export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UIMessage, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
+ import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1 } from '@ai-sdk/ui-utils';
+ export { Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UIMessage, formatDataStreamPart, jsonSchema, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
  import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1Source, JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, JSONParseError, TypeValidationError, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  import { ServerResponse } from 'node:http';
@@ -4243,43 +4243,6 @@ declare class RetryError extends AISDKError {
  static isInstance(error: unknown): error is RetryError;
  }
 
- /**
- You can pass the thread and the latest message into the `AssistantResponse`. This establishes the context for the response.
- */
- type AssistantResponseSettings = {
- /**
- The thread ID that the response is associated with.
- */
- threadId: string;
- /**
- The ID of the latest message that the response is associated with.
- */
- messageId: string;
- };
- /**
- The process parameter is a callback in which you can run the assistant on threads, and send messages and data messages to the client.
- */
- type AssistantResponseCallback = (options: {
- /**
- Forwards an assistant message (non-streaming) to the client.
- */
- sendMessage: (message: AssistantMessage) => void;
- /**
- Send a data message to the client. You can use this to provide information for rendering custom UIs while the assistant is processing the thread.
- */
- sendDataMessage: (message: DataMessage) => void;
- /**
- Forwards the assistant response stream to the client. Returns the `Run` object after it completes, or when it requires an action.
- */
- forwardStream: (stream: any) => Promise<any | undefined>;
- }) => Promise<void>;
- /**
- The `AssistantResponse` allows you to send a stream of assistant update to `useAssistant`.
- It is designed to facilitate streaming assistant responses to the `useAssistant` hook.
- It receives an assistant thread and a current message, and can send messages and data messages to the client.
- */
- declare function AssistantResponse({ threadId, messageId }: AssistantResponseSettings, process: AssistantResponseCallback): Response;
-
  /**
  * Configuration options and helper callback methods for stream lifecycle events.
  */
@@ -4376,4 +4339,4 @@ declare namespace llamaindexAdapter {
  };
  }
 
- export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LanguageModelV1Middleware, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, ProviderRegistryProvider, RepairTextFunction, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, wrapLanguageModel };
+ export { AssistantContent, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LanguageModelV1Middleware, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, ProviderRegistryProvider, RepairTextFunction, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, wrapLanguageModel };
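The declarations removed in this file are the server-side counterpart of the dropped `useAssistant` hook. For reference, here is a minimal sketch of how the removed `AssistantResponse` export was invoked in a route handler on 4.x, based only on the signature deleted above; the request shape and the `createAssistantRunStream` helper are illustrative assumptions, not part of this package.

```ts
import { AssistantResponse } from "ai"; // export removed in 5.0.0-canary.0

// Hypothetical stand-in for an assistants/threads client that yields run events:
declare function createAssistantRunStream(threadId: string): Promise<AsyncIterable<unknown>>;

export async function POST(req: Request): Promise<Response> {
  const { threadId, messageId } = await req.json();

  return AssistantResponse({ threadId, messageId }, async ({ forwardStream }) => {
    // forwardStream pipes run events to the client-side useAssistant hook and
    // resolves with the final run object (per the AssistantResponseCallback type above).
    await forwardStream(await createAssistantRunStream(threadId));
  });
}
```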
package/dist/index.d.ts CHANGED
@@ -1,7 +1,7 @@
  import { IDGenerator } from '@ai-sdk/provider-utils';
  export { CoreToolCall, CoreToolResult, IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
- import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
- export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UIMessage, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
+ import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1 } from '@ai-sdk/ui-utils';
+ export { Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UIMessage, formatDataStreamPart, jsonSchema, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
  import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1Source, JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, JSONParseError, TypeValidationError, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  import { ServerResponse } from 'node:http';
@@ -4243,43 +4243,6 @@ declare class RetryError extends AISDKError {
  static isInstance(error: unknown): error is RetryError;
  }
 
- /**
- You can pass the thread and the latest message into the `AssistantResponse`. This establishes the context for the response.
- */
- type AssistantResponseSettings = {
- /**
- The thread ID that the response is associated with.
- */
- threadId: string;
- /**
- The ID of the latest message that the response is associated with.
- */
- messageId: string;
- };
- /**
- The process parameter is a callback in which you can run the assistant on threads, and send messages and data messages to the client.
- */
- type AssistantResponseCallback = (options: {
- /**
- Forwards an assistant message (non-streaming) to the client.
- */
- sendMessage: (message: AssistantMessage) => void;
- /**
- Send a data message to the client. You can use this to provide information for rendering custom UIs while the assistant is processing the thread.
- */
- sendDataMessage: (message: DataMessage) => void;
- /**
- Forwards the assistant response stream to the client. Returns the `Run` object after it completes, or when it requires an action.
- */
- forwardStream: (stream: any) => Promise<any | undefined>;
- }) => Promise<void>;
- /**
- The `AssistantResponse` allows you to send a stream of assistant update to `useAssistant`.
- It is designed to facilitate streaming assistant responses to the `useAssistant` hook.
- It receives an assistant thread and a current message, and can send messages and data messages to the client.
- */
- declare function AssistantResponse({ threadId, messageId }: AssistantResponseSettings, process: AssistantResponseCallback): Response;
-
  /**
  * Configuration options and helper callback methods for stream lifecycle events.
  */
@@ -4376,4 +4339,4 @@ declare namespace llamaindexAdapter {
  };
  }
 
- export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LanguageModelV1Middleware, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, ProviderRegistryProvider, RepairTextFunction, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, wrapLanguageModel };
+ export { AssistantContent, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LanguageModelV1Middleware, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, ProviderRegistryProvider, RepairTextFunction, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, wrapLanguageModel };
package/dist/index.js CHANGED
@@ -22,7 +22,6 @@ var streams_exports = {};
  __export(streams_exports, {
  AISDKError: () => import_provider20.AISDKError,
  APICallError: () => import_provider20.APICallError,
- AssistantResponse: () => AssistantResponse,
  DownloadError: () => DownloadError,
  EmptyResponseBodyError: () => import_provider20.EmptyResponseBodyError,
  InvalidArgumentError: () => InvalidArgumentError,
@@ -75,13 +74,11 @@ __export(streams_exports, {
  experimental_generateImage: () => generateImage,
  experimental_wrapLanguageModel: () => experimental_wrapLanguageModel,
  extractReasoningMiddleware: () => extractReasoningMiddleware,
- formatAssistantStreamPart: () => import_ui_utils11.formatAssistantStreamPart,
  formatDataStreamPart: () => import_ui_utils11.formatDataStreamPart,
  generateId: () => import_provider_utils15.generateId,
  generateObject: () => generateObject,
  generateText: () => generateText,
  jsonSchema: () => import_ui_utils11.jsonSchema,
- parseAssistantStreamPart: () => import_ui_utils11.parseAssistantStreamPart,
  parseDataStreamPart: () => import_ui_utils11.parseDataStreamPart,
  pipeDataStreamToResponse: () => pipeDataStreamToResponse,
  processDataStream: () => import_ui_utils11.processDataStream,
@@ -1156,14 +1153,11 @@ var DownloadError = class extends import_provider6.AISDKError {
  _a5 = symbol5;
 
  // util/download.ts
- async function download({
- url,
- fetchImplementation = fetch
- }) {
+ async function download({ url }) {
  var _a17;
  const urlText = url.toString();
  try {
- const response = await fetchImplementation(urlText);
+ const response = await fetch(urlText);
  if (!response.ok) {
  throw new DownloadError({
  url: urlText,
@@ -4687,7 +4681,7 @@ function smoothStream({
  let buffer = "";
  return new TransformStream({
  async transform(chunk, controller) {
- if (chunk.type === "step-finish") {
+ if (chunk.type !== "text-delta") {
  if (buffer.length > 0) {
  controller.enqueue({ type: "text-delta", textDelta: buffer });
  buffer = "";
@@ -4695,10 +4689,6 @@ function smoothStream({
  controller.enqueue(chunk);
  return;
  }
- if (chunk.type !== "text-delta") {
- controller.enqueue(chunk);
- return;
- }
  buffer += chunk.textDelta;
  let match;
  while ((match = chunkingRegexp.exec(buffer)) != null) {
@@ -7483,102 +7473,6 @@ function simulateReadableStream({
  });
  }
 
- // streams/assistant-response.ts
- var import_ui_utils12 = require("@ai-sdk/ui-utils");
- function AssistantResponse({ threadId, messageId }, process2) {
- const stream = new ReadableStream({
- async start(controller) {
- var _a17;
- const textEncoder = new TextEncoder();
- const sendMessage = (message) => {
- controller.enqueue(
- textEncoder.encode(
- (0, import_ui_utils12.formatAssistantStreamPart)("assistant_message", message)
- )
- );
- };
- const sendDataMessage = (message) => {
- controller.enqueue(
- textEncoder.encode(
- (0, import_ui_utils12.formatAssistantStreamPart)("data_message", message)
- )
- );
- };
- const sendError = (errorMessage) => {
- controller.enqueue(
- textEncoder.encode((0, import_ui_utils12.formatAssistantStreamPart)("error", errorMessage))
- );
- };
- const forwardStream = async (stream2) => {
- var _a18, _b;
- let result = void 0;
- for await (const value of stream2) {
- switch (value.event) {
- case "thread.message.created": {
- controller.enqueue(
- textEncoder.encode(
- (0, import_ui_utils12.formatAssistantStreamPart)("assistant_message", {
- id: value.data.id,
- role: "assistant",
- content: [{ type: "text", text: { value: "" } }]
- })
- )
- );
- break;
- }
- case "thread.message.delta": {
- const content = (_a18 = value.data.delta.content) == null ? void 0 : _a18[0];
- if ((content == null ? void 0 : content.type) === "text" && ((_b = content.text) == null ? void 0 : _b.value) != null) {
- controller.enqueue(
- textEncoder.encode(
- (0, import_ui_utils12.formatAssistantStreamPart)("text", content.text.value)
- )
- );
- }
- break;
- }
- case "thread.run.completed":
- case "thread.run.requires_action": {
- result = value.data;
- break;
- }
- }
- }
- return result;
- };
- controller.enqueue(
- textEncoder.encode(
- (0, import_ui_utils12.formatAssistantStreamPart)("assistant_control_data", {
- threadId,
- messageId
- })
- )
- );
- try {
- await process2({
- sendMessage,
- sendDataMessage,
- forwardStream
- });
- } catch (error) {
- sendError((_a17 = error.message) != null ? _a17 : `${error}`);
- } finally {
- controller.close();
- }
- },
- pull(controller) {
- },
- cancel() {
- }
- });
- return new Response(stream, {
- status: 200,
- headers: {
- "Content-Type": "text/plain; charset=utf-8"
- }
- });
- }
-
  // streams/langchain-adapter.ts
  var langchain_adapter_exports = {};
  __export(langchain_adapter_exports, {
@@ -7586,7 +7480,7 @@ __export(langchain_adapter_exports, {
  toDataStream: () => toDataStream,
  toDataStreamResponse: () => toDataStreamResponse
  });
- var import_ui_utils13 = require("@ai-sdk/ui-utils");
+ var import_ui_utils12 = require("@ai-sdk/ui-utils");
 
  // streams/stream-callbacks.ts
  function createCallbacksTransformer(callbacks = {}) {
@@ -7642,7 +7536,7 @@ function toDataStreamInternal(stream, callbacks) {
  ).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(new TextDecoderStream()).pipeThrough(
  new TransformStream({
  transform: async (chunk, controller) => {
- controller.enqueue((0, import_ui_utils13.formatDataStreamPart)("text", chunk));
+ controller.enqueue((0, import_ui_utils12.formatDataStreamPart)("text", chunk));
  }
  })
  );
@@ -7694,7 +7588,7 @@ __export(llamaindex_adapter_exports, {
  toDataStreamResponse: () => toDataStreamResponse2
  });
  var import_provider_utils16 = require("@ai-sdk/provider-utils");
- var import_ui_utils14 = require("@ai-sdk/ui-utils");
+ var import_ui_utils13 = require("@ai-sdk/ui-utils");
  function toDataStreamInternal2(stream, callbacks) {
  const trimStart = trimStartOfStream();
  return (0, import_provider_utils16.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
@@ -7706,7 +7600,7 @@ function toDataStreamInternal2(stream, callbacks) {
  ).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(new TextDecoderStream()).pipeThrough(
  new TransformStream({
  transform: async (chunk, controller) => {
- controller.enqueue((0, import_ui_utils14.formatDataStreamPart)("text", chunk));
+ controller.enqueue((0, import_ui_utils13.formatDataStreamPart)("text", chunk));
  }
  })
  );
@@ -7748,7 +7642,7 @@ function trimStartOfStream() {
  }
 
  // streams/stream-data.ts
- var import_ui_utils15 = require("@ai-sdk/ui-utils");
+ var import_ui_utils14 = require("@ai-sdk/ui-utils");
 
  // util/constants.ts
  var HANGING_STREAM_WARNING_TIME_MS = 15 * 1e3;
@@ -7800,7 +7694,7 @@ var StreamData = class {
  throw new Error("Stream controller is not initialized.");
  }
  this.controller.enqueue(
- this.encoder.encode((0, import_ui_utils15.formatDataStreamPart)("data", [value]))
+ this.encoder.encode((0, import_ui_utils14.formatDataStreamPart)("data", [value]))
  );
  }
  appendMessageAnnotation(value) {
@@ -7811,7 +7705,7 @@ var StreamData = class {
  throw new Error("Stream controller is not initialized.");
  }
  this.controller.enqueue(
- this.encoder.encode((0, import_ui_utils15.formatDataStreamPart)("message_annotations", [value]))
+ this.encoder.encode((0, import_ui_utils14.formatDataStreamPart)("message_annotations", [value]))
  );
  }
  };
@@ -7819,7 +7713,6 @@ var StreamData = class {
  0 && (module.exports = {
  AISDKError,
  APICallError,
- AssistantResponse,
  DownloadError,
  EmptyResponseBodyError,
  InvalidArgumentError,
@@ -7872,13 +7765,11 @@ var StreamData = class {
  experimental_generateImage,
  experimental_wrapLanguageModel,
  extractReasoningMiddleware,
- formatAssistantStreamPart,
  formatDataStreamPart,
  generateId,
  generateObject,
  generateText,
  jsonSchema,
- parseAssistantStreamPart,
  parseDataStreamPart,
  pipeDataStreamToResponse,
  processDataStream,