ai 4.2.11 → 5.0.0-canary.0

This diff compares the published contents of two publicly available package versions from a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
package/CHANGELOG.md CHANGED
@@ -1,10 +1,21 @@
  # ai

- ## 4.2.11
+ ## 5.0.0-canary.0
+
+ ### Major Changes
+
+ - d5f588f: AI SDK 5
+ - 9477ebb: chore (ui): remove useAssistant hook (**breaking change**)

  ### Patch Changes

- - c45d100: fix (core): send buffered text in smooth stream when stream parts change
+ - 8026705: fix (core): send buffered text in smooth stream when stream parts change
+ - Updated dependencies [d5f588f]
+ - Updated dependencies [9477ebb]
+ - @ai-sdk/provider-utils@3.0.0-canary.0
+ - @ai-sdk/ui-utils@2.0.0-canary.0
+ - @ai-sdk/react@2.0.0-canary.0
+ - @ai-sdk/provider@2.0.0-canary.0

  ## 4.2.10

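The `9477ebb` entry above is the change that drives most of this diff: the `useAssistant` hook and the server-side `AssistantResponse` helper are removed. As a rough illustration of the breakage, the sketch below is the kind of route handler that compiles against `ai@4.2.11` but not against `5.0.0-canary.0`. Only the `AssistantResponse` shape (the `{ threadId, messageId }` settings and the callback's `sendMessage`/`sendDataMessage`/`forwardStream` options) is taken from the declarations removed in `package/dist/index.d.mts` below; the run-stream source is a hypothetical placeholder.

```ts
// Sketch of a route handler built on the AssistantResponse helper that this
// release removes. Only the helper's shape comes from the removed
// declarations; createAssistantRunStream is a hypothetical placeholder.
import { AssistantResponse } from 'ai'; // no longer exported in 5.0.0-canary.0

export async function POST(req: Request): Promise<Response> {
  const { threadId, messageId } = (await req.json()) as {
    threadId: string;
    messageId: string;
  };

  return AssistantResponse({ threadId, messageId }, async ({ forwardStream }) => {
    // forwardStream consumed an Assistants run stream ("thread.message.delta",
    // "thread.run.completed", ...) and resolved with the final run, per the
    // implementation removed from package/dist/index.js further down.
    await forwardStream(createAssistantRunStream(threadId));
  });
}

// Hypothetical source of the assistant run stream; not part of this package.
declare function createAssistantRunStream(threadId: string): AsyncIterable<unknown>;
```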
package/dist/index.d.mts CHANGED
@@ -1,7 +1,7 @@
  import { IDGenerator } from '@ai-sdk/provider-utils';
  export { CoreToolCall, CoreToolResult, IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
- import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
- export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UIMessage, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
+ import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1 } from '@ai-sdk/ui-utils';
+ export { Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UIMessage, formatDataStreamPart, jsonSchema, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
  import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1Source, JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, JSONParseError, TypeValidationError, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  import { ServerResponse } from 'node:http';
@@ -4243,43 +4243,6 @@ declare class RetryError extends AISDKError {
  static isInstance(error: unknown): error is RetryError;
  }

- /**
- You can pass the thread and the latest message into the `AssistantResponse`. This establishes the context for the response.
- */
- type AssistantResponseSettings = {
- /**
- The thread ID that the response is associated with.
- */
- threadId: string;
- /**
- The ID of the latest message that the response is associated with.
- */
- messageId: string;
- };
- /**
- The process parameter is a callback in which you can run the assistant on threads, and send messages and data messages to the client.
- */
- type AssistantResponseCallback = (options: {
- /**
- Forwards an assistant message (non-streaming) to the client.
- */
- sendMessage: (message: AssistantMessage) => void;
- /**
- Send a data message to the client. You can use this to provide information for rendering custom UIs while the assistant is processing the thread.
- */
- sendDataMessage: (message: DataMessage) => void;
- /**
- Forwards the assistant response stream to the client. Returns the `Run` object after it completes, or when it requires an action.
- */
- forwardStream: (stream: any) => Promise<any | undefined>;
- }) => Promise<void>;
- /**
- The `AssistantResponse` allows you to send a stream of assistant update to `useAssistant`.
- It is designed to facilitate streaming assistant responses to the `useAssistant` hook.
- It receives an assistant thread and a current message, and can send messages and data messages to the client.
- */
- declare function AssistantResponse({ threadId, messageId }: AssistantResponseSettings, process: AssistantResponseCallback): Response;
-
  /**
  * Configuration options and helper callback methods for stream lifecycle events.
  */
@@ -4376,4 +4339,4 @@ declare namespace llamaindexAdapter {
  };
  }
 
- export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LanguageModelV1Middleware, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, ProviderRegistryProvider, RepairTextFunction, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, wrapLanguageModel };
+ export { AssistantContent, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LanguageModelV1Middleware, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, ProviderRegistryProvider, RepairTextFunction, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, wrapLanguageModel };
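For quick reference, the change to the export lines above removes every assistant-related symbol from the package's public type surface. A minimal sketch, based only on the old and new export lists in this file: each of these imports resolves against `4.2.11` and fails against `5.0.0-canary.0`.

```ts
// Every name here appears in the 4.2.11 export lists above and is absent from
// the 5.0.0-canary.0 lists, so this import block breaks on upgrade.
import {
  AssistantResponse,            // removed runtime export
  formatAssistantStreamPart,    // removed re-export from @ai-sdk/ui-utils
  parseAssistantStreamPart,     // removed re-export from @ai-sdk/ui-utils
  type AssistantMessage,        // removed type re-export
  type AssistantStatus,         // removed type re-export
  type DataMessage,             // removed type re-export
  type UseAssistantOptions,     // removed type re-export
} from 'ai';
```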
package/dist/index.d.ts CHANGED
@@ -1,7 +1,7 @@
  import { IDGenerator } from '@ai-sdk/provider-utils';
  export { CoreToolCall, CoreToolResult, IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
- import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
- export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UIMessage, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
+ import { DataStreamString, Message, Schema, DeepPartial, JSONValue as JSONValue$1 } from '@ai-sdk/ui-utils';
+ export { Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UIMessage, formatDataStreamPart, jsonSchema, parseDataStreamPart, processDataStream, processTextStream, zodSchema } from '@ai-sdk/ui-utils';
  import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1Source, JSONValue, EmbeddingModelV1, EmbeddingModelV1Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1CallOptions, AISDKError, LanguageModelV1FunctionToolCall, JSONSchema7, JSONParseError, TypeValidationError, ProviderV1, NoSuchModelError } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  import { ServerResponse } from 'node:http';
@@ -4243,43 +4243,6 @@ declare class RetryError extends AISDKError {
  static isInstance(error: unknown): error is RetryError;
  }

- /**
- You can pass the thread and the latest message into the `AssistantResponse`. This establishes the context for the response.
- */
- type AssistantResponseSettings = {
- /**
- The thread ID that the response is associated with.
- */
- threadId: string;
- /**
- The ID of the latest message that the response is associated with.
- */
- messageId: string;
- };
- /**
- The process parameter is a callback in which you can run the assistant on threads, and send messages and data messages to the client.
- */
- type AssistantResponseCallback = (options: {
- /**
- Forwards an assistant message (non-streaming) to the client.
- */
- sendMessage: (message: AssistantMessage) => void;
- /**
- Send a data message to the client. You can use this to provide information for rendering custom UIs while the assistant is processing the thread.
- */
- sendDataMessage: (message: DataMessage) => void;
- /**
- Forwards the assistant response stream to the client. Returns the `Run` object after it completes, or when it requires an action.
- */
- forwardStream: (stream: any) => Promise<any | undefined>;
- }) => Promise<void>;
- /**
- The `AssistantResponse` allows you to send a stream of assistant update to `useAssistant`.
- It is designed to facilitate streaming assistant responses to the `useAssistant` hook.
- It receives an assistant thread and a current message, and can send messages and data messages to the client.
- */
- declare function AssistantResponse({ threadId, messageId }: AssistantResponseSettings, process: AssistantResponseCallback): Response;
-
  /**
  * Configuration options and helper callback methods for stream lifecycle events.
  */
@@ -4376,4 +4339,4 @@ declare namespace llamaindexAdapter {
  };
  }
 
- export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LanguageModelV1Middleware, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, ProviderRegistryProvider, RepairTextFunction, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, wrapLanguageModel };
+ export { AssistantContent, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolCallUnion, CoreToolChoice, CoreToolMessage, CoreToolResultUnion, CoreUserMessage, DataContent, DataStreamOptions, DataStreamWriter, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedFile, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LanguageModelV1Middleware, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Provider, ProviderMetadata, ProviderRegistryProvider, RepairTextFunction, RetryError, StepResult, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolResultPart, ToolResultUnion, ToolSet, UserContent, appendClientMessage, appendResponseMessages, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, experimental_wrapLanguageModel, extractReasoningMiddleware, generateObject, generateText, pipeDataStreamToResponse, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, wrapLanguageModel };
package/dist/index.js CHANGED
@@ -22,7 +22,6 @@ var streams_exports = {};
  __export(streams_exports, {
  AISDKError: () => import_provider20.AISDKError,
  APICallError: () => import_provider20.APICallError,
- AssistantResponse: () => AssistantResponse,
  DownloadError: () => DownloadError,
  EmptyResponseBodyError: () => import_provider20.EmptyResponseBodyError,
  InvalidArgumentError: () => InvalidArgumentError,
@@ -75,13 +74,11 @@ __export(streams_exports, {
  experimental_generateImage: () => generateImage,
  experimental_wrapLanguageModel: () => experimental_wrapLanguageModel,
  extractReasoningMiddleware: () => extractReasoningMiddleware,
- formatAssistantStreamPart: () => import_ui_utils11.formatAssistantStreamPart,
  formatDataStreamPart: () => import_ui_utils11.formatDataStreamPart,
  generateId: () => import_provider_utils15.generateId,
  generateObject: () => generateObject,
  generateText: () => generateText,
  jsonSchema: () => import_ui_utils11.jsonSchema,
- parseAssistantStreamPart: () => import_ui_utils11.parseAssistantStreamPart,
  parseDataStreamPart: () => import_ui_utils11.parseDataStreamPart,
  pipeDataStreamToResponse: () => pipeDataStreamToResponse,
  processDataStream: () => import_ui_utils11.processDataStream,
@@ -7476,102 +7473,6 @@ function simulateReadableStream({
  });
  }

- // streams/assistant-response.ts
- var import_ui_utils12 = require("@ai-sdk/ui-utils");
- function AssistantResponse({ threadId, messageId }, process2) {
- const stream = new ReadableStream({
- async start(controller) {
- var _a17;
- const textEncoder = new TextEncoder();
- const sendMessage = (message) => {
- controller.enqueue(
- textEncoder.encode(
- (0, import_ui_utils12.formatAssistantStreamPart)("assistant_message", message)
- )
- );
- };
- const sendDataMessage = (message) => {
- controller.enqueue(
- textEncoder.encode(
- (0, import_ui_utils12.formatAssistantStreamPart)("data_message", message)
- )
- );
- };
- const sendError = (errorMessage) => {
- controller.enqueue(
- textEncoder.encode((0, import_ui_utils12.formatAssistantStreamPart)("error", errorMessage))
- );
- };
- const forwardStream = async (stream2) => {
- var _a18, _b;
- let result = void 0;
- for await (const value of stream2) {
- switch (value.event) {
- case "thread.message.created": {
- controller.enqueue(
- textEncoder.encode(
- (0, import_ui_utils12.formatAssistantStreamPart)("assistant_message", {
- id: value.data.id,
- role: "assistant",
- content: [{ type: "text", text: { value: "" } }]
- })
- )
- );
- break;
- }
- case "thread.message.delta": {
- const content = (_a18 = value.data.delta.content) == null ? void 0 : _a18[0];
- if ((content == null ? void 0 : content.type) === "text" && ((_b = content.text) == null ? void 0 : _b.value) != null) {
- controller.enqueue(
- textEncoder.encode(
- (0, import_ui_utils12.formatAssistantStreamPart)("text", content.text.value)
- )
- );
- }
- break;
- }
- case "thread.run.completed":
- case "thread.run.requires_action": {
- result = value.data;
- break;
- }
- }
- }
- return result;
- };
- controller.enqueue(
- textEncoder.encode(
- (0, import_ui_utils12.formatAssistantStreamPart)("assistant_control_data", {
- threadId,
- messageId
- })
- )
- );
- try {
- await process2({
- sendMessage,
- sendDataMessage,
- forwardStream
- });
- } catch (error) {
- sendError((_a17 = error.message) != null ? _a17 : `${error}`);
- } finally {
- controller.close();
- }
- },
- pull(controller) {
- },
- cancel() {
- }
- });
- return new Response(stream, {
- status: 200,
- headers: {
- "Content-Type": "text/plain; charset=utf-8"
- }
- });
- }
-
  // streams/langchain-adapter.ts
  var langchain_adapter_exports = {};
  __export(langchain_adapter_exports, {
@@ -7579,7 +7480,7 @@ __export(langchain_adapter_exports, {
  toDataStream: () => toDataStream,
  toDataStreamResponse: () => toDataStreamResponse
  });
- var import_ui_utils13 = require("@ai-sdk/ui-utils");
+ var import_ui_utils12 = require("@ai-sdk/ui-utils");

  // streams/stream-callbacks.ts
  function createCallbacksTransformer(callbacks = {}) {
@@ -7635,7 +7536,7 @@ function toDataStreamInternal(stream, callbacks) {
  ).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(new TextDecoderStream()).pipeThrough(
  new TransformStream({
  transform: async (chunk, controller) => {
- controller.enqueue((0, import_ui_utils13.formatDataStreamPart)("text", chunk));
+ controller.enqueue((0, import_ui_utils12.formatDataStreamPart)("text", chunk));
  }
  })
  );
@@ -7687,7 +7588,7 @@ __export(llamaindex_adapter_exports, {
  toDataStreamResponse: () => toDataStreamResponse2
  });
  var import_provider_utils16 = require("@ai-sdk/provider-utils");
- var import_ui_utils14 = require("@ai-sdk/ui-utils");
+ var import_ui_utils13 = require("@ai-sdk/ui-utils");
  function toDataStreamInternal2(stream, callbacks) {
  const trimStart = trimStartOfStream();
  return (0, import_provider_utils16.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
@@ -7699,7 +7600,7 @@ function toDataStreamInternal2(stream, callbacks) {
  ).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(new TextDecoderStream()).pipeThrough(
  new TransformStream({
  transform: async (chunk, controller) => {
- controller.enqueue((0, import_ui_utils14.formatDataStreamPart)("text", chunk));
+ controller.enqueue((0, import_ui_utils13.formatDataStreamPart)("text", chunk));
  }
  })
  );
@@ -7741,7 +7642,7 @@ function trimStartOfStream() {
  }

  // streams/stream-data.ts
- var import_ui_utils15 = require("@ai-sdk/ui-utils");
+ var import_ui_utils14 = require("@ai-sdk/ui-utils");

  // util/constants.ts
  var HANGING_STREAM_WARNING_TIME_MS = 15 * 1e3;
@@ -7793,7 +7694,7 @@ var StreamData = class {
  throw new Error("Stream controller is not initialized.");
  }
  this.controller.enqueue(
- this.encoder.encode((0, import_ui_utils15.formatDataStreamPart)("data", [value]))
+ this.encoder.encode((0, import_ui_utils14.formatDataStreamPart)("data", [value]))
  );
  }
  appendMessageAnnotation(value) {
@@ -7804,7 +7705,7 @@ var StreamData = class {
  throw new Error("Stream controller is not initialized.");
  }
  this.controller.enqueue(
- this.encoder.encode((0, import_ui_utils15.formatDataStreamPart)("message_annotations", [value]))
+ this.encoder.encode((0, import_ui_utils14.formatDataStreamPart)("message_annotations", [value]))
  );
  }
  };
@@ -7812,7 +7713,6 @@ var StreamData = class {
  0 && (module.exports = {
  AISDKError,
  APICallError,
- AssistantResponse,
  DownloadError,
  EmptyResponseBodyError,
  InvalidArgumentError,
@@ -7865,13 +7765,11 @@ var StreamData = class {
  experimental_generateImage,
  experimental_wrapLanguageModel,
  extractReasoningMiddleware,
- formatAssistantStreamPart,
  formatDataStreamPart,
  generateId,
  generateObject,
  generateText,
  jsonSchema,
- parseAssistantStreamPart,
  parseDataStreamPart,
  pipeDataStreamToResponse,
  processDataStream,