ai 4.0.0-canary.11 → 4.0.0-canary.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
  # ai
 
+ ## 4.0.0-canary.12
+
+ ### Patch Changes
+
+ - b053413: chore (ui): refactorings & README update
+ - Updated dependencies [b053413]
+   - @ai-sdk/ui-utils@1.0.0-canary.8
+   - @ai-sdk/react@1.0.0-canary.8
+
  ## 4.0.0-canary.11
 
  ### Major Changes
package/README.md CHANGED
@@ -32,17 +32,13 @@ npm install @ai-sdk/openai
  import { generateText } from 'ai';
  import { openai } from '@ai-sdk/openai'; // Ensure OPENAI_API_KEY environment variable is set
 
- async function main() {
-   const { text } = await generateText({
-     model: openai('gpt-4-turbo'),
-     system: 'You are a friendly assistant!',
-     prompt: 'Why is the sky blue?',
-   });
-
-   console.log(text);
- }
+ const { text } = await generateText({
+   model: openai('gpt-4o'),
+   system: 'You are a friendly assistant!',
+   prompt: 'Why is the sky blue?',
+ });
 
- main();
+ console.log(text);
 
  ```
 
  ### AI SDK UI
@@ -85,14 +81,14 @@ export default function Page() {
  ###### @/app/api/chat/route.ts (Next.js App Router)
 
  ```ts
- import { CoreMessage, streamText } from 'ai';
+ import { streamText } from 'ai';
  import { openai } from '@ai-sdk/openai';
 
  export async function POST(req: Request) {
-   const { messages }: { messages: CoreMessage[] } = await req.json();
+   const { messages } = await req.json();
 
    const result = streamText({
-     model: openai('gpt-4'),
+     model: openai('gpt-4o'),
      system: 'You are a helpful assistant.',
      messages,
    });
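
Note that the hunk above ends inside `POST`; the unchanged remainder of the handler is not part of this diff. For context, a complete handler in this SDK generation would return the streamed response. A minimal sketch, assuming the `toDataStreamResponse()` method that `streamText` results expose (the return line is an assumption, not shown in the diff):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

export async function POST(req: Request) {
  const { messages } = await req.json();

  const result = streamText({
    model: openai('gpt-4o'),
    system: 'You are a helpful assistant.',
    messages,
  });

  // Assumption: convert the result into a streaming Response using the
  // data stream protocol; this line is not shown in the diff hunk above.
  return result.toDataStreamResponse();
}
```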
package/dist/index.d.mts CHANGED
@@ -1,5 +1,5 @@
  import { ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
- export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, StreamPart, ToolInvocation, UseAssistantOptions, formatStreamPart, jsonSchema, parseStreamPart, processDataProtocolResponse, readDataStream } from '@ai-sdk/ui-utils';
+ export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream } from '@ai-sdk/ui-utils';
  export { ToolCall as CoreToolCall, ToolResult as CoreToolResult, generateId } from '@ai-sdk/provider-utils';
  import { AttributeValue, Tracer } from '@opentelemetry/api';
  import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONValue, LanguageModelV1CallOptions, NoSuchModelError, AISDKError } from '@ai-sdk/provider';
package/dist/index.d.ts CHANGED
@@ -1,5 +1,5 @@
  import { ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
- export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, StreamPart, ToolInvocation, UseAssistantOptions, formatStreamPart, jsonSchema, parseStreamPart, processDataProtocolResponse, readDataStream } from '@ai-sdk/ui-utils';
+ export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DataStreamPart, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, ToolInvocation, UseAssistantOptions, formatAssistantStreamPart, formatDataStreamPart, jsonSchema, parseAssistantStreamPart, parseDataStreamPart, processDataStream, processTextStream } from '@ai-sdk/ui-utils';
  export { ToolCall as CoreToolCall, ToolResult as CoreToolResult, generateId } from '@ai-sdk/provider-utils';
  import { AttributeValue, Tracer } from '@opentelemetry/api';
  import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONValue, LanguageModelV1CallOptions, NoSuchModelError, AISDKError } from '@ai-sdk/provider';
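
Taken together, the `.d.ts` changes are a rename of the stream-part helpers: the single `formatStreamPart`/`parseStreamPart` pair splits into data-stream and assistant-stream variants, and `processDataProtocolResponse`/`readDataStream` are replaced by `processDataStream`/`processTextStream`. A hedged migration sketch, with argument shapes taken from the call sites visible in the `index.js` diff below:

```ts
import { formatDataStreamPart, formatAssistantStreamPart } from 'ai';

// Before (canary.11): formatStreamPart('text', delta) served both protocols.
// After (canary.12): data stream parts use formatDataStreamPart...
const textPart = formatDataStreamPart('text', 'Hello, world!');
const errorPart = formatDataStreamPart('error', 'something went wrong');

// ...while assistant stream parts (used by AssistantResponse) use
// formatAssistantStreamPart with the same part names as before:
const controlPart = formatAssistantStreamPart('assistant_control_data', {
  threadId: 'thread_123', // illustrative IDs, not taken from the diff
  messageId: 'msg_456',
});
```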
package/dist/index.js CHANGED
@@ -53,14 +53,16 @@ __export(streams_exports, {
    experimental_createProviderRegistry: () => experimental_createProviderRegistry,
    experimental_customProvider: () => experimental_customProvider,
    experimental_wrapLanguageModel: () => experimental_wrapLanguageModel,
-   formatStreamPart: () => import_ui_utils10.formatStreamPart,
+   formatAssistantStreamPart: () => import_ui_utils10.formatAssistantStreamPart,
+   formatDataStreamPart: () => import_ui_utils10.formatDataStreamPart,
    generateId: () => import_provider_utils11.generateId,
    generateObject: () => generateObject,
    generateText: () => generateText,
    jsonSchema: () => import_ui_utils7.jsonSchema,
-   parseStreamPart: () => import_ui_utils10.parseStreamPart,
-   processDataProtocolResponse: () => import_ui_utils10.processDataProtocolResponse,
-   readDataStream: () => import_ui_utils10.readDataStream,
+   parseAssistantStreamPart: () => import_ui_utils10.parseAssistantStreamPart,
+   parseDataStreamPart: () => import_ui_utils10.parseDataStreamPart,
+   processDataStream: () => import_ui_utils10.processDataStream,
+   processTextStream: () => import_ui_utils10.processTextStream,
    streamObject: () => streamObject,
    streamText: () => streamText,
    tool: () => tool
@@ -3920,13 +3922,6 @@ function streamText({
    } = {},
    ...settings
  }) {
-   if (maxSteps < 1) {
-     throw new InvalidArgumentError({
-       parameter: "maxSteps",
-       value: maxSteps,
-       message: "maxSteps must be at least 1"
-     });
-   }
    return new DefaultStreamTextResult({
      model,
      telemetry,
@@ -3988,6 +3983,13 @@ var DefaultStreamTextResult = class {
      this.responsePromise = new DelayedPromise();
      this.stepsPromise = new DelayedPromise();
      this.stitchableStream = createStitchableStream();
+     if (maxSteps < 1) {
+       throw new InvalidArgumentError({
+         parameter: "maxSteps",
+         value: maxSteps,
+         message: "maxSteps must be at least 1"
+       });
+     }
      const tracer = getTracer(telemetry);
      const baseTelemetryAttributes = getBaseTelemetryAttributes({
        model,
@@ -4000,7 +4002,6 @@ var DefaultStreamTextResult = class {
        tools
      });
      const self = this;
-     const stepResults = [];
      recordSpan({
        name: "ai.streamText",
        attributes: selectTelemetryAttributes({
@@ -4019,9 +4020,15 @@ var DefaultStreamTextResult = class {
        endWhenDone: false,
        fn: async (rootSpan) => {
          const retry = retryWithExponentialBackoff({ maxRetries });
-         const startStep = async ({
-           responseMessages
-         }) => {
+         const stepResults = [];
+         async function streamStep({
+           currentStep,
+           responseMessages,
+           usage,
+           stepType,
+           previousStepText,
+           hasLeadingWhitespace
+         }) {
            const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
            const promptMessages = await convertToLanguageModelPrompt({
              prompt: {
@@ -4037,9 +4044,9 @@ var DefaultStreamTextResult = class {
              ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
            };
            const {
-             result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2, request: request2 },
-             doStreamSpan: doStreamSpan2,
-             startTimestampMs: startTimestampMs2
+             result: { stream, warnings, rawResponse, request },
+             doStreamSpan,
+             startTimestampMs
            } = await retry(
              () => recordSpan({
                name: "ai.streamText.doStream",
@@ -4081,10 +4088,10 @@ var DefaultStreamTextResult = class {
                }),
                tracer,
                endWhenDone: false,
-               fn: async (doStreamSpan3) => ({
+               fn: async (doStreamSpan2) => ({
                  startTimestampMs: now2(),
                  // get before the call
-                 doStreamSpan: doStreamSpan3,
+                 doStreamSpan: doStreamSpan2,
                  result: await model.doStream({
                    mode,
                    ...prepareCallSettings(settings),
@@ -4097,47 +4104,15 @@ var DefaultStreamTextResult = class {
                })
              })
            );
-           return {
-             result: {
-               stream: runToolsTransformation({
-                 tools,
-                 generatorStream: stream2,
-                 toolCallStreaming,
-                 tracer,
-                 telemetry,
-                 abortSignal
-               }),
-               warnings: warnings2,
-               request: request2 != null ? request2 : {},
-               rawResponse: rawResponse2
-             },
-             doStreamSpan: doStreamSpan2,
-             startTimestampMs: startTimestampMs2
-           };
-         };
-         const {
-           result: { stream, warnings, rawResponse, request },
-           doStreamSpan,
-           startTimestampMs
-         } = await startStep({ responseMessages: [] });
-         function addStepStream({
-           stream: stream2,
-           startTimestamp,
-           doStreamSpan: doStreamSpan2,
-           currentStep,
-           responseMessages,
-           usage = {
-             promptTokens: 0,
-             completionTokens: 0,
-             totalTokens: 0
-           },
-           stepType,
-           previousStepText = "",
-           stepRequest,
-           hasLeadingWhitespace,
-           warnings: warnings2,
-           response
-         }) {
+           const transformedStream = runToolsTransformation({
+             tools,
+             generatorStream: stream,
+             toolCallStreaming,
+             tracer,
+             telemetry,
+             abortSignal
+           });
+           const stepRequest = request != null ? request : {};
            const stepToolCalls = [];
            const stepToolResults = [];
            let stepFinishReason = "unknown";
@@ -4172,17 +4147,17 @@ var DefaultStreamTextResult = class {
              await (onChunk == null ? void 0 : onChunk({ chunk }));
            }
            self.stitchableStream.addStream(
-             stream2.pipeThrough(
+             transformedStream.pipeThrough(
                new TransformStream({
                  async transform(chunk, controller) {
                    var _a11, _b, _c;
                    if (stepFirstChunk) {
-                     const msToFirstChunk = now2() - startTimestamp;
+                     const msToFirstChunk = now2() - startTimestampMs;
                      stepFirstChunk = false;
-                     doStreamSpan2.addEvent("ai.stream.firstChunk", {
+                     doStreamSpan.addEvent("ai.stream.firstChunk", {
                        "ai.response.msToFirstChunk": msToFirstChunk
                      });
-                     doStreamSpan2.setAttributes({
+                     doStreamSpan.setAttributes({
                        "ai.response.msToFirstChunk": msToFirstChunk
                      });
                    }
@@ -4240,9 +4215,9 @@ var DefaultStreamTextResult = class {
                        stepFinishReason = chunk.finishReason;
                        stepProviderMetadata = chunk.experimental_providerMetadata;
                        stepLogProbs = chunk.logprobs;
-                       const msToFinish = now2() - startTimestamp;
-                       doStreamSpan2.addEvent("ai.stream.finish");
-                       doStreamSpan2.setAttributes({
+                       const msToFinish = now2() - startTimestampMs;
+                       doStreamSpan.addEvent("ai.stream.finish");
+                       doStreamSpan.setAttributes({
                          "ai.response.msToFinish": msToFinish,
                          "ai.response.avgCompletionTokensPerSecond": 1e3 * stepUsage.completionTokens / msToFinish
                        });
@@ -4293,7 +4268,7 @@ var DefaultStreamTextResult = class {
                      chunkBuffer = "";
                    }
                    try {
-                     doStreamSpan2.setAttributes(
+                     doStreamSpan.setAttributes(
                        selectTelemetryAttributes({
                          telemetry,
                          attributes: {
@@ -4318,7 +4293,7 @@ var DefaultStreamTextResult = class {
                      );
                    } catch (error) {
                    } finally {
-                     doStreamSpan2.end();
+                     doStreamSpan.end();
                    }
                    controller.enqueue({
                      type: "step-finish",
@@ -4358,12 +4333,12 @@ var DefaultStreamTextResult = class {
                      toolResults: stepToolResults,
                      finishReason: stepFinishReason,
                      usage: stepUsage,
-                     warnings: warnings2,
+                     warnings,
                      logprobs: stepLogProbs,
                      request: stepRequest,
                      response: {
                        ...stepResponse,
-                       headers: response == null ? void 0 : response.headers,
+                       headers: rawResponse == null ? void 0 : rawResponse.headers,
                        // deep clone msgs to avoid mutating past messages in multi-step:
                        messages: JSON.parse(JSON.stringify(responseMessages))
                      },
@@ -4378,26 +4353,13 @@ var DefaultStreamTextResult = class {
                      totalTokens: usage.totalTokens + stepUsage.totalTokens
                    };
                    if (nextStepType !== "done") {
-                     const {
-                       result,
-                       doStreamSpan: doStreamSpan3,
-                       startTimestampMs: startTimestamp2
-                     } = await startStep({ responseMessages });
-                     warnings2 = result.warnings;
-                     response = result.rawResponse;
-                     addStepStream({
-                       stream: result.stream,
-                       startTimestamp: startTimestamp2,
-                       doStreamSpan: doStreamSpan3,
+                     await streamStep({
                        currentStep: currentStep + 1,
                        responseMessages,
                        usage: combinedUsage,
                        stepType: nextStepType,
                        previousStepText: fullStepText,
-                       stepRequest: result.request,
-                       hasLeadingWhitespace: hasWhitespaceSuffix,
-                       warnings: warnings2,
-                       response
+                       hasLeadingWhitespace: hasWhitespaceSuffix
                      });
                      return;
                    }
@@ -4440,7 +4402,7 @@ var DefaultStreamTextResult = class {
                      messages: responseMessages
                    });
                    self.stepsPromise.resolve(stepResults);
-                   self.warningsPromise.resolve(warnings2 != null ? warnings2 : []);
+                   self.warningsPromise.resolve(warnings != null ? warnings : []);
                    await (onFinish == null ? void 0 : onFinish({
                      finishReason: stepFinishReason,
                      logprobs: stepLogProbs,
@@ -4458,7 +4420,7 @@ var DefaultStreamTextResult = class {
                        headers: rawResponse == null ? void 0 : rawResponse.headers,
                        messages: responseMessages
                      },
-                     warnings: warnings2,
+                     warnings,
                      experimental_providerMetadata: stepProviderMetadata,
                      steps: stepResults
                    }));
@@ -4472,18 +4434,17 @@ var DefaultStreamTextResult = class {
              )
            );
          }
-         addStepStream({
-           stream,
-           startTimestamp: startTimestampMs,
-           doStreamSpan,
+         await streamStep({
            currentStep: 0,
            responseMessages: [],
-           usage: void 0,
+           usage: {
+             promptTokens: 0,
+             completionTokens: 0,
+             totalTokens: 0
+           },
+           previousStepText: "",
            stepType: "initial",
-           stepRequest: request,
-           hasLeadingWhitespace: false,
-           warnings,
-           response: rawResponse
+           hasLeadingWhitespace: false
          });
        }
      }).catch((error) => {
@@ -4577,12 +4538,12 @@ var DefaultStreamTextResult = class {
          const chunkType = chunk.type;
          switch (chunkType) {
            case "text-delta": {
-             controller.enqueue((0, import_ui_utils6.formatStreamPart)("text", chunk.textDelta));
+             controller.enqueue((0, import_ui_utils6.formatDataStreamPart)("text", chunk.textDelta));
              break;
            }
            case "tool-call-streaming-start": {
              controller.enqueue(
-               (0, import_ui_utils6.formatStreamPart)("tool_call_streaming_start", {
+               (0, import_ui_utils6.formatDataStreamPart)("tool_call_streaming_start", {
                  toolCallId: chunk.toolCallId,
                  toolName: chunk.toolName
                })
@@ -4591,7 +4552,7 @@ var DefaultStreamTextResult = class {
            }
            case "tool-call-delta": {
              controller.enqueue(
-               (0, import_ui_utils6.formatStreamPart)("tool_call_delta", {
+               (0, import_ui_utils6.formatDataStreamPart)("tool_call_delta", {
                  toolCallId: chunk.toolCallId,
                  argsTextDelta: chunk.argsTextDelta
                })
@@ -4600,7 +4561,7 @@ var DefaultStreamTextResult = class {
            }
            case "tool-call": {
              controller.enqueue(
-               (0, import_ui_utils6.formatStreamPart)("tool_call", {
+               (0, import_ui_utils6.formatDataStreamPart)("tool_call", {
                  toolCallId: chunk.toolCallId,
                  toolName: chunk.toolName,
                  args: chunk.args
@@ -4610,7 +4571,7 @@ var DefaultStreamTextResult = class {
            }
            case "tool-result": {
              controller.enqueue(
-               (0, import_ui_utils6.formatStreamPart)("tool_result", {
+               (0, import_ui_utils6.formatDataStreamPart)("tool_result", {
                  toolCallId: chunk.toolCallId,
                  result: chunk.result
                })
@@ -4619,13 +4580,13 @@ var DefaultStreamTextResult = class {
            }
            case "error": {
              controller.enqueue(
-               (0, import_ui_utils6.formatStreamPart)("error", getErrorMessage3(chunk.error))
+               (0, import_ui_utils6.formatDataStreamPart)("error", getErrorMessage3(chunk.error))
              );
              break;
            }
            case "step-finish": {
              controller.enqueue(
-               (0, import_ui_utils6.formatStreamPart)("finish_step", {
+               (0, import_ui_utils6.formatDataStreamPart)("finish_step", {
                  finishReason: chunk.finishReason,
                  usage: sendUsage ? {
                    promptTokens: chunk.usage.promptTokens,
@@ -4638,7 +4599,7 @@ var DefaultStreamTextResult = class {
            }
            case "finish": {
              controller.enqueue(
-               (0, import_ui_utils6.formatStreamPart)("finish_message", {
+               (0, import_ui_utils6.formatDataStreamPart)("finish_message", {
                  finishReason: chunk.finishReason,
                  usage: sendUsage ? {
                    promptTokens: chunk.usage.promptTokens,
@@ -4915,17 +4876,21 @@ function AssistantResponse({ threadId, messageId }, process2) {
      const textEncoder = new TextEncoder();
      const sendMessage = (message) => {
        controller.enqueue(
-         textEncoder.encode((0, import_ui_utils8.formatStreamPart)("assistant_message", message))
+         textEncoder.encode(
+           (0, import_ui_utils8.formatAssistantStreamPart)("assistant_message", message)
+         )
        );
      };
      const sendDataMessage = (message) => {
        controller.enqueue(
-         textEncoder.encode((0, import_ui_utils8.formatStreamPart)("data_message", message))
+         textEncoder.encode(
+           (0, import_ui_utils8.formatAssistantStreamPart)("data_message", message)
+         )
        );
      };
      const sendError = (errorMessage) => {
        controller.enqueue(
-         textEncoder.encode((0, import_ui_utils8.formatStreamPart)("error", errorMessage))
+         textEncoder.encode((0, import_ui_utils8.formatAssistantStreamPart)("error", errorMessage))
        );
      };
      const forwardStream = async (stream2) => {
@@ -4936,7 +4901,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
          case "thread.message.created": {
            controller.enqueue(
              textEncoder.encode(
-               (0, import_ui_utils8.formatStreamPart)("assistant_message", {
+               (0, import_ui_utils8.formatAssistantStreamPart)("assistant_message", {
                  id: value.data.id,
                  role: "assistant",
                  content: [{ type: "text", text: { value: "" } }]
@@ -4950,7 +4915,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
            if ((content == null ? void 0 : content.type) === "text" && ((_b = content.text) == null ? void 0 : _b.value) != null) {
              controller.enqueue(
                textEncoder.encode(
-                 (0, import_ui_utils8.formatStreamPart)("text", content.text.value)
+                 (0, import_ui_utils8.formatAssistantStreamPart)("text", content.text.value)
                )
              );
            }
@@ -4967,7 +4932,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
      };
      controller.enqueue(
        textEncoder.encode(
-         (0, import_ui_utils8.formatStreamPart)("assistant_control_data", {
+         (0, import_ui_utils8.formatAssistantStreamPart)("assistant_control_data", {
            threadId,
            messageId
          })
@@ -5084,7 +5049,7 @@ var StreamData = class {
        throw new Error("Stream controller is not initialized.");
      }
      this.controller.enqueue(
-       this.encoder.encode((0, import_ui_utils9.formatStreamPart)("data", [value]))
+       this.encoder.encode((0, import_ui_utils9.formatDataStreamPart)("data", [value]))
      );
    }
    appendMessageAnnotation(value) {
@@ -5095,7 +5060,7 @@ var StreamData = class {
        throw new Error("Stream controller is not initialized.");
      }
      this.controller.enqueue(
-       this.encoder.encode((0, import_ui_utils9.formatStreamPart)("message_annotations", [value]))
+       this.encoder.encode((0, import_ui_utils9.formatDataStreamPart)("message_annotations", [value]))
      );
    }
  };
@@ -5105,7 +5070,7 @@ function createStreamDataTransformer() {
    return new TransformStream({
      transform: async (chunk, controller) => {
        const message = decoder.decode(chunk);
-       controller.enqueue(encoder.encode((0, import_ui_utils9.formatStreamPart)("text", message)));
+       controller.enqueue(encoder.encode((0, import_ui_utils9.formatDataStreamPart)("text", message)));
      }
    });
  }
@@ -5239,14 +5204,16 @@ function trimStartOfStream() {
    experimental_createProviderRegistry,
    experimental_customProvider,
    experimental_wrapLanguageModel,
-   formatStreamPart,
+   formatAssistantStreamPart,
+   formatDataStreamPart,
    generateId,
    generateObject,
    generateText,
    jsonSchema,
-   parseStreamPart,
-   processDataProtocolResponse,
-   readDataStream,
+   parseAssistantStreamPart,
+   parseDataStreamPart,
+   processDataStream,
+   processTextStream,
    streamObject,
    streamText,
    tool
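
One behavioral detail from the `streamText` hunks above: the `maxSteps` guard moved from the `streamText` function body into the `DefaultStreamTextResult` constructor. Since `streamText` constructs that result synchronously, an invalid value should still throw before any request is made. A minimal sketch (model and prompt are illustrative):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

try {
  // As of canary.12 the check runs in the result constructor,
  // so this still throws synchronously:
  streamText({
    model: openai('gpt-4o'),
    prompt: 'Why is the sky blue?',
    maxSteps: 0, // invalid: must be at least 1
  });
} catch (error) {
  console.error(error); // InvalidArgumentError: maxSteps must be at least 1
}
```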