ai 5.0.0-canary.7 → 5.0.0-canary.9

This diff shows the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -1993,6 +1993,7 @@ function selectTelemetryAttributes({
  async function embed({
  model,
  value,
+ providerOptions,
  maxRetries: maxRetriesArg,
  abortSignal,
  headers,
@@ -2018,7 +2019,7 @@ async function embed({
  }),
  tracer,
  fn: async (span) => {
- const { embedding, usage, rawResponse } = await retry(
+ const { embedding, usage, response } = await retry(
  () => (
  // nested spans to align with the embedMany telemetry data:
  recordSpan({
@@ -2041,7 +2042,8 @@ async function embed({
  const modelResponse = await model.doEmbed({
  values: [value],
  abortSignal,
- headers
+ headers,
+ providerOptions
  });
  const embedding2 = modelResponse.embeddings[0];
  const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2061,7 +2063,7 @@ async function embed({
  return {
  embedding: embedding2,
  usage: usage2,
- rawResponse: modelResponse.rawResponse
+ response: modelResponse.response
  };
  }
  })
@@ -2076,7 +2078,12 @@ async function embed({
  }
  })
  );
- return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
+ return new DefaultEmbedResult({
+ value,
+ embedding,
+ usage,
+ response
+ });
  }
  });
  }
@@ -2085,7 +2092,7 @@ var DefaultEmbedResult = class {
  this.value = options.value;
  this.embedding = options.embedding;
  this.usage = options.usage;
- this.rawResponse = options.rawResponse;
+ this.response = options.response;
  }
  };

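Note: two caller-visible changes to `embed` in this range: the new `providerOptions` argument is forwarded to the model's `doEmbed` call, and the `rawResponse` field on the result is renamed to `response`. A minimal usage sketch; the provider key and option name are illustrative, not part of this diff:

```js
import { embed } from "ai";

const { embedding, usage, response } = await embed({
  model: embeddingModel, // any embedding model instance
  value: "sunny day at the beach",
  providerOptions: { myProvider: { someOption: true } }, // forwarded to doEmbed
});

console.log(response?.headers); // previously result.rawResponse
```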
@@ -2108,6 +2115,7 @@ async function embedMany({
  maxRetries: maxRetriesArg,
  abortSignal,
  headers,
+ providerOptions,
  experimental_telemetry: telemetry
  }) {
  const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
@@ -2135,7 +2143,7 @@ async function embedMany({
  fn: async (span) => {
  const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
  if (maxEmbeddingsPerCall == null) {
- const { embeddings: embeddings2, usage } = await retry(() => {
+ const { embeddings: embeddings2, usage, response } = await retry(() => {
  return recordSpan({
  name: "ai.embedMany.doEmbed",
  attributes: selectTelemetryAttributes({
@@ -2158,7 +2166,8 @@ async function embedMany({
  const modelResponse = await model.doEmbed({
  values,
  abortSignal,
- headers
+ headers,
+ providerOptions
  });
  const embeddings3 = modelResponse.embeddings;
  const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2173,7 +2182,11 @@ async function embedMany({
  }
  })
  );
- return { embeddings: embeddings3, usage: usage2 };
+ return {
+ embeddings: embeddings3,
+ usage: usage2,
+ response: modelResponse.response
+ };
  }
  });
  });
@@ -2188,13 +2201,23 @@ async function embedMany({
  }
  })
  );
- return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
+ return new DefaultEmbedManyResult({
+ values,
+ embeddings: embeddings2,
+ usage,
+ responses: [response]
+ });
  }
  const valueChunks = splitArray(values, maxEmbeddingsPerCall);
  const embeddings = [];
+ const responses = [];
  let tokens = 0;
  for (const chunk of valueChunks) {
- const { embeddings: responseEmbeddings, usage } = await retry(() => {
+ const {
+ embeddings: responseEmbeddings,
+ usage,
+ response
+ } = await retry(() => {
  return recordSpan({
  name: "ai.embedMany.doEmbed",
  attributes: selectTelemetryAttributes({
@@ -2217,7 +2240,8 @@ async function embedMany({
  const modelResponse = await model.doEmbed({
  values: chunk,
  abortSignal,
- headers
+ headers,
+ providerOptions
  });
  const embeddings2 = modelResponse.embeddings;
  const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2232,11 +2256,16 @@ async function embedMany({
  }
  })
  );
- return { embeddings: embeddings2, usage: usage2 };
+ return {
+ embeddings: embeddings2,
+ usage: usage2,
+ response: modelResponse.response
+ };
  }
  });
  });
  embeddings.push(...responseEmbeddings);
+ responses.push(response);
  tokens += usage.tokens;
  }
  span.setAttributes(
@@ -2253,7 +2282,8 @@ async function embedMany({
  return new DefaultEmbedManyResult({
  values,
  embeddings,
- usage: { tokens }
+ usage: { tokens },
+ responses
  });
  }
  });
@@ -2263,6 +2293,7 @@ var DefaultEmbedManyResult = class {
  this.values = options.values;
  this.embeddings = options.embeddings;
  this.usage = options.usage;
+ this.responses = options.responses;
  }
  };

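Note: `embedMany` gains the same `providerOptions` passthrough and collects one raw `response` per underlying `doEmbed` call into the new `responses` array. When values are chunked by `model.maxEmbeddingsPerCall`, `responses.length` equals the number of provider calls, not the number of values. Illustrative sketch:

```js
const { embeddings, responses } = await embedMany({
  model: embeddingModel, // assume model.maxEmbeddingsPerCall === 2
  values: ["a", "b", "c"], // 3 values -> Math.ceil(3 / 2) = 2 doEmbed calls
});

console.log(embeddings.length); // 3: one embedding per value
console.log(responses.length); // 2: one response per chunked doEmbed call
```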
@@ -4062,6 +4093,17 @@ function validateObjectGenerationInput({
  }
  }

+ // core/generate-text/extract-content-text.ts
+ function extractContentText(content) {
+ const parts = content.filter(
+ (content2) => content2.type === "text"
+ );
+ if (parts.length === 0) {
+ return void 0;
+ }
+ return parts.map((content2) => content2.text).join("");
+ }
+
  // core/generate-object/generate-object.ts
  var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
  async function generateObject({
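Note: the new `extractContentText` helper reflects the language-model v2 result shape, where a generation carries an ordered `content` array instead of a top-level `text` string. It concatenates all text parts and returns `undefined` when there are none, which downstream code treats as "no text generated". Behavior sketch:

```js
extractContentText([
  { type: "text", text: "Hello, " },
  { type: "tool-call", toolCallType: "function", toolCallId: "1", toolName: "t", args: "{}" },
  { type: "text", text: "world!" },
]); // => "Hello, world!"

extractContentText([
  { type: "tool-call", toolCallType: "function", toolCallId: "1", toolName: "t", args: "{}" },
]); // => undefined (no text parts at all)
```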
@@ -4219,7 +4261,8 @@ async function generateObject({
  headers: (_g = result2.response) == null ? void 0 : _g.headers,
  body: (_h = result2.response) == null ? void 0 : _h.body
  };
- if (result2.text === void 0) {
+ const text2 = extractContentText(result2.content);
+ if (text2 === void 0) {
  throw new NoObjectGeneratedError({
  message: "No object generated: the model did not return a response.",
  response: responseData,
@@ -4232,7 +4275,7 @@ async function generateObject({
  telemetry,
  attributes: {
  "ai.response.finishReason": result2.finishReason,
- "ai.response.object": { output: () => result2.text },
+ "ai.response.object": { output: () => text2 },
  "ai.response.id": responseData.id,
  "ai.response.model": responseData.modelId,
  "ai.response.timestamp": responseData.timestamp.toISOString(),
@@ -4248,7 +4291,7 @@ async function generateObject({
  }
  })
  );
- return { ...result2, objectText: result2.text, responseData };
+ return { ...result2, objectText: text2, responseData };
  }
  })
  );
@@ -4305,7 +4348,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
- var _a18, _b2, _c2, _d2, _e, _f, _g, _h, _i, _j;
+ var _a18, _b2, _c2, _d2, _e, _f, _g, _h;
  const result2 = await model.doGenerate({
  tools: [
  {
@@ -4323,13 +4366,16 @@ async function generateObject({
  abortSignal,
  headers
  });
- const objectText = (_b2 = (_a18 = result2.toolCalls) == null ? void 0 : _a18[0]) == null ? void 0 : _b2.args;
+ const firstToolCall = result2.content.find(
+ (content) => content.type === "tool-call"
+ );
+ const objectText = firstToolCall == null ? void 0 : firstToolCall.args;
  const responseData = {
- id: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.id) != null ? _d2 : generateId3(),
- timestamp: (_f = (_e = result2.response) == null ? void 0 : _e.timestamp) != null ? _f : currentDate(),
- modelId: (_h = (_g = result2.response) == null ? void 0 : _g.modelId) != null ? _h : model.modelId,
- headers: (_i = result2.response) == null ? void 0 : _i.headers,
- body: (_j = result2.response) == null ? void 0 : _j.body
+ id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
+ timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+ modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+ headers: (_g = result2.response) == null ? void 0 : _g.headers,
+ body: (_h = result2.response) == null ? void 0 : _h.body
  };
  if (objectText === void 0) {
  throw new NoObjectGeneratedError({
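Note: both `generateObject` output modes now read from `result.content`: json mode concatenates text parts via `extractContentText`, and tool mode takes the `args` of the first `tool-call` part (replacing `result.toolCalls?.[0]?.args`). An illustrative v2 result shape for tool mode (sketched, not real provider output):

```js
const result = {
  content: [
    {
      type: "tool-call",
      toolCallType: "function",
      toolCallId: "call_1",
      toolName: "json",
      args: '{"name":"Ada"}', // becomes objectText, then JSON-parsed
    },
  ],
  finishReason: "tool-calls",
};
```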
@@ -4799,8 +4845,8 @@ var DefaultStreamObjectResult = class {
  transformer = {
  transform: (chunk, controller) => {
  switch (chunk.type) {
- case "text-delta":
- controller.enqueue(chunk.textDelta);
+ case "text":
+ controller.enqueue(chunk.text);
  break;
  case "response-metadata":
  case "finish":
@@ -4866,7 +4912,7 @@ var DefaultStreamObjectResult = class {
  }
  }
  const {
- result: { stream, warnings, response, request },
+ result: { stream, response, request },
  doStreamSpan,
  startTimestampMs
  } = await retry(
@@ -4908,6 +4954,7 @@ var DefaultStreamObjectResult = class {
  })
  );
  self.requestPromise.resolve(request != null ? request : {});
+ let warnings;
  let usage;
  let finishReason;
  let providerMetadata;
@@ -4928,6 +4975,10 @@ var DefaultStreamObjectResult = class {
  new TransformStream({
  async transform(chunk, controller) {
  var _a18, _b2, _c;
+ if (typeof chunk === "object" && chunk.type === "stream-start") {
+ warnings = chunk.warnings;
+ return;
+ }
  if (isFirstChunk) {
  const msToFirstChunk = now2() - startTimestampMs;
  isFirstChunk = false;
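Note: two stream-protocol changes surface here and recur throughout this diff: text chunks are renamed from `{ type: "text-delta", textDelta }` to `{ type: "text", text }`, and call warnings now arrive as the first stream chunk, `{ type: "stream-start", warnings }`, instead of on the `doStream` result object. A hand-written sketch of what a conforming stream now emits first:

```js
const stream = new ReadableStream({
  start(controller) {
    // warnings travel inside the stream, ahead of all other chunks:
    controller.enqueue({ type: "stream-start", warnings: [] });
    // text chunks carry `text` instead of `textDelta`:
    controller.enqueue({ type: "text", text: "Hello" });
    controller.close();
  },
});
```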
@@ -5451,7 +5502,7 @@ async function doParseToolCall({
  };
  }

- // core/generate-text/reasoning-detail.ts
+ // core/generate-text/reasoning.ts
  function asReasoningText(reasoning) {
  const reasoningText = reasoning.filter((part) => part.type === "text").map((part) => part.text).join("");
  return reasoningText.length > 0 ? reasoningText : void 0;
@@ -5469,23 +5520,36 @@ function toResponseMessages({
  generateMessageId
  }) {
  const responseMessages = [];
- responseMessages.push({
- role: "assistant",
- content: [
+ const content = [];
+ if (reasoning.length > 0) {
+ content.push(
  ...reasoning.map(
  (part) => part.type === "text" ? { ...part, type: "reasoning" } : { ...part, type: "redacted-reasoning" }
- ),
- // TODO language model v2: switch to order response content (instead of type-based ordering)
+ )
+ );
+ }
+ if (files.length > 0) {
+ content.push(
  ...files.map((file) => ({
  type: "file",
  data: file.base64,
  mediaType: file.mediaType
- })),
- { type: "text", text: text2 },
- ...toolCalls
- ],
- id: messageId
- });
+ }))
+ );
+ }
+ if (text2.length > 0) {
+ content.push({ type: "text", text: text2 });
+ }
+ if (toolCalls.length > 0) {
+ content.push(...toolCalls);
+ }
+ if (content.length > 0) {
+ responseMessages.push({
+ role: "assistant",
+ content,
+ id: messageId
+ });
+ }
  if (toolResults.length > 0) {
  responseMessages.push({
  role: "tool",
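Note: `toResponseMessages` now assembles the assistant content conditionally: reasoning, files, text, and tool calls are only appended when non-empty, and the assistant message itself is skipped when nothing remains (previously it was always pushed, including an empty text part). A rough sketch for a text-only step:

```js
toResponseMessages({
  text: "Hi!", files: [], reasoning: [], toolCalls: [], toolResults: [],
  tools: {}, messageId: "msg-1", generateMessageId: () => "msg-2",
});
// => [{ role: "assistant", content: [{ type: "text", text: "Hi!" }], id: "msg-1" }]
// a step with no text, files, reasoning, or tool calls now yields no
// assistant message at all
```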
@@ -5589,7 +5653,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a18, _b, _c, _d, _e, _f, _g;
+ var _a18, _b, _c, _d;
  const toolsAndToolChoice = {
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
  };
@@ -5664,7 +5728,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span2) => {
- var _a19, _b2, _c2, _d2, _e2, _f2, _g2, _h;
+ var _a19, _b2, _c2, _d2, _e, _f, _g, _h;
  const result = await model.doGenerate({
  ...callSettings,
  ...toolsAndToolChoice,
@@ -5678,8 +5742,8 @@ async function generateText({
  const responseData = {
  id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
- modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId,
- headers: (_g2 = result.response) == null ? void 0 : _g2.headers,
+ modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+ headers: (_g = result.response) == null ? void 0 : _g.headers,
  body: (_h = result.response) == null ? void 0 : _h.body
  };
  span2.setAttributes(
@@ -5688,10 +5752,13 @@ async function generateText({
  attributes: {
  "ai.response.finishReason": result.finishReason,
  "ai.response.text": {
- output: () => result.text
+ output: () => extractContentText(result.content)
  },
  "ai.response.toolCalls": {
- output: () => JSON.stringify(result.toolCalls)
+ output: () => {
+ const toolCalls = asToolCalls(result.content);
+ return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+ }
  },
  "ai.response.id": responseData.id,
  "ai.response.model": responseData.modelId,
@@ -5713,7 +5780,9 @@ async function generateText({
  })
  );
  currentToolCalls = await Promise.all(
- ((_b = currentModelResponse.toolCalls) != null ? _b : []).map(
+ currentModelResponse.content.filter(
+ (part) => part.type === "tool-call"
+ ).map(
  (toolCall) => parseToolCall({
  toolCall,
  tools,
@@ -5748,15 +5817,19 @@ async function generateText({
  nextStepType = "tool-result";
  }
  }
- const originalText = (_c = currentModelResponse.text) != null ? _c : "";
+ const originalText = (_b = extractContentText(currentModelResponse.content)) != null ? _b : "";
  const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
  text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
  const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
  text2 = nextStepType === "continue" || stepType === "continue" ? text2 + stepText : stepText;
  currentReasoningDetails = asReasoningDetails(
- currentModelResponse.reasoning
+ currentModelResponse.content
+ );
+ sources.push(
+ ...currentModelResponse.content.filter(
+ (part) => part.type === "source"
+ )
  );
- sources.push(...(_d = currentModelResponse.sources) != null ? _d : []);
  if (stepType === "continue") {
  const lastMessage = responseMessages[responseMessages.length - 1];
  if (typeof lastMessage.content === "string") {
@@ -5771,8 +5844,8 @@ async function generateText({
  responseMessages.push(
  ...toResponseMessages({
  text: text2,
- files: asFiles(currentModelResponse.files),
- reasoning: asReasoningDetails(currentModelResponse.reasoning),
+ files: asFiles(currentModelResponse.content),
+ reasoning: asReasoningDetails(currentModelResponse.content),
  tools: tools != null ? tools : {},
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
@@ -5784,18 +5857,19 @@ async function generateText({
  const currentStepResult = {
  stepType,
  text: stepText,
- // TODO v5: rename reasoning to reasoningText (and use reasoning for composite array)
- reasoning: asReasoningText(currentReasoningDetails),
- reasoningDetails: currentReasoningDetails,
- files: asFiles(currentModelResponse.files),
- sources: (_e = currentModelResponse.sources) != null ? _e : [],
+ reasoningText: asReasoningText(currentReasoningDetails),
+ reasoning: currentReasoningDetails,
+ files: asFiles(currentModelResponse.content),
+ sources: currentModelResponse.content.filter(
+ (part) => part.type === "source"
+ ),
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
  finishReason: currentModelResponse.finishReason,
  usage: currentUsage,
  warnings: currentModelResponse.warnings,
  logprobs: currentModelResponse.logprobs,
- request: (_f = currentModelResponse.request) != null ? _f : {},
+ request: (_c = currentModelResponse.request) != null ? _c : {},
  response: {
  ...currentModelResponse.response,
  // deep clone msgs to avoid mutating past messages in multi-step:
@@ -5814,10 +5888,13 @@ async function generateText({
  attributes: {
  "ai.response.finishReason": currentModelResponse.finishReason,
  "ai.response.text": {
- output: () => currentModelResponse.text
+ output: () => extractContentText(currentModelResponse.content)
  },
  "ai.response.toolCalls": {
- output: () => JSON.stringify(currentModelResponse.toolCalls)
+ output: () => {
+ const toolCalls = asToolCalls(currentModelResponse.content);
+ return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+ }
  },
  // TODO rename telemetry attributes to inputTokens and outputTokens
  "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
@@ -5827,7 +5904,7 @@ async function generateText({
  );
  return new DefaultGenerateTextResult({
  text: text2,
- files: asFiles(currentModelResponse.files),
+ files: asFiles(currentModelResponse.content),
  reasoning: asReasoningText(currentReasoningDetails),
  reasoningDetails: currentReasoningDetails,
  sources,
@@ -5849,7 +5926,7 @@ async function generateText({
  finishReason: currentModelResponse.finishReason,
  usage,
  warnings: currentModelResponse.warnings,
- request: (_g = currentModelResponse.request) != null ? _g : {},
+ request: (_d = currentModelResponse.request) != null ? _d : {},
  response: {
  ...currentModelResponse.response,
  messages: responseMessages
@@ -5940,8 +6017,8 @@ var DefaultGenerateTextResult = class {
  constructor(options) {
  this.text = options.text;
  this.files = options.files;
- this.reasoning = options.reasoning;
- this.reasoningDetails = options.reasoningDetails;
+ this.reasoningText = options.reasoning;
+ this.reasoning = options.reasoningDetails;
  this.toolCalls = options.toolCalls;
  this.toolResults = options.toolResults;
  this.finishReason = options.finishReason;
@@ -5959,18 +6036,50 @@ var DefaultGenerateTextResult = class {
  return this.outputResolver();
  }
  };
- function asReasoningDetails(reasoning) {
- if (reasoning == null) {
+ function asReasoningDetails(content) {
+ const reasoning = content.filter((part) => part.type === "reasoning");
+ if (reasoning.length === 0) {
  return [];
  }
- if (typeof reasoning === "string") {
- return [{ type: "text", text: reasoning }];
+ const result = [];
+ let activeReasoningText;
+ for (const part of reasoning) {
+ if (part.reasoningType === "text") {
+ if (activeReasoningText == null) {
+ activeReasoningText = { type: "text", text: part.text };
+ result.push(activeReasoningText);
+ } else {
+ activeReasoningText.text += part.text;
+ }
+ } else if (part.reasoningType === "signature") {
+ if (activeReasoningText == null) {
+ activeReasoningText = { type: "text", text: "" };
+ result.push(activeReasoningText);
+ }
+ activeReasoningText.signature = part.signature;
+ activeReasoningText = void 0;
+ } else if (part.reasoningType === "redacted") {
+ result.push({ type: "redacted", data: part.data });
+ }
  }
- return reasoning;
+ return result;
  }
- function asFiles(files) {
- var _a17;
- return (_a17 = files == null ? void 0 : files.map((file) => new DefaultGeneratedFile(file))) != null ? _a17 : [];
+ function asFiles(content) {
+ return content.filter((part) => part.type === "file").map((part) => new DefaultGeneratedFile(part));
+ }
+ function asToolCalls(content) {
+ const parts = content.filter(
+ (part) => part.type === "tool-call"
+ );
+ if (parts.length === 0) {
+ return void 0;
+ }
+ return parts.map((toolCall) => ({
+ toolCallType: toolCall.toolCallType,
+ toolCallId: toolCall.toolCallId,
+ toolName: toolCall.toolName,
+ args: toolCall.args
+ }));
  }

  // core/generate-text/output.ts
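Note: this range lands the reasoning rename on the generate-text result: the old `result.reasoning` string becomes `result.reasoningText`, and the old `result.reasoningDetails` array becomes `result.reasoning`; the streamText getters further down make the same swap. The rewritten `asReasoningDetails` also folds v2 reasoning parts, which carry a `reasoningType`, into that array: consecutive text parts merge, a signature closes the active text block, redacted parts pass through. Illustrative input/output:

```js
asReasoningDetails([
  { type: "reasoning", reasoningType: "text", text: "step 1. " },
  { type: "reasoning", reasoningType: "text", text: "step 2." },
  { type: "reasoning", reasoningType: "signature", signature: "sig" },
  { type: "reasoning", reasoningType: "redacted", data: "opaque" },
]);
// => [
//   { type: "text", text: "step 1. step 2.", signature: "sig" },
//   { type: "redacted", data: "opaque" },
// ]
```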
@@ -6166,18 +6275,18 @@ function smoothStream({
  let buffer = "";
  return new TransformStream({
  async transform(chunk, controller) {
- if (chunk.type !== "text-delta") {
+ if (chunk.type !== "text") {
  if (buffer.length > 0) {
- controller.enqueue({ type: "text-delta", textDelta: buffer });
+ controller.enqueue({ type: "text", text: buffer });
  buffer = "";
  }
  controller.enqueue(chunk);
  return;
  }
- buffer += chunk.textDelta;
+ buffer += chunk.text;
  let match;
  while ((match = detectChunk(buffer)) != null) {
- controller.enqueue({ type: "text-delta", textDelta: match });
+ controller.enqueue({ type: "text", text: match });
  buffer = buffer.slice(match.length);
  await delay2(delayInMs);
  }
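Note: `smoothStream` is updated mechanically for the chunk rename; any custom stream transform written against the old shape needs the same treatment. A sketch of a user-level transform in the new shape (names are illustrative):

```js
const upperCaseTransform = () =>
  new TransformStream({
    transform(chunk, controller) {
      controller.enqueue(
        chunk.type === "text" // was: chunk.type === "text-delta"
          ? { ...chunk, text: chunk.text.toUpperCase() } // was: textDelta
          : chunk,
      );
    },
  });
```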
@@ -6187,7 +6296,9 @@ function smoothStream({
  }

  // core/generate-text/stream-text.ts
- import { AISDKError as AISDKError18 } from "@ai-sdk/provider";
+ import {
+ AISDKError as AISDKError18
+ } from "@ai-sdk/provider";
  import { createIdGenerator as createIdGenerator4 } from "@ai-sdk/provider-utils";

  // util/as-array.ts
@@ -6336,10 +6447,9 @@ function runToolsTransformation({
  async transform(chunk, controller) {
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta":
+ case "stream-start":
+ case "text":
  case "reasoning":
- case "reasoning-signature":
- case "redacted-reasoning":
  case "source":
  case "response-metadata":
  case "error": {
@@ -6347,12 +6457,13 @@ function runToolsTransformation({
  break;
  }
  case "file": {
- controller.enqueue(
- new DefaultGeneratedFileWithType({
+ controller.enqueue({
+ type: "file",
+ file: new DefaultGeneratedFileWithType({
  data: chunk.data,
  mediaType: chunk.mediaType
  })
- );
+ });
  break;
  }
  case "tool-call-delta": {
@@ -6591,7 +6702,7 @@ function createOutputTransformStream(output) {
  partialOutput = void 0
  }) {
  controller.enqueue({
- part: { type: "text-delta", textDelta: textChunk },
+ part: { type: "text", text: textChunk },
  partialOutput
  });
  textChunk = "";
@@ -6601,12 +6712,12 @@ function createOutputTransformStream(output) {
  if (chunk.type === "step-finish") {
  publishTextChunk({ controller });
  }
- if (chunk.type !== "text-delta") {
+ if (chunk.type !== "text") {
  controller.enqueue({ part: chunk, partialOutput: void 0 });
  return;
  }
- text2 += chunk.textDelta;
- textChunk += chunk.textDelta;
+ text2 += chunk.text;
+ textChunk += chunk.text;
  const result = output.parsePartial({ text: text2 });
  if (result != null) {
  const currentJson = JSON.stringify(result.partial);
@@ -6701,44 +6812,44 @@ var DefaultStreamTextResult = class {
  async transform(chunk, controller) {
  controller.enqueue(chunk);
  const { part } = chunk;
- if (part.type === "text-delta" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
+ if (part.type === "text" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
  await (onChunk == null ? void 0 : onChunk({ chunk: part }));
  }
  if (part.type === "error") {
  await (onError == null ? void 0 : onError({ error: part.error }));
  }
- if (part.type === "text-delta") {
- recordedStepText += part.textDelta;
- recordedContinuationText += part.textDelta;
- recordedFullText += part.textDelta;
+ if (part.type === "text") {
+ recordedStepText += part.text;
+ recordedContinuationText += part.text;
+ recordedFullText += part.text;
  }
  if (part.type === "reasoning") {
- if (activeReasoningText == null) {
- activeReasoningText = { type: "text", text: part.textDelta };
- stepReasoning.push(activeReasoningText);
- } else {
- activeReasoningText.text += part.textDelta;
- }
- }
- if (part.type === "reasoning-signature") {
- if (activeReasoningText == null) {
- throw new AISDKError18({
- name: "InvalidStreamPart",
- message: "reasoning-signature without reasoning"
- });
+ if (part.reasoningType === "text") {
+ if (activeReasoningText == null) {
+ activeReasoningText = { type: "text", text: part.text };
+ stepReasoning.push(activeReasoningText);
+ } else {
+ activeReasoningText.text += part.text;
+ }
+ } else if (part.reasoningType === "signature") {
+ if (activeReasoningText == null) {
+ throw new AISDKError18({
+ name: "InvalidStreamPart",
+ message: "reasoning-signature without reasoning"
+ });
+ }
+ activeReasoningText.signature = part.signature;
+ activeReasoningText = void 0;
+ } else if (part.reasoningType === "redacted") {
+ stepReasoning.push({ type: "redacted", data: part.data });
  }
- activeReasoningText.signature = part.signature;
- activeReasoningText = void 0;
- }
- if (part.type === "redacted-reasoning") {
- stepReasoning.push({ type: "redacted", data: part.data });
  }
  if (part.type === "file") {
- stepFiles.push(part);
+ stepFiles.push(part.file);
  }
  if (part.type === "source") {
- recordedSources.push(part.source);
- recordedStepSources.push(part.source);
+ recordedSources.push(part);
+ recordedStepSources.push(part);
  }
  if (part.type === "tool-call") {
  recordedToolCalls.push(part);
@@ -6774,8 +6885,8 @@ var DefaultStreamTextResult = class {
  const currentStepResult = {
  stepType,
  text: recordedStepText,
- reasoning: asReasoningText(stepReasoning),
- reasoningDetails: stepReasoning,
+ reasoningText: asReasoningText(stepReasoning),
+ reasoning: stepReasoning,
  files: stepFiles,
  sources: recordedStepSources,
  toolCalls: recordedToolCalls,
@@ -6831,8 +6942,8 @@ var DefaultStreamTextResult = class {
  self.toolCallsPromise.resolve(lastStep.toolCalls);
  self.toolResultsPromise.resolve(lastStep.toolResults);
  self.providerMetadataPromise.resolve(lastStep.providerMetadata);
- self.reasoningPromise.resolve(lastStep.reasoning);
- self.reasoningDetailsPromise.resolve(lastStep.reasoningDetails);
+ self.reasoningPromise.resolve(lastStep.reasoningText);
+ self.reasoningDetailsPromise.resolve(lastStep.reasoning);
  const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
  const usage = recordedUsage != null ? recordedUsage : {
  completionTokens: NaN,
@@ -6850,8 +6961,8 @@ var DefaultStreamTextResult = class {
  logprobs: void 0,
  usage,
  text: recordedFullText,
+ reasoningText: lastStep.reasoningText,
  reasoning: lastStep.reasoning,
- reasoningDetails: lastStep.reasoningDetails,
  files: lastStep.files,
  sources: lastStep.sources,
  toolCalls: lastStep.toolCalls,
@@ -6967,7 +7078,7 @@ var DefaultStreamTextResult = class {
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
  };
  const {
- result: { stream: stream2, warnings, response, request },
+ result: { stream: stream2, response, request },
  doStreamSpan,
  startTimestampMs
  } = await retry(
@@ -7044,6 +7155,7 @@ var DefaultStreamTextResult = class {
  const stepRequest = request != null ? request : {};
  const stepToolCalls = [];
  const stepToolResults = [];
+ let warnings;
  const stepReasoning2 = [];
  const stepFiles2 = [];
  let activeReasoningText2 = void 0;
@@ -7072,16 +7184,20 @@ var DefaultStreamTextResult = class {
  chunk
  }) {
  controller.enqueue(chunk);
- stepText += chunk.textDelta;
- fullStepText += chunk.textDelta;
+ stepText += chunk.text;
+ fullStepText += chunk.text;
  chunkTextPublished = true;
- hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
+ hasWhitespaceSuffix = chunk.text.trimEnd() !== chunk.text;
  }
  self.addStream(
  transformedStream.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
  var _a19, _b, _c;
+ if (chunk.type === "stream-start") {
+ warnings = chunk.warnings;
+ return;
+ }
  if (stepFirstChunk) {
  const msToFirstChunk = now2() - startTimestampMs;
  stepFirstChunk = false;
@@ -7098,14 +7214,14 @@ var DefaultStreamTextResult = class {
  warnings: warnings != null ? warnings : []
  });
  }
- if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
+ if (chunk.type === "text" && chunk.text.length === 0) {
  return;
  }
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta": {
+ case "text": {
  if (continueSteps) {
- const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
+ const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.text.trimStart() : chunk.text;
  if (trimmedChunkText.length === 0) {
  break;
  }
@@ -7117,8 +7233,8 @@ var DefaultStreamTextResult = class {
  await publishTextChunk({
  controller,
  chunk: {
- type: "text-delta",
- textDelta: split.prefix + split.whitespace
+ type: "text",
+ text: split.prefix + split.whitespace
  }
  });
  }
@@ -7129,35 +7245,31 @@ var DefaultStreamTextResult = class {
  }
  case "reasoning": {
  controller.enqueue(chunk);
- if (activeReasoningText2 == null) {
- activeReasoningText2 = {
- type: "text",
- text: chunk.textDelta
- };
- stepReasoning2.push(activeReasoningText2);
- } else {
- activeReasoningText2.text += chunk.textDelta;
- }
- break;
- }
- case "reasoning-signature": {
- controller.enqueue(chunk);
- if (activeReasoningText2 == null) {
- throw new InvalidStreamPartError({
- chunk,
- message: "reasoning-signature without reasoning"
+ if (chunk.reasoningType === "text") {
+ if (activeReasoningText2 == null) {
+ activeReasoningText2 = {
+ type: "text",
+ text: chunk.text
+ };
+ stepReasoning2.push(activeReasoningText2);
+ } else {
+ activeReasoningText2.text += chunk.text;
+ }
+ } else if (chunk.reasoningType === "signature") {
+ if (activeReasoningText2 == null) {
+ throw new InvalidStreamPartError({
+ chunk,
+ message: "reasoning-signature without reasoning"
+ });
+ }
+ activeReasoningText2.signature = chunk.signature;
+ activeReasoningText2 = void 0;
+ } else if (chunk.reasoningType === "redacted") {
+ stepReasoning2.push({
+ type: "redacted",
+ data: chunk.data
  });
  }
- activeReasoningText2.signature = chunk.signature;
- activeReasoningText2 = void 0;
- break;
- }
- case "redacted-reasoning": {
- controller.enqueue(chunk);
- stepReasoning2.push({
- type: "redacted",
- data: chunk.data
- });
  break;
  }
  case "tool-call": {
@@ -7192,7 +7304,7 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "file": {
- stepFiles2.push(chunk);
+ stepFiles2.push(chunk.file);
  controller.enqueue(chunk);
  break;
  }
@@ -7233,10 +7345,7 @@ var DefaultStreamTextResult = class {
  stepType2 === "continue" && !chunkTextPublished)) {
  await publishTextChunk({
  controller,
- chunk: {
- type: "text-delta",
- textDelta: chunkBuffer
- }
+ chunk: { type: "text", text: chunkBuffer }
  });
  chunkBuffer = "";
  }
@@ -7381,10 +7490,10 @@ var DefaultStreamTextResult = class {
  get text() {
  return this.textPromise.value;
  }
- get reasoning() {
+ get reasoningText() {
  return this.reasoningPromise.value;
  }
- get reasoningDetails() {
+ get reasoning() {
  return this.reasoningDetailsPromise.value;
  }
  get sources() {
@@ -7426,8 +7535,8 @@ var DefaultStreamTextResult = class {
  this.teeStream().pipeThrough(
  new TransformStream({
  transform({ part }, controller) {
- if (part.type === "text-delta") {
- controller.enqueue(part.textDelta);
+ if (part.type === "text") {
+ controller.enqueue(part.text);
  }
  }
  })
@@ -7485,52 +7594,45 @@ var DefaultStreamTextResult = class {
  transform: async (chunk, controller) => {
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta": {
- controller.enqueue(formatDataStreamPart("text", chunk.textDelta));
+ case "text": {
+ controller.enqueue(formatDataStreamPart("text", chunk.text));
  break;
  }
  case "reasoning": {
  if (sendReasoning) {
- controller.enqueue(
- formatDataStreamPart("reasoning", chunk.textDelta)
- );
- }
- break;
- }
- case "redacted-reasoning": {
- if (sendReasoning) {
- controller.enqueue(
- formatDataStreamPart("redacted_reasoning", {
- data: chunk.data
- })
- );
- }
- break;
- }
- case "reasoning-signature": {
- if (sendReasoning) {
- controller.enqueue(
- formatDataStreamPart("reasoning_signature", {
- signature: chunk.signature
- })
- );
+ if (chunk.reasoningType === "text") {
+ controller.enqueue(
+ formatDataStreamPart("reasoning", chunk.text)
+ );
+ } else if (chunk.reasoningType === "signature") {
+ controller.enqueue(
+ formatDataStreamPart("reasoning_signature", {
+ signature: chunk.signature
+ })
+ );
+ } else if (chunk.reasoningType === "redacted") {
+ controller.enqueue(
+ formatDataStreamPart("redacted_reasoning", {
+ data: chunk.data
+ })
+ );
+ }
  }
  break;
  }
  case "file": {
  controller.enqueue(
+ // TODO update protocol to v2 or replace with event stream
  formatDataStreamPart("file", {
- mimeType: chunk.mediaType,
- data: chunk.base64
+ mimeType: chunk.file.mediaType,
+ data: chunk.file.base64
  })
  );
  break;
  }
  case "source": {
  if (sendSources) {
- controller.enqueue(
- formatDataStreamPart("source", chunk.source)
- );
+ controller.enqueue(formatDataStreamPart("source", chunk));
  }
  break;
  }
@@ -7724,9 +7826,99 @@ var DefaultStreamTextResult = class {
  }
  };

- // errors/no-transcript-generated-error.ts
+ // errors/no-speech-generated-error.ts
  import { AISDKError as AISDKError19 } from "@ai-sdk/provider";
- var NoTranscriptGeneratedError = class extends AISDKError19 {
+ var NoSpeechGeneratedError = class extends AISDKError19 {
+ constructor(options) {
+ super({
+ name: "AI_NoSpeechGeneratedError",
+ message: "No speech audio generated."
+ });
+ this.responses = options.responses;
+ }
+ };
+
+ // core/generate-speech/generated-audio-file.ts
+ var DefaultGeneratedAudioFile = class extends DefaultGeneratedFile {
+ constructor({
+ data,
+ mediaType
+ }) {
+ super({ data, mediaType });
+ let format = "mp3";
+ if (mediaType) {
+ const mimeTypeParts = mediaType.split("/");
+ if (mimeTypeParts.length === 2) {
+ if (mediaType !== "audio/mpeg") {
+ format = mimeTypeParts[1];
+ }
+ }
+ }
+ if (!format) {
+ throw new Error(
+ "Audio format must be provided or determinable from mimeType"
+ );
+ }
+ this.format = format;
+ }
+ };
+
+ // core/generate-speech/generate-speech.ts
+ async function generateSpeech({
+ model,
+ text: text2,
+ voice,
+ outputFormat,
+ instructions,
+ speed,
+ providerOptions = {},
+ maxRetries: maxRetriesArg,
+ abortSignal,
+ headers
+ }) {
+ var _a17;
+ const { retry } = prepareRetries({ maxRetries: maxRetriesArg });
+ const result = await retry(
+ () => model.doGenerate({
+ text: text2,
+ voice,
+ outputFormat,
+ instructions,
+ speed,
+ abortSignal,
+ headers,
+ providerOptions
+ })
+ );
+ if (!result.audio || result.audio.length === 0) {
+ throw new NoSpeechGeneratedError({ responses: [result.response] });
+ }
+ return new DefaultSpeechResult({
+ audio: new DefaultGeneratedAudioFile({
+ data: result.audio,
+ mediaType: (_a17 = detectMediaType({
+ data: result.audio,
+ signatures: audioMediaTypeSignatures
+ })) != null ? _a17 : "audio/mp3"
+ }),
+ warnings: result.warnings,
+ responses: [result.response],
+ providerMetadata: result.providerMetadata
+ });
+ }
+ var DefaultSpeechResult = class {
+ constructor(options) {
+ var _a17;
+ this.audio = options.audio;
+ this.warnings = options.warnings;
+ this.responses = options.responses;
+ this.providerMetadata = (_a17 = options.providerMetadata) != null ? _a17 : {};
+ }
+ };
+
+ // errors/no-transcript-generated-error.ts
+ import { AISDKError as AISDKError20 } from "@ai-sdk/provider";
+ var NoTranscriptGeneratedError = class extends AISDKError20 {
  constructor(options) {
  super({
  name: "AI_NoTranscriptGeneratedError",
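Note: this hunk introduces speech generation: `NoSpeechGeneratedError`, `DefaultGeneratedAudioFile` (which derives a `format` from the media type, keeping the `"mp3"` default for `audio/mpeg` or unknown types), and `generateSpeech`, exported as `experimental_generateSpeech` in the export hunk at the bottom of this diff. A minimal usage sketch; the model instance and voice id are placeholders, not taken from this diff:

```js
import { experimental_generateSpeech as generateSpeech } from "ai";

const { audio, responses, warnings } = await generateSpeech({
  model: speechModel, // a speech model instance from a provider package
  text: "Hello from the AI SDK!",
  voice: "alloy", // hypothetical voice id
});

console.log(audio.format, audio.mediaType); // e.g. "mp3", "audio/mpeg"
```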
@@ -7873,27 +8065,41 @@ function extractReasoningMiddleware({
  return {
  middlewareVersion: "v2",
  wrapGenerate: async ({ doGenerate }) => {
- const { text: rawText, ...rest } = await doGenerate();
- if (rawText == null) {
- return { text: rawText, ...rest };
- }
- const text2 = startWithReasoning ? openingTag + rawText : rawText;
- const regexp = new RegExp(`${openingTag}(.*?)${closingTag}`, "gs");
- const matches = Array.from(text2.matchAll(regexp));
- if (!matches.length) {
- return { text: text2, ...rest };
- }
- const reasoning = matches.map((match) => match[1]).join(separator);
- let textWithoutReasoning = text2;
- for (let i = matches.length - 1; i >= 0; i--) {
- const match = matches[i];
- const beforeMatch = textWithoutReasoning.slice(0, match.index);
- const afterMatch = textWithoutReasoning.slice(
- match.index + match[0].length
- );
- textWithoutReasoning = beforeMatch + (beforeMatch.length > 0 && afterMatch.length > 0 ? separator : "") + afterMatch;
+ const { content, ...rest } = await doGenerate();
+ const transformedContent = [];
+ for (const part of content) {
+ if (part.type !== "text") {
+ transformedContent.push(part);
+ continue;
+ }
+ const text2 = startWithReasoning ? openingTag + part.text : part.text;
+ const regexp = new RegExp(`${openingTag}(.*?)${closingTag}`, "gs");
+ const matches = Array.from(text2.matchAll(regexp));
+ if (!matches.length) {
+ transformedContent.push(part);
+ continue;
+ }
+ const reasoningText = matches.map((match) => match[1]).join(separator);
+ let textWithoutReasoning = text2;
+ for (let i = matches.length - 1; i >= 0; i--) {
+ const match = matches[i];
+ const beforeMatch = textWithoutReasoning.slice(0, match.index);
+ const afterMatch = textWithoutReasoning.slice(
+ match.index + match[0].length
+ );
+ textWithoutReasoning = beforeMatch + (beforeMatch.length > 0 && afterMatch.length > 0 ? separator : "") + afterMatch;
+ }
+ transformedContent.push({
+ type: "reasoning",
+ reasoningType: "text",
+ text: reasoningText
+ });
+ transformedContent.push({
+ type: "text",
+ text: textWithoutReasoning
+ });
  }
- return { ...rest, text: textWithoutReasoning, reasoning };
+ return { content: transformedContent, ...rest };
  },
  wrapStream: async ({ doStream }) => {
  const { stream, ...rest } = await doStream();
@@ -7906,18 +8112,24 @@ function extractReasoningMiddleware({
  stream: stream.pipeThrough(
  new TransformStream({
  transform: (chunk, controller) => {
- if (chunk.type !== "text-delta") {
+ if (chunk.type !== "text") {
  controller.enqueue(chunk);
  return;
  }
- buffer += chunk.textDelta;
+ buffer += chunk.text;
  function publish(text2) {
  if (text2.length > 0) {
  const prefix = afterSwitch && (isReasoning ? !isFirstReasoning : !isFirstText) ? separator : "";
- controller.enqueue({
- type: isReasoning ? "reasoning" : "text-delta",
- textDelta: prefix + text2
- });
+ controller.enqueue(
+ isReasoning ? {
+ type: "reasoning",
+ reasoningType: "text",
+ text: prefix + text2
+ } : {
+ type: "text",
+ text: prefix + text2
+ }
+ );
  afterSwitch = false;
  if (isReasoning) {
  isFirstReasoning = false;
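Note: `extractReasoningMiddleware` now rewrites the v2 `content` array rather than the flat `text`/`reasoning` fields: each text part containing tagged spans is replaced by a reasoning part (with `reasoningType: "text"`) followed by the remaining text; non-text parts pass through untouched. Illustrative input/output for a `<think>` tag:

```js
// one generated text part...
({ type: "text", text: "<think>check the docs</think>The answer is 42." });
// ...is transformed into two content parts:
// { type: "reasoning", reasoningType: "text", text: "check the docs" }
// { type: "text", text: "The answer is 42." }
```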
@@ -7962,60 +8174,13 @@ function simulateStreamingMiddleware() {
  const result = await doGenerate();
  const simulatedStream = new ReadableStream({
  start(controller) {
+ controller.enqueue({
+ type: "stream-start",
+ warnings: result.warnings
+ });
  controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.reasoning) {
- if (typeof result.reasoning === "string") {
- controller.enqueue({
- type: "reasoning",
- textDelta: result.reasoning
- });
- } else {
- for (const reasoning of result.reasoning) {
- switch (reasoning.type) {
- case "text": {
- controller.enqueue({
- type: "reasoning",
- textDelta: reasoning.text
- });
- if (reasoning.signature != null) {
- controller.enqueue({
- type: "reasoning-signature",
- signature: reasoning.signature
- });
- }
- break;
- }
- case "redacted": {
- controller.enqueue({
- type: "redacted-reasoning",
- data: reasoning.data
- });
- break;
- }
- }
- }
- }
- }
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
+ for (const part of result.content) {
+ controller.enqueue(part);
  }
  controller.enqueue({
  type: "finish",
@@ -8030,8 +8195,7 @@ function simulateStreamingMiddleware() {
  return {
  stream: simulatedStream,
  request: result.request,
- response: result.response,
- warnings: result.warnings
+ response: result.response
  };
  }
  };
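Note: because stream chunks and content parts now share one shape, `simulateStreamingMiddleware` collapses to replaying `result.content` verbatim, and `warnings` moves off the `doStream` result into the leading `stream-start` chunk. The simulated stream thus emits, in order (sketch):

```js
// 1. { type: "stream-start", warnings: result.warnings }
// 2. { type: "response-metadata", ...result.response }
// 3. every part of result.content, as-is (text, reasoning, tool-call, ...)
// 4. { type: "finish", ... }
```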
@@ -8088,7 +8252,6 @@ var doWrap = ({
  }
  };
  };
- var experimental_wrapLanguageModel = wrapLanguageModel;

  // core/prompt/append-client-message.ts
  function appendClientMessage({
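Note: the deprecated alias `experimental_wrapLanguageModel` is removed here (and from the export list at the bottom of this diff); callers should import the stable name instead:

```js
// before (removed in this release):
// import { experimental_wrapLanguageModel as wrapLanguageModel } from "ai";

// after:
import { wrapLanguageModel } from "ai";

const wrapped = wrapLanguageModel({ model: baseModel, middleware });
```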
@@ -8102,7 +8265,7 @@ function appendClientMessage({
  }

  // core/prompt/append-response-messages.ts
- import { AISDKError as AISDKError20 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError21 } from "@ai-sdk/provider";
  function appendResponseMessages({
  messages,
  responseMessages,
@@ -8185,7 +8348,7 @@ function appendResponseMessages({
  break;
  case "file":
  if (part.data instanceof URL) {
- throw new AISDKError20({
+ throw new AISDKError21({
  name: "InvalidAssistantFileData",
  message: "File data cannot be a URL"
  });
@@ -8319,7 +8482,7 @@ function customProvider({
  var experimental_customProvider = customProvider;

  // core/registry/no-such-provider-error.ts
- import { AISDKError as AISDKError21, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError22, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
  var name16 = "AI_NoSuchProviderError";
  var marker16 = `vercel.ai.error.${name16}`;
  var symbol16 = Symbol.for(marker16);
@@ -8338,7 +8501,7 @@ var NoSuchProviderError = class extends NoSuchModelError3 {
  this.availableProviders = availableProviders;
  }
  static isInstance(error) {
- return AISDKError21.hasMarker(error, marker16);
+ return AISDKError22.hasMarker(error, marker16);
  }
  };
  _a16 = symbol16;
@@ -8389,7 +8552,7 @@ var DefaultProviderRegistry = class {
  message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId${this.separator}modelId")`
  });
  }
- return [id.slice(0, index), id.slice(index + 1)];
+ return [id.slice(0, index), id.slice(index + this.separator.length)];
  }
  languageModel(id) {
  var _a17, _b;
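Note: this is a bug fix for provider registries configured with a multi-character separator: `id.slice(index + 1)` skipped only one character, leaving part of the separator attached to the model id. Illustrative, assuming a registry created with a custom separator:

```js
// with a registry separator of " > ":
const id = "anthropic > claude-3-haiku";
const index = id.indexOf(" > ");

id.slice(index + 1); // before: "> claude-3-haiku" (broken)
id.slice(index + " > ".length); // after: "claude-3-haiku"
```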
@@ -8981,7 +9144,7 @@ var MCPClient = class {
  };

  // core/util/cosine-similarity.ts
- function cosineSimilarity(vector1, vector2, options) {
+ function cosineSimilarity(vector1, vector2) {
  if (vector1.length !== vector2.length) {
  throw new InvalidArgumentError({
  parameter: "vector1,vector2",
@@ -8991,13 +9154,6 @@ function cosineSimilarity(vector1, vector2, options) {
  }
  const n = vector1.length;
  if (n === 0) {
- if (options == null ? void 0 : options.throwErrorForEmptyVectors) {
- throw new InvalidArgumentError({
- parameter: "vector1",
- value: vector1,
- message: "Vectors cannot be empty"
- });
- }
  return 0;
  }
  let magnitudeSquared1 = 0;
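Note: `cosineSimilarity` drops its third `options` argument; the `throwErrorForEmptyVectors` escape hatch is gone, so empty vectors now always yield `0`, while mismatched lengths still throw. For example:

```js
cosineSimilarity([1, 0], [0, 1]); // 0
cosineSimilarity([1, 2], [1, 2]); // 1
cosineSimilarity([], []); // 0 (could previously throw when
// { throwErrorForEmptyVectors: true } was passed)
cosineSimilarity([1], [1, 2]); // still throws InvalidArgumentError
```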
@@ -9323,8 +9479,8 @@ export {
  experimental_createProviderRegistry,
  experimental_customProvider,
  generateImage as experimental_generateImage,
+ generateSpeech as experimental_generateSpeech,
  transcribe as experimental_transcribe,
- experimental_wrapLanguageModel,
  extractMaxToolInvocationStep,
  extractReasoningMiddleware,
  fillMessageParts,