ai 5.0.0-canary.7 → 5.0.0-canary.9

This diff compares the published contents of two publicly available package versions as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -85,8 +85,8 @@ __export(ai_exports, {
  experimental_createProviderRegistry: () => experimental_createProviderRegistry,
  experimental_customProvider: () => experimental_customProvider,
  experimental_generateImage: () => generateImage,
+ experimental_generateSpeech: () => generateSpeech,
  experimental_transcribe: () => transcribe,
- experimental_wrapLanguageModel: () => experimental_wrapLanguageModel,
  extractMaxToolInvocationStep: () => extractMaxToolInvocationStep,
  extractReasoningMiddleware: () => extractReasoningMiddleware,
  fillMessageParts: () => fillMessageParts,
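The export-map hunk above shows the two API-surface changes in this release: `experimental_generateSpeech` is added, and the `experimental_wrapLanguageModel` alias is removed (the underlying `wrapLanguageModel` stays available under its stable name; the alias removal itself appears later in this diff). A minimal migration sketch for consumers of the old alias:

```js
// Before (5.0.0-canary.7):
// import { experimental_wrapLanguageModel as wrapLanguageModel } from "ai";

// After (5.0.0-canary.9): import the stable name directly,
// plus the new speech entry point if needed.
import { wrapLanguageModel, experimental_generateSpeech } from "ai";
```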
@@ -2107,6 +2107,7 @@ function selectTelemetryAttributes({
  async function embed({
  model,
  value,
+ providerOptions,
  maxRetries: maxRetriesArg,
  abortSignal,
  headers,
@@ -2132,7 +2133,7 @@ async function embed({
  }),
  tracer,
  fn: async (span) => {
- const { embedding, usage, rawResponse } = await retry(
+ const { embedding, usage, response } = await retry(
  () => (
  // nested spans to align with the embedMany telemetry data:
  recordSpan({
@@ -2155,7 +2156,8 @@ async function embed({
  const modelResponse = await model.doEmbed({
  values: [value],
  abortSignal,
- headers
+ headers,
+ providerOptions
  });
  const embedding2 = modelResponse.embeddings[0];
  const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2175,7 +2177,7 @@ async function embed({
  return {
  embedding: embedding2,
  usage: usage2,
- rawResponse: modelResponse.rawResponse
+ response: modelResponse.response
  };
  }
  })
@@ -2190,7 +2192,12 @@ async function embed({
  }
  })
  );
- return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
+ return new DefaultEmbedResult({
+ value,
+ embedding,
+ usage,
+ response
+ });
  }
  });
  }
@@ -2199,7 +2206,7 @@ var DefaultEmbedResult = class {
  this.value = options.value;
  this.embedding = options.embedding;
  this.usage = options.usage;
- this.rawResponse = options.rawResponse;
+ this.response = options.response;
  }
  };

@@ -2222,6 +2229,7 @@ async function embedMany({
  maxRetries: maxRetriesArg,
  abortSignal,
  headers,
+ providerOptions,
  experimental_telemetry: telemetry
  }) {
  const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
@@ -2249,7 +2257,7 @@ async function embedMany({
  fn: async (span) => {
  const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
  if (maxEmbeddingsPerCall == null) {
- const { embeddings: embeddings2, usage } = await retry(() => {
+ const { embeddings: embeddings2, usage, response } = await retry(() => {
  return recordSpan({
  name: "ai.embedMany.doEmbed",
  attributes: selectTelemetryAttributes({
@@ -2272,7 +2280,8 @@ async function embedMany({
  const modelResponse = await model.doEmbed({
  values,
  abortSignal,
- headers
+ headers,
+ providerOptions
  });
  const embeddings3 = modelResponse.embeddings;
  const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2287,7 +2296,11 @@ async function embedMany({
  }
  })
  );
- return { embeddings: embeddings3, usage: usage2 };
+ return {
+ embeddings: embeddings3,
+ usage: usage2,
+ response: modelResponse.response
+ };
  }
  });
  });
@@ -2302,13 +2315,23 @@ async function embedMany({
  }
  })
  );
- return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
+ return new DefaultEmbedManyResult({
+ values,
+ embeddings: embeddings2,
+ usage,
+ responses: [response]
+ });
  }
  const valueChunks = splitArray(values, maxEmbeddingsPerCall);
  const embeddings = [];
+ const responses = [];
  let tokens = 0;
  for (const chunk of valueChunks) {
- const { embeddings: responseEmbeddings, usage } = await retry(() => {
+ const {
+ embeddings: responseEmbeddings,
+ usage,
+ response
+ } = await retry(() => {
  return recordSpan({
  name: "ai.embedMany.doEmbed",
  attributes: selectTelemetryAttributes({
@@ -2331,7 +2354,8 @@ async function embedMany({
  const modelResponse = await model.doEmbed({
  values: chunk,
  abortSignal,
- headers
+ headers,
+ providerOptions
  });
  const embeddings2 = modelResponse.embeddings;
  const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2346,11 +2370,16 @@ async function embedMany({
  }
  })
  );
- return { embeddings: embeddings2, usage: usage2 };
+ return {
+ embeddings: embeddings2,
+ usage: usage2,
+ response: modelResponse.response
+ };
  }
  });
  });
  embeddings.push(...responseEmbeddings);
+ responses.push(response);
  tokens += usage.tokens;
  }
  span.setAttributes(
@@ -2367,7 +2396,8 @@ async function embedMany({
  return new DefaultEmbedManyResult({
  values,
  embeddings,
- usage: { tokens }
+ usage: { tokens },
+ responses
  });
  }
  });
@@ -2377,6 +2407,7 @@ var DefaultEmbedManyResult = class {
  this.values = options.values;
  this.embeddings = options.embeddings;
  this.usage = options.usage;
+ this.responses = options.responses;
  }
  };

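Taken together, the embedding hunks above make two caller-visible changes: `embed` and `embedMany` now accept a `providerOptions` object that is forwarded to every `doEmbed` call, and the raw provider response is exposed as `response` (`responses` on `embedMany`, one entry per underlying call) in place of the removed `rawResponse`. A minimal sketch of the new call shape; the model instance and the provider option name are placeholders, not part of this diff:

```js
import { embed } from "ai";

const result = await embed({
  model: myProvider.textEmbeddingModel("my-embedding-model"), // placeholder
  value: "sunny day at the beach",
  providerOptions: { myProvider: { someOption: true } }, // hypothetical option, forwarded to doEmbed
});

console.log(result.embedding); // the embedding vector
console.log(result.response); // raw provider response (previously `rawResponse`)
```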
@@ -4162,6 +4193,17 @@ function validateObjectGenerationInput({
  }
  }

+ // core/generate-text/extract-content-text.ts
+ function extractContentText(content) {
+ const parts = content.filter(
+ (content2) => content2.type === "text"
+ );
+ if (parts.length === 0) {
+ return void 0;
+ }
+ return parts.map((content2) => content2.text).join("");
+ }
+
  // core/generate-object/generate-object.ts
  var originalGenerateId = (0, import_provider_utils11.createIdGenerator)({ prefix: "aiobj", size: 24 });
  async function generateObject({
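The new `extractContentText` helper reflects the language-model V2 result shape used throughout the rest of this diff: `doGenerate` no longer returns a top-level `text` string but an ordered `content` array of typed parts, and the text is recovered by concatenating the `text` parts (or `undefined` if there are none). An illustration with a hypothetical content array:

```js
// Hypothetical V2 content array, as doGenerate now returns:
const content = [
  { type: "reasoning", reasoningType: "text", text: "thinking..." },
  { type: "text", text: "Hello" },
  { type: "text", text: ", world" },
];

extractContentText(content); // => "Hello, world" (non-text parts are skipped)
extractContentText([]); // => undefined (no text parts)
```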
@@ -4319,7 +4361,8 @@ async function generateObject({
  headers: (_g = result2.response) == null ? void 0 : _g.headers,
  body: (_h = result2.response) == null ? void 0 : _h.body
  };
- if (result2.text === void 0) {
+ const text2 = extractContentText(result2.content);
+ if (text2 === void 0) {
  throw new NoObjectGeneratedError({
  message: "No object generated: the model did not return a response.",
  response: responseData,
@@ -4332,7 +4375,7 @@ async function generateObject({
  telemetry,
  attributes: {
  "ai.response.finishReason": result2.finishReason,
- "ai.response.object": { output: () => result2.text },
+ "ai.response.object": { output: () => text2 },
  "ai.response.id": responseData.id,
  "ai.response.model": responseData.modelId,
  "ai.response.timestamp": responseData.timestamp.toISOString(),
@@ -4348,7 +4391,7 @@ async function generateObject({
  }
  })
  );
- return { ...result2, objectText: result2.text, responseData };
+ return { ...result2, objectText: text2, responseData };
  }
  })
  );
@@ -4405,7 +4448,7 @@ async function generateObject({
  }),
  tracer,
  fn: async (span2) => {
- var _a18, _b2, _c2, _d2, _e, _f, _g, _h, _i, _j;
+ var _a18, _b2, _c2, _d2, _e, _f, _g, _h;
  const result2 = await model.doGenerate({
  tools: [
  {
@@ -4423,13 +4466,16 @@ async function generateObject({
  abortSignal,
  headers
  });
- const objectText = (_b2 = (_a18 = result2.toolCalls) == null ? void 0 : _a18[0]) == null ? void 0 : _b2.args;
+ const firstToolCall = result2.content.find(
+ (content) => content.type === "tool-call"
+ );
+ const objectText = firstToolCall == null ? void 0 : firstToolCall.args;
  const responseData = {
- id: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.id) != null ? _d2 : generateId3(),
- timestamp: (_f = (_e = result2.response) == null ? void 0 : _e.timestamp) != null ? _f : currentDate(),
- modelId: (_h = (_g = result2.response) == null ? void 0 : _g.modelId) != null ? _h : model.modelId,
- headers: (_i = result2.response) == null ? void 0 : _i.headers,
- body: (_j = result2.response) == null ? void 0 : _j.body
+ id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
+ timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+ modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+ headers: (_g = result2.response) == null ? void 0 : _g.headers,
+ body: (_h = result2.response) == null ? void 0 : _h.body
  };
  if (objectText === void 0) {
  throw new NoObjectGeneratedError({
@@ -4899,8 +4945,8 @@ var DefaultStreamObjectResult = class {
  transformer = {
  transform: (chunk, controller) => {
  switch (chunk.type) {
- case "text-delta":
- controller.enqueue(chunk.textDelta);
+ case "text":
+ controller.enqueue(chunk.text);
  break;
  case "response-metadata":
  case "finish":
@@ -4966,7 +5012,7 @@ var DefaultStreamObjectResult = class {
  }
  }
  const {
- result: { stream, warnings, response, request },
+ result: { stream, response, request },
  doStreamSpan,
  startTimestampMs
  } = await retry(
@@ -5008,6 +5054,7 @@ var DefaultStreamObjectResult = class {
  })
  );
  self.requestPromise.resolve(request != null ? request : {});
+ let warnings;
  let usage;
  let finishReason;
  let providerMetadata;
@@ -5028,6 +5075,10 @@ var DefaultStreamObjectResult = class {
  new TransformStream({
  async transform(chunk, controller) {
  var _a18, _b2, _c;
+ if (typeof chunk === "object" && chunk.type === "stream-start") {
+ warnings = chunk.warnings;
+ return;
+ }
  if (isFirstChunk) {
  const msToFirstChunk = now2() - startTimestampMs;
  isFirstChunk = false;
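This hunk is one side of a protocol change that recurs below: `doStream` no longer returns `warnings` next to the stream; instead the stream itself opens with a `stream-start` chunk that carries them, which this transform captures and swallows. The chunk shape, as consumed here:

```js
// First chunk of a V2 stream (captured above, not forwarded downstream):
const streamStartChunk = {
  type: "stream-start",
  warnings: [], // call warnings that previously came back beside the stream
};
```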
@@ -5551,7 +5602,7 @@ async function doParseToolCall({
  };
  }

- // core/generate-text/reasoning-detail.ts
+ // core/generate-text/reasoning.ts
  function asReasoningText(reasoning) {
  const reasoningText = reasoning.filter((part) => part.type === "text").map((part) => part.text).join("");
  return reasoningText.length > 0 ? reasoningText : void 0;
@@ -5569,23 +5620,36 @@ function toResponseMessages({
  generateMessageId
  }) {
  const responseMessages = [];
- responseMessages.push({
- role: "assistant",
- content: [
+ const content = [];
+ if (reasoning.length > 0) {
+ content.push(
  ...reasoning.map(
  (part) => part.type === "text" ? { ...part, type: "reasoning" } : { ...part, type: "redacted-reasoning" }
- ),
- // TODO language model v2: switch to order response content (instead of type-based ordering)
+ )
+ );
+ }
+ if (files.length > 0) {
+ content.push(
  ...files.map((file) => ({
  type: "file",
  data: file.base64,
  mediaType: file.mediaType
- })),
- { type: "text", text: text2 },
- ...toolCalls
- ],
- id: messageId
- });
+ }))
+ );
+ }
+ if (text2.length > 0) {
+ content.push({ type: "text", text: text2 });
+ }
+ if (toolCalls.length > 0) {
+ content.push(...toolCalls);
+ }
+ if (content.length > 0) {
+ responseMessages.push({
+ role: "assistant",
+ content,
+ id: messageId
+ });
+ }
  if (toolResults.length > 0) {
  responseMessages.push({
  role: "tool",
@@ -5689,7 +5753,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span) => {
- var _a18, _b, _c, _d, _e, _f, _g;
+ var _a18, _b, _c, _d;
  const toolsAndToolChoice = {
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
  };
@@ -5764,7 +5828,7 @@ async function generateText({
  }),
  tracer,
  fn: async (span2) => {
- var _a19, _b2, _c2, _d2, _e2, _f2, _g2, _h;
+ var _a19, _b2, _c2, _d2, _e, _f, _g, _h;
  const result = await model.doGenerate({
  ...callSettings,
  ...toolsAndToolChoice,
@@ -5778,8 +5842,8 @@ async function generateText({
  const responseData = {
  id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
- modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId,
- headers: (_g2 = result.response) == null ? void 0 : _g2.headers,
+ modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+ headers: (_g = result.response) == null ? void 0 : _g.headers,
  body: (_h = result.response) == null ? void 0 : _h.body
  };
  span2.setAttributes(
@@ -5788,10 +5852,13 @@ async function generateText({
  attributes: {
  "ai.response.finishReason": result.finishReason,
  "ai.response.text": {
- output: () => result.text
+ output: () => extractContentText(result.content)
  },
  "ai.response.toolCalls": {
- output: () => JSON.stringify(result.toolCalls)
+ output: () => {
+ const toolCalls = asToolCalls(result.content);
+ return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+ }
  },
  "ai.response.id": responseData.id,
  "ai.response.model": responseData.modelId,
@@ -5813,7 +5880,9 @@ async function generateText({
  })
  );
  currentToolCalls = await Promise.all(
- ((_b = currentModelResponse.toolCalls) != null ? _b : []).map(
+ currentModelResponse.content.filter(
+ (part) => part.type === "tool-call"
+ ).map(
  (toolCall) => parseToolCall({
  toolCall,
  tools,
@@ -5848,15 +5917,19 @@ async function generateText({
  nextStepType = "tool-result";
  }
  }
- const originalText = (_c = currentModelResponse.text) != null ? _c : "";
+ const originalText = (_b = extractContentText(currentModelResponse.content)) != null ? _b : "";
  const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
  text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
  const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
  text2 = nextStepType === "continue" || stepType === "continue" ? text2 + stepText : stepText;
  currentReasoningDetails = asReasoningDetails(
- currentModelResponse.reasoning
+ currentModelResponse.content
+ );
+ sources.push(
+ ...currentModelResponse.content.filter(
+ (part) => part.type === "source"
+ )
  );
- sources.push(...(_d = currentModelResponse.sources) != null ? _d : []);
  if (stepType === "continue") {
  const lastMessage = responseMessages[responseMessages.length - 1];
  if (typeof lastMessage.content === "string") {
@@ -5871,8 +5944,8 @@ async function generateText({
  responseMessages.push(
  ...toResponseMessages({
  text: text2,
- files: asFiles(currentModelResponse.files),
- reasoning: asReasoningDetails(currentModelResponse.reasoning),
+ files: asFiles(currentModelResponse.content),
+ reasoning: asReasoningDetails(currentModelResponse.content),
  tools: tools != null ? tools : {},
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
@@ -5884,18 +5957,19 @@ async function generateText({
  const currentStepResult = {
  stepType,
  text: stepText,
- // TODO v5: rename reasoning to reasoningText (and use reasoning for composite array)
- reasoning: asReasoningText(currentReasoningDetails),
- reasoningDetails: currentReasoningDetails,
- files: asFiles(currentModelResponse.files),
- sources: (_e = currentModelResponse.sources) != null ? _e : [],
+ reasoningText: asReasoningText(currentReasoningDetails),
+ reasoning: currentReasoningDetails,
+ files: asFiles(currentModelResponse.content),
+ sources: currentModelResponse.content.filter(
+ (part) => part.type === "source"
+ ),
  toolCalls: currentToolCalls,
  toolResults: currentToolResults,
  finishReason: currentModelResponse.finishReason,
  usage: currentUsage,
  warnings: currentModelResponse.warnings,
  logprobs: currentModelResponse.logprobs,
- request: (_f = currentModelResponse.request) != null ? _f : {},
+ request: (_c = currentModelResponse.request) != null ? _c : {},
  response: {
  ...currentModelResponse.response,
  // deep clone msgs to avoid mutating past messages in multi-step:
@@ -5914,10 +5988,13 @@ async function generateText({
  attributes: {
  "ai.response.finishReason": currentModelResponse.finishReason,
  "ai.response.text": {
- output: () => currentModelResponse.text
+ output: () => extractContentText(currentModelResponse.content)
  },
  "ai.response.toolCalls": {
- output: () => JSON.stringify(currentModelResponse.toolCalls)
+ output: () => {
+ const toolCalls = asToolCalls(currentModelResponse.content);
+ return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+ }
  },
  // TODO rename telemetry attributes to inputTokens and outputTokens
  "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
@@ -5927,7 +6004,7 @@ async function generateText({
  );
  return new DefaultGenerateTextResult({
  text: text2,
- files: asFiles(currentModelResponse.files),
+ files: asFiles(currentModelResponse.content),
  reasoning: asReasoningText(currentReasoningDetails),
  reasoningDetails: currentReasoningDetails,
  sources,
@@ -5949,7 +6026,7 @@ async function generateText({
  finishReason: currentModelResponse.finishReason,
  usage,
  warnings: currentModelResponse.warnings,
- request: (_g = currentModelResponse.request) != null ? _g : {},
+ request: (_d = currentModelResponse.request) != null ? _d : {},
  response: {
  ...currentModelResponse.response,
  messages: responseMessages
@@ -6040,8 +6117,8 @@ var DefaultGenerateTextResult = class {
  constructor(options) {
  this.text = options.text;
  this.files = options.files;
- this.reasoning = options.reasoning;
- this.reasoningDetails = options.reasoningDetails;
+ this.reasoningText = options.reasoning;
+ this.reasoning = options.reasoningDetails;
  this.toolCalls = options.toolCalls;
  this.toolResults = options.toolResults;
  this.finishReason = options.finishReason;
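The constructor swap above is the caller-visible half of the reasoning rename: on `generateText` (and, later in this diff, `streamText`) results, `reasoningText` is now the concatenated reasoning string (previously `reasoning`), and `reasoning` is now the structured detail array (previously `reasoningDetails`). A migration sketch, assuming a reasoning-capable model (placeholder):

```js
import { generateText } from "ai";

const result = await generateText({
  model: myReasoningModel, // placeholder
  prompt: "Why is the sky blue?",
});

// Before: result.reasoning was a string, result.reasoningDetails the detail array.
console.log(result.reasoningText); // concatenated reasoning text, or undefined
console.log(result.reasoning); // array of { type: "text" | "redacted", ... } parts
```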
@@ -6059,18 +6136,50 @@ var DefaultGenerateTextResult = class {
  return this.outputResolver();
  }
  };
- function asReasoningDetails(reasoning) {
- if (reasoning == null) {
+ function asReasoningDetails(content) {
+ const reasoning = content.filter((part) => part.type === "reasoning");
+ if (reasoning.length === 0) {
  return [];
  }
- if (typeof reasoning === "string") {
- return [{ type: "text", text: reasoning }];
+ const result = [];
+ let activeReasoningText;
+ for (const part of reasoning) {
+ if (part.reasoningType === "text") {
+ if (activeReasoningText == null) {
+ activeReasoningText = { type: "text", text: part.text };
+ result.push(activeReasoningText);
+ } else {
+ activeReasoningText.text += part.text;
+ }
+ } else if (part.reasoningType === "signature") {
+ if (activeReasoningText == null) {
+ activeReasoningText = { type: "text", text: "" };
+ result.push(activeReasoningText);
+ }
+ activeReasoningText.signature = part.signature;
+ activeReasoningText = void 0;
+ } else if (part.reasoningType === "redacted") {
+ result.push({ type: "redacted", data: part.data });
+ }
  }
- return reasoning;
+ return result;
  }
- function asFiles(files) {
- var _a17;
- return (_a17 = files == null ? void 0 : files.map((file) => new DefaultGeneratedFile(file))) != null ? _a17 : [];
+ function asFiles(content) {
+ return content.filter((part) => part.type === "file").map((part) => new DefaultGeneratedFile(part));
+ }
+ function asToolCalls(content) {
+ const parts = content.filter(
+ (part) => part.type === "tool-call"
+ );
+ if (parts.length === 0) {
+ return void 0;
+ }
+ return parts.map((toolCall) => ({
+ toolCallType: toolCall.toolCallType,
+ toolCallId: toolCall.toolCallId,
+ toolName: toolCall.toolName,
+ args: toolCall.args
+ }));
  }

  // core/generate-text/output.ts
@@ -6254,18 +6363,18 @@ function smoothStream({
  let buffer = "";
  return new TransformStream({
  async transform(chunk, controller) {
- if (chunk.type !== "text-delta") {
+ if (chunk.type !== "text") {
  if (buffer.length > 0) {
- controller.enqueue({ type: "text-delta", textDelta: buffer });
+ controller.enqueue({ type: "text", text: buffer });
  buffer = "";
  }
  controller.enqueue(chunk);
  return;
  }
- buffer += chunk.textDelta;
+ buffer += chunk.text;
  let match;
  while ((match = detectChunk(buffer)) != null) {
- controller.enqueue({ type: "text-delta", textDelta: match });
+ controller.enqueue({ type: "text", text: match });
  buffer = buffer.slice(match.length);
  await delay2(delayInMs);
  }
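`smoothStream` is one of many call sites in this diff switched from the old `text-delta` stream part to the renamed `text` part, whose payload field moves from `textDelta` to `text`. For consumers iterating a `streamText` full stream, the corresponding change looks roughly like this:

```js
for await (const part of result.fullStream) {
  if (part.type === "text") { // previously "text-delta"
    process.stdout.write(part.text); // previously part.textDelta
  }
}
```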
@@ -6424,10 +6533,9 @@ function runToolsTransformation({
  async transform(chunk, controller) {
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta":
+ case "stream-start":
+ case "text":
  case "reasoning":
- case "reasoning-signature":
- case "redacted-reasoning":
  case "source":
  case "response-metadata":
  case "error": {
@@ -6435,12 +6543,13 @@ function runToolsTransformation({
  break;
  }
  case "file": {
- controller.enqueue(
- new DefaultGeneratedFileWithType({
+ controller.enqueue({
+ type: "file",
+ file: new DefaultGeneratedFileWithType({
  data: chunk.data,
  mediaType: chunk.mediaType
  })
- );
+ });
  break;
  }
  case "tool-call-delta": {
@@ -6679,7 +6788,7 @@ function createOutputTransformStream(output) {
  partialOutput = void 0
  }) {
  controller.enqueue({
- part: { type: "text-delta", textDelta: textChunk },
+ part: { type: "text", text: textChunk },
  partialOutput
  });
  textChunk = "";
@@ -6689,12 +6798,12 @@ function createOutputTransformStream(output) {
  if (chunk.type === "step-finish") {
  publishTextChunk({ controller });
  }
- if (chunk.type !== "text-delta") {
+ if (chunk.type !== "text") {
  controller.enqueue({ part: chunk, partialOutput: void 0 });
  return;
  }
- text2 += chunk.textDelta;
- textChunk += chunk.textDelta;
+ text2 += chunk.text;
+ textChunk += chunk.text;
  const result = output.parsePartial({ text: text2 });
  if (result != null) {
  const currentJson = JSON.stringify(result.partial);
@@ -6789,44 +6898,44 @@ var DefaultStreamTextResult = class {
  async transform(chunk, controller) {
  controller.enqueue(chunk);
  const { part } = chunk;
- if (part.type === "text-delta" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
+ if (part.type === "text" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
  await (onChunk == null ? void 0 : onChunk({ chunk: part }));
  }
  if (part.type === "error") {
  await (onError == null ? void 0 : onError({ error: part.error }));
  }
- if (part.type === "text-delta") {
- recordedStepText += part.textDelta;
- recordedContinuationText += part.textDelta;
- recordedFullText += part.textDelta;
+ if (part.type === "text") {
+ recordedStepText += part.text;
+ recordedContinuationText += part.text;
+ recordedFullText += part.text;
  }
  if (part.type === "reasoning") {
- if (activeReasoningText == null) {
- activeReasoningText = { type: "text", text: part.textDelta };
- stepReasoning.push(activeReasoningText);
- } else {
- activeReasoningText.text += part.textDelta;
- }
- }
- if (part.type === "reasoning-signature") {
- if (activeReasoningText == null) {
- throw new import_provider23.AISDKError({
- name: "InvalidStreamPart",
- message: "reasoning-signature without reasoning"
- });
+ if (part.reasoningType === "text") {
+ if (activeReasoningText == null) {
+ activeReasoningText = { type: "text", text: part.text };
+ stepReasoning.push(activeReasoningText);
+ } else {
+ activeReasoningText.text += part.text;
+ }
+ } else if (part.reasoningType === "signature") {
+ if (activeReasoningText == null) {
+ throw new import_provider23.AISDKError({
+ name: "InvalidStreamPart",
+ message: "reasoning-signature without reasoning"
+ });
+ }
+ activeReasoningText.signature = part.signature;
+ activeReasoningText = void 0;
+ } else if (part.reasoningType === "redacted") {
+ stepReasoning.push({ type: "redacted", data: part.data });
  }
- activeReasoningText.signature = part.signature;
- activeReasoningText = void 0;
- }
- if (part.type === "redacted-reasoning") {
- stepReasoning.push({ type: "redacted", data: part.data });
  }
  if (part.type === "file") {
- stepFiles.push(part);
+ stepFiles.push(part.file);
  }
  if (part.type === "source") {
- recordedSources.push(part.source);
- recordedStepSources.push(part.source);
+ recordedSources.push(part);
+ recordedStepSources.push(part);
  }
  if (part.type === "tool-call") {
  recordedToolCalls.push(part);
@@ -6862,8 +6971,8 @@ var DefaultStreamTextResult = class {
  const currentStepResult = {
  stepType,
  text: recordedStepText,
- reasoning: asReasoningText(stepReasoning),
- reasoningDetails: stepReasoning,
+ reasoningText: asReasoningText(stepReasoning),
+ reasoning: stepReasoning,
  files: stepFiles,
  sources: recordedStepSources,
  toolCalls: recordedToolCalls,
@@ -6919,8 +7028,8 @@ var DefaultStreamTextResult = class {
  self.toolCallsPromise.resolve(lastStep.toolCalls);
  self.toolResultsPromise.resolve(lastStep.toolResults);
  self.providerMetadataPromise.resolve(lastStep.providerMetadata);
- self.reasoningPromise.resolve(lastStep.reasoning);
- self.reasoningDetailsPromise.resolve(lastStep.reasoningDetails);
+ self.reasoningPromise.resolve(lastStep.reasoningText);
+ self.reasoningDetailsPromise.resolve(lastStep.reasoning);
  const finishReason = recordedFinishReason != null ? recordedFinishReason : "unknown";
  const usage = recordedUsage != null ? recordedUsage : {
  completionTokens: NaN,
@@ -6938,8 +7047,8 @@ var DefaultStreamTextResult = class {
  logprobs: void 0,
  usage,
  text: recordedFullText,
+ reasoningText: lastStep.reasoningText,
  reasoning: lastStep.reasoning,
- reasoningDetails: lastStep.reasoningDetails,
  files: lastStep.files,
  sources: lastStep.sources,
  toolCalls: lastStep.toolCalls,
@@ -7055,7 +7164,7 @@ var DefaultStreamTextResult = class {
  ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
  };
  const {
- result: { stream: stream2, warnings, response, request },
+ result: { stream: stream2, response, request },
  doStreamSpan,
  startTimestampMs
  } = await retry(
@@ -7132,6 +7241,7 @@ var DefaultStreamTextResult = class {
  const stepRequest = request != null ? request : {};
  const stepToolCalls = [];
  const stepToolResults = [];
+ let warnings;
  const stepReasoning2 = [];
  const stepFiles2 = [];
  let activeReasoningText2 = void 0;
@@ -7160,16 +7270,20 @@ var DefaultStreamTextResult = class {
  chunk
  }) {
  controller.enqueue(chunk);
- stepText += chunk.textDelta;
- fullStepText += chunk.textDelta;
+ stepText += chunk.text;
+ fullStepText += chunk.text;
  chunkTextPublished = true;
- hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
+ hasWhitespaceSuffix = chunk.text.trimEnd() !== chunk.text;
  }
  self.addStream(
  transformedStream.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
  var _a19, _b, _c;
+ if (chunk.type === "stream-start") {
+ warnings = chunk.warnings;
+ return;
+ }
  if (stepFirstChunk) {
  const msToFirstChunk = now2() - startTimestampMs;
  stepFirstChunk = false;
@@ -7186,14 +7300,14 @@ var DefaultStreamTextResult = class {
  warnings: warnings != null ? warnings : []
  });
  }
- if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
+ if (chunk.type === "text" && chunk.text.length === 0) {
  return;
  }
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta": {
+ case "text": {
  if (continueSteps) {
- const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
+ const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.text.trimStart() : chunk.text;
  if (trimmedChunkText.length === 0) {
  break;
  }
@@ -7205,8 +7319,8 @@ var DefaultStreamTextResult = class {
  await publishTextChunk({
  controller,
  chunk: {
- type: "text-delta",
- textDelta: split.prefix + split.whitespace
+ type: "text",
+ text: split.prefix + split.whitespace
  }
  });
  }
@@ -7217,35 +7331,31 @@ var DefaultStreamTextResult = class {
  }
  case "reasoning": {
  controller.enqueue(chunk);
- if (activeReasoningText2 == null) {
- activeReasoningText2 = {
- type: "text",
- text: chunk.textDelta
- };
- stepReasoning2.push(activeReasoningText2);
- } else {
- activeReasoningText2.text += chunk.textDelta;
- }
- break;
- }
- case "reasoning-signature": {
- controller.enqueue(chunk);
- if (activeReasoningText2 == null) {
- throw new InvalidStreamPartError({
- chunk,
- message: "reasoning-signature without reasoning"
+ if (chunk.reasoningType === "text") {
+ if (activeReasoningText2 == null) {
+ activeReasoningText2 = {
+ type: "text",
+ text: chunk.text
+ };
+ stepReasoning2.push(activeReasoningText2);
+ } else {
+ activeReasoningText2.text += chunk.text;
+ }
+ } else if (chunk.reasoningType === "signature") {
+ if (activeReasoningText2 == null) {
+ throw new InvalidStreamPartError({
+ chunk,
+ message: "reasoning-signature without reasoning"
+ });
+ }
+ activeReasoningText2.signature = chunk.signature;
+ activeReasoningText2 = void 0;
+ } else if (chunk.reasoningType === "redacted") {
+ stepReasoning2.push({
+ type: "redacted",
+ data: chunk.data
  });
  }
- activeReasoningText2.signature = chunk.signature;
- activeReasoningText2 = void 0;
- break;
- }
- case "redacted-reasoning": {
- controller.enqueue(chunk);
- stepReasoning2.push({
- type: "redacted",
- data: chunk.data
- });
  break;
  }
  case "tool-call": {
@@ -7280,7 +7390,7 @@ var DefaultStreamTextResult = class {
  break;
  }
  case "file": {
- stepFiles2.push(chunk);
+ stepFiles2.push(chunk.file);
  controller.enqueue(chunk);
  break;
  }
@@ -7321,10 +7431,7 @@ var DefaultStreamTextResult = class {
  stepType2 === "continue" && !chunkTextPublished)) {
  await publishTextChunk({
  controller,
- chunk: {
- type: "text-delta",
- textDelta: chunkBuffer
- }
+ chunk: { type: "text", text: chunkBuffer }
  });
  chunkBuffer = "";
  }
@@ -7469,10 +7576,10 @@ var DefaultStreamTextResult = class {
  get text() {
  return this.textPromise.value;
  }
- get reasoning() {
+ get reasoningText() {
  return this.reasoningPromise.value;
  }
- get reasoningDetails() {
+ get reasoning() {
  return this.reasoningDetailsPromise.value;
  }
  get sources() {
@@ -7514,8 +7621,8 @@ var DefaultStreamTextResult = class {
  this.teeStream().pipeThrough(
  new TransformStream({
  transform({ part }, controller) {
- if (part.type === "text-delta") {
- controller.enqueue(part.textDelta);
+ if (part.type === "text") {
+ controller.enqueue(part.text);
  }
  }
  })
@@ -7573,52 +7680,45 @@ var DefaultStreamTextResult = class {
  transform: async (chunk, controller) => {
  const chunkType = chunk.type;
  switch (chunkType) {
- case "text-delta": {
- controller.enqueue(formatDataStreamPart("text", chunk.textDelta));
+ case "text": {
+ controller.enqueue(formatDataStreamPart("text", chunk.text));
  break;
  }
  case "reasoning": {
  if (sendReasoning) {
- controller.enqueue(
- formatDataStreamPart("reasoning", chunk.textDelta)
- );
- }
- break;
- }
- case "redacted-reasoning": {
- if (sendReasoning) {
- controller.enqueue(
- formatDataStreamPart("redacted_reasoning", {
- data: chunk.data
- })
- );
- }
- break;
- }
- case "reasoning-signature": {
- if (sendReasoning) {
- controller.enqueue(
- formatDataStreamPart("reasoning_signature", {
- signature: chunk.signature
- })
- );
+ if (chunk.reasoningType === "text") {
+ controller.enqueue(
+ formatDataStreamPart("reasoning", chunk.text)
+ );
+ } else if (chunk.reasoningType === "signature") {
+ controller.enqueue(
+ formatDataStreamPart("reasoning_signature", {
+ signature: chunk.signature
+ })
+ );
+ } else if (chunk.reasoningType === "redacted") {
+ controller.enqueue(
+ formatDataStreamPart("redacted_reasoning", {
+ data: chunk.data
+ })
+ );
+ }
  }
  break;
  }
  case "file": {
  controller.enqueue(
+ // TODO update protocol to v2 or replace with event stream
  formatDataStreamPart("file", {
- mimeType: chunk.mediaType,
- data: chunk.base64
+ mimeType: chunk.file.mediaType,
+ data: chunk.file.base64
  })
  );
  break;
  }
  case "source": {
  if (sendSources) {
- controller.enqueue(
- formatDataStreamPart("source", chunk.source)
- );
+ controller.enqueue(formatDataStreamPart("source", chunk));
  }
  break;
  }
@@ -7812,9 +7912,99 @@ var DefaultStreamTextResult = class {
  }
  };

- // errors/no-transcript-generated-error.ts
+ // errors/no-speech-generated-error.ts
  var import_provider24 = require("@ai-sdk/provider");
- var NoTranscriptGeneratedError = class extends import_provider24.AISDKError {
+ var NoSpeechGeneratedError = class extends import_provider24.AISDKError {
+ constructor(options) {
+ super({
+ name: "AI_NoSpeechGeneratedError",
+ message: "No speech audio generated."
+ });
+ this.responses = options.responses;
+ }
+ };
+
+ // core/generate-speech/generated-audio-file.ts
+ var DefaultGeneratedAudioFile = class extends DefaultGeneratedFile {
+ constructor({
+ data,
+ mediaType
+ }) {
+ super({ data, mediaType });
+ let format = "mp3";
+ if (mediaType) {
+ const mimeTypeParts = mediaType.split("/");
+ if (mimeTypeParts.length === 2) {
+ if (mediaType !== "audio/mpeg") {
+ format = mimeTypeParts[1];
+ }
+ }
+ }
+ if (!format) {
+ throw new Error(
+ "Audio format must be provided or determinable from mimeType"
+ );
+ }
+ this.format = format;
+ }
+ };
+
+ // core/generate-speech/generate-speech.ts
+ async function generateSpeech({
+ model,
+ text: text2,
+ voice,
+ outputFormat,
+ instructions,
+ speed,
+ providerOptions = {},
+ maxRetries: maxRetriesArg,
+ abortSignal,
+ headers
+ }) {
+ var _a17;
+ const { retry } = prepareRetries({ maxRetries: maxRetriesArg });
+ const result = await retry(
+ () => model.doGenerate({
+ text: text2,
+ voice,
+ outputFormat,
+ instructions,
+ speed,
+ abortSignal,
+ headers,
+ providerOptions
+ })
+ );
+ if (!result.audio || result.audio.length === 0) {
+ throw new NoSpeechGeneratedError({ responses: [result.response] });
+ }
+ return new DefaultSpeechResult({
+ audio: new DefaultGeneratedAudioFile({
+ data: result.audio,
+ mediaType: (_a17 = detectMediaType({
+ data: result.audio,
+ signatures: audioMediaTypeSignatures
+ })) != null ? _a17 : "audio/mp3"
+ }),
+ warnings: result.warnings,
+ responses: [result.response],
+ providerMetadata: result.providerMetadata
+ });
+ }
+ var DefaultSpeechResult = class {
+ constructor(options) {
+ var _a17;
+ this.audio = options.audio;
+ this.warnings = options.warnings;
+ this.responses = options.responses;
+ this.providerMetadata = (_a17 = options.providerMetadata) != null ? _a17 : {};
+ }
+ };
+
+ // errors/no-transcript-generated-error.ts
+ var import_provider25 = require("@ai-sdk/provider");
+ var NoTranscriptGeneratedError = class extends import_provider25.AISDKError {
  constructor(options) {
  super({
  name: "AI_NoTranscriptGeneratedError",
@@ -7961,27 +8151,41 @@ function extractReasoningMiddleware({
  return {
  middlewareVersion: "v2",
  wrapGenerate: async ({ doGenerate }) => {
- const { text: rawText, ...rest } = await doGenerate();
- if (rawText == null) {
- return { text: rawText, ...rest };
- }
- const text2 = startWithReasoning ? openingTag + rawText : rawText;
- const regexp = new RegExp(`${openingTag}(.*?)${closingTag}`, "gs");
- const matches = Array.from(text2.matchAll(regexp));
- if (!matches.length) {
- return { text: text2, ...rest };
- }
- const reasoning = matches.map((match) => match[1]).join(separator);
- let textWithoutReasoning = text2;
- for (let i = matches.length - 1; i >= 0; i--) {
- const match = matches[i];
- const beforeMatch = textWithoutReasoning.slice(0, match.index);
- const afterMatch = textWithoutReasoning.slice(
- match.index + match[0].length
- );
- textWithoutReasoning = beforeMatch + (beforeMatch.length > 0 && afterMatch.length > 0 ? separator : "") + afterMatch;
+ const { content, ...rest } = await doGenerate();
+ const transformedContent = [];
+ for (const part of content) {
+ if (part.type !== "text") {
+ transformedContent.push(part);
+ continue;
+ }
+ const text2 = startWithReasoning ? openingTag + part.text : part.text;
+ const regexp = new RegExp(`${openingTag}(.*?)${closingTag}`, "gs");
+ const matches = Array.from(text2.matchAll(regexp));
+ if (!matches.length) {
+ transformedContent.push(part);
+ continue;
+ }
+ const reasoningText = matches.map((match) => match[1]).join(separator);
+ let textWithoutReasoning = text2;
+ for (let i = matches.length - 1; i >= 0; i--) {
+ const match = matches[i];
+ const beforeMatch = textWithoutReasoning.slice(0, match.index);
+ const afterMatch = textWithoutReasoning.slice(
+ match.index + match[0].length
+ );
+ textWithoutReasoning = beforeMatch + (beforeMatch.length > 0 && afterMatch.length > 0 ? separator : "") + afterMatch;
+ }
+ transformedContent.push({
+ type: "reasoning",
+ reasoningType: "text",
+ text: reasoningText
+ });
+ transformedContent.push({
+ type: "text",
+ text: textWithoutReasoning
+ });
  }
- return { ...rest, text: textWithoutReasoning, reasoning };
+ return { content: transformedContent, ...rest };
  },
  wrapStream: async ({ doStream }) => {
  const { stream, ...rest } = await doStream();
@@ -7994,18 +8198,24 @@ function extractReasoningMiddleware({
  stream: stream.pipeThrough(
  new TransformStream({
  transform: (chunk, controller) => {
- if (chunk.type !== "text-delta") {
+ if (chunk.type !== "text") {
  controller.enqueue(chunk);
  return;
  }
- buffer += chunk.textDelta;
+ buffer += chunk.text;
  function publish(text2) {
  if (text2.length > 0) {
  const prefix = afterSwitch && (isReasoning ? !isFirstReasoning : !isFirstText) ? separator : "";
- controller.enqueue({
- type: isReasoning ? "reasoning" : "text-delta",
- textDelta: prefix + text2
- });
+ controller.enqueue(
+ isReasoning ? {
+ type: "reasoning",
+ reasoningType: "text",
+ text: prefix + text2
+ } : {
+ type: "text",
+ text: prefix + text2
+ }
+ );
  afterSwitch = false;
  if (isReasoning) {
  isFirstReasoning = false;
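`extractReasoningMiddleware` keeps its tag-extraction logic but is rewritten against the V2 shapes: `wrapGenerate` now walks the `content` array and replaces each tagged text part with a `reasoning` part plus a cleaned `text` part, and `wrapStream` emits the new `text` and `reasoning` chunk shapes. Usage should be unchanged; a sketch with a placeholder model:

```js
import { extractReasoningMiddleware, wrapLanguageModel } from "ai";

const model = wrapLanguageModel({
  model: myBaseModel, // placeholder: a model that emits <think>...</think> tags
  middleware: extractReasoningMiddleware({ tagName: "think" }),
});
// Tagged text now surfaces as reasoning content parts / reasoning stream chunks
// rather than a separate `reasoning` string field.
```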
@@ -8050,60 +8260,13 @@ function simulateStreamingMiddleware() {
  const result = await doGenerate();
  const simulatedStream = new ReadableStream({
  start(controller) {
+ controller.enqueue({
+ type: "stream-start",
+ warnings: result.warnings
+ });
  controller.enqueue({ type: "response-metadata", ...result.response });
- if (result.reasoning) {
- if (typeof result.reasoning === "string") {
- controller.enqueue({
- type: "reasoning",
- textDelta: result.reasoning
- });
- } else {
- for (const reasoning of result.reasoning) {
- switch (reasoning.type) {
- case "text": {
- controller.enqueue({
- type: "reasoning",
- textDelta: reasoning.text
- });
- if (reasoning.signature != null) {
- controller.enqueue({
- type: "reasoning-signature",
- signature: reasoning.signature
- });
- }
- break;
- }
- case "redacted": {
- controller.enqueue({
- type: "redacted-reasoning",
- data: reasoning.data
- });
- break;
- }
- }
- }
- }
- }
- if (result.text) {
- controller.enqueue({
- type: "text-delta",
- textDelta: result.text
- });
- }
- if (result.toolCalls) {
- for (const toolCall of result.toolCalls) {
- controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: toolCall.args
- });
- controller.enqueue({
- type: "tool-call",
- ...toolCall
- });
- }
+ for (const part of result.content) {
+ controller.enqueue(part);
  }
  controller.enqueue({
  type: "finish",
@@ -8118,8 +8281,7 @@ function simulateStreamingMiddleware() {
  return {
  stream: simulatedStream,
  request: result.request,
- response: result.response,
- warnings: result.warnings
+ response: result.response
  };
  }
  };
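The simulated stream now follows the V2 protocol: it opens with a `stream-start` chunk carrying the warnings (which the follow-up hunk drops from the `doStream` return value) and then replays the generate result's `content` parts verbatim, instead of re-encoding the old `reasoning`, `text`, and `toolCalls` fields chunk by chunk. Usage stays the same; a sketch:

```js
import { simulateStreamingMiddleware, wrapLanguageModel } from "ai";

// Present a non-streaming model as a streaming one: doGenerate runs once
// and its content parts are replayed as stream chunks.
const streamingModel = wrapLanguageModel({
  model: myNonStreamingModel, // placeholder
  middleware: simulateStreamingMiddleware(),
});
```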
@@ -8176,7 +8338,6 @@ var doWrap = ({
  }
  };
  };
- var experimental_wrapLanguageModel = wrapLanguageModel;

  // core/prompt/append-client-message.ts
  function appendClientMessage({
@@ -8190,7 +8351,7 @@ function appendClientMessage({
  }

  // core/prompt/append-response-messages.ts
- var import_provider25 = require("@ai-sdk/provider");
+ var import_provider26 = require("@ai-sdk/provider");
  function appendResponseMessages({
  messages,
  responseMessages,
@@ -8273,7 +8434,7 @@ function appendResponseMessages({
  break;
  case "file":
  if (part.data instanceof URL) {
- throw new import_provider25.AISDKError({
+ throw new import_provider26.AISDKError({
  name: "InvalidAssistantFileData",
  message: "File data cannot be a URL"
  });
@@ -8367,7 +8528,7 @@ function appendResponseMessages({
  }

  // core/registry/custom-provider.ts
- var import_provider26 = require("@ai-sdk/provider");
+ var import_provider27 = require("@ai-sdk/provider");
  function customProvider({
  languageModels,
  textEmbeddingModels,
@@ -8382,7 +8543,7 @@ function customProvider({
  if (fallbackProvider) {
  return fallbackProvider.languageModel(modelId);
  }
- throw new import_provider26.NoSuchModelError({ modelId, modelType: "languageModel" });
+ throw new import_provider27.NoSuchModelError({ modelId, modelType: "languageModel" });
  },
  textEmbeddingModel(modelId) {
  if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -8391,7 +8552,7 @@ function customProvider({
  if (fallbackProvider) {
  return fallbackProvider.textEmbeddingModel(modelId);
  }
- throw new import_provider26.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+ throw new import_provider27.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
  },
  imageModel(modelId) {
  if (imageModels != null && modelId in imageModels) {
@@ -8400,19 +8561,19 @@ function customProvider({
  if (fallbackProvider == null ? void 0 : fallbackProvider.imageModel) {
  return fallbackProvider.imageModel(modelId);
  }
- throw new import_provider26.NoSuchModelError({ modelId, modelType: "imageModel" });
+ throw new import_provider27.NoSuchModelError({ modelId, modelType: "imageModel" });
  }
  };
  }
  var experimental_customProvider = customProvider;

  // core/registry/no-such-provider-error.ts
- var import_provider27 = require("@ai-sdk/provider");
+ var import_provider28 = require("@ai-sdk/provider");
  var name16 = "AI_NoSuchProviderError";
  var marker16 = `vercel.ai.error.${name16}`;
  var symbol16 = Symbol.for(marker16);
  var _a16;
- var NoSuchProviderError = class extends import_provider27.NoSuchModelError {
+ var NoSuchProviderError = class extends import_provider28.NoSuchModelError {
  constructor({
  modelId,
  modelType,
@@ -8426,13 +8587,13 @@ var NoSuchProviderError = class extends import_provider27.NoSuchModelError {
  this.availableProviders = availableProviders;
  }
  static isInstance(error) {
- return import_provider27.AISDKError.hasMarker(error, marker16);
+ return import_provider28.AISDKError.hasMarker(error, marker16);
  }
  };
  _a16 = symbol16;

  // core/registry/provider-registry.ts
- var import_provider28 = require("@ai-sdk/provider");
+ var import_provider29 = require("@ai-sdk/provider");
  function createProviderRegistry(providers, {
  separator = ":"
  } = {}) {
@@ -8471,20 +8632,20 @@ var DefaultProviderRegistry = class {
  splitId(id, modelType) {
  const index = id.indexOf(this.separator);
  if (index === -1) {
- throw new import_provider28.NoSuchModelError({
+ throw new import_provider29.NoSuchModelError({
  modelId: id,
  modelType,
  message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId${this.separator}modelId")`
  });
  }
- return [id.slice(0, index), id.slice(index + 1)];
+ return [id.slice(0, index), id.slice(index + this.separator.length)];
  }
  languageModel(id) {
  var _a17, _b;
  const [providerId, modelId] = this.splitId(id, "languageModel");
  const model = (_b = (_a17 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a17, modelId);
  if (model == null) {
- throw new import_provider28.NoSuchModelError({ modelId: id, modelType: "languageModel" });
+ throw new import_provider29.NoSuchModelError({ modelId: id, modelType: "languageModel" });
  }
  return model;
  }
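The `splitId` change is a bug fix: the model id used to be sliced at `index + 1`, which is only correct for single-character separators; slicing by `this.separator.length` makes custom multi-character separators work. A sketch of the case this fixes:

```js
import { createProviderRegistry } from "ai";

const registry = createProviderRegistry({ myProvider }, { separator: " > " });

// Before: the id was cut after one character, leaving "> my-model" as the model id.
// After: the full separator is skipped, yielding "my-model".
const model = registry.languageModel("myProvider > my-model");
```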
@@ -8494,7 +8655,7 @@ var DefaultProviderRegistry = class {
  const provider = this.getProvider(providerId);
  const model = (_a17 = provider.textEmbeddingModel) == null ? void 0 : _a17.call(provider, modelId);
  if (model == null) {
- throw new import_provider28.NoSuchModelError({
+ throw new import_provider29.NoSuchModelError({
  modelId: id,
  modelType: "textEmbeddingModel"
  });
@@ -8507,7 +8668,7 @@ var DefaultProviderRegistry = class {
  const provider = this.getProvider(providerId);
  const model = (_a17 = provider.imageModel) == null ? void 0 : _a17.call(provider, modelId);
  if (model == null) {
- throw new import_provider28.NoSuchModelError({ modelId: id, modelType: "imageModel" });
+ throw new import_provider29.NoSuchModelError({ modelId: id, modelType: "imageModel" });
  }
  return model;
  }
@@ -9069,7 +9230,7 @@ var MCPClient = class {
  };

  // core/util/cosine-similarity.ts
- function cosineSimilarity(vector1, vector2, options) {
+ function cosineSimilarity(vector1, vector2) {
  if (vector1.length !== vector2.length) {
  throw new InvalidArgumentError({
  parameter: "vector1,vector2",
@@ -9079,13 +9240,6 @@ function cosineSimilarity(vector1, vector2, options) {
  }
  const n = vector1.length;
  if (n === 0) {
- if (options == null ? void 0 : options.throwErrorForEmptyVectors) {
- throw new InvalidArgumentError({
- parameter: "vector1",
- value: vector1,
- message: "Vectors cannot be empty"
- });
- }
  return 0;
  }
  let magnitudeSquared1 = 0;
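`cosineSimilarity` drops its third argument: the `throwErrorForEmptyVectors` option is gone, and empty vectors now always return 0 (mismatched lengths still throw `InvalidArgumentError`). A quick illustration:

```js
import { cosineSimilarity } from "ai";

cosineSimilarity([1, 2], [2, 4]); // => 1 (parallel vectors)
cosineSimilarity([], []); // => 0; previously this could throw when
                          // { throwErrorForEmptyVectors: true } was passed
```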
@@ -9412,8 +9566,8 @@ var StreamData = class {
  experimental_createProviderRegistry,
  experimental_customProvider,
  experimental_generateImage,
+ experimental_generateSpeech,
  experimental_transcribe,
- experimental_wrapLanguageModel,
  extractMaxToolInvocationStep,
  extractReasoningMiddleware,
  fillMessageParts,