ai 5.0.0-canary.7 → 5.0.0-canary.8

This diff shows the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only and reflects the changes between the versions as released.
package/dist/index.mjs CHANGED
@@ -1993,6 +1993,7 @@ function selectTelemetryAttributes({
 async function embed({
   model,
   value,
+  providerOptions,
   maxRetries: maxRetriesArg,
   abortSignal,
   headers,
@@ -2018,7 +2019,7 @@ async function embed({
     }),
     tracer,
     fn: async (span) => {
-      const { embedding, usage, rawResponse } = await retry(
+      const { embedding, usage, response } = await retry(
         () => (
           // nested spans to align with the embedMany telemetry data:
           recordSpan({
@@ -2041,7 +2042,8 @@ async function embed({
               const modelResponse = await model.doEmbed({
                 values: [value],
                 abortSignal,
-                headers
+                headers,
+                providerOptions
               });
               const embedding2 = modelResponse.embeddings[0];
               const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2061,7 +2063,7 @@ async function embed({
               return {
                 embedding: embedding2,
                 usage: usage2,
-                rawResponse: modelResponse.rawResponse
+                response: modelResponse.response
               };
             }
           })
@@ -2076,7 +2078,12 @@ async function embed({
           }
         })
       );
-      return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
+      return new DefaultEmbedResult({
+        value,
+        embedding,
+        usage,
+        response
+      });
     }
   });
 }
@@ -2085,7 +2092,7 @@ var DefaultEmbedResult = class {
     this.value = options.value;
     this.embedding = options.embedding;
     this.usage = options.usage;
-    this.rawResponse = options.rawResponse;
+    this.response = options.response;
   }
 };
 
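The hunks above thread a new `providerOptions` argument from `embed` through to `model.doEmbed`, and rename the result's `rawResponse` field to `response`. A minimal sketch of calling code after this change (the provider import and option shape are illustrative assumptions, not taken from this diff):

    import { embed } from "ai";
    import { openai } from "@ai-sdk/openai"; // illustrative provider

    const result = await embed({
      model: openai.embedding("text-embedding-3-small"),
      value: "sunny day at the beach",
      // new: forwarded to the provider's doEmbed call
      providerOptions: { openai: { dimensions: 512 } }, // assumed option shape
    });

    // renamed: previously result.rawResponse
    console.log(result.response?.headers);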
@@ -2108,6 +2115,7 @@ async function embedMany({
   maxRetries: maxRetriesArg,
   abortSignal,
   headers,
+  providerOptions,
   experimental_telemetry: telemetry
 }) {
   const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
@@ -2135,7 +2143,7 @@ async function embedMany({
     fn: async (span) => {
       const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
       if (maxEmbeddingsPerCall == null) {
-        const { embeddings: embeddings2, usage } = await retry(() => {
+        const { embeddings: embeddings2, usage, response } = await retry(() => {
           return recordSpan({
             name: "ai.embedMany.doEmbed",
             attributes: selectTelemetryAttributes({
@@ -2158,7 +2166,8 @@ async function embedMany({
               const modelResponse = await model.doEmbed({
                 values,
                 abortSignal,
-                headers
+                headers,
+                providerOptions
               });
               const embeddings3 = modelResponse.embeddings;
               const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2173,7 +2182,11 @@ async function embedMany({
               }
             })
           );
-          return { embeddings: embeddings3, usage: usage2 };
+          return {
+            embeddings: embeddings3,
+            usage: usage2,
+            response: modelResponse.response
+          };
         }
       });
     });
@@ -2188,13 +2201,23 @@ async function embedMany({
         }
       })
     );
-    return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
+    return new DefaultEmbedManyResult({
+      values,
+      embeddings: embeddings2,
+      usage,
+      responses: [response]
+    });
   }
   const valueChunks = splitArray(values, maxEmbeddingsPerCall);
   const embeddings = [];
+  const responses = [];
   let tokens = 0;
   for (const chunk of valueChunks) {
-    const { embeddings: responseEmbeddings, usage } = await retry(() => {
+    const {
+      embeddings: responseEmbeddings,
+      usage,
+      response
+    } = await retry(() => {
       return recordSpan({
         name: "ai.embedMany.doEmbed",
         attributes: selectTelemetryAttributes({
@@ -2217,7 +2240,8 @@ async function embedMany({
           const modelResponse = await model.doEmbed({
             values: chunk,
             abortSignal,
-            headers
+            headers,
+            providerOptions
           });
           const embeddings2 = modelResponse.embeddings;
           const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2232,11 +2256,16 @@ async function embedMany({
           }
         })
       );
-      return { embeddings: embeddings2, usage: usage2 };
+      return {
+        embeddings: embeddings2,
+        usage: usage2,
+        response: modelResponse.response
+      };
     }
   });
 });
 embeddings.push(...responseEmbeddings);
+responses.push(response);
 tokens += usage.tokens;
 }
 span.setAttributes(
@@ -2253,7 +2282,8 @@ async function embedMany({
   return new DefaultEmbedManyResult({
     values,
     embeddings,
-    usage: { tokens }
+    usage: { tokens },
+    responses
   });
 }
 });
@@ -2263,6 +2293,7 @@ var DefaultEmbedManyResult = class {
     this.values = options.values;
     this.embeddings = options.embeddings;
     this.usage = options.usage;
+    this.responses = options.responses;
   }
 };
 
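`embedMany` gets the same `providerOptions` passthrough, and now collects one raw `response` per `doEmbed` call into a new `responses` array (one entry per chunk when the input is split by `maxEmbeddingsPerCall`). A sketch under the same assumptions as the `embed` example above:

    import { embedMany } from "ai";
    import { openai } from "@ai-sdk/openai"; // illustrative provider

    const { embeddings, responses } = await embedMany({
      model: openai.embedding("text-embedding-3-small"),
      values: ["first", "second", "third"],
      providerOptions: { openai: { dimensions: 512 } }, // assumed option shape
    });

    console.log(embeddings.length); // 3
    console.log(responses.length);  // number of doEmbed calls that were made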
@@ -4135,7 +4166,7 @@ async function generateObject({
     }),
     tracer,
     fn: async (span) => {
-      var _a17, _b, _c, _d;
+      var _a17, _b, _c, _d, _e;
       if (mode === "auto" || mode == null) {
         mode = model.defaultObjectGenerationMode;
       }
@@ -4197,7 +4228,7 @@ async function generateObject({
         }),
         tracer,
         fn: async (span2) => {
-          var _a18, _b2, _c2, _d2, _e, _f, _g, _h;
+          var _a18, _b2, _c2, _d2, _e2, _f, _g, _h;
           const result2 = await model.doGenerate({
             responseFormat: {
               type: "json",
@@ -4215,7 +4246,7 @@ async function generateObject({
           const responseData = {
             id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
             timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-            modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+            modelId: (_f = (_e2 = result2.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId,
             headers: (_g = result2.response) == null ? void 0 : _g.headers,
             body: (_h = result2.response) == null ? void 0 : _h.body
           };
@@ -4232,7 +4263,10 @@ async function generateObject({
               telemetry,
               attributes: {
                 "ai.response.finishReason": result2.finishReason,
-                "ai.response.object": { output: () => result2.text },
+                "ai.response.object": { output: () => {
+                  var _a19;
+                  return (_a19 = result2.text) == null ? void 0 : _a19.text;
+                } },
                 "ai.response.id": responseData.id,
                 "ai.response.model": responseData.modelId,
                 "ai.response.timestamp": responseData.timestamp.toISOString(),
@@ -4252,13 +4286,13 @@ async function generateObject({
             }
           })
         );
-        result = generateResult.objectText;
+        result = (_b = generateResult.objectText) == null ? void 0 : _b.text;
         finishReason = generateResult.finishReason;
         usage = generateResult.usage;
         warnings = generateResult.warnings;
         logprobs = generateResult.logprobs;
         resultProviderMetadata = generateResult.providerMetadata;
-        request = (_b = generateResult.request) != null ? _b : {};
+        request = (_c = generateResult.request) != null ? _c : {};
         response = generateResult.responseData;
         break;
       }
@@ -4270,7 +4304,7 @@ async function generateObject({
         const promptMessages = await convertToLanguageModelPrompt({
           prompt: standardizedPrompt,
           modelSupportsImageUrls: model.supportsImageUrls,
-          modelSupportsUrl: (_c = model.supportsUrl) == null ? void 0 : _c.bind(model)
+          modelSupportsUrl: (_d = model.supportsUrl) == null ? void 0 : _d.bind(model)
           // support 'this' context,
         });
         const inputFormat = standardizedPrompt.type;
@@ -4305,7 +4339,7 @@ async function generateObject({
         }),
         tracer,
         fn: async (span2) => {
-          var _a18, _b2, _c2, _d2, _e, _f, _g, _h, _i, _j;
+          var _a18, _b2, _c2, _d2, _e2, _f, _g, _h, _i, _j;
           const result2 = await model.doGenerate({
             tools: [
               {
@@ -4326,7 +4360,7 @@ async function generateObject({
           const objectText = (_b2 = (_a18 = result2.toolCalls) == null ? void 0 : _a18[0]) == null ? void 0 : _b2.args;
           const responseData = {
             id: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.id) != null ? _d2 : generateId3(),
-            timestamp: (_f = (_e = result2.response) == null ? void 0 : _e.timestamp) != null ? _f : currentDate(),
+            timestamp: (_f = (_e2 = result2.response) == null ? void 0 : _e2.timestamp) != null ? _f : currentDate(),
             modelId: (_h = (_g = result2.response) == null ? void 0 : _g.modelId) != null ? _h : model.modelId,
             headers: (_i = result2.response) == null ? void 0 : _i.headers,
             body: (_j = result2.response) == null ? void 0 : _j.body
@@ -4370,7 +4404,7 @@ async function generateObject({
         warnings = generateResult.warnings;
         logprobs = generateResult.logprobs;
         resultProviderMetadata = generateResult.providerMetadata;
-        request = (_d = generateResult.request) != null ? _d : {};
+        request = (_e = generateResult.request) != null ? _e : {};
         response = generateResult.responseData;
         break;
       }
@@ -4799,8 +4833,8 @@ var DefaultStreamObjectResult = class {
     transformer = {
       transform: (chunk, controller) => {
         switch (chunk.type) {
-          case "text-delta":
-            controller.enqueue(chunk.textDelta);
+          case "text":
+            controller.enqueue(chunk.text);
             break;
           case "response-metadata":
           case "finish":
@@ -5469,23 +5503,36 @@ function toResponseMessages({
   generateMessageId
 }) {
   const responseMessages = [];
-  responseMessages.push({
-    role: "assistant",
-    content: [
+  const content = [];
+  if (reasoning.length > 0) {
+    content.push(
       ...reasoning.map(
         (part) => part.type === "text" ? { ...part, type: "reasoning" } : { ...part, type: "redacted-reasoning" }
-      ),
-      // TODO language model v2: switch to order response content (instead of type-based ordering)
+      )
+    );
+  }
+  if (files.length > 0) {
+    content.push(
       ...files.map((file) => ({
         type: "file",
         data: file.base64,
         mediaType: file.mediaType
-      })),
-      { type: "text", text: text2 },
-      ...toolCalls
-    ],
-    id: messageId
-  });
+      }))
+    );
+  }
+  if (text2.length > 0) {
+    content.push({ type: "text", text: text2 });
+  }
+  if (toolCalls.length > 0) {
+    content.push(...toolCalls);
+  }
+  if (content.length > 0) {
+    responseMessages.push({
+      role: "assistant",
+      content,
+      id: messageId
+    });
+  }
   if (toolResults.length > 0) {
     responseMessages.push({
       role: "tool",
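The rewritten `toResponseMessages` assembles the assistant message part by part and only emits it when at least one part exists, so a step that produces neither text nor tool calls no longer yields an empty assistant message. The assembly order implied by the hunk, as a sketch (the part-array names are illustrative):

    // order of assembly in the new code:
    const content = [
      ...reasoningParts, // "reasoning" / "redacted-reasoning" parts
      ...fileParts,      // { type: "file", data, mediaType }
      ...(text.length > 0 ? [{ type: "text", text }] : []),
      ...toolCalls,
    ];
    // pushed as { role: "assistant", content, id } only when content.length > 0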
@@ -5589,7 +5636,7 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a18, _b, _c, _d, _e, _f, _g;
+      var _a18, _b, _c, _d, _e, _f, _g, _h;
       const toolsAndToolChoice = {
         ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
       };
@@ -5664,7 +5711,7 @@ async function generateText({
             }),
             tracer,
             fn: async (span2) => {
-              var _a19, _b2, _c2, _d2, _e2, _f2, _g2, _h;
+              var _a19, _b2, _c2, _d2, _e2, _f2, _g2, _h2;
               const result = await model.doGenerate({
                 ...callSettings,
                 ...toolsAndToolChoice,
@@ -5680,7 +5727,7 @@ async function generateText({
                 timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
                 modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId,
                 headers: (_g2 = result.response) == null ? void 0 : _g2.headers,
-                body: (_h = result.response) == null ? void 0 : _h.body
+                body: (_h2 = result.response) == null ? void 0 : _h2.body
               };
               span2.setAttributes(
                 selectTelemetryAttributes({
  selectTelemetryAttributes({
@@ -5688,10 +5735,23 @@ async function generateText({
5688
5735
  attributes: {
5689
5736
  "ai.response.finishReason": result.finishReason,
5690
5737
  "ai.response.text": {
5691
- output: () => result.text
5738
+ output: () => {
5739
+ var _a20;
5740
+ return (_a20 = result.text) == null ? void 0 : _a20.text;
5741
+ }
5692
5742
  },
5693
5743
  "ai.response.toolCalls": {
5694
- output: () => JSON.stringify(result.toolCalls)
5744
+ output: () => {
5745
+ var _a20;
5746
+ return JSON.stringify(
5747
+ (_a20 = result.toolCalls) == null ? void 0 : _a20.map((toolCall) => ({
5748
+ toolCallType: toolCall.toolCallType,
5749
+ toolCallId: toolCall.toolCallId,
5750
+ toolName: toolCall.toolName,
5751
+ args: toolCall.args
5752
+ }))
5753
+ );
5754
+ }
5695
5755
  },
5696
5756
  "ai.response.id": responseData.id,
5697
5757
  "ai.response.model": responseData.modelId,
@@ -5748,7 +5808,7 @@ async function generateText({
           nextStepType = "tool-result";
         }
       }
-      const originalText = (_c = currentModelResponse.text) != null ? _c : "";
+      const originalText = (_d = (_c = currentModelResponse.text) == null ? void 0 : _c.text) != null ? _d : "";
       const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
       text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
       const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -5756,7 +5816,7 @@ async function generateText({
       currentReasoningDetails = asReasoningDetails(
         currentModelResponse.reasoning
       );
-      sources.push(...(_d = currentModelResponse.sources) != null ? _d : []);
+      sources.push(...(_e = currentModelResponse.sources) != null ? _e : []);
       if (stepType === "continue") {
         const lastMessage = responseMessages[responseMessages.length - 1];
         if (typeof lastMessage.content === "string") {
@@ -5788,14 +5848,14 @@ async function generateText({
         reasoning: asReasoningText(currentReasoningDetails),
         reasoningDetails: currentReasoningDetails,
         files: asFiles(currentModelResponse.files),
-        sources: (_e = currentModelResponse.sources) != null ? _e : [],
+        sources: (_f = currentModelResponse.sources) != null ? _f : [],
         toolCalls: currentToolCalls,
         toolResults: currentToolResults,
         finishReason: currentModelResponse.finishReason,
         usage: currentUsage,
         warnings: currentModelResponse.warnings,
         logprobs: currentModelResponse.logprobs,
-        request: (_f = currentModelResponse.request) != null ? _f : {},
+        request: (_g = currentModelResponse.request) != null ? _g : {},
         response: {
           ...currentModelResponse.response,
           // deep clone msgs to avoid mutating past messages in multi-step:
@@ -5814,10 +5874,23 @@ async function generateText({
           attributes: {
             "ai.response.finishReason": currentModelResponse.finishReason,
             "ai.response.text": {
-              output: () => currentModelResponse.text
+              output: () => {
+                var _a19;
+                return (_a19 = currentModelResponse.text) == null ? void 0 : _a19.text;
+              }
             },
             "ai.response.toolCalls": {
-              output: () => JSON.stringify(currentModelResponse.toolCalls)
+              output: () => {
+                var _a19;
+                return JSON.stringify(
+                  (_a19 = currentModelResponse.toolCalls) == null ? void 0 : _a19.map((toolCall) => ({
+                    toolCallType: toolCall.toolCallType,
+                    toolCallId: toolCall.toolCallId,
+                    toolName: toolCall.toolName,
+                    args: toolCall.args
+                  }))
+                );
+              }
             },
             // TODO rename telemetry attributes to inputTokens and outputTokens
             "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
@@ -5849,7 +5922,7 @@ async function generateText({
       finishReason: currentModelResponse.finishReason,
       usage,
       warnings: currentModelResponse.warnings,
-      request: (_g = currentModelResponse.request) != null ? _g : {},
+      request: (_h = currentModelResponse.request) != null ? _h : {},
       response: {
         ...currentModelResponse.response,
         messages: responseMessages
@@ -5963,10 +6036,28 @@ function asReasoningDetails(reasoning) {
   if (reasoning == null) {
     return [];
   }
-  if (typeof reasoning === "string") {
-    return [{ type: "text", text: reasoning }];
+  const result = [];
+  let activeReasoningText;
+  for (const part of reasoning) {
+    if (part.reasoningType === "text") {
+      if (activeReasoningText == null) {
+        activeReasoningText = { type: "text", text: part.text };
+        result.push(activeReasoningText);
+      } else {
+        activeReasoningText.text += part.text;
+      }
+    } else if (part.reasoningType === "signature") {
+      if (activeReasoningText == null) {
+        activeReasoningText = { type: "text", text: "" };
+        result.push(activeReasoningText);
+      }
+      activeReasoningText.signature = part.signature;
+      activeReasoningText = void 0;
+    } else if (part.reasoningType === "redacted") {
+      result.push({ type: "redacted", data: part.data });
+    }
   }
-  return reasoning;
+  return result;
 }
 function asFiles(files) {
   var _a17;
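`asReasoningDetails` now consumes an array of reasoning parts rather than a plain string, merging consecutive text parts and attaching signatures to the preceding text. The shapes implied by the code above, written out as illustrative TypeScript types (the type names are not from this diff):

    // input parts (from the language model):
    type ReasoningPart =
      | { reasoningType: "text"; text: string }
      | { reasoningType: "signature"; signature: string }
      | { reasoningType: "redacted"; data: string };

    // output details (what asReasoningDetails returns):
    type ReasoningDetail =
      | { type: "text"; text: string; signature?: string }
      | { type: "redacted"; data: string };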
@@ -6166,18 +6257,18 @@ function smoothStream({
   let buffer = "";
   return new TransformStream({
     async transform(chunk, controller) {
-      if (chunk.type !== "text-delta") {
+      if (chunk.type !== "text") {
         if (buffer.length > 0) {
-          controller.enqueue({ type: "text-delta", textDelta: buffer });
+          controller.enqueue({ type: "text", text: buffer });
           buffer = "";
         }
         controller.enqueue(chunk);
         return;
       }
-      buffer += chunk.textDelta;
+      buffer += chunk.text;
       let match;
       while ((match = detectChunk(buffer)) != null) {
-        controller.enqueue({ type: "text-delta", textDelta: match });
+        controller.enqueue({ type: "text", text: match });
         buffer = buffer.slice(match.length);
         await delay2(delayInMs);
       }
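`smoothStream` and the rest of the streaming pipeline switch from `{ type: "text-delta", textDelta }` parts to `{ type: "text", text }`. Consumers of `fullStream` would match the new shape; a sketch (the `streamText` call and transform option are illustrative, not taken from this diff):

    import { streamText, smoothStream } from "ai";

    const result = streamText({
      model, // any language model
      prompt: "Write a haiku.",
      experimental_transform: smoothStream(), // assumed option name
    });

    for await (const part of result.fullStream) {
      if (part.type === "text") {
        // previously: part.type === "text-delta" and part.textDelta
        process.stdout.write(part.text);
      }
    }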
@@ -6336,10 +6427,8 @@ function runToolsTransformation({
       async transform(chunk, controller) {
         const chunkType = chunk.type;
         switch (chunkType) {
-          case "text-delta":
+          case "text":
           case "reasoning":
-          case "reasoning-signature":
-          case "redacted-reasoning":
           case "source":
           case "response-metadata":
           case "error": {
@@ -6347,12 +6436,13 @@ function runToolsTransformation({
             break;
           }
           case "file": {
-            controller.enqueue(
-              new DefaultGeneratedFileWithType({
+            controller.enqueue({
+              type: "file",
+              file: new DefaultGeneratedFileWithType({
                 data: chunk.data,
                 mediaType: chunk.mediaType
               })
-            );
+            });
             break;
           }
           case "tool-call-delta": {
@@ -6591,7 +6681,7 @@ function createOutputTransformStream(output) {
     partialOutput = void 0
   }) {
     controller.enqueue({
-      part: { type: "text-delta", textDelta: textChunk },
+      part: { type: "text", text: textChunk },
       partialOutput
     });
     textChunk = "";
@@ -6601,12 +6691,12 @@ function createOutputTransformStream(output) {
       if (chunk.type === "step-finish") {
         publishTextChunk({ controller });
       }
-      if (chunk.type !== "text-delta") {
+      if (chunk.type !== "text") {
         controller.enqueue({ part: chunk, partialOutput: void 0 });
         return;
       }
-      text2 += chunk.textDelta;
-      textChunk += chunk.textDelta;
+      text2 += chunk.text;
+      textChunk += chunk.text;
       const result = output.parsePartial({ text: text2 });
       if (result != null) {
         const currentJson = JSON.stringify(result.partial);
@@ -6701,44 +6791,44 @@ var DefaultStreamTextResult = class {
         async transform(chunk, controller) {
           controller.enqueue(chunk);
           const { part } = chunk;
-          if (part.type === "text-delta" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
+          if (part.type === "text" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
             await (onChunk == null ? void 0 : onChunk({ chunk: part }));
           }
           if (part.type === "error") {
             await (onError == null ? void 0 : onError({ error: part.error }));
           }
-          if (part.type === "text-delta") {
-            recordedStepText += part.textDelta;
-            recordedContinuationText += part.textDelta;
-            recordedFullText += part.textDelta;
+          if (part.type === "text") {
+            recordedStepText += part.text;
+            recordedContinuationText += part.text;
+            recordedFullText += part.text;
           }
           if (part.type === "reasoning") {
-            if (activeReasoningText == null) {
-              activeReasoningText = { type: "text", text: part.textDelta };
-              stepReasoning.push(activeReasoningText);
-            } else {
-              activeReasoningText.text += part.textDelta;
-            }
-          }
-          if (part.type === "reasoning-signature") {
-            if (activeReasoningText == null) {
-              throw new AISDKError18({
-                name: "InvalidStreamPart",
-                message: "reasoning-signature without reasoning"
-              });
+            if (part.reasoningType === "text") {
+              if (activeReasoningText == null) {
+                activeReasoningText = { type: "text", text: part.text };
+                stepReasoning.push(activeReasoningText);
+              } else {
+                activeReasoningText.text += part.text;
+              }
+            } else if (part.reasoningType === "signature") {
+              if (activeReasoningText == null) {
+                throw new AISDKError18({
+                  name: "InvalidStreamPart",
+                  message: "reasoning-signature without reasoning"
+                });
+              }
+              activeReasoningText.signature = part.signature;
+              activeReasoningText = void 0;
+            } else if (part.reasoningType === "redacted") {
+              stepReasoning.push({ type: "redacted", data: part.data });
             }
-            activeReasoningText.signature = part.signature;
-            activeReasoningText = void 0;
-          }
-          if (part.type === "redacted-reasoning") {
-            stepReasoning.push({ type: "redacted", data: part.data });
           }
           if (part.type === "file") {
-            stepFiles.push(part);
+            stepFiles.push(part.file);
           }
           if (part.type === "source") {
-            recordedSources.push(part.source);
-            recordedStepSources.push(part.source);
+            recordedSources.push(part);
+            recordedStepSources.push(part);
           }
           if (part.type === "tool-call") {
             recordedToolCalls.push(part);
@@ -7072,10 +7162,10 @@ var DefaultStreamTextResult = class {
             chunk
           }) {
             controller.enqueue(chunk);
-            stepText += chunk.textDelta;
-            fullStepText += chunk.textDelta;
+            stepText += chunk.text;
+            fullStepText += chunk.text;
             chunkTextPublished = true;
-            hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
+            hasWhitespaceSuffix = chunk.text.trimEnd() !== chunk.text;
           }
           self.addStream(
             transformedStream.pipeThrough(
@@ -7098,14 +7188,14 @@ var DefaultStreamTextResult = class {
                     warnings: warnings != null ? warnings : []
                   });
                 }
-                if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
+                if (chunk.type === "text" && chunk.text.length === 0) {
                   return;
                 }
                 const chunkType = chunk.type;
                 switch (chunkType) {
-                  case "text-delta": {
+                  case "text": {
                     if (continueSteps) {
-                      const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
+                      const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.text.trimStart() : chunk.text;
                       if (trimmedChunkText.length === 0) {
                         break;
                       }
@@ -7117,8 +7207,8 @@ var DefaultStreamTextResult = class {
                         await publishTextChunk({
                           controller,
                           chunk: {
-                            type: "text-delta",
-                            textDelta: split.prefix + split.whitespace
+                            type: "text",
+                            text: split.prefix + split.whitespace
                           }
                         });
                       }
@@ -7129,35 +7219,31 @@ var DefaultStreamTextResult = class {
                   }
                   case "reasoning": {
                     controller.enqueue(chunk);
-                    if (activeReasoningText2 == null) {
-                      activeReasoningText2 = {
-                        type: "text",
-                        text: chunk.textDelta
-                      };
-                      stepReasoning2.push(activeReasoningText2);
-                    } else {
-                      activeReasoningText2.text += chunk.textDelta;
-                    }
-                    break;
-                  }
-                  case "reasoning-signature": {
-                    controller.enqueue(chunk);
-                    if (activeReasoningText2 == null) {
-                      throw new InvalidStreamPartError({
-                        chunk,
-                        message: "reasoning-signature without reasoning"
+                    if (chunk.reasoningType === "text") {
+                      if (activeReasoningText2 == null) {
+                        activeReasoningText2 = {
+                          type: "text",
+                          text: chunk.text
+                        };
+                        stepReasoning2.push(activeReasoningText2);
+                      } else {
+                        activeReasoningText2.text += chunk.text;
+                      }
+                    } else if (chunk.reasoningType === "signature") {
+                      if (activeReasoningText2 == null) {
+                        throw new InvalidStreamPartError({
+                          chunk,
+                          message: "reasoning-signature without reasoning"
+                        });
+                      }
+                      activeReasoningText2.signature = chunk.signature;
+                      activeReasoningText2 = void 0;
+                    } else if (chunk.reasoningType === "redacted") {
+                      stepReasoning2.push({
+                        type: "redacted",
+                        data: chunk.data
                       });
                     }
-                    activeReasoningText2.signature = chunk.signature;
-                    activeReasoningText2 = void 0;
-                    break;
-                  }
-                  case "redacted-reasoning": {
-                    controller.enqueue(chunk);
-                    stepReasoning2.push({
-                      type: "redacted",
-                      data: chunk.data
-                    });
                     break;
                   }
                   case "tool-call": {
@@ -7192,7 +7278,7 @@ var DefaultStreamTextResult = class {
                     break;
                   }
                   case "file": {
-                    stepFiles2.push(chunk);
+                    stepFiles2.push(chunk.file);
                     controller.enqueue(chunk);
                     break;
                   }
@@ -7233,10 +7319,7 @@ var DefaultStreamTextResult = class {
                 stepType2 === "continue" && !chunkTextPublished)) {
                   await publishTextChunk({
                     controller,
-                    chunk: {
-                      type: "text-delta",
-                      textDelta: chunkBuffer
-                    }
+                    chunk: { type: "text", text: chunkBuffer }
                   });
                   chunkBuffer = "";
                 }
@@ -7426,8 +7509,8 @@ var DefaultStreamTextResult = class {
       this.teeStream().pipeThrough(
         new TransformStream({
           transform({ part }, controller) {
-            if (part.type === "text-delta") {
-              controller.enqueue(part.textDelta);
+            if (part.type === "text") {
+              controller.enqueue(part.text);
             }
           }
         })
@@ -7485,52 +7568,45 @@ var DefaultStreamTextResult = class {
       transform: async (chunk, controller) => {
         const chunkType = chunk.type;
         switch (chunkType) {
-          case "text-delta": {
-            controller.enqueue(formatDataStreamPart("text", chunk.textDelta));
+          case "text": {
+            controller.enqueue(formatDataStreamPart("text", chunk.text));
             break;
           }
           case "reasoning": {
             if (sendReasoning) {
-              controller.enqueue(
-                formatDataStreamPart("reasoning", chunk.textDelta)
-              );
-            }
-            break;
-          }
-          case "redacted-reasoning": {
-            if (sendReasoning) {
-              controller.enqueue(
-                formatDataStreamPart("redacted_reasoning", {
-                  data: chunk.data
-                })
-              );
-            }
-            break;
-          }
-          case "reasoning-signature": {
-            if (sendReasoning) {
-              controller.enqueue(
-                formatDataStreamPart("reasoning_signature", {
-                  signature: chunk.signature
-                })
-              );
+              if (chunk.reasoningType === "text") {
+                controller.enqueue(
+                  formatDataStreamPart("reasoning", chunk.text)
+                );
+              } else if (chunk.reasoningType === "signature") {
+                controller.enqueue(
+                  formatDataStreamPart("reasoning_signature", {
+                    signature: chunk.signature
+                  })
+                );
+              } else if (chunk.reasoningType === "redacted") {
+                controller.enqueue(
+                  formatDataStreamPart("redacted_reasoning", {
+                    data: chunk.data
+                  })
+                );
+              }
             }
             break;
           }
           case "file": {
             controller.enqueue(
+              // TODO update protocol to v2 or replace with event stream
               formatDataStreamPart("file", {
-                mimeType: chunk.mediaType,
-                data: chunk.base64
+                mimeType: chunk.file.mediaType,
+                data: chunk.file.base64
               })
             );
             break;
           }
           case "source": {
             if (sendSources) {
-              controller.enqueue(
-                formatDataStreamPart("source", chunk.source)
-              );
+              controller.enqueue(formatDataStreamPart("source", chunk));
             }
             break;
           }
@@ -7875,15 +7951,15 @@ function extractReasoningMiddleware({
   wrapGenerate: async ({ doGenerate }) => {
     const { text: rawText, ...rest } = await doGenerate();
     if (rawText == null) {
-      return { text: rawText, ...rest };
+      return { text: void 0, ...rest };
     }
-    const text2 = startWithReasoning ? openingTag + rawText : rawText;
+    const text2 = startWithReasoning ? openingTag + rawText.text : rawText.text;
     const regexp = new RegExp(`${openingTag}(.*?)${closingTag}`, "gs");
     const matches = Array.from(text2.matchAll(regexp));
     if (!matches.length) {
-      return { text: text2, ...rest };
+      return { text: { type: "text", text: text2 }, ...rest };
     }
-    const reasoning = matches.map((match) => match[1]).join(separator);
+    const reasoningText = matches.map((match) => match[1]).join(separator);
     let textWithoutReasoning = text2;
     for (let i = matches.length - 1; i >= 0; i--) {
       const match = matches[i];
@@ -7893,7 +7969,17 @@ function extractReasoningMiddleware({
       );
       textWithoutReasoning = beforeMatch + (beforeMatch.length > 0 && afterMatch.length > 0 ? separator : "") + afterMatch;
     }
-    return { ...rest, text: textWithoutReasoning, reasoning };
+    return {
+      ...rest,
+      text: { type: "text", text: textWithoutReasoning },
+      reasoning: reasoningText.length > 0 ? [
+        {
+          type: "reasoning",
+          reasoningType: "text",
+          text: reasoningText
+        }
+      ] : void 0
+    };
   },
   wrapStream: async ({ doStream }) => {
     const { stream, ...rest } = await doStream();
@@ -7906,18 +7992,24 @@ function extractReasoningMiddleware({
       stream: stream.pipeThrough(
         new TransformStream({
           transform: (chunk, controller) => {
-            if (chunk.type !== "text-delta") {
+            if (chunk.type !== "text") {
               controller.enqueue(chunk);
               return;
             }
-            buffer += chunk.textDelta;
+            buffer += chunk.text;
             function publish(text2) {
               if (text2.length > 0) {
                 const prefix = afterSwitch && (isReasoning ? !isFirstReasoning : !isFirstText) ? separator : "";
-                controller.enqueue({
-                  type: isReasoning ? "reasoning" : "text-delta",
-                  textDelta: prefix + text2
-                });
+                controller.enqueue(
+                  isReasoning ? {
+                    type: "reasoning",
+                    reasoningType: "text",
+                    text: prefix + text2
+                  } : {
+                    type: "text",
+                    text: prefix + text2
+                  }
+                );
                 afterSwitch = false;
                 if (isReasoning) {
                   isFirstReasoning = false;
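`extractReasoningMiddleware` now returns `text` as a `{ type: "text", text }` object and `reasoning` as an array of reasoning parts (or `undefined` when nothing was extracted) instead of plain strings. The call site is unchanged; a sketch with an illustrative tag name and base model:

    import { extractReasoningMiddleware, wrapLanguageModel } from "ai";

    const model = wrapLanguageModel({
      model: baseModel, // any language model
      middleware: extractReasoningMiddleware({ tagName: "think" }),
    });
    // generateText / streamText on the wrapped model now surface
    // <think>...</think> spans as reasoning parts rather than strings.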
@@ -7964,43 +8056,12 @@ function simulateStreamingMiddleware() {
       start(controller) {
         controller.enqueue({ type: "response-metadata", ...result.response });
         if (result.reasoning) {
-          if (typeof result.reasoning === "string") {
-            controller.enqueue({
-              type: "reasoning",
-              textDelta: result.reasoning
-            });
-          } else {
-            for (const reasoning of result.reasoning) {
-              switch (reasoning.type) {
-                case "text": {
-                  controller.enqueue({
-                    type: "reasoning",
-                    textDelta: reasoning.text
-                  });
-                  if (reasoning.signature != null) {
-                    controller.enqueue({
-                      type: "reasoning-signature",
-                      signature: reasoning.signature
-                    });
-                  }
-                  break;
-                }
-                case "redacted": {
-                  controller.enqueue({
-                    type: "redacted-reasoning",
-                    data: reasoning.data
-                  });
-                  break;
-                }
-              }
-            }
+          for (const reasoningPart of result.reasoning) {
+            controller.enqueue(reasoningPart);
           }
         }
         if (result.text) {
-          controller.enqueue({
-            type: "text-delta",
-            textDelta: result.text
-          });
+          controller.enqueue(result.text);
         }
         if (result.toolCalls) {
           for (const toolCall of result.toolCalls) {
@@ -8011,10 +8072,7 @@ function simulateStreamingMiddleware() {
               toolName: toolCall.toolName,
               argsTextDelta: toolCall.args
             });
-            controller.enqueue({
-              type: "tool-call",
-              ...toolCall
-            });
+            controller.enqueue(toolCall);
           }
         }
         controller.enqueue({
@@ -8389,7 +8447,7 @@ var DefaultProviderRegistry = class {
         message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId${this.separator}modelId")`
       });
     }
-    return [id.slice(0, index), id.slice(index + 1)];
+    return [id.slice(0, index), id.slice(index + this.separator.length)];
   }
   languageModel(id) {
     var _a17, _b;
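The registry previously sliced the model id with `index + 1`, which silently assumed a single-character separator; slicing by `this.separator.length` fixes ids split on multi-character separators. A sketch, assuming `createProviderRegistry` accepts a custom `separator` option as in recent releases:

    import { createProviderRegistry } from "ai";

    const registry = createProviderRegistry(
      { openai, anthropic }, // illustrative providers
      { separator: " > " }
    );

    // before the fix this would resolve the model id as "> gpt-4o";
    // now it correctly resolves "gpt-4o":
    const model = registry.languageModel("openai > gpt-4o");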
@@ -8981,7 +9039,7 @@ var MCPClient = class {
 };
 
 // core/util/cosine-similarity.ts
-function cosineSimilarity(vector1, vector2, options) {
+function cosineSimilarity(vector1, vector2) {
   if (vector1.length !== vector2.length) {
     throw new InvalidArgumentError({
       parameter: "vector1,vector2",
@@ -8991,13 +9049,6 @@ function cosineSimilarity(vector1, vector2, options) {
   }
   const n = vector1.length;
   if (n === 0) {
-    if (options == null ? void 0 : options.throwErrorForEmptyVectors) {
-      throw new InvalidArgumentError({
-        parameter: "vector1",
-        value: vector1,
-        message: "Vectors cannot be empty"
-      });
-    }
     return 0;
   }
   let magnitudeSquared1 = 0;
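With the `options` parameter gone, `cosineSimilarity` no longer supports `throwErrorForEmptyVectors`: empty vectors now always yield 0 instead of optionally throwing. A sketch of the two-argument call:

    import { cosineSimilarity } from "ai";

    cosineSimilarity([1, 0], [0, 1]); // 0 (orthogonal)
    cosineSimilarity([1, 0], [2, 0]); // 1 (same direction)
    cosineSimilarity([], []);         // 0 (no longer configurable to throw)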