ai 5.0.0-canary.7 → 5.0.0-canary.8

This diff represents the content of publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in that registry.
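Reading the hunks below together, the main API changes in this canary are: `embed()` and `embedMany()` gain a `providerOptions` argument that is forwarded to `model.doEmbed()`, and their results expose `response`/`responses` in place of the removed `rawResponse`; stream parts are renamed from `text-delta` (payload `textDelta`) to `text` (payload `text`), with the separate `reasoning-signature` and `redacted-reasoning` parts folded into a single `reasoning` part discriminated by `reasoningType`; and `cosineSimilarity()` drops its `options` parameter. A minimal sketch of the new embedding surface, not taken from official docs: it assumes the `EmbeddingModel` type is still exported from `ai` and that the returned `response` object optionally carries `headers`, as the hunks suggest; `embedOne` is a hypothetical helper.

import { embed, type EmbeddingModel } from 'ai';

// Hypothetical helper illustrating the canary.8 surface; `model` would come
// from any provider package's embedding model factory.
async function embedOne(model: EmbeddingModel<string>, value: string) {
  const { embedding, usage, response } = await embed({
    model,
    value,
    providerOptions: {}, // new: forwarded through to model.doEmbed()
  });
  // `response` replaces the removed `rawResponse` field on the result.
  return { embedding, tokens: usage.tokens, headers: response?.headers };
}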
package/dist/index.js CHANGED
@@ -2107,6 +2107,7 @@ function selectTelemetryAttributes({
 async function embed({
   model,
   value,
+  providerOptions,
   maxRetries: maxRetriesArg,
   abortSignal,
   headers,
@@ -2132,7 +2133,7 @@ async function embed({
     }),
     tracer,
     fn: async (span) => {
-      const { embedding, usage, rawResponse } = await retry(
+      const { embedding, usage, response } = await retry(
         () => (
           // nested spans to align with the embedMany telemetry data:
           recordSpan({
@@ -2155,7 +2156,8 @@ async function embed({
               const modelResponse = await model.doEmbed({
                 values: [value],
                 abortSignal,
-                headers
+                headers,
+                providerOptions
               });
               const embedding2 = modelResponse.embeddings[0];
               const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2175,7 +2177,7 @@ async function embed({
               return {
                 embedding: embedding2,
                 usage: usage2,
-                rawResponse: modelResponse.rawResponse
+                response: modelResponse.response
               };
             }
           })
@@ -2190,7 +2192,12 @@ async function embed({
           }
         })
       );
-      return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
+      return new DefaultEmbedResult({
+        value,
+        embedding,
+        usage,
+        response
+      });
     }
   });
 }
@@ -2199,7 +2206,7 @@ var DefaultEmbedResult = class {
     this.value = options.value;
     this.embedding = options.embedding;
     this.usage = options.usage;
-    this.rawResponse = options.rawResponse;
+    this.response = options.response;
   }
 };
 
@@ -2222,6 +2229,7 @@ async function embedMany({
   maxRetries: maxRetriesArg,
   abortSignal,
   headers,
+  providerOptions,
   experimental_telemetry: telemetry
 }) {
   const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
@@ -2249,7 +2257,7 @@ async function embedMany({
     fn: async (span) => {
       const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
       if (maxEmbeddingsPerCall == null) {
-        const { embeddings: embeddings2, usage } = await retry(() => {
+        const { embeddings: embeddings2, usage, response } = await retry(() => {
           return recordSpan({
             name: "ai.embedMany.doEmbed",
             attributes: selectTelemetryAttributes({
@@ -2272,7 +2280,8 @@ async function embedMany({
               const modelResponse = await model.doEmbed({
                 values,
                 abortSignal,
-                headers
+                headers,
+                providerOptions
               });
               const embeddings3 = modelResponse.embeddings;
               const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2287,7 +2296,11 @@ async function embedMany({
                 }
               })
             );
-              return { embeddings: embeddings3, usage: usage2 };
+              return {
+                embeddings: embeddings3,
+                usage: usage2,
+                response: modelResponse.response
+              };
             }
           });
         });
@@ -2302,13 +2315,23 @@ async function embedMany({
             }
           })
         );
-        return new DefaultEmbedManyResult({ values, embeddings: embeddings2, usage });
+        return new DefaultEmbedManyResult({
+          values,
+          embeddings: embeddings2,
+          usage,
+          responses: [response]
+        });
       }
       const valueChunks = splitArray(values, maxEmbeddingsPerCall);
       const embeddings = [];
+      const responses = [];
       let tokens = 0;
       for (const chunk of valueChunks) {
-        const { embeddings: responseEmbeddings, usage } = await retry(() => {
+        const {
+          embeddings: responseEmbeddings,
+          usage,
+          response
+        } = await retry(() => {
           return recordSpan({
             name: "ai.embedMany.doEmbed",
             attributes: selectTelemetryAttributes({
@@ -2331,7 +2354,8 @@ async function embedMany({
               const modelResponse = await model.doEmbed({
                 values: chunk,
                 abortSignal,
-                headers
+                headers,
+                providerOptions
               });
               const embeddings2 = modelResponse.embeddings;
               const usage2 = (_a17 = modelResponse.usage) != null ? _a17 : { tokens: NaN };
@@ -2346,11 +2370,16 @@ async function embedMany({
                 }
               })
             );
-              return { embeddings: embeddings2, usage: usage2 };
+              return {
+                embeddings: embeddings2,
+                usage: usage2,
+                response: modelResponse.response
+              };
             }
           });
         });
         embeddings.push(...responseEmbeddings);
+        responses.push(response);
         tokens += usage.tokens;
       }
       span.setAttributes(
@@ -2367,7 +2396,8 @@ async function embedMany({
       return new DefaultEmbedManyResult({
         values,
         embeddings,
-        usage: { tokens }
+        usage: { tokens },
+        responses
       });
     }
   });
@@ -2377,6 +2407,7 @@ var DefaultEmbedManyResult = class {
     this.values = options.values;
     this.embeddings = options.embeddings;
     this.usage = options.usage;
+    this.responses = options.responses;
   }
 };
 
@@ -4235,7 +4266,7 @@ async function generateObject({
     }),
     tracer,
     fn: async (span) => {
-      var _a17, _b, _c, _d;
+      var _a17, _b, _c, _d, _e;
       if (mode === "auto" || mode == null) {
         mode = model.defaultObjectGenerationMode;
       }
@@ -4297,7 +4328,7 @@ async function generateObject({
         }),
         tracer,
         fn: async (span2) => {
-          var _a18, _b2, _c2, _d2, _e, _f, _g, _h;
+          var _a18, _b2, _c2, _d2, _e2, _f, _g, _h;
           const result2 = await model.doGenerate({
             responseFormat: {
               type: "json",
@@ -4315,7 +4346,7 @@ async function generateObject({
           const responseData = {
             id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
             timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-            modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+            modelId: (_f = (_e2 = result2.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId,
             headers: (_g = result2.response) == null ? void 0 : _g.headers,
             body: (_h = result2.response) == null ? void 0 : _h.body
           };
@@ -4332,7 +4363,10 @@ async function generateObject({
              telemetry,
              attributes: {
                "ai.response.finishReason": result2.finishReason,
-                "ai.response.object": { output: () => result2.text },
+                "ai.response.object": { output: () => {
+                  var _a19;
+                  return (_a19 = result2.text) == null ? void 0 : _a19.text;
+                } },
                "ai.response.id": responseData.id,
                "ai.response.model": responseData.modelId,
                "ai.response.timestamp": responseData.timestamp.toISOString(),
@@ -4352,13 +4386,13 @@ async function generateObject({
            }
          })
        );
-        result = generateResult.objectText;
+        result = (_b = generateResult.objectText) == null ? void 0 : _b.text;
        finishReason = generateResult.finishReason;
        usage = generateResult.usage;
        warnings = generateResult.warnings;
        logprobs = generateResult.logprobs;
        resultProviderMetadata = generateResult.providerMetadata;
-        request = (_b = generateResult.request) != null ? _b : {};
+        request = (_c = generateResult.request) != null ? _c : {};
        response = generateResult.responseData;
        break;
      }
@@ -4370,7 +4404,7 @@ async function generateObject({
       const promptMessages = await convertToLanguageModelPrompt({
         prompt: standardizedPrompt,
         modelSupportsImageUrls: model.supportsImageUrls,
-        modelSupportsUrl: (_c = model.supportsUrl) == null ? void 0 : _c.bind(model)
+        modelSupportsUrl: (_d = model.supportsUrl) == null ? void 0 : _d.bind(model)
         // support 'this' context,
       });
       const inputFormat = standardizedPrompt.type;
@@ -4405,7 +4439,7 @@ async function generateObject({
         }),
         tracer,
         fn: async (span2) => {
-          var _a18, _b2, _c2, _d2, _e, _f, _g, _h, _i, _j;
+          var _a18, _b2, _c2, _d2, _e2, _f, _g, _h, _i, _j;
           const result2 = await model.doGenerate({
             tools: [
               {
@@ -4426,7 +4460,7 @@ async function generateObject({
           const objectText = (_b2 = (_a18 = result2.toolCalls) == null ? void 0 : _a18[0]) == null ? void 0 : _b2.args;
           const responseData = {
             id: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.id) != null ? _d2 : generateId3(),
-            timestamp: (_f = (_e = result2.response) == null ? void 0 : _e.timestamp) != null ? _f : currentDate(),
+            timestamp: (_f = (_e2 = result2.response) == null ? void 0 : _e2.timestamp) != null ? _f : currentDate(),
             modelId: (_h = (_g = result2.response) == null ? void 0 : _g.modelId) != null ? _h : model.modelId,
             headers: (_i = result2.response) == null ? void 0 : _i.headers,
             body: (_j = result2.response) == null ? void 0 : _j.body
@@ -4470,7 +4504,7 @@ async function generateObject({
        warnings = generateResult.warnings;
        logprobs = generateResult.logprobs;
        resultProviderMetadata = generateResult.providerMetadata;
-        request = (_d = generateResult.request) != null ? _d : {};
+        request = (_e = generateResult.request) != null ? _e : {};
        response = generateResult.responseData;
        break;
      }
@@ -4899,8 +4933,8 @@ var DefaultStreamObjectResult = class {
     transformer = {
       transform: (chunk, controller) => {
         switch (chunk.type) {
-          case "text-delta":
-            controller.enqueue(chunk.textDelta);
+          case "text":
+            controller.enqueue(chunk.text);
             break;
           case "response-metadata":
           case "finish":
@@ -5569,23 +5603,36 @@ function toResponseMessages({
   generateMessageId
 }) {
   const responseMessages = [];
-  responseMessages.push({
-    role: "assistant",
-    content: [
+  const content = [];
+  if (reasoning.length > 0) {
+    content.push(
       ...reasoning.map(
         (part) => part.type === "text" ? { ...part, type: "reasoning" } : { ...part, type: "redacted-reasoning" }
-      ),
-      // TODO language model v2: switch to order response content (instead of type-based ordering)
+      )
+    );
+  }
+  if (files.length > 0) {
+    content.push(
       ...files.map((file) => ({
         type: "file",
         data: file.base64,
         mediaType: file.mediaType
-      })),
-      { type: "text", text: text2 },
-      ...toolCalls
-    ],
-    id: messageId
-  });
+      }))
+    );
+  }
+  if (text2.length > 0) {
+    content.push({ type: "text", text: text2 });
+  }
+  if (toolCalls.length > 0) {
+    content.push(...toolCalls);
+  }
+  if (content.length > 0) {
+    responseMessages.push({
+      role: "assistant",
+      content,
+      id: messageId
+    });
+  }
   if (toolResults.length > 0) {
     responseMessages.push({
       role: "tool",
@@ -5689,7 +5736,7 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a18, _b, _c, _d, _e, _f, _g;
+      var _a18, _b, _c, _d, _e, _f, _g, _h;
       const toolsAndToolChoice = {
         ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
       };
@@ -5764,7 +5811,7 @@ async function generateText({
           }),
           tracer,
           fn: async (span2) => {
-            var _a19, _b2, _c2, _d2, _e2, _f2, _g2, _h;
+            var _a19, _b2, _c2, _d2, _e2, _f2, _g2, _h2;
             const result = await model.doGenerate({
               ...callSettings,
               ...toolsAndToolChoice,
@@ -5780,7 +5827,7 @@ async function generateText({
               timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
               modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId,
               headers: (_g2 = result.response) == null ? void 0 : _g2.headers,
-              body: (_h = result.response) == null ? void 0 : _h.body
+              body: (_h2 = result.response) == null ? void 0 : _h2.body
             };
             span2.setAttributes(
               selectTelemetryAttributes({
@@ -5788,10 +5835,23 @@ async function generateText({
                attributes: {
                  "ai.response.finishReason": result.finishReason,
                  "ai.response.text": {
-                    output: () => result.text
+                    output: () => {
+                      var _a20;
+                      return (_a20 = result.text) == null ? void 0 : _a20.text;
+                    }
                  },
                  "ai.response.toolCalls": {
-                    output: () => JSON.stringify(result.toolCalls)
+                    output: () => {
+                      var _a20;
+                      return JSON.stringify(
+                        (_a20 = result.toolCalls) == null ? void 0 : _a20.map((toolCall) => ({
+                          toolCallType: toolCall.toolCallType,
+                          toolCallId: toolCall.toolCallId,
+                          toolName: toolCall.toolName,
+                          args: toolCall.args
+                        }))
+                      );
+                    }
                  },
                  "ai.response.id": responseData.id,
                  "ai.response.model": responseData.modelId,
@@ -5848,7 +5908,7 @@ async function generateText({
            nextStepType = "tool-result";
          }
        }
-        const originalText = (_c = currentModelResponse.text) != null ? _c : "";
+        const originalText = (_d = (_c = currentModelResponse.text) == null ? void 0 : _c.text) != null ? _d : "";
        const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
        text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
        const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -5856,7 +5916,7 @@ async function generateText({
        currentReasoningDetails = asReasoningDetails(
          currentModelResponse.reasoning
        );
-        sources.push(...(_d = currentModelResponse.sources) != null ? _d : []);
+        sources.push(...(_e = currentModelResponse.sources) != null ? _e : []);
        if (stepType === "continue") {
          const lastMessage = responseMessages[responseMessages.length - 1];
          if (typeof lastMessage.content === "string") {
@@ -5888,14 +5948,14 @@ async function generateText({
          reasoning: asReasoningText(currentReasoningDetails),
          reasoningDetails: currentReasoningDetails,
          files: asFiles(currentModelResponse.files),
-          sources: (_e = currentModelResponse.sources) != null ? _e : [],
+          sources: (_f = currentModelResponse.sources) != null ? _f : [],
          toolCalls: currentToolCalls,
          toolResults: currentToolResults,
          finishReason: currentModelResponse.finishReason,
          usage: currentUsage,
          warnings: currentModelResponse.warnings,
          logprobs: currentModelResponse.logprobs,
-          request: (_f = currentModelResponse.request) != null ? _f : {},
+          request: (_g = currentModelResponse.request) != null ? _g : {},
          response: {
            ...currentModelResponse.response,
            // deep clone msgs to avoid mutating past messages in multi-step:
@@ -5914,10 +5974,23 @@ async function generateText({
            attributes: {
              "ai.response.finishReason": currentModelResponse.finishReason,
              "ai.response.text": {
-                output: () => currentModelResponse.text
+                output: () => {
+                  var _a19;
+                  return (_a19 = currentModelResponse.text) == null ? void 0 : _a19.text;
+                }
              },
              "ai.response.toolCalls": {
-                output: () => JSON.stringify(currentModelResponse.toolCalls)
+                output: () => {
+                  var _a19;
+                  return JSON.stringify(
+                    (_a19 = currentModelResponse.toolCalls) == null ? void 0 : _a19.map((toolCall) => ({
+                      toolCallType: toolCall.toolCallType,
+                      toolCallId: toolCall.toolCallId,
+                      toolName: toolCall.toolName,
+                      args: toolCall.args
+                    }))
+                  );
+                }
              },
              // TODO rename telemetry attributes to inputTokens and outputTokens
              "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
@@ -5949,7 +6022,7 @@ async function generateText({
        finishReason: currentModelResponse.finishReason,
        usage,
        warnings: currentModelResponse.warnings,
-        request: (_g = currentModelResponse.request) != null ? _g : {},
+        request: (_h = currentModelResponse.request) != null ? _h : {},
        response: {
          ...currentModelResponse.response,
          messages: responseMessages
@@ -6063,10 +6136,28 @@ function asReasoningDetails(reasoning) {
   if (reasoning == null) {
     return [];
   }
-  if (typeof reasoning === "string") {
-    return [{ type: "text", text: reasoning }];
+  const result = [];
+  let activeReasoningText;
+  for (const part of reasoning) {
+    if (part.reasoningType === "text") {
+      if (activeReasoningText == null) {
+        activeReasoningText = { type: "text", text: part.text };
+        result.push(activeReasoningText);
+      } else {
+        activeReasoningText.text += part.text;
+      }
+    } else if (part.reasoningType === "signature") {
+      if (activeReasoningText == null) {
+        activeReasoningText = { type: "text", text: "" };
+        result.push(activeReasoningText);
+      }
+      activeReasoningText.signature = part.signature;
+      activeReasoningText = void 0;
+    } else if (part.reasoningType === "redacted") {
+      result.push({ type: "redacted", data: part.data });
+    }
   }
-  return reasoning;
+  return result;
 }
 function asFiles(files) {
   var _a17;
@@ -6254,18 +6345,18 @@ function smoothStream({
    let buffer = "";
    return new TransformStream({
      async transform(chunk, controller) {
-        if (chunk.type !== "text-delta") {
+        if (chunk.type !== "text") {
          if (buffer.length > 0) {
-            controller.enqueue({ type: "text-delta", textDelta: buffer });
+            controller.enqueue({ type: "text", text: buffer });
            buffer = "";
          }
          controller.enqueue(chunk);
          return;
        }
-        buffer += chunk.textDelta;
+        buffer += chunk.text;
        let match;
        while ((match = detectChunk(buffer)) != null) {
-          controller.enqueue({ type: "text-delta", textDelta: match });
+          controller.enqueue({ type: "text", text: match });
          buffer = buffer.slice(match.length);
          await delay2(delayInMs);
        }
@@ -6424,10 +6515,8 @@ function runToolsTransformation({
    async transform(chunk, controller) {
      const chunkType = chunk.type;
      switch (chunkType) {
-        case "text-delta":
+        case "text":
        case "reasoning":
-        case "reasoning-signature":
-        case "redacted-reasoning":
        case "source":
        case "response-metadata":
        case "error": {
@@ -6435,12 +6524,13 @@ function runToolsTransformation({
          break;
        }
        case "file": {
-          controller.enqueue(
-            new DefaultGeneratedFileWithType({
+          controller.enqueue({
+            type: "file",
+            file: new DefaultGeneratedFileWithType({
              data: chunk.data,
              mediaType: chunk.mediaType
            })
-          );
+          });
          break;
        }
        case "tool-call-delta": {
@@ -6679,7 +6769,7 @@ function createOutputTransformStream(output) {
    partialOutput = void 0
  }) {
    controller.enqueue({
-      part: { type: "text-delta", textDelta: textChunk },
+      part: { type: "text", text: textChunk },
      partialOutput
    });
    textChunk = "";
@@ -6689,12 +6779,12 @@ function createOutputTransformStream(output) {
      if (chunk.type === "step-finish") {
        publishTextChunk({ controller });
      }
-      if (chunk.type !== "text-delta") {
+      if (chunk.type !== "text") {
        controller.enqueue({ part: chunk, partialOutput: void 0 });
        return;
      }
-      text2 += chunk.textDelta;
-      textChunk += chunk.textDelta;
+      text2 += chunk.text;
+      textChunk += chunk.text;
      const result = output.parsePartial({ text: text2 });
      if (result != null) {
        const currentJson = JSON.stringify(result.partial);
@@ -6789,44 +6879,44 @@ var DefaultStreamTextResult = class {
      async transform(chunk, controller) {
        controller.enqueue(chunk);
        const { part } = chunk;
-        if (part.type === "text-delta" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
+        if (part.type === "text" || part.type === "reasoning" || part.type === "source" || part.type === "tool-call" || part.type === "tool-result" || part.type === "tool-call-streaming-start" || part.type === "tool-call-delta") {
          await (onChunk == null ? void 0 : onChunk({ chunk: part }));
        }
        if (part.type === "error") {
          await (onError == null ? void 0 : onError({ error: part.error }));
        }
-        if (part.type === "text-delta") {
-          recordedStepText += part.textDelta;
-          recordedContinuationText += part.textDelta;
-          recordedFullText += part.textDelta;
+        if (part.type === "text") {
+          recordedStepText += part.text;
+          recordedContinuationText += part.text;
+          recordedFullText += part.text;
        }
        if (part.type === "reasoning") {
-          if (activeReasoningText == null) {
-            activeReasoningText = { type: "text", text: part.textDelta };
-            stepReasoning.push(activeReasoningText);
-          } else {
-            activeReasoningText.text += part.textDelta;
-          }
-        }
-        if (part.type === "reasoning-signature") {
-          if (activeReasoningText == null) {
-            throw new import_provider23.AISDKError({
-              name: "InvalidStreamPart",
-              message: "reasoning-signature without reasoning"
-            });
+          if (part.reasoningType === "text") {
+            if (activeReasoningText == null) {
+              activeReasoningText = { type: "text", text: part.text };
+              stepReasoning.push(activeReasoningText);
+            } else {
+              activeReasoningText.text += part.text;
+            }
+          } else if (part.reasoningType === "signature") {
+            if (activeReasoningText == null) {
+              throw new import_provider23.AISDKError({
+                name: "InvalidStreamPart",
+                message: "reasoning-signature without reasoning"
+              });
+            }
+            activeReasoningText.signature = part.signature;
+            activeReasoningText = void 0;
+          } else if (part.reasoningType === "redacted") {
+            stepReasoning.push({ type: "redacted", data: part.data });
          }
-          activeReasoningText.signature = part.signature;
-          activeReasoningText = void 0;
-        }
-        if (part.type === "redacted-reasoning") {
-          stepReasoning.push({ type: "redacted", data: part.data });
        }
        if (part.type === "file") {
-          stepFiles.push(part);
+          stepFiles.push(part.file);
        }
        if (part.type === "source") {
-          recordedSources.push(part.source);
-          recordedStepSources.push(part.source);
+          recordedSources.push(part);
+          recordedStepSources.push(part);
        }
        if (part.type === "tool-call") {
          recordedToolCalls.push(part);
@@ -7160,10 +7250,10 @@ var DefaultStreamTextResult = class {
        chunk
      }) {
        controller.enqueue(chunk);
-        stepText += chunk.textDelta;
-        fullStepText += chunk.textDelta;
+        stepText += chunk.text;
+        fullStepText += chunk.text;
        chunkTextPublished = true;
-        hasWhitespaceSuffix = chunk.textDelta.trimEnd() !== chunk.textDelta;
+        hasWhitespaceSuffix = chunk.text.trimEnd() !== chunk.text;
      }
      self.addStream(
        transformedStream.pipeThrough(
@@ -7186,14 +7276,14 @@ var DefaultStreamTextResult = class {
            warnings: warnings != null ? warnings : []
          });
        }
-        if (chunk.type === "text-delta" && chunk.textDelta.length === 0) {
+        if (chunk.type === "text" && chunk.text.length === 0) {
          return;
        }
        const chunkType = chunk.type;
        switch (chunkType) {
-          case "text-delta": {
+          case "text": {
            if (continueSteps) {
-              const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.textDelta.trimStart() : chunk.textDelta;
+              const trimmedChunkText = inWhitespacePrefix && hasLeadingWhitespace ? chunk.text.trimStart() : chunk.text;
              if (trimmedChunkText.length === 0) {
                break;
              }
@@ -7205,8 +7295,8 @@ var DefaultStreamTextResult = class {
              await publishTextChunk({
                controller,
                chunk: {
-                  type: "text-delta",
-                  textDelta: split.prefix + split.whitespace
+                  type: "text",
+                  text: split.prefix + split.whitespace
                }
              });
            }
@@ -7217,35 +7307,31 @@ var DefaultStreamTextResult = class {
          }
          case "reasoning": {
            controller.enqueue(chunk);
-            if (activeReasoningText2 == null) {
-              activeReasoningText2 = {
-                type: "text",
-                text: chunk.textDelta
-              };
-              stepReasoning2.push(activeReasoningText2);
-            } else {
-              activeReasoningText2.text += chunk.textDelta;
-            }
-            break;
-          }
-          case "reasoning-signature": {
-            controller.enqueue(chunk);
-            if (activeReasoningText2 == null) {
-              throw new InvalidStreamPartError({
-                chunk,
-                message: "reasoning-signature without reasoning"
+            if (chunk.reasoningType === "text") {
+              if (activeReasoningText2 == null) {
+                activeReasoningText2 = {
+                  type: "text",
+                  text: chunk.text
+                };
+                stepReasoning2.push(activeReasoningText2);
+              } else {
+                activeReasoningText2.text += chunk.text;
+              }
+            } else if (chunk.reasoningType === "signature") {
+              if (activeReasoningText2 == null) {
+                throw new InvalidStreamPartError({
+                  chunk,
+                  message: "reasoning-signature without reasoning"
+                });
+              }
+              activeReasoningText2.signature = chunk.signature;
+              activeReasoningText2 = void 0;
+            } else if (chunk.reasoningType === "redacted") {
+              stepReasoning2.push({
+                type: "redacted",
+                data: chunk.data
              });
            }
-            activeReasoningText2.signature = chunk.signature;
-            activeReasoningText2 = void 0;
-            break;
-          }
-          case "redacted-reasoning": {
-            controller.enqueue(chunk);
-            stepReasoning2.push({
-              type: "redacted",
-              data: chunk.data
-            });
            break;
          }
          case "tool-call": {
@@ -7280,7 +7366,7 @@ var DefaultStreamTextResult = class {
            break;
          }
          case "file": {
-            stepFiles2.push(chunk);
+            stepFiles2.push(chunk.file);
            controller.enqueue(chunk);
            break;
          }
@@ -7321,10 +7407,7 @@ var DefaultStreamTextResult = class {
          stepType2 === "continue" && !chunkTextPublished)) {
            await publishTextChunk({
              controller,
-              chunk: {
-                type: "text-delta",
-                textDelta: chunkBuffer
-              }
+              chunk: { type: "text", text: chunkBuffer }
            });
            chunkBuffer = "";
          }
@@ -7514,8 +7597,8 @@ var DefaultStreamTextResult = class {
    this.teeStream().pipeThrough(
      new TransformStream({
        transform({ part }, controller) {
-          if (part.type === "text-delta") {
-            controller.enqueue(part.textDelta);
+          if (part.type === "text") {
+            controller.enqueue(part.text);
          }
        }
      })
@@ -7573,52 +7656,45 @@ var DefaultStreamTextResult = class {
        transform: async (chunk, controller) => {
          const chunkType = chunk.type;
          switch (chunkType) {
-            case "text-delta": {
-              controller.enqueue(formatDataStreamPart("text", chunk.textDelta));
+            case "text": {
+              controller.enqueue(formatDataStreamPart("text", chunk.text));
              break;
            }
            case "reasoning": {
              if (sendReasoning) {
-                controller.enqueue(
-                  formatDataStreamPart("reasoning", chunk.textDelta)
-                );
-              }
-              break;
-            }
-            case "redacted-reasoning": {
-              if (sendReasoning) {
-                controller.enqueue(
-                  formatDataStreamPart("redacted_reasoning", {
-                    data: chunk.data
-                  })
-                );
-              }
-              break;
-            }
-            case "reasoning-signature": {
-              if (sendReasoning) {
-                controller.enqueue(
-                  formatDataStreamPart("reasoning_signature", {
-                    signature: chunk.signature
-                  })
-                );
+                if (chunk.reasoningType === "text") {
+                  controller.enqueue(
+                    formatDataStreamPart("reasoning", chunk.text)
+                  );
+                } else if (chunk.reasoningType === "signature") {
+                  controller.enqueue(
+                    formatDataStreamPart("reasoning_signature", {
+                      signature: chunk.signature
+                    })
+                  );
+                } else if (chunk.reasoningType === "redacted") {
+                  controller.enqueue(
+                    formatDataStreamPart("redacted_reasoning", {
+                      data: chunk.data
+                    })
+                  );
+                }
              }
              break;
            }
            case "file": {
              controller.enqueue(
+                // TODO update protocol to v2 or replace with event stream
                formatDataStreamPart("file", {
-                  mimeType: chunk.mediaType,
-                  data: chunk.base64
+                  mimeType: chunk.file.mediaType,
+                  data: chunk.file.base64
                })
              );
              break;
            }
            case "source": {
              if (sendSources) {
-                controller.enqueue(
-                  formatDataStreamPart("source", chunk.source)
-                );
+                controller.enqueue(formatDataStreamPart("source", chunk));
              }
              break;
            }
@@ -7963,15 +8039,15 @@ function extractReasoningMiddleware({
    wrapGenerate: async ({ doGenerate }) => {
      const { text: rawText, ...rest } = await doGenerate();
      if (rawText == null) {
-        return { text: rawText, ...rest };
+        return { text: void 0, ...rest };
      }
-      const text2 = startWithReasoning ? openingTag + rawText : rawText;
+      const text2 = startWithReasoning ? openingTag + rawText.text : rawText.text;
      const regexp = new RegExp(`${openingTag}(.*?)${closingTag}`, "gs");
      const matches = Array.from(text2.matchAll(regexp));
      if (!matches.length) {
-        return { text: text2, ...rest };
+        return { text: { type: "text", text: text2 }, ...rest };
      }
-      const reasoning = matches.map((match) => match[1]).join(separator);
+      const reasoningText = matches.map((match) => match[1]).join(separator);
      let textWithoutReasoning = text2;
      for (let i = matches.length - 1; i >= 0; i--) {
        const match = matches[i];
@@ -7981,7 +8057,17 @@ function extractReasoningMiddleware({
        );
        textWithoutReasoning = beforeMatch + (beforeMatch.length > 0 && afterMatch.length > 0 ? separator : "") + afterMatch;
      }
-      return { ...rest, text: textWithoutReasoning, reasoning };
+      return {
+        ...rest,
+        text: { type: "text", text: textWithoutReasoning },
+        reasoning: reasoningText.length > 0 ? [
+          {
+            type: "reasoning",
+            reasoningType: "text",
+            text: reasoningText
+          }
+        ] : void 0
+      };
    },
    wrapStream: async ({ doStream }) => {
      const { stream, ...rest } = await doStream();
@@ -7994,18 +8080,24 @@ function extractReasoningMiddleware({
      stream: stream.pipeThrough(
        new TransformStream({
          transform: (chunk, controller) => {
-            if (chunk.type !== "text-delta") {
+            if (chunk.type !== "text") {
              controller.enqueue(chunk);
              return;
            }
-            buffer += chunk.textDelta;
+            buffer += chunk.text;
            function publish(text2) {
              if (text2.length > 0) {
                const prefix = afterSwitch && (isReasoning ? !isFirstReasoning : !isFirstText) ? separator : "";
-                controller.enqueue({
-                  type: isReasoning ? "reasoning" : "text-delta",
-                  textDelta: prefix + text2
-                });
+                controller.enqueue(
+                  isReasoning ? {
+                    type: "reasoning",
+                    reasoningType: "text",
+                    text: prefix + text2
+                  } : {
+                    type: "text",
+                    text: prefix + text2
+                  }
+                );
                afterSwitch = false;
                if (isReasoning) {
                  isFirstReasoning = false;
@@ -8052,43 +8144,12 @@ function simulateStreamingMiddleware() {
        start(controller) {
          controller.enqueue({ type: "response-metadata", ...result.response });
          if (result.reasoning) {
-            if (typeof result.reasoning === "string") {
-              controller.enqueue({
-                type: "reasoning",
-                textDelta: result.reasoning
-              });
-            } else {
-              for (const reasoning of result.reasoning) {
-                switch (reasoning.type) {
-                  case "text": {
-                    controller.enqueue({
-                      type: "reasoning",
-                      textDelta: reasoning.text
-                    });
-                    if (reasoning.signature != null) {
-                      controller.enqueue({
-                        type: "reasoning-signature",
-                        signature: reasoning.signature
-                      });
-                    }
-                    break;
-                  }
-                  case "redacted": {
-                    controller.enqueue({
-                      type: "redacted-reasoning",
-                      data: reasoning.data
-                    });
-                    break;
-                  }
-                }
-              }
+            for (const reasoningPart of result.reasoning) {
+              controller.enqueue(reasoningPart);
            }
          }
          if (result.text) {
-            controller.enqueue({
-              type: "text-delta",
-              textDelta: result.text
-            });
+            controller.enqueue(result.text);
          }
          if (result.toolCalls) {
            for (const toolCall of result.toolCalls) {
@@ -8099,10 +8160,7 @@ function simulateStreamingMiddleware() {
                toolName: toolCall.toolName,
                argsTextDelta: toolCall.args
              });
-              controller.enqueue({
-                type: "tool-call",
-                ...toolCall
-              });
+              controller.enqueue(toolCall);
            }
          }
          controller.enqueue({
@@ -8477,7 +8535,7 @@ var DefaultProviderRegistry = class {
        message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId${this.separator}modelId")`
      });
    }
-    return [id.slice(0, index), id.slice(index + 1)];
+    return [id.slice(0, index), id.slice(index + this.separator.length)];
  }
  languageModel(id) {
    var _a17, _b;
@@ -9069,7 +9127,7 @@ var MCPClient = class {
 };
 
 // core/util/cosine-similarity.ts
-function cosineSimilarity(vector1, vector2, options) {
+function cosineSimilarity(vector1, vector2) {
   if (vector1.length !== vector2.length) {
     throw new InvalidArgumentError({
       parameter: "vector1,vector2",
@@ -9079,13 +9137,6 @@ function cosineSimilarity(vector1, vector2, options) {
   }
   const n = vector1.length;
   if (n === 0) {
-    if (options == null ? void 0 : options.throwErrorForEmptyVectors) {
-      throw new InvalidArgumentError({
-        parameter: "vector1",
-        value: vector1,
-        message: "Vectors cannot be empty"
-      });
-    }
     return 0;
   }
   let magnitudeSquared1 = 0;
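The stream-part hunks above rename `text-delta` parts to `text` (the payload field moves from `textDelta` to `text`) and collapse the former `reasoning`, `reasoning-signature`, and `redacted-reasoning` parts into a single `reasoning` part discriminated by `reasoningType`. A sketch of what consuming the full stream looks like after this rename; it assumes a `result` returned by `streamText` and part shapes exactly as constructed in the hunks above, not the published typings:

import { streamText } from 'ai';

// Sketch only: iterate the renamed stream parts from a streamText result.
async function printStream(result: ReturnType<typeof streamText>) {
  for await (const part of result.fullStream) {
    switch (part.type) {
      case 'text': // was "text-delta"
        process.stdout.write(part.text); // payload moved from textDelta to text
        break;
      case 'reasoning': // now covers text, signature, and redacted variants
        if (part.reasoningType === 'text') process.stdout.write(part.text);
        break;
    }
  }
}

Unrelated to streaming, the last three hunks also simplify two utilities: the provider-registry id split now respects multi-character separators (`id.slice(index + this.separator.length)`), and `cosineSimilarity(vector1, vector2)` loses its `options` bag, so empty vectors always yield `0` instead of optionally throwing via `throwErrorForEmptyVectors`.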