ai 5.0.0-alpha.2 → 5.0.0-alpha.4

This diff compares the published contents of two package versions as they appear in their public registries. It is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -47,6 +47,7 @@ __export(src_exports, {
  NoSuchToolError: () => NoSuchToolError,
  Output: () => output_exports,
  RetryError: () => RetryError,
+ TextStreamChatTransport: () => TextStreamChatTransport,
  ToolCallRepairError: () => ToolCallRepairError,
  ToolExecutionError: () => ToolExecutionError,
  TypeValidationError: () => import_provider16.TypeValidationError,
@@ -92,7 +93,6 @@ __export(src_exports, {
  isAssistantMessageWithCompletedToolCalls: () => isAssistantMessageWithCompletedToolCalls,
  isDeepEqualData: () => isDeepEqualData,
  jsonSchema: () => import_provider_utils26.jsonSchema,
- maxSteps: () => maxSteps,
  modelMessageSchema: () => modelMessageSchema,
  parsePartialJson: () => parsePartialJson,
  pipeTextStreamToResponse: () => pipeTextStreamToResponse,
@@ -101,6 +101,7 @@ __export(src_exports, {
  simulateReadableStream: () => simulateReadableStream,
  simulateStreamingMiddleware: () => simulateStreamingMiddleware,
  smoothStream: () => smoothStream,
+ stepCountIs: () => stepCountIs,
  streamObject: () => streamObject,
  streamText: () => streamText,
  systemModelMessageSchema: () => systemModelMessageSchema,
@@ -591,9 +592,8 @@ var uiMessageStreamPartSchema = import_zod.z.union([
  providerMetadata: import_zod.z.record(import_zod.z.any()).optional()
  }),
  import_zod.z.object({
- type: import_zod.z.literal("source"),
- sourceType: import_zod.z.literal("url"),
- id: import_zod.z.string(),
+ type: import_zod.z.literal("source-url"),
+ sourceId: import_zod.z.string(),
  url: import_zod.z.string(),
  title: import_zod.z.string().optional(),
  providerMetadata: import_zod.z.any().optional()
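
Note: per the schema change above, the `source` stream part is renamed to `source-url` and flattened: `sourceType` disappears and the identifier becomes a top-level `sourceId`. A part matching the updated schema would look like this (field values are illustrative):

    {
      type: "source-url",          // was type: "source" with sourceType: "url"
      sourceId: "src-1",           // was id
      url: "https://example.com",
      title: "Example source",     // optional
      providerMetadata: {}         // optional
    }
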
@@ -1147,16 +1147,13 @@ function processUIMessageStream({
  write();
  break;
  }
- case "source": {
+ case "source-url": {
  state.message.parts.push({
- type: "source",
- source: {
- sourceType: "url",
- id: part.id,
- url: part.url,
- title: part.title,
- providerMetadata: part.providerMetadata
- }
+ type: "source-url",
+ sourceId: part.sourceId,
+ url: part.url,
+ title: part.title,
+ providerMetadata: part.providerMetadata
  });
  write();
  break;
@@ -1339,7 +1336,6 @@ var getOriginalFetch = () => fetch;
  async function fetchUIMessageStream({
  api,
  body,
- streamProtocol = "ui-message",
  credentials,
  headers,
  abortController,
@@ -1373,9 +1369,7 @@ async function fetchUIMessageStream({
  if (!response.body) {
  throw new Error("The response body is empty.");
  }
- return streamProtocol === "text" ? transformTextToUiMessageStream({
- stream: response.body.pipeThrough(new TextDecoderStream())
- }) : (0, import_provider_utils3.parseJsonEventStream)({
+ return (0, import_provider_utils3.parseJsonEventStream)({
  stream: response.body,
  schema: uiMessageStreamPartSchema
  }).pipeThrough(
@@ -1389,6 +1383,46 @@ async function fetchUIMessageStream({
  })
  );
  }
+ async function fetchTextStream({
+ api,
+ body,
+ credentials,
+ headers,
+ abortController,
+ fetch: fetch2 = getOriginalFetch(),
+ requestType = "generate"
+ }) {
+ var _a17, _b, _c;
+ const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
+ method: "GET",
+ headers: {
+ "Content-Type": "application/json",
+ ...headers
+ },
+ signal: (_a17 = abortController == null ? void 0 : abortController()) == null ? void 0 : _a17.signal,
+ credentials
+ }) : await fetch2(api, {
+ method: "POST",
+ body: JSON.stringify(body),
+ headers: {
+ "Content-Type": "application/json",
+ ...headers
+ },
+ signal: (_b = abortController == null ? void 0 : abortController()) == null ? void 0 : _b.signal,
+ credentials
+ });
+ if (!response.ok) {
+ throw new Error(
+ (_c = await response.text()) != null ? _c : "Failed to fetch the chat response."
+ );
+ }
+ if (!response.body) {
+ throw new Error("The response body is empty.");
+ }
+ return transformTextToUiMessageStream({
+ stream: response.body.pipeThrough(new TextDecoderStream())
+ });
+ }
  async function consumeUIMessageStream({
  stream,
  onUpdate,
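
Note: the new `fetchTextStream` takes over the text protocol that `fetchUIMessageStream` previously handled inline: it POSTs the body for `generate` requests, issues a GET to `${api}?chatId=...` for `resume` requests, and pipes the plain-text response through `transformTextToUiMessageStream`. A server for this protocol only needs to stream raw text; a minimal sketch of such a handler (the route-handler shape is an assumption, not part of this package):

    // Hypothetical endpoint: streams plain-text chunks that the client
    // assembles into text UI message parts via transformTextToUiMessageStream.
    export async function POST(request) {
      const encoder = new TextEncoder();
      const stream = new ReadableStream({
        start(controller) {
          controller.enqueue(encoder.encode("Hello, "));
          controller.enqueue(encoder.encode("world!"));
          controller.close();
        }
      });
      return new Response(stream, {
        headers: { "Content-Type": "text/plain; charset=utf-8" }
      });
    }
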
@@ -1439,10 +1473,17 @@ async function callChatApi({
  requestType = "generate",
  messageMetadataSchema
  }) {
- const stream = await fetchUIMessageStream({
+ const stream = streamProtocol === "text" ? await fetchTextStream({
+ api,
+ body,
+ credentials,
+ headers,
+ abortController,
+ fetch: fetch2,
+ requestType
+ }) : await fetchUIMessageStream({
  api,
  body,
- streamProtocol,
  credentials,
  headers,
  abortController,
@@ -1629,18 +1670,18 @@ var SerialJobExecutor = class {
  function shouldResubmitMessages({
  originalMaxToolInvocationStep,
  originalMessageCount,
- maxSteps: maxSteps2,
+ maxSteps,
  messages
  }) {
  var _a17;
  const lastMessage = messages[messages.length - 1];
  return (
  // check if the feature is enabled:
- maxSteps2 > 1 && // ensure there is a last message:
+ maxSteps > 1 && // ensure there is a last message:
  lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
  (messages.length > originalMessageCount || extractMaxToolInvocationStep(getToolInvocations(lastMessage)) !== originalMaxToolInvocationStep) && // check that next step is possible:
  isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
- ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps2
+ ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps
  );
  }
  function isAssistantMessageWithCompletedToolCalls(message) {
@@ -1680,7 +1721,7 @@ var ChatStore = class {
  chats = {},
  generateId: generateId3,
  transport,
- maxSteps: maxSteps2 = 1,
+ maxSteps = 1,
  messageMetadataSchema,
  dataPartSchemas
  }) {
@@ -1696,7 +1737,7 @@ var ChatStore = class {
  }
  ])
  );
- this.maxSteps = maxSteps2;
+ this.maxSteps = maxSteps;
  this.transport = transport;
  this.subscribers = /* @__PURE__ */ new Set();
  this.generateId = generateId3 != null ? generateId3 : import_provider_utils5.generateId;
@@ -1994,7 +2035,6 @@ var DefaultChatTransport = class {
  credentials,
  headers,
  body,
- streamProtocol,
  fetch: fetch2,
  prepareRequestBody
  }) {
@@ -2002,7 +2042,6 @@ var DefaultChatTransport = class {
  this.credentials = credentials;
  this.headers = headers;
  this.body = body;
- this.streamProtocol = streamProtocol;
  this.fetch = fetch2;
  this.prepareRequestBody = prepareRequestBody;
  }
@@ -2032,7 +2071,55 @@ var DefaultChatTransport = class {
  ...this.body,
  ...body
  },
- streamProtocol: this.streamProtocol,
+ credentials: this.credentials,
+ abortController: () => abortController,
+ fetch: this.fetch,
+ requestType
+ });
+ }
+ };
+ var TextStreamChatTransport = class {
+ constructor({
+ api,
+ credentials,
+ headers,
+ body,
+ fetch: fetch2,
+ prepareRequestBody
+ }) {
+ this.api = api;
+ this.credentials = credentials;
+ this.headers = headers;
+ this.body = body;
+ this.fetch = fetch2;
+ this.prepareRequestBody = prepareRequestBody;
+ }
+ submitMessages({
+ chatId,
+ messages,
+ abortController,
+ body,
+ headers,
+ requestType
+ }) {
+ var _a17, _b;
+ return fetchTextStream({
+ api: this.api,
+ headers: {
+ ...this.headers,
+ ...headers
+ },
+ body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
+ chatId,
+ messages,
+ ...this.body,
+ ...body
+ })) != null ? _b : {
+ chatId,
+ messages,
+ ...this.body,
+ ...body
+ },
  credentials: this.credentials,
  abortController: () => abortController,
  fetch: this.fetch,
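
Note: `streamProtocol` is removed from `DefaultChatTransport`; choosing the text protocol now means choosing a different transport class. A usage sketch of the new `TextStreamChatTransport` export, based on the constructor options above (option values are illustrative):

    const transport = new TextStreamChatTransport({
      api: "/api/chat",
      headers: { Authorization: "Bearer <token>" },
      // optional: take full control of the request body
      prepareRequestBody: ({ chatId, messages }) => ({ chatId, messages })
    });
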
@@ -2232,7 +2319,6 @@ var import_provider_utils6 = require("@ai-sdk/provider-utils");
  function defaultChatStore({
  api,
  fetch: fetch2,
- streamProtocol = "ui-message",
  credentials,
  headers,
  body,
@@ -2240,14 +2326,13 @@ function defaultChatStore({
  generateId: generateId3 = import_provider_utils6.generateId,
  dataPartSchemas,
  messageMetadataSchema,
- maxSteps: maxSteps2 = 1,
+ maxSteps = 1,
  chats
  }) {
  return new ChatStore({
  transport: new DefaultChatTransport({
  api,
  fetch: fetch2,
- streamProtocol,
  credentials,
  headers,
  body,
@@ -2256,7 +2341,7 @@ function defaultChatStore({
  generateId: generateId3,
  messageMetadataSchema,
  dataPartSchemas,
- maxSteps: maxSteps2,
+ maxSteps,
  chats
  });
  }
@@ -4745,11 +4830,11 @@ var DelayedPromise = class {
  this._resolve = void 0;
  this._reject = void 0;
  }
- get value() {
- if (this.promise) {
- return this.promise;
+ get promise() {
+ if (this._promise) {
+ return this._promise;
  }
- this.promise = new Promise((resolve, reject) => {
+ this._promise = new Promise((resolve, reject) => {
  if (this.status.type === "resolved") {
  resolve(this.status.value);
  } else if (this.status.type === "rejected") {
@@ -4758,19 +4843,19 @@ var DelayedPromise = class {
  this._resolve = resolve;
  this._reject = reject;
  });
- return this.promise;
+ return this._promise;
  }
  resolve(value) {
  var _a17;
  this.status = { type: "resolved", value };
- if (this.promise) {
+ if (this._promise) {
  (_a17 = this._resolve) == null ? void 0 : _a17.call(this, value);
  }
  }
  reject(error) {
  var _a17;
  this.status = { type: "rejected", error };
- if (this.promise) {
+ if (this._promise) {
  (_a17 = this._reject) == null ? void 0 : _a17.call(this, error);
  }
  }
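
Note: `DelayedPromise` renames its public getter from `value` to `promise` and moves the lazily created promise to a private `_promise` field, so the getter no longer shadows its own backing store. Based on the class above, the settle-before-subscribe behavior is preserved; a minimal sketch:

    const delayed = new DelayedPromise();
    delayed.resolve(42);                   // settle before anyone subscribes
    const result = await delayed.promise;  // 42 — the getter is now `promise`, not `value`
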
@@ -4865,12 +4950,12 @@ var DefaultStreamObjectResult = class {
  currentDate,
  now: now2
  }) {
- this.objectPromise = new DelayedPromise();
- this.usagePromise = new DelayedPromise();
- this.providerMetadataPromise = new DelayedPromise();
- this.warningsPromise = new DelayedPromise();
- this.requestPromise = new DelayedPromise();
- this.responsePromise = new DelayedPromise();
+ this._object = new DelayedPromise();
+ this._usage = new DelayedPromise();
+ this._providerMetadata = new DelayedPromise();
+ this._warnings = new DelayedPromise();
+ this._request = new DelayedPromise();
+ this._response = new DelayedPromise();
  const { maxRetries, retry } = prepareRetries({
  maxRetries: maxRetriesArg
  });
@@ -4989,7 +5074,7 @@ var DefaultStreamObjectResult = class {
  })
  })
  );
- self.requestPromise.resolve(request != null ? request : {});
+ self._request.resolve(request != null ? request : {});
  let warnings;
  let usage = {
  inputTokens: void 0,
@@ -5082,9 +5167,9 @@ var DefaultStreamObjectResult = class {
  usage,
  response: fullResponse
  });
- self.usagePromise.resolve(usage);
- self.providerMetadataPromise.resolve(providerMetadata);
- self.responsePromise.resolve({
+ self._usage.resolve(usage);
+ self._providerMetadata.resolve(providerMetadata);
+ self._response.resolve({
  ...fullResponse,
  headers: response == null ? void 0 : response.headers
  });
@@ -5098,7 +5183,7 @@ var DefaultStreamObjectResult = class {
  );
  if (validationResult.success) {
  object2 = validationResult.value;
- self.objectPromise.resolve(object2);
+ self._object.resolve(object2);
  } else {
  error = new NoObjectGeneratedError({
  message: "No object generated: response did not match schema.",
@@ -5108,7 +5193,7 @@ var DefaultStreamObjectResult = class {
  usage,
  finishReason
  });
- self.objectPromise.reject(error);
+ self._object.reject(error);
  }
  break;
  }
@@ -5203,22 +5288,22 @@ var DefaultStreamObjectResult = class {
  this.outputStrategy = outputStrategy;
  }
  get object() {
- return this.objectPromise.value;
+ return this._object.promise;
  }
  get usage() {
- return this.usagePromise.value;
+ return this._usage.promise;
  }
  get providerMetadata() {
- return this.providerMetadataPromise.value;
+ return this._providerMetadata.promise;
  }
  get warnings() {
- return this.warningsPromise.value;
+ return this._warnings.promise;
  }
  get request() {
- return this.requestPromise.value;
+ return this._request.promise;
  }
  get response() {
- return this.responsePromise.value;
+ return this._response.promise;
  }
  get partialObjectStream() {
  return createAsyncIterableStream(
@@ -5380,6 +5465,11 @@ var DefaultSpeechResult = class {
  // core/generate-text/generate-text.ts
  var import_provider_utils19 = require("@ai-sdk/provider-utils");

+ // src/util/as-array.ts
+ function asArray(value) {
+ return value === void 0 ? [] : Array.isArray(value) ? value : [value];
+ }
+
  // core/prompt/prepare-tools-and-tool-choice.ts
  var import_provider_utils17 = require("@ai-sdk/provider-utils");

@@ -5597,8 +5687,8 @@ var DefaultStepResult = class {
  };

  // core/generate-text/stop-condition.ts
- function maxSteps(maxSteps2) {
- return ({ steps }) => steps.length >= maxSteps2;
+ function stepCountIs(stepCount) {
+ return ({ steps }) => steps.length === stepCount;
  }
  function hasToolCall(toolName) {
  return ({ steps }) => {
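
Note: `maxSteps(n)` (which stopped once `steps.length >= n`) is replaced by `stepCountIs(n)`, which matches an exact step count. A stop condition is just a predicate over the accumulated steps, so custom conditions follow the same shape; a sketch:

    const stopAtFive = stepCountIs(5);                       // built-in: exactly five steps
    const stopWhenLong = ({ steps }) => steps.length >= 8;   // custom condition, same shape
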
@@ -5608,6 +5698,12 @@ function hasToolCall(toolName) {
  )) != null ? _c : false;
  };
  }
+ async function isStopConditionMet({
+ stopConditions,
+ steps
+ }) {
+ return (await Promise.all(stopConditions.map((condition) => condition({ steps })))).some((result) => result);
+ }

  // core/generate-text/to-response-messages.ts
  function toResponseMessages({
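
Note: `isStopConditionMet` evaluates all conditions in parallel and returns true as soon as any one of them holds, so `stopWhen` accepts either a single condition or an array treated as a logical OR. A sketch (`model`, `tools`, and the tool name are placeholders):

    await generateText({
      model,
      tools,
      stopWhen: [stepCountIs(10), hasToolCall("finalAnswer")],
      prompt: "..."
    });
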
@@ -5682,12 +5778,14 @@ async function generateText({
  maxRetries: maxRetriesArg,
  abortSignal,
  headers,
- continueUntil = maxSteps(1),
+ stopWhen = stepCountIs(1),
  experimental_output: output,
  experimental_telemetry: telemetry,
  providerOptions,
- experimental_activeTools: activeTools,
- experimental_prepareStep: prepareStep,
+ experimental_activeTools,
+ activeTools = experimental_activeTools,
+ experimental_prepareStep,
+ prepareStep = experimental_prepareStep,
  experimental_repairToolCall: repairToolCall,
  _internal: {
  generateId: generateId3 = originalGenerateId3,
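
Note: `experimental_activeTools` and `experimental_prepareStep` gain stable aliases; the un-prefixed names win when both are provided, and `continueUntil` becomes `stopWhen`. A sketch of the renamed options (values are illustrative; the `prepareStep` arguments mirror the call site visible in the streamText changes below):

    await generateText({
      model,
      tools,
      stopWhen: stepCountIs(3),            // formerly continueUntil = maxSteps(1)
      activeTools: ["search"],             // formerly experimental_activeTools
      prepareStep: ({ stepNumber }) =>     // formerly experimental_prepareStep
        stepNumber === 0 ? { toolChoice: "required" } : void 0,
      prompt: "..."
    });
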
@@ -5696,6 +5794,7 @@ async function generateText({
  onStepFinish,
  ...settings
  }) {
+ const stopConditions = asArray(stopWhen);
  const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
  const callSettings = prepareCallSettings(settings);
  const baseTelemetryAttributes = getBaseTelemetryAttributes({
@@ -5759,7 +5858,7 @@ async function generateText({
  const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
  tools,
  toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
- activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _c : activeTools
+ activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _c : activeTools
  });
  currentModelResponse = await retry(
  () => {
@@ -5904,8 +6003,8 @@ async function generateText({
  } while (
  // there are tool calls:
  currentToolCalls.length > 0 && // all current tool calls have results:
- currentToolResults.length === currentToolCalls.length && // continue until the stop condition is met:
- !await continueUntil({ steps })
+ currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
+ !await isStopConditionMet({ stopConditions, steps })
  );
  span.setAttributes(
  selectTelemetryAttributes({
@@ -6251,11 +6350,6 @@ function smoothStream({
  // core/generate-text/stream-text.ts
  var import_provider_utils23 = require("@ai-sdk/provider-utils");

- // src/util/as-array.ts
- function asArray(value) {
- return value === void 0 ? [] : Array.isArray(value) ? value : [value];
- }
-
  // core/generate-text/run-tools-transformation.ts
  var import_provider_utils22 = require("@ai-sdk/provider-utils");
  function runToolsTransformation({
@@ -6466,13 +6560,15 @@ function streamText({
  maxRetries,
  abortSignal,
  headers,
- maxSteps: maxSteps2 = 1,
+ stopWhen = stepCountIs(1),
  experimental_output: output,
  experimental_telemetry: telemetry,
+ prepareStep,
  providerOptions,
  experimental_toolCallStreaming = false,
  toolCallStreaming = experimental_toolCallStreaming,
- experimental_activeTools: activeTools,
+ experimental_activeTools,
+ activeTools = experimental_activeTools,
  experimental_repairToolCall: repairToolCall,
  experimental_transform: transform,
  onChunk,
@@ -6502,9 +6598,10 @@ function streamText({
  transforms: asArray(transform),
  activeTools,
  repairToolCall,
- maxSteps: maxSteps2,
+ stopConditions: asArray(stopWhen),
  output,
  providerOptions,
+ prepareStep,
  onChunk,
  onError,
  onFinish,
@@ -6579,9 +6676,10 @@ var DefaultStreamTextResult = class {
  transforms,
  activeTools,
  repairToolCall,
- maxSteps: maxSteps2,
+ stopConditions,
  output,
  providerOptions,
+ prepareStep,
  now: now2,
  currentDate,
  generateId: generateId3,
@@ -6590,18 +6688,12 @@ var DefaultStreamTextResult = class {
  onFinish,
  onStepFinish
  }) {
- this.totalUsagePromise = new DelayedPromise();
- this.finishReasonPromise = new DelayedPromise();
- this.stepsPromise = new DelayedPromise();
- if (maxSteps2 < 1) {
- throw new InvalidArgumentError({
- parameter: "maxSteps",
- value: maxSteps2,
- message: "maxSteps must be at least 1"
- });
- }
+ this._totalUsage = new DelayedPromise();
+ this._finishReason = new DelayedPromise();
+ this._steps = new DelayedPromise();
  this.output = output;
  this.generateId = generateId3;
+ let stepFinish;
  let activeReasoningPart = void 0;
  let recordedContent = [];
  const recordedResponseMessages = [];
@@ -6683,6 +6775,7 @@ var DefaultStreamTextResult = class {
  recordedContent = [];
  activeReasoningPart = void 0;
  recordedResponseMessages.push(...stepMessages);
+ stepFinish.resolve();
  }
  if (part.type === "finish") {
  recordedTotalUsage = part.totalUsage;
@@ -6700,9 +6793,9 @@ var DefaultStreamTextResult = class {
  outputTokens: void 0,
  totalTokens: void 0
  };
- self.finishReasonPromise.resolve(finishReason);
- self.totalUsagePromise.resolve(totalUsage);
- self.stepsPromise.resolve(recordedSteps);
+ self._finishReason.resolve(finishReason);
+ self._totalUsage.resolve(totalUsage);
+ self._steps.resolve(recordedSteps);
  const finalStep = recordedSteps[recordedSteps.length - 1];
  await (onFinish == null ? void 0 : onFinish({
  finishReason,
@@ -6793,8 +6886,7 @@ var DefaultStreamTextResult = class {
  // specific settings that only make sense on the outer level:
  "ai.prompt": {
  input: () => JSON.stringify({ system, prompt, messages })
- },
- "ai.settings.maxSteps": maxSteps2
+ }
  }
  }),
  tracer,
@@ -6806,6 +6898,8 @@ var DefaultStreamTextResult = class {
  responseMessages,
  usage
  }) {
+ var _a17, _b, _c;
+ stepFinish = new DelayedPromise();
  const initialPrompt = await standardizePrompt({
  system,
  prompt,
@@ -6815,6 +6909,11 @@ var DefaultStreamTextResult = class {
  ...initialPrompt.messages,
  ...responseMessages
  ];
+ const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+ model,
+ steps: recordedSteps,
+ stepNumber: recordedSteps.length
+ }));
  const promptMessages = await convertToLanguageModelPrompt({
  prompt: {
  system: initialPrompt.system,
@@ -6822,9 +6921,12 @@ var DefaultStreamTextResult = class {
  },
  supportedUrls: await model.supportedUrls
  });
- const toolsAndToolChoice = {
- ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
- };
+ const stepModel = (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model;
+ const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+ tools,
+ toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
+ activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _c : activeTools
+ });
  const {
  result: { stream: stream2, response, request },
  doStreamSpan,
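
Note: `streamText` now consults `prepareStep` before each step; a returned `model`, `toolChoice`, or `activeTools` overrides the call-level setting for that step only, and the telemetry attributes below record the per-step model. A sketch of a callback matching the fields read here (`fallbackModel` is a placeholder assumed to be defined elsewhere):

    const prepareStep = async ({ model, steps, stepNumber }) => {
      if (stepNumber > 0) {
        return {
          model: fallbackModel,    // swap to a different model after step 0
          toolChoice: "auto",
          activeTools: ["lookup"]  // restrict the tool set for later steps
        };
      }
      // returning undefined keeps the call-level settings
    };
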
@@ -6840,24 +6942,23 @@ var DefaultStreamTextResult = class {
  telemetry
  }),
  ...baseTelemetryAttributes,
+ // model:
+ "ai.model.provider": stepModel.provider,
+ "ai.model.id": stepModel.modelId,
+ // prompt:
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
  },
  "ai.prompt.tools": {
  // convert the language model level tools:
- input: () => {
- var _a17;
- return (_a17 = toolsAndToolChoice.tools) == null ? void 0 : _a17.map(
- (tool2) => JSON.stringify(tool2)
- );
- }
+ input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
  },
  "ai.prompt.toolChoice": {
- input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
+ input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
  },
  // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
+ "gen_ai.system": stepModel.provider,
+ "gen_ai.request.model": stepModel.modelId,
  "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
  "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
  "gen_ai.request.presence_penalty": callSettings.presencePenalty,
@@ -6874,9 +6975,10 @@ var DefaultStreamTextResult = class {
  startTimestampMs: now2(),
  // get before the call
  doStreamSpan: doStreamSpan2,
- result: await model.doStream({
+ result: await stepModel.doStream({
  ...callSettings,
- ...toolsAndToolChoice,
+ tools: stepTools,
+ toolChoice: stepToolChoice,
  responseFormat: output == null ? void 0 : output.responseFormat,
  prompt: promptMessages,
  providerOptions,
@@ -6887,7 +6989,7 @@ var DefaultStreamTextResult = class {
  }
  })
  );
- const transformedStream = runToolsTransformation({
+ const streamWithToolResults = runToolsTransformation({
  tools,
  generatorStream: stream2,
  toolCallStreaming,
@@ -6926,10 +7028,10 @@ var DefaultStreamTextResult = class {
  stepText += chunk.text;
  }
  self.addStream(
- transformedStream.pipeThrough(
+ streamWithToolResults.pipeThrough(
  new TransformStream({
  async transform(chunk, controller) {
- var _a17, _b, _c, _d;
+ var _a18, _b2, _c2, _d;
  if (chunk.type === "stream-start") {
  warnings = chunk.warnings;
  return;
@@ -6992,9 +7094,9 @@ var DefaultStreamTextResult = class {
  }
  case "response-metadata": {
  stepResponse = {
- id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
- timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
- modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
+ id: (_a18 = chunk.id) != null ? _a18 : stepResponse.id,
+ timestamp: (_b2 = chunk.timestamp) != null ? _b2 : stepResponse.timestamp,
+ modelId: (_c2 = chunk.modelId) != null ? _c2 : stepResponse.modelId
  };
  break;
  }
@@ -7081,9 +7183,13 @@ var DefaultStreamTextResult = class {
  }
  });
  const combinedUsage = addLanguageModelUsage(usage, stepUsage);
- if (currentStep + 1 < maxSteps2 && // there are tool calls:
- stepToolCalls.length > 0 && // all current tool calls have results:
- stepToolResults.length === stepToolCalls.length) {
+ await stepFinish.promise;
+ if (stepToolCalls.length > 0 && // all current tool calls have results:
+ stepToolResults.length === stepToolCalls.length && // continue until a stop condition is met:
+ !await isStopConditionMet({
+ stopConditions,
+ steps: recordedSteps
+ })) {
  responseMessages.push(
  ...toResponseMessages({
  content: stepContent,
@@ -7131,7 +7237,7 @@ var DefaultStreamTextResult = class {
  });
  }
  get steps() {
- return this.stepsPromise.value;
+ return this._steps.promise;
  }
  get finalStep() {
  return this.steps.then((steps) => steps[steps.length - 1]);
@@ -7176,10 +7282,10 @@ var DefaultStreamTextResult = class {
  return this.finalStep.then((step) => step.response);
  }
  get totalUsage() {
- return this.totalUsagePromise.value;
+ return this._totalUsage.promise;
  }
  get finishReason() {
- return this.finishReasonPromise.value;
+ return this._finishReason.promise;
  }
  /**
  Split out a new stream from the original stream.
@@ -7299,9 +7405,8 @@ var DefaultStreamTextResult = class {
  case "source": {
  if (sendSources) {
  controller.enqueue({
- type: "source",
- sourceType: part.sourceType,
- id: part.id,
+ type: "source-url",
+ sourceId: part.id,
  url: part.url,
  title: part.title,
  providerMetadata: part.providerMetadata
@@ -8531,6 +8636,7 @@ var DefaultTranscriptionResult = class {
  NoSuchToolError,
  Output,
  RetryError,
+ TextStreamChatTransport,
  ToolCallRepairError,
  ToolExecutionError,
  TypeValidationError,
@@ -8576,7 +8682,6 @@ var DefaultTranscriptionResult = class {
  isAssistantMessageWithCompletedToolCalls,
  isDeepEqualData,
  jsonSchema,
- maxSteps,
  modelMessageSchema,
  parsePartialJson,
  pipeTextStreamToResponse,
@@ -8585,6 +8690,7 @@ var DefaultTranscriptionResult = class {
  simulateReadableStream,
  simulateStreamingMiddleware,
  smoothStream,
+ stepCountIs,
  streamObject,
  streamText,
  systemModelMessageSchema,