ai 5.0.0-alpha.3 → 5.0.0-alpha.4

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -93,7 +93,6 @@ __export(src_exports, {
   isAssistantMessageWithCompletedToolCalls: () => isAssistantMessageWithCompletedToolCalls,
   isDeepEqualData: () => isDeepEqualData,
   jsonSchema: () => import_provider_utils26.jsonSchema,
-  maxSteps: () => maxSteps,
   modelMessageSchema: () => modelMessageSchema,
   parsePartialJson: () => parsePartialJson,
   pipeTextStreamToResponse: () => pipeTextStreamToResponse,
@@ -102,6 +101,7 @@ __export(src_exports, {
   simulateReadableStream: () => simulateReadableStream,
   simulateStreamingMiddleware: () => simulateStreamingMiddleware,
   smoothStream: () => smoothStream,
+  stepCountIs: () => stepCountIs,
   streamObject: () => streamObject,
   streamText: () => streamText,
   systemModelMessageSchema: () => systemModelMessageSchema,
@@ -1670,18 +1670,18 @@ var SerialJobExecutor = class {
 function shouldResubmitMessages({
   originalMaxToolInvocationStep,
   originalMessageCount,
-  maxSteps: maxSteps2,
+  maxSteps,
   messages
 }) {
   var _a17;
   const lastMessage = messages[messages.length - 1];
   return (
     // check if the feature is enabled:
-    maxSteps2 > 1 && // ensure there is a last message:
+    maxSteps > 1 && // ensure there is a last message:
     lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
     (messages.length > originalMessageCount || extractMaxToolInvocationStep(getToolInvocations(lastMessage)) !== originalMaxToolInvocationStep) && // check that next step is possible:
     isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
-    ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps2
+    ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps
   );
 }
 function isAssistantMessageWithCompletedToolCalls(message) {
@@ -1721,7 +1721,7 @@ var ChatStore = class {
     chats = {},
     generateId: generateId3,
     transport,
-    maxSteps: maxSteps2 = 1,
+    maxSteps = 1,
     messageMetadataSchema,
     dataPartSchemas
   }) {
@@ -1737,7 +1737,7 @@ var ChatStore = class {
         }
       ])
     );
-    this.maxSteps = maxSteps2;
+    this.maxSteps = maxSteps;
     this.transport = transport;
     this.subscribers = /* @__PURE__ */ new Set();
     this.generateId = generateId3 != null ? generateId3 : import_provider_utils5.generateId;
@@ -2326,7 +2326,7 @@ function defaultChatStore({
   generateId: generateId3 = import_provider_utils6.generateId,
   dataPartSchemas,
   messageMetadataSchema,
-  maxSteps: maxSteps2 = 1,
+  maxSteps = 1,
   chats
 }) {
   return new ChatStore({
@@ -2341,7 +2341,7 @@ function defaultChatStore({
     generateId: generateId3,
     messageMetadataSchema,
     dataPartSchemas,
-    maxSteps: maxSteps2,
+    maxSteps,
     chats
   });
 }
@@ -5465,6 +5465,11 @@ var DefaultSpeechResult = class {
 // core/generate-text/generate-text.ts
 var import_provider_utils19 = require("@ai-sdk/provider-utils");

+// src/util/as-array.ts
+function asArray(value) {
+  return value === void 0 ? [] : Array.isArray(value) ? value : [value];
+}
+
 // core/prompt/prepare-tools-and-tool-choice.ts
 var import_provider_utils17 = require("@ai-sdk/provider-utils");

@@ -5682,8 +5687,8 @@ var DefaultStepResult = class {
 };

 // core/generate-text/stop-condition.ts
-function maxSteps(maxSteps2) {
-  return ({ steps }) => steps.length >= maxSteps2;
+function stepCountIs(stepCount) {
+  return ({ steps }) => steps.length === stepCount;
 }
 function hasToolCall(toolName) {
   return ({ steps }) => {
@@ -5693,6 +5698,12 @@ function hasToolCall(toolName) {
     )) != null ? _c : false;
   };
 }
+async function isStopConditionMet({
+  stopConditions,
+  steps
+}) {
+  return (await Promise.all(stopConditions.map((condition) => condition({ steps })))).some((result) => result);
+}

 // core/generate-text/to-response-messages.ts
 function toResponseMessages({
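
Note: the exported maxSteps(n) helper (stop once steps.length >= n) is replaced by stepCountIs(n), which stops when steps.length === n, and stop conditions can now be combined: the new isStopConditionMet helper evaluates every condition and stops as soon as any one returns true. A minimal sketch of that semantics, using only the helpers added in this diff (the steps array is a placeholder for real step results; stepCountIs only reads steps.length, and hasToolCall null-guards missing content):

  // combine two stop conditions from this diff:
  const stopConditions = [stepCountIs(3), hasToolCall("finalAnswer")];
  const steps = [{}, {}, {}]; // placeholder step results
  // mirrors isStopConditionMet: evaluate all conditions, stop if any hit
  const shouldStop = (
    await Promise.all(stopConditions.map((condition) => condition({ steps })))
  ).some((result) => result);
  // shouldStop === true here, because the step count reached 3
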
@@ -5767,12 +5778,14 @@ async function generateText({
   maxRetries: maxRetriesArg,
   abortSignal,
   headers,
-  continueUntil = maxSteps(1),
+  stopWhen = stepCountIs(1),
   experimental_output: output,
   experimental_telemetry: telemetry,
   providerOptions,
-  experimental_activeTools: activeTools,
-  experimental_prepareStep: prepareStep,
+  experimental_activeTools,
+  activeTools = experimental_activeTools,
+  experimental_prepareStep,
+  prepareStep = experimental_prepareStep,
   experimental_repairToolCall: repairToolCall,
   _internal: {
     generateId: generateId3 = originalGenerateId3,
@@ -5781,6 +5794,7 @@ async function generateText({
   onStepFinish,
   ...settings
 }) {
+  const stopConditions = asArray(stopWhen);
   const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
   const callSettings = prepareCallSettings(settings);
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
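
Note: continueUntil is renamed to stopWhen (default stepCountIs(1)), and experimental_activeTools / experimental_prepareStep gain stable names, with the experimental_ spellings kept as fallbacks. Because stopWhen is normalized through asArray, it accepts a single condition or an array. A hedged usage sketch; the @ai-sdk/openai import, model id, and tool name are illustrative assumptions, not part of this diff:

  import { generateText, stepCountIs, hasToolCall } from "ai";
  import { openai } from "@ai-sdk/openai"; // assumed provider package

  const result = await generateText({
    model: openai("gpt-4o"),
    tools: { /* tool definitions */ },
    prompt: "Answer the question, then call the finalAnswer tool.",
    // stop after 5 steps or once the finalAnswer tool has been called:
    stopWhen: [stepCountIs(5), hasToolCall("finalAnswer")],
  });
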
@@ -5844,7 +5858,7 @@ async function generateText({
       const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
         tools,
         toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
-        activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _c : activeTools
+        activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _c : activeTools
       });
       currentModelResponse = await retry(
         () => {
@@ -5989,8 +6003,8 @@ async function generateText({
     } while (
       // there are tool calls:
       currentToolCalls.length > 0 && // all current tool calls have results:
-      currentToolResults.length === currentToolCalls.length && // continue until the stop condition is met:
-      !await continueUntil({ steps })
+      currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
+      !await isStopConditionMet({ stopConditions, steps })
     );
     span.setAttributes(
       selectTelemetryAttributes({
@@ -6336,11 +6350,6 @@ function smoothStream({
 // core/generate-text/stream-text.ts
 var import_provider_utils23 = require("@ai-sdk/provider-utils");

-// src/util/as-array.ts
-function asArray(value) {
-  return value === void 0 ? [] : Array.isArray(value) ? value : [value];
-}
-
 // core/generate-text/run-tools-transformation.ts
 var import_provider_utils22 = require("@ai-sdk/provider-utils");
 function runToolsTransformation({
@@ -6551,13 +6560,15 @@ function streamText({
   maxRetries,
   abortSignal,
   headers,
-  continueUntil = maxSteps(1),
+  stopWhen = stepCountIs(1),
   experimental_output: output,
   experimental_telemetry: telemetry,
+  prepareStep,
   providerOptions,
   experimental_toolCallStreaming = false,
   toolCallStreaming = experimental_toolCallStreaming,
-  experimental_activeTools: activeTools,
+  experimental_activeTools,
+  activeTools = experimental_activeTools,
   experimental_repairToolCall: repairToolCall,
   experimental_transform: transform,
   onChunk,
@@ -6587,9 +6598,10 @@ function streamText({
     transforms: asArray(transform),
     activeTools,
     repairToolCall,
-    continueUntil,
+    stopConditions: asArray(stopWhen),
    output,
    providerOptions,
+    prepareStep,
     onChunk,
     onError,
     onFinish,
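
Note: streamText now accepts the same stopWhen and prepareStep options as generateText and forwards them (with stopWhen normalized via asArray) into DefaultStreamTextResult. A minimal usage sketch, with the provider import and model id again assumed:

  import { streamText, stepCountIs } from "ai";
  import { openai } from "@ai-sdk/openai"; // assumed provider package

  const result = streamText({
    model: openai("gpt-4o"),
    tools: { /* tool definitions */ },
    prompt: "Research the topic, then summarize it.",
    stopWhen: stepCountIs(10), // a single condition works too; asArray wraps it
  });
  for await (const textPart of result.textStream) {
    process.stdout.write(textPart);
  }
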
@@ -6664,9 +6676,10 @@ var DefaultStreamTextResult = class {
     transforms,
     activeTools,
     repairToolCall,
-    continueUntil,
+    stopConditions,
     output,
     providerOptions,
+    prepareStep,
     now: now2,
     currentDate,
     generateId: generateId3,
@@ -6885,6 +6898,7 @@ var DefaultStreamTextResult = class {
       responseMessages,
       usage
     }) {
+      var _a17, _b, _c;
       stepFinish = new DelayedPromise();
       const initialPrompt = await standardizePrompt({
         system,
@@ -6895,6 +6909,11 @@ var DefaultStreamTextResult = class {
         ...initialPrompt.messages,
         ...responseMessages
       ];
+      const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+        model,
+        steps: recordedSteps,
+        stepNumber: recordedSteps.length
+      }));
       const promptMessages = await convertToLanguageModelPrompt({
         prompt: {
           system: initialPrompt.system,
@@ -6902,9 +6921,12 @@ var DefaultStreamTextResult = class {
         },
         supportedUrls: await model.supportedUrls
       });
-      const toolsAndToolChoice = {
-        ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
-      };
+      const stepModel = (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model;
+      const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+        tools,
+        toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
+        activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _c : activeTools
+      });
       const {
         result: { stream: stream2, response, request },
         doStreamSpan,
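
Note: prepareStep is now honored in the streaming path as well. It is called before each step with { model, steps, stepNumber }, and its result can override the model, toolChoice, and activeTools for that step (the overridden model also feeds the doStream call and the telemetry attributes below). A sketch of a per-step callback, based only on the fields this diff reads from its result; the "required" tool-choice value and the tool name are illustrative assumptions:

  const prepareStep = async ({ model, steps, stepNumber }) => {
    if (stepNumber === 0) {
      // first step: force tool use and restrict the active tool set
      return { toolChoice: "required", activeTools: ["search"] };
    }
    return {}; // later steps fall back to the call-level settings
  };
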
@@ -6920,24 +6942,23 @@ var DefaultStreamTextResult = class {
             telemetry
           }),
           ...baseTelemetryAttributes,
+          // model:
+          "ai.model.provider": stepModel.provider,
+          "ai.model.id": stepModel.modelId,
+          // prompt:
           "ai.prompt.messages": {
             input: () => JSON.stringify(promptMessages)
           },
           "ai.prompt.tools": {
             // convert the language model level tools:
-            input: () => {
-              var _a17;
-              return (_a17 = toolsAndToolChoice.tools) == null ? void 0 : _a17.map(
-                (tool2) => JSON.stringify(tool2)
-              );
-            }
+            input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
           },
           "ai.prompt.toolChoice": {
-            input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
+            input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
           },
           // standardized gen-ai llm span attributes:
-          "gen_ai.system": model.provider,
-          "gen_ai.request.model": model.modelId,
+          "gen_ai.system": stepModel.provider,
+          "gen_ai.request.model": stepModel.modelId,
           "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
           "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
           "gen_ai.request.presence_penalty": callSettings.presencePenalty,
@@ -6954,9 +6975,10 @@ var DefaultStreamTextResult = class {
         startTimestampMs: now2(),
         // get before the call
         doStreamSpan: doStreamSpan2,
-        result: await model.doStream({
+        result: await stepModel.doStream({
           ...callSettings,
-          ...toolsAndToolChoice,
+          tools: stepTools,
+          toolChoice: stepToolChoice,
           responseFormat: output == null ? void 0 : output.responseFormat,
           prompt: promptMessages,
           providerOptions,
@@ -7009,7 +7031,7 @@ var DefaultStreamTextResult = class {
       streamWithToolResults.pipeThrough(
         new TransformStream({
           async transform(chunk, controller) {
-            var _a17, _b, _c, _d;
+            var _a18, _b2, _c2, _d;
             if (chunk.type === "stream-start") {
               warnings = chunk.warnings;
               return;
@@ -7072,9 +7094,9 @@ var DefaultStreamTextResult = class {
             }
             case "response-metadata": {
              stepResponse = {
-                id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
-                timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
-                modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
+                id: (_a18 = chunk.id) != null ? _a18 : stepResponse.id,
+                timestamp: (_b2 = chunk.timestamp) != null ? _b2 : stepResponse.timestamp,
+                modelId: (_c2 = chunk.modelId) != null ? _c2 : stepResponse.modelId
              };
              break;
            }
@@ -7163,7 +7185,11 @@ var DefaultStreamTextResult = class {
            const combinedUsage = addLanguageModelUsage(usage, stepUsage);
            await stepFinish.promise;
            if (stepToolCalls.length > 0 && // all current tool calls have results:
-           stepToolResults.length === stepToolCalls.length && !await continueUntil({ steps: recordedSteps })) {
+           stepToolResults.length === stepToolCalls.length && // continue until a stop condition is met:
+           !await isStopConditionMet({
+             stopConditions,
+             steps: recordedSteps
+           })) {
              responseMessages.push(
                ...toResponseMessages({
                  content: stepContent,
@@ -8656,7 +8682,6 @@ var DefaultTranscriptionResult = class {
   isAssistantMessageWithCompletedToolCalls,
   isDeepEqualData,
   jsonSchema,
-  maxSteps,
   modelMessageSchema,
   parsePartialJson,
   pipeTextStreamToResponse,
@@ -8665,6 +8690,7 @@ var DefaultTranscriptionResult = class {
   simulateReadableStream,
   simulateStreamingMiddleware,
   smoothStream,
+  stepCountIs,
   streamObject,
   streamText,
   systemModelMessageSchema,