ai 6.0.21 → 6.0.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -96,6 +96,7 @@ __export(src_exports, {
   generateObject: () => generateObject,
   generateText: () => generateText,
   getStaticToolName: () => getStaticToolName,
+  getStepTimeoutMs: () => getStepTimeoutMs,
   getTextFromDataUrl: () => getTextFromDataUrl,
   getToolName: () => getToolName,
   getToolOrDynamicToolName: () => getToolOrDynamicToolName,
@@ -819,6 +820,12 @@ function getTotalTimeoutMs(timeout) {
   }
   return timeout.totalMs;
 }
+function getStepTimeoutMs(timeout) {
+  if (timeout == null || typeof timeout === "number") {
+    return void 0;
+  }
+  return timeout.stepMs;
+}
 
 // src/prompt/convert-to-language-model-prompt.ts
 var import_provider_utils6 = require("@ai-sdk/provider-utils");
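
The new `getStepTimeoutMs` mirrors the existing `getTotalTimeoutMs`: the `timeout` value may be absent, a bare number, or an object, and only the object form carries a per-step budget in `stepMs`. A minimal usage sketch, assuming the public `timeout` option of `generateText` has the shape these helpers read (the provider and model below are illustrative, not taken from this diff):

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai"; // illustrative provider choice

const { text } = await generateText({
  model: openai("gpt-4o"),
  prompt: "Summarize the latest release notes.",
  timeout: {
    totalMs: 60_000, // read by getTotalTimeoutMs -> AbortSignal.timeout(...)
    stepMs: 15_000,  // read by getStepTimeoutMs -> per-step AbortController
  },
});
```

A bare `timeout: 60_000` keeps the previous behavior: `getStepTimeoutMs` returns `undefined` for numbers, so no per-step controller is created.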
@@ -1013,7 +1020,7 @@ var import_provider_utils3 = require("@ai-sdk/provider-utils");
 var import_provider_utils4 = require("@ai-sdk/provider-utils");
 
 // src/version.ts
-var VERSION = true ? "6.0.21" : "0.0.0-test";
+var VERSION = true ? "6.0.23" : "0.0.0-test";
 
 // src/util/download/download.ts
 var download = async ({ url }) => {
@@ -3667,9 +3674,12 @@ async function generateText({
   const model = resolveLanguageModel(modelArg);
   const stopConditions = asArray(stopWhen);
   const totalTimeoutMs = getTotalTimeoutMs(timeout);
+  const stepTimeoutMs = getStepTimeoutMs(timeout);
+  const stepAbortController = stepTimeoutMs != null ? new AbortController() : void 0;
   const mergedAbortSignal = mergeAbortSignals(
     abortSignal,
-    totalTimeoutMs != null ? AbortSignal.timeout(totalTimeoutMs) : void 0
+    totalTimeoutMs != null ? AbortSignal.timeout(totalTimeoutMs) : void 0,
+    stepAbortController == null ? void 0 : stepAbortController.signal
   );
   const { maxRetries, retry } = prepareRetries({
     maxRetries: maxRetriesArg,
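
`mergeAbortSignals` is internal to the package, but the composition it performs is conventional: the caller's `abortSignal`, the whole-call `AbortSignal.timeout(totalMs)`, and the new per-step controller's signal collapse into one signal that fires as soon as any input fires. A rough standalone equivalent using the platform's `AbortSignal.any` (Node 20+/modern browsers); the package's actual helper may differ in detail:

```ts
// Sketch of the merging behavior, not the package's actual implementation.
function mergeAbortSignalsSketch(
  ...signals: Array<AbortSignal | undefined>
): AbortSignal | undefined {
  const present = signals.filter((s): s is AbortSignal => s != null);
  if (present.length === 0) return undefined;
  if (present.length === 1) return present[0];
  // AbortSignal.any aborts as soon as any input signal aborts.
  return AbortSignal.any(present);
}

const stepAbortController = new AbortController();
const merged = mergeAbortSignalsSketch(
  undefined,                   // caller-supplied abortSignal (optional)
  AbortSignal.timeout(60_000), // total budget for the whole call
  stepAbortController.signal   // fired by the per-step timer
);
```

Routing the step budget through an `AbortController` rather than a second `AbortSignal.timeout` lets the timer be armed and disarmed per step, as the next hunk shows.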
@@ -3798,263 +3808,270 @@ async function generateText({
   const steps = [];
   const pendingDeferredToolCalls = /* @__PURE__ */ new Map();
   do {
-    const stepInputMessages = [...initialMessages, ...responseMessages];
-    const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
-      model,
-      steps,
-      stepNumber: steps.length,
-      messages: stepInputMessages,
-      experimental_context
-    }));
-    const stepModel = resolveLanguageModel(
-      (_a16 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a16 : model
-    );
-    const promptMessages = await convertToLanguageModelPrompt({
-      prompt: {
-        system: (_b = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _b : initialPrompt.system,
-        messages: (_c = prepareStepResult == null ? void 0 : prepareStepResult.messages) != null ? _c : stepInputMessages
-      },
-      supportedUrls: await stepModel.supportedUrls,
-      download: download2
-    });
-    experimental_context = (_d = prepareStepResult == null ? void 0 : prepareStepResult.experimental_context) != null ? _d : experimental_context;
-    const { toolChoice: stepToolChoice, tools: stepTools } = await prepareToolsAndToolChoice({
-      tools,
-      toolChoice: (_e = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _e : toolChoice,
-      activeTools: (_f = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _f : activeTools
-    });
-    currentModelResponse = await retry(
-      () => {
-        var _a17;
-        return recordSpan({
-          name: "ai.generateText.doGenerate",
-          attributes: selectTelemetryAttributes({
-            telemetry,
-            attributes: {
-              ...assembleOperationName({
-                operationId: "ai.generateText.doGenerate",
-                telemetry
-              }),
-              ...baseTelemetryAttributes,
-              // model:
-              "ai.model.provider": stepModel.provider,
-              "ai.model.id": stepModel.modelId,
-              // prompt:
-              "ai.prompt.messages": {
-                input: () => stringifyForTelemetry(promptMessages)
-              },
-              "ai.prompt.tools": {
-                // convert the language model level tools:
-                input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
-              },
-              "ai.prompt.toolChoice": {
-                input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
-              },
-              // standardized gen-ai llm span attributes:
-              "gen_ai.system": stepModel.provider,
-              "gen_ai.request.model": stepModel.modelId,
-              "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-              "gen_ai.request.max_tokens": settings.maxOutputTokens,
-              "gen_ai.request.presence_penalty": settings.presencePenalty,
-              "gen_ai.request.stop_sequences": settings.stopSequences,
-              "gen_ai.request.temperature": (_a17 = settings.temperature) != null ? _a17 : void 0,
-              "gen_ai.request.top_k": settings.topK,
-              "gen_ai.request.top_p": settings.topP
+    const stepTimeoutId = stepTimeoutMs != null ? setTimeout(() => stepAbortController.abort(), stepTimeoutMs) : void 0;
+    try {
+      const stepInputMessages = [...initialMessages, ...responseMessages];
+      const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+        model,
+        steps,
+        stepNumber: steps.length,
+        messages: stepInputMessages,
+        experimental_context
+      }));
+      const stepModel = resolveLanguageModel(
+        (_a16 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a16 : model
+      );
+      const promptMessages = await convertToLanguageModelPrompt({
+        prompt: {
+          system: (_b = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _b : initialPrompt.system,
+          messages: (_c = prepareStepResult == null ? void 0 : prepareStepResult.messages) != null ? _c : stepInputMessages
+        },
+        supportedUrls: await stepModel.supportedUrls,
+        download: download2
+      });
+      experimental_context = (_d = prepareStepResult == null ? void 0 : prepareStepResult.experimental_context) != null ? _d : experimental_context;
+      const { toolChoice: stepToolChoice, tools: stepTools } = await prepareToolsAndToolChoice({
+        tools,
+        toolChoice: (_e = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _e : toolChoice,
+        activeTools: (_f = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _f : activeTools
+      });
+      currentModelResponse = await retry(
+        () => {
+          var _a17;
+          return recordSpan({
+            name: "ai.generateText.doGenerate",
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...assembleOperationName({
+                  operationId: "ai.generateText.doGenerate",
+                  telemetry
+                }),
+                ...baseTelemetryAttributes,
+                // model:
+                "ai.model.provider": stepModel.provider,
+                "ai.model.id": stepModel.modelId,
+                // prompt:
+                "ai.prompt.messages": {
+                  input: () => stringifyForTelemetry(promptMessages)
+                },
+                "ai.prompt.tools": {
+                  // convert the language model level tools:
+                  input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
+                },
+                "ai.prompt.toolChoice": {
+                  input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
+                },
+                // standardized gen-ai llm span attributes:
+                "gen_ai.system": stepModel.provider,
+                "gen_ai.request.model": stepModel.modelId,
+                "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+                "gen_ai.request.max_tokens": settings.maxOutputTokens,
+                "gen_ai.request.presence_penalty": settings.presencePenalty,
+                "gen_ai.request.stop_sequences": settings.stopSequences,
+                "gen_ai.request.temperature": (_a17 = settings.temperature) != null ? _a17 : void 0,
+                "gen_ai.request.top_k": settings.topK,
+                "gen_ai.request.top_p": settings.topP
+              }
+            }),
+            tracer,
+            fn: async (span2) => {
+              var _a18, _b2, _c2, _d2, _e2, _f2, _g2, _h2;
+              const stepProviderOptions = mergeObjects(
+                providerOptions,
+                prepareStepResult == null ? void 0 : prepareStepResult.providerOptions
+              );
+              const result = await stepModel.doGenerate({
+                ...callSettings2,
+                tools: stepTools,
+                toolChoice: stepToolChoice,
+                responseFormat: await (output == null ? void 0 : output.responseFormat),
+                prompt: promptMessages,
+                providerOptions: stepProviderOptions,
+                abortSignal: mergedAbortSignal,
+                headers: headersWithUserAgent
+              });
+              const responseData = {
+                id: (_b2 = (_a18 = result.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId2(),
+                timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : /* @__PURE__ */ new Date(),
+                modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : stepModel.modelId,
+                headers: (_g2 = result.response) == null ? void 0 : _g2.headers,
+                body: (_h2 = result.response) == null ? void 0 : _h2.body
+              };
+              span2.setAttributes(
+                await selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.response.finishReason": result.finishReason.unified,
+                    "ai.response.text": {
+                      output: () => extractTextContent(result.content)
+                    },
+                    "ai.response.toolCalls": {
+                      output: () => {
+                        const toolCalls = asToolCalls(result.content);
+                        return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+                      }
+                    },
+                    "ai.response.id": responseData.id,
+                    "ai.response.model": responseData.modelId,
+                    "ai.response.timestamp": responseData.timestamp.toISOString(),
+                    "ai.response.providerMetadata": JSON.stringify(
+                      result.providerMetadata
+                    ),
+                    // TODO rename telemetry attributes to inputTokens and outputTokens
+                    "ai.usage.promptTokens": result.usage.inputTokens.total,
+                    "ai.usage.completionTokens": result.usage.outputTokens.total,
+                    // standardized gen-ai llm span attributes:
+                    "gen_ai.response.finish_reasons": [
+                      result.finishReason.unified
+                    ],
+                    "gen_ai.response.id": responseData.id,
+                    "gen_ai.response.model": responseData.modelId,
+                    "gen_ai.usage.input_tokens": result.usage.inputTokens.total,
+                    "gen_ai.usage.output_tokens": result.usage.outputTokens.total
+                  }
+                })
+              );
+              return { ...result, response: responseData };
             }
-          }),
-          tracer,
-          fn: async (span2) => {
-            var _a18, _b2, _c2, _d2, _e2, _f2, _g2, _h2;
-            const stepProviderOptions = mergeObjects(
-              providerOptions,
-              prepareStepResult == null ? void 0 : prepareStepResult.providerOptions
-            );
-            const result = await stepModel.doGenerate({
-              ...callSettings2,
-              tools: stepTools,
-              toolChoice: stepToolChoice,
-              responseFormat: await (output == null ? void 0 : output.responseFormat),
-              prompt: promptMessages,
-              providerOptions: stepProviderOptions,
-              abortSignal: mergedAbortSignal,
-              headers: headersWithUserAgent
-            });
-            const responseData = {
-              id: (_b2 = (_a18 = result.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId2(),
-              timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : /* @__PURE__ */ new Date(),
-              modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : stepModel.modelId,
-              headers: (_g2 = result.response) == null ? void 0 : _g2.headers,
-              body: (_h2 = result.response) == null ? void 0 : _h2.body
-            };
-            span2.setAttributes(
-              await selectTelemetryAttributes({
-                telemetry,
-                attributes: {
-                  "ai.response.finishReason": result.finishReason.unified,
-                  "ai.response.text": {
-                    output: () => extractTextContent(result.content)
-                  },
-                  "ai.response.toolCalls": {
-                    output: () => {
-                      const toolCalls = asToolCalls(result.content);
-                      return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
-                    }
-                  },
-                  "ai.response.id": responseData.id,
-                  "ai.response.model": responseData.modelId,
-                  "ai.response.timestamp": responseData.timestamp.toISOString(),
-                  "ai.response.providerMetadata": JSON.stringify(
-                    result.providerMetadata
-                  ),
-                  // TODO rename telemetry attributes to inputTokens and outputTokens
-                  "ai.usage.promptTokens": result.usage.inputTokens.total,
-                  "ai.usage.completionTokens": result.usage.outputTokens.total,
-                  // standardized gen-ai llm span attributes:
-                  "gen_ai.response.finish_reasons": [
-                    result.finishReason.unified
-                  ],
-                  "gen_ai.response.id": responseData.id,
-                  "gen_ai.response.model": responseData.modelId,
-                  "gen_ai.usage.input_tokens": result.usage.inputTokens.total,
-                  "gen_ai.usage.output_tokens": result.usage.outputTokens.total
-                }
-              })
-            );
-            return { ...result, response: responseData };
-          }
-        });
-      }
-    );
-    const stepToolCalls = await Promise.all(
-      currentModelResponse.content.filter(
-        (part) => part.type === "tool-call"
-      ).map(
-        (toolCall) => parseToolCall({
+          });
+        }
+      );
+      const stepToolCalls = await Promise.all(
+        currentModelResponse.content.filter(
+          (part) => part.type === "tool-call"
+        ).map(
+          (toolCall) => parseToolCall({
+            toolCall,
+            tools,
+            repairToolCall,
+            system,
+            messages: stepInputMessages
+          })
+        )
+      );
+      const toolApprovalRequests = {};
+      for (const toolCall of stepToolCalls) {
+        if (toolCall.invalid) {
+          continue;
+        }
+        const tool2 = tools == null ? void 0 : tools[toolCall.toolName];
+        if (tool2 == null) {
+          continue;
+        }
+        if ((tool2 == null ? void 0 : tool2.onInputAvailable) != null) {
+          await tool2.onInputAvailable({
+            input: toolCall.input,
+            toolCallId: toolCall.toolCallId,
+            messages: stepInputMessages,
+            abortSignal: mergedAbortSignal,
+            experimental_context
+          });
+        }
+        if (await isApprovalNeeded({
+          tool: tool2,
           toolCall,
-          tools,
-          repairToolCall,
-          system,
-          messages: stepInputMessages
-        })
-      )
-    );
-    const toolApprovalRequests = {};
-    for (const toolCall of stepToolCalls) {
-      if (toolCall.invalid) {
-        continue;
-      }
-      const tool2 = tools == null ? void 0 : tools[toolCall.toolName];
-      if (tool2 == null) {
-        continue;
-      }
-      if ((tool2 == null ? void 0 : tool2.onInputAvailable) != null) {
-        await tool2.onInputAvailable({
-          input: toolCall.input,
-          toolCallId: toolCall.toolCallId,
          messages: stepInputMessages,
-          abortSignal: mergedAbortSignal,
          experimental_context
-        });
+        })) {
+          toolApprovalRequests[toolCall.toolCallId] = {
+            type: "tool-approval-request",
+            approvalId: generateId2(),
+            toolCall
+          };
+        }
       }
-      if (await isApprovalNeeded({
-        tool: tool2,
-        toolCall,
-        messages: stepInputMessages,
-        experimental_context
-      })) {
-        toolApprovalRequests[toolCall.toolCallId] = {
-          type: "tool-approval-request",
-          approvalId: generateId2(),
-          toolCall
-        };
+      const invalidToolCalls = stepToolCalls.filter(
+        (toolCall) => toolCall.invalid && toolCall.dynamic
+      );
+      clientToolOutputs = [];
+      for (const toolCall of invalidToolCalls) {
+        clientToolOutputs.push({
+          type: "tool-error",
+          toolCallId: toolCall.toolCallId,
+          toolName: toolCall.toolName,
+          input: toolCall.input,
+          error: (0, import_provider_utils15.getErrorMessage)(toolCall.error),
+          dynamic: true
+        });
       }
-    }
-    const invalidToolCalls = stepToolCalls.filter(
-      (toolCall) => toolCall.invalid && toolCall.dynamic
-    );
-    clientToolOutputs = [];
-    for (const toolCall of invalidToolCalls) {
-      clientToolOutputs.push({
-        type: "tool-error",
-        toolCallId: toolCall.toolCallId,
-        toolName: toolCall.toolName,
-        input: toolCall.input,
-        error: (0, import_provider_utils15.getErrorMessage)(toolCall.error),
-        dynamic: true
-      });
-    }
-    clientToolCalls = stepToolCalls.filter(
-      (toolCall) => !toolCall.providerExecuted
-    );
-    if (tools != null) {
-      clientToolOutputs.push(
-        ...await executeTools({
-          toolCalls: clientToolCalls.filter(
-            (toolCall) => !toolCall.invalid && toolApprovalRequests[toolCall.toolCallId] == null
-          ),
-          tools,
-          tracer,
-          telemetry,
-          messages: stepInputMessages,
-          abortSignal: mergedAbortSignal,
-          experimental_context
-        })
+      clientToolCalls = stepToolCalls.filter(
+        (toolCall) => !toolCall.providerExecuted
       );
-    }
-    for (const toolCall of stepToolCalls) {
-      if (!toolCall.providerExecuted)
-        continue;
-      const tool2 = tools == null ? void 0 : tools[toolCall.toolName];
-      if ((tool2 == null ? void 0 : tool2.type) === "provider" && tool2.supportsDeferredResults) {
-        const hasResultInResponse = currentModelResponse.content.some(
-          (part) => part.type === "tool-result" && part.toolCallId === toolCall.toolCallId
+      if (tools != null) {
+        clientToolOutputs.push(
+          ...await executeTools({
+            toolCalls: clientToolCalls.filter(
+              (toolCall) => !toolCall.invalid && toolApprovalRequests[toolCall.toolCallId] == null
+            ),
+            tools,
+            tracer,
+            telemetry,
+            messages: stepInputMessages,
+            abortSignal: mergedAbortSignal,
+            experimental_context
+          })
         );
-        if (!hasResultInResponse) {
-          pendingDeferredToolCalls.set(toolCall.toolCallId, {
-            toolName: toolCall.toolName
-          });
+      }
+      for (const toolCall of stepToolCalls) {
+        if (!toolCall.providerExecuted)
+          continue;
+        const tool2 = tools == null ? void 0 : tools[toolCall.toolName];
+        if ((tool2 == null ? void 0 : tool2.type) === "provider" && tool2.supportsDeferredResults) {
+          const hasResultInResponse = currentModelResponse.content.some(
+            (part) => part.type === "tool-result" && part.toolCallId === toolCall.toolCallId
+          );
+          if (!hasResultInResponse) {
+            pendingDeferredToolCalls.set(toolCall.toolCallId, {
+              toolName: toolCall.toolName
+            });
+          }
         }
       }
-    }
-    for (const part of currentModelResponse.content) {
-      if (part.type === "tool-result") {
-        pendingDeferredToolCalls.delete(part.toolCallId);
+      for (const part of currentModelResponse.content) {
+        if (part.type === "tool-result") {
+          pendingDeferredToolCalls.delete(part.toolCallId);
+        }
       }
-    }
-    const stepContent = asContent({
-      content: currentModelResponse.content,
-      toolCalls: stepToolCalls,
-      toolOutputs: clientToolOutputs,
-      toolApprovalRequests: Object.values(toolApprovalRequests),
-      tools
-    });
-    responseMessages.push(
-      ...await toResponseMessages({
-        content: stepContent,
+      const stepContent = asContent({
+        content: currentModelResponse.content,
+        toolCalls: stepToolCalls,
+        toolOutputs: clientToolOutputs,
+        toolApprovalRequests: Object.values(toolApprovalRequests),
         tools
-      })
-    );
-    const currentStepResult = new DefaultStepResult({
-      content: stepContent,
-      finishReason: currentModelResponse.finishReason.unified,
-      rawFinishReason: currentModelResponse.finishReason.raw,
-      usage: asLanguageModelUsage(currentModelResponse.usage),
-      warnings: currentModelResponse.warnings,
-      providerMetadata: currentModelResponse.providerMetadata,
-      request: (_g = currentModelResponse.request) != null ? _g : {},
-      response: {
-        ...currentModelResponse.response,
-        // deep clone msgs to avoid mutating past messages in multi-step:
-        messages: structuredClone(responseMessages)
+      });
+      responseMessages.push(
+        ...await toResponseMessages({
+          content: stepContent,
+          tools
+        })
+      );
+      const currentStepResult = new DefaultStepResult({
+        content: stepContent,
+        finishReason: currentModelResponse.finishReason.unified,
+        rawFinishReason: currentModelResponse.finishReason.raw,
+        usage: asLanguageModelUsage(currentModelResponse.usage),
+        warnings: currentModelResponse.warnings,
+        providerMetadata: currentModelResponse.providerMetadata,
+        request: (_g = currentModelResponse.request) != null ? _g : {},
+        response: {
+          ...currentModelResponse.response,
+          // deep clone msgs to avoid mutating past messages in multi-step:
+          messages: structuredClone(responseMessages)
+        }
+      });
+      logWarnings({
+        warnings: (_h = currentModelResponse.warnings) != null ? _h : [],
+        provider: stepModel.provider,
+        model: stepModel.modelId
+      });
+      steps.push(currentStepResult);
+      await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
+    } finally {
+      if (stepTimeoutId != null) {
+        clearTimeout(stepTimeoutId);
       }
-    });
-    logWarnings({
-      warnings: (_h = currentModelResponse.warnings) != null ? _h : [],
-      provider: stepModel.provider,
-      model: stepModel.modelId
-    });
-    steps.push(currentStepResult);
-    await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
+    }
   } while (
     // Continue if:
     // 1. There are client tool calls that have all been executed, OR
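
Most of this hunk is re-indentation: the existing step body moves inside a `try` block. The substantive change is small: a timer armed at the top of each loop iteration aborts `stepAbortController` once `stepMs` elapses, and the `finally` clause disarms it so a step that finishes in time never leaks a pending abort into the next iteration. The skeleton, with the generation logic elided:

```ts
// Skeleton of the per-step timeout pattern introduced in this hunk
// (names match the diff; the step body itself is elided).
async function runSteps(stepTimeoutMs: number | undefined) {
  const stepAbortController =
    stepTimeoutMs != null ? new AbortController() : undefined;
  let done = false;
  do {
    const stepTimeoutId =
      stepTimeoutMs != null
        ? setTimeout(() => stepAbortController!.abort(), stepTimeoutMs)
        : undefined;
    try {
      // ... one model step: doGenerate, tool execution, bookkeeping ...
      done = true;
    } finally {
      // Always disarm the timer, even if the step threw or was aborted.
      if (stepTimeoutId != null) {
        clearTimeout(stepTimeoutId);
      }
    }
  } while (!done);
}
```

Note that the controller is created once, outside the loop: a single step overrunning its budget aborts the merged signal and therefore the whole call, which is the intended semantics for a step timeout.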
@@ -5749,14 +5766,12 @@ function streamText({
   onAbort,
   onStepFinish,
   experimental_context,
-  _internal: {
-    now: now2 = now,
-    generateId: generateId2 = originalGenerateId2,
-    currentDate = () => /* @__PURE__ */ new Date()
-  } = {},
+  _internal: { now: now2 = now, generateId: generateId2 = originalGenerateId2 } = {},
   ...settings
 }) {
   const totalTimeoutMs = getTotalTimeoutMs(timeout);
+  const stepTimeoutMs = getStepTimeoutMs(timeout);
+  const stepAbortController = stepTimeoutMs != null ? new AbortController() : void 0;
   return new DefaultStreamTextResult({
     model: resolveLanguageModel(model),
     telemetry,
@@ -5765,8 +5780,11 @@ function streamText({
     maxRetries,
     abortSignal: mergeAbortSignals(
       abortSignal,
-      totalTimeoutMs != null ? AbortSignal.timeout(totalTimeoutMs) : void 0
+      totalTimeoutMs != null ? AbortSignal.timeout(totalTimeoutMs) : void 0,
+      stepAbortController == null ? void 0 : stepAbortController.signal
     ),
+    stepTimeoutMs,
+    stepAbortController,
     system,
     prompt,
     messages,
@@ -5786,7 +5804,6 @@ function streamText({
     onAbort,
     onStepFinish,
     now: now2,
-    currentDate,
     generateId: generateId2,
     experimental_context,
     download: download2
@@ -5862,6 +5879,8 @@ var DefaultStreamTextResult = class {
     settings,
     maxRetries: maxRetriesArg,
     abortSignal,
+    stepTimeoutMs,
+    stepAbortController,
     system,
     prompt,
     messages,
@@ -5876,7 +5895,6 @@ var DefaultStreamTextResult = class {
     prepareStep,
     includeRawChunks,
     now: now2,
-    currentDate,
    generateId: generateId2,
     onChunk,
     onError,
@@ -6328,6 +6346,7 @@ var DefaultStreamTextResult = class {
     }) {
       var _a16, _b, _c, _d, _e, _f;
       const includeRawChunks2 = self.includeRawChunks;
+      const stepTimeoutId = stepTimeoutMs != null ? setTimeout(() => stepAbortController.abort(), stepTimeoutMs) : void 0;
       stepFinish = new import_provider_utils19.DelayedPromise();
       const stepInputMessages = [...initialMessages, ...responseMessages];
       const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
@@ -6443,7 +6462,7 @@ var DefaultStreamTextResult = class {
       let stepFirstChunk = true;
       let stepResponse = {
         id: generateId2(),
-        timestamp: currentDate(),
+        timestamp: /* @__PURE__ */ new Date(),
         modelId: model.modelId
       };
       let activeText = "";
@@ -6682,6 +6701,9 @@ var DefaultStreamTextResult = class {
             pendingDeferredToolCalls.delete(output2.toolCallId);
           }
         }
+        if (stepTimeoutId != null) {
+          clearTimeout(stepTimeoutId);
+        }
         if (
           // Continue if:
           // 1. There are client tool calls that have all been executed, OR
@@ -10239,7 +10261,16 @@ function smoothStream({
   _internal: { delay: delay2 = import_provider_utils32.delay } = {}
 } = {}) {
   let detectChunk;
-  if (typeof chunking === "function") {
+  if (chunking != null && typeof chunking === "object" && "segment" in chunking && typeof chunking.segment === "function") {
+    const segmenter = chunking;
+    detectChunk = (buffer) => {
+      if (buffer.length === 0)
+        return null;
+      const iterator = segmenter.segment(buffer)[Symbol.iterator]();
+      const first = iterator.next().value;
+      return (first == null ? void 0 : first.segment) || null;
+    };
+  } else if (typeof chunking === "function") {
     detectChunk = (buffer) => {
       const match = chunking(buffer);
       if (match == null) {
@@ -10256,11 +10287,11 @@ function smoothStream({
       return match;
     };
   } else {
-    const chunkingRegex = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking;
+    const chunkingRegex = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking instanceof RegExp ? chunking : void 0;
     if (chunkingRegex == null) {
       throw new import_provider28.InvalidArgumentError({
         argument: "chunking",
-        message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`
+        message: `Chunking must be "word", "line", a RegExp, an Intl.Segmenter, or a ChunkDetector function. Received: ${chunking}`
       });
     }
     detectChunk = (buffer) => {
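
Together these two hunks teach `smoothStream` to accept an `Intl.Segmenter` (detected by duck-typing a `segment` method) alongside the existing `"word"`/`"line"` presets, RegExp, and custom detector function; the segmenter path emits one segment at a time from the front of the buffer. A usage sketch, assuming `smoothStream` is still wired in through `streamText`'s `experimental_transform` option (the provider and model are illustrative):

```ts
import { streamText, smoothStream } from "ai";
import { openai } from "@ai-sdk/openai"; // illustrative provider choice

const result = streamText({
  model: openai("gpt-4o"),
  prompt: "Tell me a short story.",
  experimental_transform: smoothStream({
    // Intl.Segmenter is a standard ECMA-402 API; locale-aware chunking
    // helps for scripts without word-delimiting spaces (e.g. Japanese).
    chunking: new Intl.Segmenter("ja", { granularity: "word" }),
  }),
});
```

The widened error message in the second hunk documents the same set of accepted values, and the `chunking instanceof RegExp` guard means an unrecognized object now fails loudly instead of being used as a regex.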
@@ -12123,6 +12154,7 @@ var TextStreamChatTransport = class extends HttpChatTransport {
   generateObject,
   generateText,
   getStaticToolName,
+  getStepTimeoutMs,
   getTextFromDataUrl,
   getToolName,
   getToolOrDynamicToolName,