ai 3.3.36 → 3.3.38
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/dist/index.d.mts +14 -49
- package/dist/index.d.ts +14 -49
- package/dist/index.js +36 -30
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +36 -30
- package/dist/index.mjs.map +1 -1
- package/package.json +7 -7
- package/rsc/dist/rsc-server.mjs +2 -2
- package/rsc/dist/rsc-server.mjs.map +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,23 @@
 # ai
 
+## 3.3.38
+
+### Patch Changes
+
+- 83da52c: feat (ai/core): add onStepFinish callback to streamText
+
+## 3.3.37
+
+### Patch Changes
+
+- Updated dependencies [273f696]
+  - @ai-sdk/provider-utils@1.0.19
+  - @ai-sdk/react@0.0.59
+  - @ai-sdk/solid@0.0.47
+  - @ai-sdk/svelte@0.0.49
+  - @ai-sdk/ui-utils@0.0.44
+  - @ai-sdk/vue@0.0.49
+
 ## 3.3.36
 
 ### Patch Changes
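The headline change is the new `onStepFinish` callback on `streamText`. A minimal usage sketch (the provider import and model name are illustrative assumptions, not part of this diff; the event shape is the `StepResult` type shown in the typings below):

```ts
import { streamText } from 'ai';
// Assumption: any provider model works here; @ai-sdk/openai is used only for illustration.
import { openai } from '@ai-sdk/openai';

const result = await streamText({
  model: openai('gpt-4o-mini'),
  maxSteps: 5, // allow multi-step tool roundtrips
  prompt: 'What is the weather in Berlin?',
  // New in 3.3.38: fires after every step (LLM call), including intermediate tool steps.
  onStepFinish(step) {
    console.log(step.finishReason, step.usage, step.toolCalls, step.toolResults);
  },
});

// The stream must be consumed for the callbacks to run.
for await (const delta of result.textStream) {
  process.stdout.write(delta);
}
```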
package/dist/index.d.mts
CHANGED
@@ -1806,7 +1806,7 @@ If set and supported by the model, calls will generate deterministic results.
 @return
 A result object for accessing different stream types and additional information.
 */
-declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
@@ -1868,57 +1868,22 @@ Callback that is called for each chunk of the stream. The stream processing will
 Callback that is called when the LLM response and all request tool executions
 (for tools that have an `execute` function) are finished.
 */
-onFinish?: (event: {
+onFinish?: (event: StepResult<TOOLS> & {
 /**
-The reason why the generation finished.
-*/
-finishReason: FinishReason;
-/**
-The token usage of the generated response.
-*/
-usage: LanguageModelUsage$1;
-/**
-The full text that has been generated.
-*/
-text: string;
-/**
-The tool calls that have been executed.
-*/
-toolCalls?: ToToolCall<TOOLS>[];
-/**
-The tool results that have been generated.
-*/
-toolResults?: ToToolResult<TOOLS>[];
-/**
-Optional raw response data.
-
-@deprecated Use `response` instead.
-*/
-rawResponse?: {
-/**
-Response headers.
-*/
-headers?: Record<string, string>;
-};
-/**
-Response metadata.
-*/
-response: LanguageModelResponseMetadataWithHeaders;
-/**
-Details for all steps.
-*/
+Details for all steps.
+*/
 steps: StepResult<TOOLS>[];
 /**
-Warnings from the model provider (e.g. unsupported settings).
-*/
-warnings?: CallWarning[];
-/**
-Additional provider-specific metadata. They are passed through
-from the provider to the AI SDK and enable provider-specific
-results that can be fully encapsulated in the provider.
-*/
+Additional provider-specific metadata. They are passed through
+from the provider to the AI SDK and enable provider-specific
+results that can be fully encapsulated in the provider.
+*/
 readonly experimental_providerMetadata: ProviderMetadata | undefined;
 }) => Promise<void> | void;
+/**
+Callback that is called when each step (LLM call) is finished, including intermediate steps.
+*/
+onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
 /**
 * Internal. For test use only. May change without notice.
 */
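The net effect of this hunk: the `onFinish` event no longer redeclares every per-step field inline but inherits them from `StepResult<TOOLS>` (carrying the values of the final step), with only `steps` and `experimental_providerMetadata` declared on top, and a new optional `onStepFinish` callback is added alongside it. A sketch of what the new event type means for callers (provider import again an illustrative assumption):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumption: any provider model works

const result = await streamText({
  model: openai('gpt-4o-mini'),
  prompt: 'Hello!',
  onFinish(event) {
    // Fields inherited from StepResult<TOOLS> (final step values):
    console.log(event.text, event.finishReason, event.warnings);
    // Fields declared on the intersection type itself:
    console.log(event.steps.length, event.experimental_providerMetadata);
  },
});

for await (const _ of result.textStream) {
  // drain the stream so the callbacks fire
}
```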
@@ -3107,10 +3072,10 @@ declare class StreamingTextResponse extends Response {
 constructor(res: ReadableStream, init?: ResponseInit, data?: StreamData);
 }
 
-declare const generateId: () => string;
+declare const generateId: (size?: number) => string;
 /**
 @deprecated Use `generateId` instead.
 */
-declare const nanoid: () => string;
+declare const nanoid: (size?: number) => string;
 
 export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantResponse, CallWarning, CohereStream, CompletionTokenUsage, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, EmbeddingTokenUsage, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, Experimental_LanguageModelV1Middleware, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LanguageModelResponseMetadata, LanguageModelResponseMetadataWithHeaders, LanguageModelUsage, LogProbs, MessageConversionError, MistralStream, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, OpenAIStream, OpenAIStreamCallbacks, Provider, ProviderMetadata, ReplicateStream, RetryError, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_Provider, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_customProvider, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, experimental_wrapLanguageModel, generateId, generateObject, generateText, nanoid, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
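`generateId` and `nanoid` (its deprecated alias) now accept an optional size, as the updated declarations show. A minimal sketch:

```ts
import { generateId } from 'ai';

const id = generateId();       // default-length id
const shortId = generateId(8); // id of the requested length (8 characters here)
```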
package/dist/index.d.ts
CHANGED
@@ -1806,7 +1806,7 @@ If set and supported by the model, calls will generate deterministic results.
 @return
 A result object for accessing different stream types and additional information.
 */
-declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
+declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, maxSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
@@ -1868,57 +1868,22 @@ Callback that is called for each chunk of the stream. The stream processing will
 Callback that is called when the LLM response and all request tool executions
 (for tools that have an `execute` function) are finished.
 */
-onFinish?: (event: {
+onFinish?: (event: StepResult<TOOLS> & {
 /**
-The reason why the generation finished.
-*/
-finishReason: FinishReason;
-/**
-The token usage of the generated response.
-*/
-usage: LanguageModelUsage$1;
-/**
-The full text that has been generated.
-*/
-text: string;
-/**
-The tool calls that have been executed.
-*/
-toolCalls?: ToToolCall<TOOLS>[];
-/**
-The tool results that have been generated.
-*/
-toolResults?: ToToolResult<TOOLS>[];
-/**
-Optional raw response data.
-
-@deprecated Use `response` instead.
-*/
-rawResponse?: {
-/**
-Response headers.
-*/
-headers?: Record<string, string>;
-};
-/**
-Response metadata.
-*/
-response: LanguageModelResponseMetadataWithHeaders;
-/**
-Details for all steps.
-*/
+Details for all steps.
+*/
 steps: StepResult<TOOLS>[];
 /**
-Warnings from the model provider (e.g. unsupported settings).
-*/
-warnings?: CallWarning[];
-/**
-Additional provider-specific metadata. They are passed through
-from the provider to the AI SDK and enable provider-specific
-results that can be fully encapsulated in the provider.
-*/
+Additional provider-specific metadata. They are passed through
+from the provider to the AI SDK and enable provider-specific
+results that can be fully encapsulated in the provider.
+*/
 readonly experimental_providerMetadata: ProviderMetadata | undefined;
 }) => Promise<void> | void;
+/**
+Callback that is called when each step (LLM call) is finished, including intermediate steps.
+*/
+onStepFinish?: (event: StepResult<TOOLS>) => Promise<void> | void;
 /**
 * Internal. For test use only. May change without notice.
 */
@@ -3107,10 +3072,10 @@ declare class StreamingTextResponse extends Response {
 constructor(res: ReadableStream, init?: ResponseInit, data?: StreamData);
 }
 
-declare const generateId: () => string;
+declare const generateId: (size?: number) => string;
 /**
 @deprecated Use `generateId` instead.
 */
-declare const nanoid: () => string;
+declare const nanoid: (size?: number) => string;
 
 export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantResponse, CallWarning, CohereStream, CompletionTokenUsage, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, EmbeddingTokenUsage, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, Experimental_LanguageModelV1Middleware, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LanguageModelResponseMetadata, LanguageModelResponseMetadataWithHeaders, LanguageModelUsage, LogProbs, MessageConversionError, MistralStream, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, OpenAIStream, OpenAIStreamCallbacks, Provider, ProviderMetadata, ReplicateStream, RetryError, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_Provider, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_customProvider, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, experimental_wrapLanguageModel, generateId, generateObject, generateText, nanoid, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
package/dist/index.js
CHANGED
@@ -1193,8 +1193,8 @@ function prepareCallSettings({
 if (typeof topK !== "number") {
 throw new InvalidArgumentError({
 parameter: "topK",
-value:
-message: "
+value: topK,
+message: "topK must be a number"
 });
 }
 }
@@ -1844,7 +1844,7 @@ function validateObjectGenerationInput({
 }
 
 // core/generate-object/generate-object.ts
-var originalGenerateId = (0, import_provider_utils6.createIdGenerator)({ prefix: "aiobj-",
+var originalGenerateId = (0, import_provider_utils6.createIdGenerator)({ prefix: "aiobj-", size: 24 });
 async function generateObject({
 model,
 enum: enumValues,
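All four internal id generators in this bundle now pass an explicit `size: 24` to `createIdGenerator` (the same one-line change repeats for stream-object, generate-text, and stream-text below). A sketch of what such a generator produces, assuming the `@ai-sdk/provider-utils` options shown in this diff:

```ts
import { createIdGenerator } from '@ai-sdk/provider-utils';

// prefix followed by 24 random characters, e.g. the "aiobj-..." ids used for object generation
const generateObjectId = createIdGenerator({ prefix: 'aiobj-', size: 24 });
console.log(generateObjectId());
```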
@@ -2330,7 +2330,7 @@ function writeToServerResponse({
 }
 
 // core/generate-object/stream-object.ts
-var originalGenerateId2 = (0, import_provider_utils7.createIdGenerator)({ prefix: "aiobj-",
+var originalGenerateId2 = (0, import_provider_utils7.createIdGenerator)({ prefix: "aiobj-", size: 24 });
 async function streamObject({
 model,
 schema: inputSchema,
@@ -3039,7 +3039,7 @@ function parseToolCall({
 }
 
 // core/generate-text/generate-text.ts
-var originalGenerateId3 = (0, import_provider_utils9.createIdGenerator)({ prefix: "aitxt-",
+var originalGenerateId3 = (0, import_provider_utils9.createIdGenerator)({ prefix: "aitxt-", size: 24 });
 async function generateText({
 model,
 tools,
@@ -3719,7 +3719,7 @@ function runToolsTransformation({
 }
 
 // core/generate-text/stream-text.ts
-var originalGenerateId4 = (0, import_provider_utils10.createIdGenerator)({ prefix: "aitxt-",
+var originalGenerateId4 = (0, import_provider_utils10.createIdGenerator)({ prefix: "aitxt-", size: 24 });
 async function streamText({
 model,
 tools,
@@ -3737,6 +3737,7 @@ async function streamText({
 experimental_toolCallStreaming: toolCallStreaming = false,
 onChunk,
 onFinish,
+onStepFinish,
 _internal: {
 now: now2 = now,
 generateId: generateId3 = originalGenerateId4,
@@ -3869,6 +3870,7 @@ async function streamText({
 rawResponse,
 onChunk,
 onFinish,
+onStepFinish,
 rootSpan,
 doStreamSpan,
 telemetry,
@@ -3891,6 +3893,7 @@ var DefaultStreamTextResult = class {
 rawResponse,
 onChunk,
 onFinish,
+onStepFinish,
 rootSpan,
 doStreamSpan,
 telemetry,
@@ -4043,26 +4046,7 @@ var DefaultStreamTextResult = class {
 },
 // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
 async flush(controller) {
-controller.enqueue({
-type: "step-finish",
-finishReason: stepFinishReason,
-usage: stepUsage,
-experimental_providerMetadata: stepProviderMetadata,
-logprobs: stepLogProbs,
-response: stepResponse
-});
-stepResults.push({
-text: stepText,
-toolCalls: stepToolCalls,
-toolResults: stepToolResults,
-finishReason: stepFinishReason,
-usage: stepUsage,
-warnings: self.warnings,
-logprobs: stepLogProbs,
-response: stepResponse,
-rawResponse: self.rawResponse
-});
-const telemetryToolCalls = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
+const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
 try {
 doStreamSpan2.setAttributes(
 selectTelemetryAttributes({
@@ -4071,7 +4055,7 @@ var DefaultStreamTextResult = class {
 "ai.response.finishReason": stepFinishReason,
 "ai.response.text": { output: () => stepText },
 "ai.response.toolCalls": {
-output: () => telemetryToolCalls
+output: () => stepToolCallsJson
 },
 "ai.response.id": stepResponse.id,
 "ai.response.model": stepResponse.modelId,
@@ -4082,7 +4066,7 @@ var DefaultStreamTextResult = class {
 "ai.finishReason": stepFinishReason,
 "ai.result.text": { output: () => stepText },
 "ai.result.toolCalls": {
-output: () => telemetryToolCalls
+output: () => stepToolCallsJson
 },
 // standardized gen-ai llm span attributes:
 "gen_ai.response.finish_reasons": [stepFinishReason],
@@ -4097,6 +4081,27 @@ var DefaultStreamTextResult = class {
 } finally {
 doStreamSpan2.end();
 }
+controller.enqueue({
+type: "step-finish",
+finishReason: stepFinishReason,
+usage: stepUsage,
+experimental_providerMetadata: stepProviderMetadata,
+logprobs: stepLogProbs,
+response: stepResponse
+});
+const stepResult = {
+text: stepText,
+toolCalls: stepToolCalls,
+toolResults: stepToolResults,
+finishReason: stepFinishReason,
+usage: stepUsage,
+warnings: self.warnings,
+logprobs: stepLogProbs,
+response: stepResponse,
+rawResponse: self.rawResponse
+};
+stepResults.push(stepResult);
+await (onStepFinish == null ? void 0 : onStepFinish(stepResult));
 const combinedUsage = {
 promptTokens: usage.promptTokens + stepUsage.promptTokens,
 completionTokens: usage.completionTokens + stepUsage.completionTokens,
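Two details in this hunk: the `step-finish` chunk is now enqueued only after the step's telemetry span has been closed, and `await (onStepFinish == null ? void 0 : onStepFinish(stepResult))` is the down-leveled form of `await onStepFinish?.(stepResult)`, so async callbacks are awaited before stream processing continues. The enqueued chunk should also surface as a `step-finish` part on the consumer side (a sketch, assuming `result` comes from a `streamText` call as in the earlier example):

```ts
for await (const part of result.fullStream) {
  if (part.type === 'step-finish') {
    // Mirrors what onStepFinish receives for the step.
    console.log(part.finishReason, part.usage);
  }
}
```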
@@ -4154,7 +4159,7 @@ var DefaultStreamTextResult = class {
 "ai.response.finishReason": stepFinishReason,
 "ai.response.text": { output: () => stepText },
 "ai.response.toolCalls": {
-output: () => telemetryToolCalls
+output: () => stepToolCallsJson
 },
 "ai.usage.promptTokens": combinedUsage.promptTokens,
 "ai.usage.completionTokens": combinedUsage.completionTokens,
@@ -4162,7 +4167,7 @@ var DefaultStreamTextResult = class {
 "ai.finishReason": stepFinishReason,
 "ai.result.text": { output: () => stepText },
 "ai.result.toolCalls": {
-output: () => telemetryToolCalls
+output: () => stepToolCallsJson
 }
 }
 })
@@ -4180,6 +4185,7 @@ var DefaultStreamTextResult = class {
 resolveSteps(stepResults);
 await (onFinish == null ? void 0 : onFinish({
 finishReason: stepFinishReason,
+logprobs: stepLogProbs,
 usage: combinedUsage,
 text: stepText,
 toolCalls: stepToolCalls,
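Together with the typings, this last hunk pins down the contract: `onStepFinish` receives each step's own values, while `onFinish` still fires once at the end with the usage summed across steps (`combinedUsage`), the full `steps` array, and now also `logprobs`. A sketch of the distinction (provider import again an illustrative assumption):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumption: any provider model works

const result = await streamText({
  model: openai('gpt-4o-mini'),
  maxSteps: 3,
  prompt: 'Plan a trip to Berlin.',
  onStepFinish(step) {
    console.log('step usage:', step.usage); // tokens for this step only
  },
  onFinish(event) {
    console.log('total usage:', event.usage); // summed across all steps
    console.log('steps taken:', event.steps.length);
  },
});

for await (const _ of result.textStream) {
  // drain the stream so both callbacks fire
}
```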