ai 3.3.37 → 3.3.39
This diff compares the contents of two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.
- package/CHANGELOG.md +13 -0
- package/dist/index.d.mts +24 -50
- package/dist/index.d.ts +24 -50
- package/dist/index.js +38 -27
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +38 -27
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
@@ -2998,6 +2998,7 @@ async function generateText({
     generateId: generateId3 = originalGenerateId3,
     currentDate = () => /* @__PURE__ */ new Date()
   } = {},
+  onStepFinish,
   ...settings
 }) {
   var _a11;
@@ -3159,7 +3160,7 @@ async function generateText({
         usage.completionTokens += currentUsage.completionTokens;
         usage.promptTokens += currentUsage.promptTokens;
         usage.totalTokens += currentUsage.totalTokens;
-        steps.push({
+        const currentStep = {
           text: (_b = currentModelResponse.text) != null ? _b : "",
           toolCalls: currentToolCalls,
           toolResults: currentToolResults,
@@ -3170,8 +3171,11 @@ async function generateText({
           response: {
             ...currentModelResponse.response,
             headers: (_c = currentModelResponse.rawResponse) == null ? void 0 : _c.headers
-          }
-        });
+          },
+          experimental_providerMetadata: currentModelResponse.providerMetadata
+        };
+        steps.push(currentStep);
+        await (onStepFinish == null ? void 0 : onStepFinish(currentStep));
         const newResponseMessages = toResponseMessages({
           text: currentModelResponse.text,
           toolCalls: currentToolCalls,
@@ -3676,6 +3680,7 @@ async function streamText({
   experimental_toolCallStreaming: toolCallStreaming = false,
   onChunk,
   onFinish,
+  onStepFinish,
   _internal: {
     now: now2 = now,
     generateId: generateId3 = originalGenerateId4,
@@ -3808,6 +3813,7 @@ async function streamText({
         rawResponse,
         onChunk,
         onFinish,
+        onStepFinish,
         rootSpan,
         doStreamSpan,
         telemetry,
@@ -3830,6 +3836,7 @@ var DefaultStreamTextResult = class {
     rawResponse,
     onChunk,
     onFinish,
+    onStepFinish,
     rootSpan,
     doStreamSpan,
     telemetry,
@@ -3982,26 +3989,7 @@ var DefaultStreamTextResult = class {
         },
         // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
         async flush(controller) {
-          controller.enqueue({
-            type: "step-finish",
-            finishReason: stepFinishReason,
-            usage: stepUsage,
-            experimental_providerMetadata: stepProviderMetadata,
-            logprobs: stepLogProbs,
-            response: stepResponse
-          });
-          stepResults.push({
-            text: stepText,
-            toolCalls: stepToolCalls,
-            toolResults: stepToolResults,
-            finishReason: stepFinishReason,
-            usage: stepUsage,
-            warnings: self.warnings,
-            logprobs: stepLogProbs,
-            response: stepResponse,
-            rawResponse: self.rawResponse
-          });
-          const telemetryToolCalls = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
+          const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
           try {
             doStreamSpan2.setAttributes(
               selectTelemetryAttributes({
@@ -4010,7 +3998,7 @@ var DefaultStreamTextResult = class {
                 "ai.response.finishReason": stepFinishReason,
                 "ai.response.text": { output: () => stepText },
                 "ai.response.toolCalls": {
-                  output: () => telemetryToolCalls
+                  output: () => stepToolCallsJson
                 },
                 "ai.response.id": stepResponse.id,
                 "ai.response.model": stepResponse.modelId,
@@ -4021,7 +4009,7 @@ var DefaultStreamTextResult = class {
                 "ai.finishReason": stepFinishReason,
                 "ai.result.text": { output: () => stepText },
                 "ai.result.toolCalls": {
-                  output: () => telemetryToolCalls
+                  output: () => stepToolCallsJson
                 },
                 // standardized gen-ai llm span attributes:
                 "gen_ai.response.finish_reasons": [stepFinishReason],
@@ -4036,6 +4024,28 @@ var DefaultStreamTextResult = class {
           } finally {
             doStreamSpan2.end();
           }
+          controller.enqueue({
+            type: "step-finish",
+            finishReason: stepFinishReason,
+            usage: stepUsage,
+            experimental_providerMetadata: stepProviderMetadata,
+            logprobs: stepLogProbs,
+            response: stepResponse
+          });
+          const stepResult = {
+            text: stepText,
+            toolCalls: stepToolCalls,
+            toolResults: stepToolResults,
+            finishReason: stepFinishReason,
+            usage: stepUsage,
+            warnings: self.warnings,
+            logprobs: stepLogProbs,
+            response: stepResponse,
+            rawResponse: self.rawResponse,
+            experimental_providerMetadata: stepProviderMetadata
+          };
+          stepResults.push(stepResult);
+          await (onStepFinish == null ? void 0 : onStepFinish(stepResult));
           const combinedUsage = {
             promptTokens: usage.promptTokens + stepUsage.promptTokens,
             completionTokens: usage.completionTokens + stepUsage.completionTokens,
@@ -4093,7 +4103,7 @@ var DefaultStreamTextResult = class {
               "ai.response.finishReason": stepFinishReason,
               "ai.response.text": { output: () => stepText },
               "ai.response.toolCalls": {
-                output: () => telemetryToolCalls
+                output: () => stepToolCallsJson
               },
               "ai.usage.promptTokens": combinedUsage.promptTokens,
               "ai.usage.completionTokens": combinedUsage.completionTokens,
@@ -4101,7 +4111,7 @@ var DefaultStreamTextResult = class {
               "ai.finishReason": stepFinishReason,
               "ai.result.text": { output: () => stepText },
               "ai.result.toolCalls": {
-                output: () => telemetryToolCalls
+                output: () => stepToolCallsJson
               }
             }
           })
@@ -4119,6 +4129,7 @@ var DefaultStreamTextResult = class {
           resolveSteps(stepResults);
           await (onFinish == null ? void 0 : onFinish({
             finishReason: stepFinishReason,
+            logprobs: stepLogProbs,
             usage: combinedUsage,
             text: stepText,
             toolCalls: stepToolCalls,