ai 3.3.23 → 3.3.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/dist/index.d.mts +25 -1
- package/dist/index.d.ts +25 -1
- package/dist/index.js +131 -45
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +131 -45
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
```diff
@@ -1746,10 +1746,13 @@ async function generateObject({
         },
         "ai.settings.mode": mode,
         // standardized gen-ai llm span attributes:
-        "gen_ai.request.model": model.modelId,
         "gen_ai.system": model.provider,
+        "gen_ai.request.model": model.modelId,
+        "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
         "gen_ai.request.max_tokens": settings.maxTokens,
+        "gen_ai.request.presence_penalty": settings.presencePenalty,
         "gen_ai.request.temperature": settings.temperature,
+        "gen_ai.request.top_k": settings.topK,
         "gen_ai.request.top_p": settings.topP
       }
     }),
```
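The hunk above, and its siblings in streamObject, generateText, and streamText further down, fill out the OpenTelemetry generative-AI request attributes: gen_ai.request.frequency_penalty, gen_ai.request.presence_penalty, and gen_ai.request.top_k are now recorded alongside the existing model, max_tokens, temperature, and top_p attributes (the text functions also gain gen_ai.request.stop_sequences). A minimal sketch of a call whose settings would feed these attributes; the provider import, model id, and telemetry setup are illustrative assumptions, not part of this diff:

```js
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai"; // assumed provider

const { text } = await generateText({
  model: openai("gpt-4o-mini"), // assumed model id
  prompt: "Summarize OpenTelemetry in one sentence.",
  maxTokens: 256,          // gen_ai.request.max_tokens
  temperature: 0.7,        // gen_ai.request.temperature
  topP: 0.9,               // gen_ai.request.top_p
  topK: 40,                // gen_ai.request.top_k (new)
  frequencyPenalty: 0.2,   // gen_ai.request.frequency_penalty (new)
  presencePenalty: 0.1,    // gen_ai.request.presence_penalty (new)
  stopSequences: ["\n\n"], // gen_ai.request.stop_sequences (new, text calls)
  experimental_telemetry: { isEnabled: true },
});
```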
```diff
@@ -1775,9 +1778,12 @@ async function generateObject({
         selectTelemetryAttributes({
           telemetry,
           attributes: {
-            "ai.finishReason": result2.finishReason,
+            "ai.response.finishReason": result2.finishReason,
+            "ai.response.object": { output: () => result2.text },
             "ai.usage.promptTokens": result2.usage.promptTokens,
             "ai.usage.completionTokens": result2.usage.completionTokens,
+            // deprecated:
+            "ai.finishReason": result2.finishReason,
             "ai.result.object": { output: () => result2.text },
             // standardized gen-ai llm span attributes:
             "gen_ai.response.finish_reasons": [result2.finishReason],
```
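This hunk shows the release's recurring rename: result attributes move from ai.finishReason and ai.result.* to ai.response.finishReason and ai.response.*, while the old keys are still emitted under a "deprecated" comment so existing dashboards and span processors keep working during the transition. Consumers can prefer the new keys and fall back to the old ones; a small sketch, assuming a ReadableSpan as produced by @opentelemetry/sdk-trace-base:

```js
// Prefer the new ai.response.* key; fall back to the deprecated ai.*
// key, which these versions still emit alongside it.
function readFinishReason(span) {
  const attrs = span.attributes;
  return attrs["ai.response.finishReason"] ?? attrs["ai.finishReason"];
}
```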
```diff
@@ -1829,10 +1835,13 @@ async function generateObject({
         },
         "ai.settings.mode": mode,
         // standardized gen-ai llm span attributes:
-        "gen_ai.request.model": model.modelId,
         "gen_ai.system": model.provider,
+        "gen_ai.request.model": model.modelId,
+        "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
         "gen_ai.request.max_tokens": settings.maxTokens,
+        "gen_ai.request.presence_penalty": settings.presencePenalty,
         "gen_ai.request.temperature": settings.temperature,
+        "gen_ai.request.top_k": settings.topK,
         "gen_ai.request.top_p": settings.topP
       }
     }),
@@ -1863,14 +1872,17 @@ async function generateObject({
         selectTelemetryAttributes({
           telemetry,
           attributes: {
-            "ai.finishReason": result2.finishReason,
+            "ai.response.finishReason": result2.finishReason,
+            "ai.response.object": { output: () => objectText },
             "ai.usage.promptTokens": result2.usage.promptTokens,
             "ai.usage.completionTokens": result2.usage.completionTokens,
+            // deprecated:
+            "ai.finishReason": result2.finishReason,
             "ai.result.object": { output: () => objectText },
             // standardized gen-ai llm span attributes:
             "gen_ai.response.finish_reasons": [result2.finishReason],
-            "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
-            "gen_ai.usage.completion_tokens": result2.usage.completionTokens
+            "gen_ai.usage.input_tokens": result2.usage.promptTokens,
+            "gen_ai.usage.output_tokens": result2.usage.completionTokens
           }
         })
       );
@@ -1911,9 +1923,14 @@ async function generateObject({
         selectTelemetryAttributes({
           telemetry,
           attributes: {
-            "ai.finishReason": finishReason,
+            "ai.response.finishReason": finishReason,
+            "ai.response.object": {
+              output: () => JSON.stringify(validationResult.value)
+            },
             "ai.usage.promptTokens": usage.promptTokens,
             "ai.usage.completionTokens": usage.completionTokens,
+            // deprecated:
+            "ai.finishReason": finishReason,
             "ai.result.object": {
               output: () => JSON.stringify(validationResult.value)
             }
@@ -2013,6 +2030,12 @@ var DelayedPromise = class {
   }
 };
 
+// core/util/now.ts
+function now() {
+  var _a11, _b;
+  return (_b = (_a11 = globalThis == null ? void 0 : globalThis.performance) == null ? void 0 : _a11.now()) != null ? _b : Date.now();
+}
+
 // core/generate-object/stream-object.ts
 async function streamObject({
   model,
```
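The minified now() helper above is equivalent to the sketch below: it prefers the monotonic performance.now() clock when a global performance object exists (browsers, modern Node.js, edge runtimes) and falls back to the wall-clock Date.now() otherwise. The _internal: { now: now2 = now } = {} option added to streamObject and streamText in the following hunks allows a deterministic clock to be injected, presumably for the SDK's own tests; the underscore marks it as internal rather than public API.

```js
// De-minified equivalent of the bundled now() helper: monotonic clock
// when available, wall clock otherwise.
function now() {
  return globalThis?.performance?.now() ?? Date.now();
}
```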
```diff
@@ -2029,6 +2052,7 @@ async function streamObject({
   headers,
   experimental_telemetry: telemetry,
   onFinish,
+  _internal: { now: now2 = now } = {},
   ...settings
 }) {
   var _a11;
@@ -2174,7 +2198,7 @@ async function streamObject({
   const {
     result: { stream, warnings, rawResponse },
     doStreamSpan,
-    startTimestamp
+    startTimestampMs
   } = await retry(
     () => recordSpan({
       name: "ai.streamObject.doStream",
@@ -2194,18 +2218,20 @@ async function streamObject({
         },
         "ai.settings.mode": mode,
         // standardized gen-ai llm span attributes:
-        "gen_ai.request.model": model.modelId,
         "gen_ai.system": model.provider,
+        "gen_ai.request.model": model.modelId,
+        "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
         "gen_ai.request.max_tokens": settings.maxTokens,
+        "gen_ai.request.presence_penalty": settings.presencePenalty,
         "gen_ai.request.temperature": settings.temperature,
+        "gen_ai.request.top_k": settings.topK,
         "gen_ai.request.top_p": settings.topP
       }
     }),
     tracer,
     endWhenDone: false,
     fn: async (doStreamSpan2) => ({
-      startTimestamp: performance.now(),
-      // get before the call
+      startTimestampMs: now2(),
       doStreamSpan: doStreamSpan2,
       result: await model.doStream(callOptions)
     })
@@ -2220,7 +2246,8 @@ async function streamObject({
       rootSpan,
       doStreamSpan,
       telemetry,
-      startTimestamp
+      startTimestampMs,
+      now: now2
     });
   }
 });
@@ -2235,7 +2262,8 @@ var DefaultStreamObjectResult = class {
     rootSpan,
     doStreamSpan,
     telemetry,
-    startTimestamp
+    startTimestampMs,
+    now: now2
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -2264,7 +2292,7 @@ var DefaultStreamObjectResult = class {
       new TransformStream({
         async transform(chunk, controller) {
           if (isFirstChunk) {
-            const msToFirstChunk = performance.now() - startTimestamp;
+            const msToFirstChunk = now2() - startTimestampMs;
             isFirstChunk = false;
             doStreamSpan.addEvent("ai.stream.firstChunk", {
               "ai.stream.msToFirstChunk": msToFirstChunk
@@ -2341,15 +2369,18 @@ var DefaultStreamObjectResult = class {
             selectTelemetryAttributes({
               telemetry,
               attributes: {
-                "ai.finishReason": finishReason,
-                "ai.usage.promptTokens": finalUsage.promptTokens,
-                "ai.usage.completionTokens": finalUsage.completionTokens,
-                "ai.result.object": {
+                "ai.response.finishReason": finishReason,
+                "ai.response.object": {
                   output: () => JSON.stringify(object)
                 },
+                "ai.usage.promptTokens": finalUsage.promptTokens,
+                "ai.usage.completionTokens": finalUsage.completionTokens,
+                // deprecated
+                "ai.finishReason": finishReason,
+                "ai.result.object": { output: () => JSON.stringify(object) },
                 // standardized gen-ai llm span attributes:
-                "gen_ai.usage.prompt_tokens": finalUsage.promptTokens,
-                "gen_ai.usage.completion_tokens": finalUsage.completionTokens,
+                "gen_ai.usage.input_tokens": finalUsage.promptTokens,
+                "gen_ai.usage.output_tokens": finalUsage.completionTokens,
                 "gen_ai.response.finish_reasons": [finishReason]
               }
             })
@@ -2361,9 +2392,11 @@ var DefaultStreamObjectResult = class {
               attributes: {
                 "ai.usage.promptTokens": finalUsage.promptTokens,
                 "ai.usage.completionTokens": finalUsage.completionTokens,
-                "ai.result.object": {
+                "ai.response.object": {
                   output: () => JSON.stringify(object)
-                }
+                },
+                // deprecated
+                "ai.result.object": { output: () => JSON.stringify(object) }
               }
             })
           );
@@ -2742,10 +2775,14 @@ async function generateText({
           input: () => JSON.stringify(promptMessages)
         },
         // standardized gen-ai llm span attributes:
-        "gen_ai.request.model": model.modelId,
         "gen_ai.system": model.provider,
+        "gen_ai.request.model": model.modelId,
+        "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
         "gen_ai.request.max_tokens": settings.maxTokens,
+        "gen_ai.request.presence_penalty": settings.presencePenalty,
+        "gen_ai.request.stop_sequences": settings.stopSequences,
         "gen_ai.request.temperature": settings.temperature,
+        "gen_ai.request.top_k": settings.topK,
         "gen_ai.request.top_p": settings.topP
       }
     }),
@@ -2763,9 +2800,17 @@ async function generateText({
         selectTelemetryAttributes({
           telemetry,
           attributes: {
-            "ai.finishReason": result.finishReason,
+            "ai.response.finishReason": result.finishReason,
+            "ai.response.text": {
+              output: () => result.text
+            },
+            "ai.response.toolCalls": {
+              output: () => JSON.stringify(result.toolCalls)
+            },
             "ai.usage.promptTokens": result.usage.promptTokens,
             "ai.usage.completionTokens": result.usage.completionTokens,
+            // deprecated:
+            "ai.finishReason": result.finishReason,
             "ai.result.text": {
               output: () => result.text
             },
@@ -2774,8 +2819,8 @@ async function generateText({
             },
             // standardized gen-ai llm span attributes:
             "gen_ai.response.finish_reasons": [result.finishReason],
-            "gen_ai.usage.prompt_tokens": result.usage.promptTokens,
-            "gen_ai.usage.completion_tokens": result.usage.completionTokens
+            "gen_ai.usage.input_tokens": result.usage.promptTokens,
+            "gen_ai.usage.output_tokens": result.usage.completionTokens
           }
         })
       );
```
```diff
@@ -2828,9 +2873,17 @@ async function generateText({
         selectTelemetryAttributes({
           telemetry,
           attributes: {
-            "ai.finishReason": currentModelResponse.finishReason,
+            "ai.response.finishReason": currentModelResponse.finishReason,
+            "ai.response.text": {
+              output: () => currentModelResponse.text
+            },
+            "ai.response.toolCalls": {
+              output: () => JSON.stringify(currentModelResponse.toolCalls)
+            },
             "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
             "ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
+            // deprecated:
+            "ai.finishReason": currentModelResponse.finishReason,
             "ai.result.text": {
               output: () => currentModelResponse.text
             },
@@ -3288,6 +3341,7 @@ async function streamText({
   experimental_toolCallStreaming: toolCallStreaming = false,
   onChunk,
   onFinish,
+  _internal: { now: now2 = now } = {},
   ...settings
 }) {
   var _a11;
```
```diff
@@ -3322,7 +3376,7 @@ async function streamText({
       const {
         result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2 },
         doStreamSpan: doStreamSpan2,
-        startTimestamp: startTimestamp2
+        startTimestampMs: startTimestampMs2
       } = await retry(
         () => recordSpan({
           name: "ai.streamText.doStream",
@@ -3341,17 +3395,21 @@ async function streamText({
               input: () => JSON.stringify(promptMessages2)
             },
             // standardized gen-ai llm span attributes:
-            "gen_ai.request.model": model.modelId,
             "gen_ai.system": model.provider,
+            "gen_ai.request.model": model.modelId,
+            "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
             "gen_ai.request.max_tokens": settings.maxTokens,
+            "gen_ai.request.presence_penalty": settings.presencePenalty,
+            "gen_ai.request.stop_sequences": settings.stopSequences,
             "gen_ai.request.temperature": settings.temperature,
+            "gen_ai.request.top_k": settings.topK,
             "gen_ai.request.top_p": settings.topP
           }
         }),
         tracer,
         endWhenDone: false,
         fn: async (doStreamSpan3) => ({
-          startTimestamp: performance.now(),
+          startTimestampMs: now2(),
           // get before the call
           doStreamSpan: doStreamSpan3,
           result: await model.doStream({
@@ -3381,7 +3439,7 @@ async function streamText({
           rawResponse: rawResponse2
         },
         doStreamSpan: doStreamSpan2,
-        startTimestamp: startTimestamp2
+        startTimestampMs: startTimestampMs2
       };
     };
     const promptMessages = await convertToLanguageModelPrompt({
@@ -3391,7 +3449,7 @@ async function streamText({
     const {
       result: { stream, warnings, rawResponse },
       doStreamSpan,
-      startTimestamp
+      startTimestampMs
     } = await startRoundtrip({
       promptType: validatePrompt({ system, prompt, messages }).type,
       promptMessages
@@ -3405,10 +3463,11 @@ async function streamText({
       rootSpan,
       doStreamSpan,
       telemetry,
-      startTimestamp,
+      startTimestampMs,
       maxToolRoundtrips,
      startRoundtrip,
-      promptMessages
+      promptMessages,
+      now: now2
     });
   }
 });
@@ -3423,10 +3482,11 @@ var DefaultStreamTextResult = class {
     rootSpan,
     doStreamSpan,
     telemetry,
-    startTimestamp,
+    startTimestampMs,
     maxToolRoundtrips,
     startRoundtrip,
-    promptMessages
+    promptMessages,
+    now: now2
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -3454,7 +3514,7 @@ var DefaultStreamTextResult = class {
     const self = this;
     function addRoundtripStream({
       stream: stream2,
-      startTimestamp
+      startTimestamp,
       doStreamSpan: doStreamSpan2,
       currentToolRoundtrip,
       promptMessages: promptMessages2,
@@ -3481,12 +3541,16 @@ var DefaultStreamTextResult = class {
       new TransformStream({
         async transform(chunk, controller) {
           if (roundtripFirstChunk) {
-            const msToFirstChunk = performance.now() - startTimestamp;
+            const msToFirstChunk = now2() - startTimestamp;
             roundtripFirstChunk = false;
             doStreamSpan2.addEvent("ai.stream.firstChunk", {
+              "ai.response.msToFirstChunk": msToFirstChunk,
+              // deprecated:
               "ai.stream.msToFirstChunk": msToFirstChunk
             });
             doStreamSpan2.setAttributes({
+              "ai.response.msToFirstChunk": msToFirstChunk,
+              // deprecated:
               "ai.stream.msToFirstChunk": msToFirstChunk
             });
           }
@@ -3515,6 +3579,12 @@ var DefaultStreamTextResult = class {
             roundtripFinishReason = chunk.finishReason;
             roundtripProviderMetadata = chunk.experimental_providerMetadata;
             roundtripLogProbs = chunk.logprobs;
+            const msToFinish = now2() - startTimestamp;
+            doStreamSpan2.addEvent("ai.stream.finish");
+            doStreamSpan2.setAttributes({
+              "ai.response.msToFinish": msToFinish,
+              "ai.response.avgCompletionTokensPerSecond": 1e3 * roundtripUsage.completionTokens / msToFinish
+            });
             break;
           case "tool-call-streaming-start":
          case "tool-call-delta": {
```
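The new ai.stream.finish event above records the total stream duration and an average-throughput attribute. The throughput expression is plain unit conversion, completion tokens per millisecond scaled by 1e3 into tokens per second; a worked example with made-up numbers:

```js
// 120 completion tokens finishing 3000 ms after the stream started:
const msToFinish = 3000;
const completionTokens = 120;
const tokensPerSecond = 1e3 * completionTokens / msToFinish; // 40
```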
```diff
@@ -3547,17 +3617,23 @@ var DefaultStreamTextResult = class {
             selectTelemetryAttributes({
               telemetry,
               attributes: {
-                "ai.finishReason": roundtripFinishReason,
+                "ai.response.finishReason": roundtripFinishReason,
+                "ai.response.text": { output: () => roundtripText },
+                "ai.response.toolCalls": {
+                  output: () => telemetryToolCalls
+                },
                 "ai.usage.promptTokens": roundtripUsage.promptTokens,
                 "ai.usage.completionTokens": roundtripUsage.completionTokens,
+                // deprecated
+                "ai.finishReason": roundtripFinishReason,
                 "ai.result.text": { output: () => roundtripText },
                 "ai.result.toolCalls": {
                   output: () => telemetryToolCalls
                 },
                 // standardized gen-ai llm span attributes:
                 "gen_ai.response.finish_reasons": [roundtripFinishReason],
-                "gen_ai.usage.prompt_tokens": roundtripUsage.promptTokens,
-                "gen_ai.usage.completion_tokens": roundtripUsage.completionTokens
+                "gen_ai.usage.input_tokens": roundtripUsage.promptTokens,
+                "gen_ai.usage.output_tokens": roundtripUsage.completionTokens
              }
             })
           );
```
```diff
@@ -3585,7 +3661,11 @@ var DefaultStreamTextResult = class {
             (message) => convertToLanguageModelMessage(message, null)
           )
         );
-        const { result, doStreamSpan: doStreamSpan3 } = await startRoundtrip({
+        const {
+          result,
+          doStreamSpan: doStreamSpan3,
+          startTimestampMs: startTimestamp2
+        } = await startRoundtrip({
           promptType: "messages",
           promptMessages: promptMessages2
         });
```
```diff
@@ -3593,7 +3673,7 @@ var DefaultStreamTextResult = class {
         self.rawResponse = result.rawResponse;
         addRoundtripStream({
           stream: result.stream,
-          startTimestamp: performance.now(),
+          startTimestamp: startTimestamp2,
           doStreamSpan: doStreamSpan3,
           currentToolRoundtrip: currentToolRoundtrip + 1,
           promptMessages: promptMessages2,
@@ -3614,9 +3694,15 @@ var DefaultStreamTextResult = class {
             selectTelemetryAttributes({
               telemetry,
               attributes: {
-                "ai.finishReason": roundtripFinishReason,
+                "ai.response.finishReason": roundtripFinishReason,
+                "ai.response.text": { output: () => roundtripText },
+                "ai.response.toolCalls": {
+                  output: () => telemetryToolCalls
+                },
                 "ai.usage.promptTokens": combinedUsage.promptTokens,
                 "ai.usage.completionTokens": combinedUsage.completionTokens,
+                // deprecated
+                "ai.finishReason": roundtripFinishReason,
                 "ai.result.text": { output: () => roundtripText },
                 "ai.result.toolCalls": {
                   output: () => telemetryToolCalls
@@ -3656,7 +3742,7 @@ var DefaultStreamTextResult = class {
         }
         addRoundtripStream({
           stream,
-          startTimestamp,
+          startTimestamp: startTimestampMs,
           doStreamSpan,
           currentToolRoundtrip: 0,
           promptMessages,
```