ai 3.3.24 → 3.3.26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/dist/index.d.mts +25 -1
- package/dist/index.d.ts +25 -1
- package/dist/index.js +111 -34
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +111 -34
- package/dist/index.mjs.map +1 -1
- package/package.json +2 -2
package/dist/index.mjs
CHANGED
@@ -1746,10 +1746,13 @@ async function generateObject({
         },
         "ai.settings.mode": mode,
         // standardized gen-ai llm span attributes:
-        "gen_ai.request.model": model.modelId,
         "gen_ai.system": model.provider,
+        "gen_ai.request.model": model.modelId,
+        "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
         "gen_ai.request.max_tokens": settings.maxTokens,
+        "gen_ai.request.presence_penalty": settings.presencePenalty,
         "gen_ai.request.temperature": settings.temperature,
+        "gen_ai.request.top_k": settings.topK,
         "gen_ai.request.top_p": settings.topP
       }
     }),
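The `gen_ai.request.*` additions in this hunk (repeated at every `generateObject`, `streamObject`, `generateText`, and `streamText` call site below) come straight from the call settings. As a rough sketch of a call that would populate them — the provider import and model id are placeholders, not part of this diff:

```js
import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // placeholder provider
import { z } from 'zod';

// Each call setting maps onto one gen_ai.request.* span attribute
// when telemetry is enabled.
const { object } = await generateObject({
  model: openai('gpt-4o-mini'), // placeholder model id
  schema: z.object({ city: z.string() }),
  prompt: 'Name a city in France.',
  maxTokens: 256,        // gen_ai.request.max_tokens
  temperature: 0.2,      // gen_ai.request.temperature
  topP: 0.9,             // gen_ai.request.top_p
  topK: 40,              // gen_ai.request.top_k (new)
  frequencyPenalty: 0.1, // gen_ai.request.frequency_penalty (new)
  presencePenalty: 0.1,  // gen_ai.request.presence_penalty (new)
  experimental_telemetry: { isEnabled: true },
});
```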
@@ -1775,9 +1778,12 @@ async function generateObject({
       selectTelemetryAttributes({
         telemetry,
         attributes: {
-          "ai.finishReason": result2.finishReason,
+          "ai.response.finishReason": result2.finishReason,
+          "ai.response.object": { output: () => result2.text },
           "ai.usage.promptTokens": result2.usage.promptTokens,
           "ai.usage.completionTokens": result2.usage.completionTokens,
+          // deprecated:
+          "ai.finishReason": result2.finishReason,
           "ai.result.object": { output: () => result2.text },
           // standardized gen-ai llm span attributes:
           "gen_ai.response.finish_reasons": [result2.finishReason],
@@ -1829,10 +1835,13 @@ async function generateObject({
         },
         "ai.settings.mode": mode,
         // standardized gen-ai llm span attributes:
-        "gen_ai.request.model": model.modelId,
         "gen_ai.system": model.provider,
+        "gen_ai.request.model": model.modelId,
+        "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
         "gen_ai.request.max_tokens": settings.maxTokens,
+        "gen_ai.request.presence_penalty": settings.presencePenalty,
         "gen_ai.request.temperature": settings.temperature,
+        "gen_ai.request.top_k": settings.topK,
         "gen_ai.request.top_p": settings.topP
       }
     }),
@@ -1863,14 +1872,17 @@ async function generateObject({
       selectTelemetryAttributes({
         telemetry,
         attributes: {
-          "ai.finishReason": result2.finishReason,
+          "ai.response.finishReason": result2.finishReason,
+          "ai.response.object": { output: () => objectText },
           "ai.usage.promptTokens": result2.usage.promptTokens,
           "ai.usage.completionTokens": result2.usage.completionTokens,
+          // deprecated:
+          "ai.finishReason": result2.finishReason,
           "ai.result.object": { output: () => objectText },
           // standardized gen-ai llm span attributes:
           "gen_ai.response.finish_reasons": [result2.finishReason],
-          "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
-          "gen_ai.usage.completion_tokens": result2.usage.completionTokens
+          "gen_ai.usage.input_tokens": result2.usage.promptTokens,
+          "gen_ai.usage.output_tokens": result2.usage.completionTokens
         }
       })
     );
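Note the two rename patterns above: the SDK-specific `ai.*` attributes move to `ai.response.*` while the old names are kept as deprecated duplicates, whereas the standardized `gen_ai.usage.*` attributes switch outright from the older `prompt_tokens`/`completion_tokens` names to `input_tokens`/`output_tokens`. A hypothetical consumer-side shim for dashboards that must read spans from both package versions — the helper name and attribute-bag shape are illustrative:

```js
// Prefer the 3.3.26 attribute names, fall back to the 3.3.24 ones.
function readSpanTelemetry(attributes) {
  return {
    finishReason:
      attributes['ai.response.finishReason'] ?? attributes['ai.finishReason'],
    inputTokens:
      attributes['gen_ai.usage.input_tokens'] ??
      attributes['gen_ai.usage.prompt_tokens'],
    outputTokens:
      attributes['gen_ai.usage.output_tokens'] ??
      attributes['gen_ai.usage.completion_tokens'],
  };
}
```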
@@ -1911,9 +1923,14 @@ async function generateObject({
       selectTelemetryAttributes({
         telemetry,
         attributes: {
-          "ai.finishReason": finishReason,
+          "ai.response.finishReason": finishReason,
+          "ai.response.object": {
+            output: () => JSON.stringify(validationResult.value)
+          },
           "ai.usage.promptTokens": usage.promptTokens,
           "ai.usage.completionTokens": usage.completionTokens,
+          // deprecated:
+          "ai.finishReason": finishReason,
           "ai.result.object": {
             output: () => JSON.stringify(validationResult.value)
           }
@@ -2035,6 +2052,7 @@ async function streamObject({
   headers,
   experimental_telemetry: telemetry,
   onFinish,
+  _internal: { now: now2 = now } = {},
   ...settings
 }) {
   var _a11;
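`streamObject` (and `streamText` below) now accepts an `_internal.now` override that defaults to the module's `now` helper and is threaded through to every timing measurement. This is the usual injectable-clock pattern for deterministic timing tests; a self-contained sketch of the same idiom, with all names invented for illustration:

```js
// Injectable clock: production code uses a real timer by default;
// tests pass a fake so elapsed-time assertions are deterministic.
const defaultNow = () => performance.now();

function createTimer({ _internal: { now = defaultNow } = {} } = {}) {
  const start = now(); // captured once, like startTimestampMs below
  return { elapsed: () => now() - start };
}

// Test usage: a fake clock that advances 100 "ms" per call.
let t = 0;
const timer = createTimer({ _internal: { now: () => (t += 100) } });
console.log(timer.elapsed()); // 100, independent of real time
```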
@@ -2200,17 +2218,20 @@ async function streamObject({
         },
         "ai.settings.mode": mode,
         // standardized gen-ai llm span attributes:
-        "gen_ai.request.model": model.modelId,
         "gen_ai.system": model.provider,
+        "gen_ai.request.model": model.modelId,
+        "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
         "gen_ai.request.max_tokens": settings.maxTokens,
+        "gen_ai.request.presence_penalty": settings.presencePenalty,
         "gen_ai.request.temperature": settings.temperature,
+        "gen_ai.request.top_k": settings.topK,
         "gen_ai.request.top_p": settings.topP
       }
     }),
     tracer,
     endWhenDone: false,
     fn: async (doStreamSpan2) => ({
-      startTimestampMs:
+      startTimestampMs: now2(),
       doStreamSpan: doStreamSpan2,
       result: await model.doStream(callOptions)
     })
@@ -2225,7 +2246,8 @@ async function streamObject({
       rootSpan,
       doStreamSpan,
       telemetry,
-      startTimestampMs
+      startTimestampMs,
+      now: now2
     });
   }
 });
@@ -2240,7 +2262,8 @@ var DefaultStreamObjectResult = class {
     rootSpan,
     doStreamSpan,
     telemetry,
-    startTimestampMs
+    startTimestampMs,
+    now: now2
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -2269,7 +2292,7 @@ var DefaultStreamObjectResult = class {
       new TransformStream({
         async transform(chunk, controller) {
           if (isFirstChunk) {
-            const msToFirstChunk =
+            const msToFirstChunk = now2() - startTimestampMs;
             isFirstChunk = false;
             doStreamSpan.addEvent("ai.stream.firstChunk", {
               "ai.stream.msToFirstChunk": msToFirstChunk
@@ -2346,15 +2369,18 @@ var DefaultStreamObjectResult = class {
           selectTelemetryAttributes({
             telemetry,
             attributes: {
-              "ai.finishReason": finishReason,
-              "ai.usage.promptTokens": finalUsage.promptTokens,
-              "ai.usage.completionTokens": finalUsage.completionTokens,
-              "ai.result.object": {
+              "ai.response.finishReason": finishReason,
+              "ai.response.object": {
                 output: () => JSON.stringify(object)
               },
+              "ai.usage.promptTokens": finalUsage.promptTokens,
+              "ai.usage.completionTokens": finalUsage.completionTokens,
+              // deprecated
+              "ai.finishReason": finishReason,
+              "ai.result.object": { output: () => JSON.stringify(object) },
               // standardized gen-ai llm span attributes:
-              "gen_ai.usage.prompt_tokens": finalUsage.promptTokens,
-              "gen_ai.usage.completion_tokens": finalUsage.completionTokens,
+              "gen_ai.usage.input_tokens": finalUsage.promptTokens,
+              "gen_ai.usage.output_tokens": finalUsage.completionTokens,
               "gen_ai.response.finish_reasons": [finishReason]
             }
           })
@@ -2366,9 +2392,11 @@ var DefaultStreamObjectResult = class {
         attributes: {
           "ai.usage.promptTokens": finalUsage.promptTokens,
           "ai.usage.completionTokens": finalUsage.completionTokens,
-          "ai.result.object": {
+          "ai.response.object": {
             output: () => JSON.stringify(object)
-          }
+          },
+          // deprecated
+          "ai.result.object": { output: () => JSON.stringify(object) }
         }
       })
     );
@@ -2747,10 +2775,14 @@ async function generateText({
         input: () => JSON.stringify(promptMessages)
       },
       // standardized gen-ai llm span attributes:
-      "gen_ai.request.model": model.modelId,
       "gen_ai.system": model.provider,
+      "gen_ai.request.model": model.modelId,
+      "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
       "gen_ai.request.max_tokens": settings.maxTokens,
+      "gen_ai.request.presence_penalty": settings.presencePenalty,
+      "gen_ai.request.stop_sequences": settings.stopSequences,
       "gen_ai.request.temperature": settings.temperature,
+      "gen_ai.request.top_k": settings.topK,
       "gen_ai.request.top_p": settings.topP
     }
   }),
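For text generation the diff additionally records stop sequences. A sketch of a `generateText` call that would populate the extra attribute (placeholders as in the earlier sketches):

```js
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // placeholder provider

const { text } = await generateText({
  model: openai('gpt-4o-mini'), // placeholder model id
  prompt: 'List three fruits, one per line.',
  stopSequences: ['\n\n'], // gen_ai.request.stop_sequences (new)
  experimental_telemetry: { isEnabled: true },
});
```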
@@ -2768,9 +2800,17 @@ async function generateText({
     selectTelemetryAttributes({
       telemetry,
       attributes: {
-        "ai.finishReason": result.finishReason,
+        "ai.response.finishReason": result.finishReason,
+        "ai.response.text": {
+          output: () => result.text
+        },
+        "ai.response.toolCalls": {
+          output: () => JSON.stringify(result.toolCalls)
+        },
         "ai.usage.promptTokens": result.usage.promptTokens,
         "ai.usage.completionTokens": result.usage.completionTokens,
+        // deprecated:
+        "ai.finishReason": result.finishReason,
         "ai.result.text": {
           output: () => result.text
         },
@@ -2779,8 +2819,8 @@ async function generateText({
       },
       // standardized gen-ai llm span attributes:
       "gen_ai.response.finish_reasons": [result.finishReason],
-      "gen_ai.usage.prompt_tokens": result.usage.promptTokens,
-      "gen_ai.usage.completion_tokens": result.usage.completionTokens
+      "gen_ai.usage.input_tokens": result.usage.promptTokens,
+      "gen_ai.usage.output_tokens": result.usage.completionTokens
     }
   })
 );
@@ -2833,9 +2873,17 @@ async function generateText({
     selectTelemetryAttributes({
       telemetry,
       attributes: {
-        "ai.finishReason": currentModelResponse.finishReason,
+        "ai.response.finishReason": currentModelResponse.finishReason,
+        "ai.response.text": {
+          output: () => currentModelResponse.text
+        },
+        "ai.response.toolCalls": {
+          output: () => JSON.stringify(currentModelResponse.toolCalls)
+        },
         "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
         "ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
+        // deprecated:
+        "ai.finishReason": currentModelResponse.finishReason,
         "ai.result.text": {
           output: () => currentModelResponse.text
         },
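The `{ output: () => ... }` wrappers on the response attributes are lazy on purpose: `selectTelemetryAttributes` only invokes these closures when output recording is enabled, so response text and tool calls are never serialized otherwise. A sketch of opting out (placeholders as before):

```js
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // placeholder provider

// With recordOutputs disabled, the lazy ai.response.text /
// ai.response.toolCalls closures are never invoked.
const { text } = await generateText({
  model: openai('gpt-4o-mini'), // placeholder model id
  prompt: 'Say hello.',
  experimental_telemetry: { isEnabled: true, recordOutputs: false },
});
```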
@@ -3293,6 +3341,7 @@ async function streamText({
   experimental_toolCallStreaming: toolCallStreaming = false,
   onChunk,
   onFinish,
+  _internal: { now: now2 = now } = {},
   ...settings
 }) {
   var _a11;
@@ -3346,17 +3395,21 @@ async function streamText({
           input: () => JSON.stringify(promptMessages2)
         },
         // standardized gen-ai llm span attributes:
-        "gen_ai.request.model": model.modelId,
         "gen_ai.system": model.provider,
+        "gen_ai.request.model": model.modelId,
+        "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
         "gen_ai.request.max_tokens": settings.maxTokens,
+        "gen_ai.request.presence_penalty": settings.presencePenalty,
+        "gen_ai.request.stop_sequences": settings.stopSequences,
         "gen_ai.request.temperature": settings.temperature,
+        "gen_ai.request.top_k": settings.topK,
         "gen_ai.request.top_p": settings.topP
       }
     }),
     tracer,
     endWhenDone: false,
     fn: async (doStreamSpan3) => ({
-      startTimestampMs:
+      startTimestampMs: now2(),
       // get before the call
       doStreamSpan: doStreamSpan3,
       result: await model.doStream({
@@ -3413,7 +3466,8 @@ async function streamText({
       startTimestampMs,
       maxToolRoundtrips,
       startRoundtrip,
-      promptMessages
+      promptMessages,
+      now: now2
     });
   }
 });
@@ -3431,7 +3485,8 @@ var DefaultStreamTextResult = class {
     startTimestampMs,
     maxToolRoundtrips,
     startRoundtrip,
-    promptMessages
+    promptMessages,
+    now: now2
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -3486,12 +3541,16 @@ var DefaultStreamTextResult = class {
       new TransformStream({
         async transform(chunk, controller) {
           if (roundtripFirstChunk) {
-            const msToFirstChunk =
+            const msToFirstChunk = now2() - startTimestamp;
             roundtripFirstChunk = false;
             doStreamSpan2.addEvent("ai.stream.firstChunk", {
+              "ai.response.msToFirstChunk": msToFirstChunk,
+              // deprecated:
               "ai.stream.msToFirstChunk": msToFirstChunk
             });
             doStreamSpan2.setAttributes({
+              "ai.response.msToFirstChunk": msToFirstChunk,
+              // deprecated:
               "ai.stream.msToFirstChunk": msToFirstChunk
             });
           }
@@ -3520,6 +3579,12 @@ var DefaultStreamTextResult = class {
             roundtripFinishReason = chunk.finishReason;
             roundtripProviderMetadata = chunk.experimental_providerMetadata;
             roundtripLogProbs = chunk.logprobs;
+            const msToFinish = now2() - startTimestamp;
+            doStreamSpan2.addEvent("ai.stream.finish");
+            doStreamSpan2.setAttributes({
+              "ai.response.msToFinish": msToFinish,
+              "ai.response.avgCompletionTokensPerSecond": 1e3 * roundtripUsage.completionTokens / msToFinish
+            });
             break;
           case "tool-call-streaming-start":
           case "tool-call-delta": {
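The new `ai.response.avgCompletionTokensPerSecond` attribute is plain unit conversion: completion tokens divided by elapsed milliseconds, scaled by `1e3` to tokens per second. A worked example:

```js
// 120 completion tokens streamed over 3000 ms:
const msToFinish = 3000;
const completionTokens = 120;
const tokensPerSecond = (1e3 * completionTokens) / msToFinish; // 40
```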
@@ -3552,17 +3617,23 @@ var DefaultStreamTextResult = class {
           selectTelemetryAttributes({
             telemetry,
             attributes: {
-              "ai.finishReason": roundtripFinishReason,
+              "ai.response.finishReason": roundtripFinishReason,
+              "ai.response.text": { output: () => roundtripText },
+              "ai.response.toolCalls": {
+                output: () => telemetryToolCalls
+              },
               "ai.usage.promptTokens": roundtripUsage.promptTokens,
               "ai.usage.completionTokens": roundtripUsage.completionTokens,
+              // deprecated
+              "ai.finishReason": roundtripFinishReason,
               "ai.result.text": { output: () => roundtripText },
               "ai.result.toolCalls": {
                 output: () => telemetryToolCalls
               },
               // standardized gen-ai llm span attributes:
               "gen_ai.response.finish_reasons": [roundtripFinishReason],
-              "gen_ai.usage.prompt_tokens": roundtripUsage.promptTokens,
-              "gen_ai.usage.completion_tokens": roundtripUsage.completionTokens
+              "gen_ai.usage.input_tokens": roundtripUsage.promptTokens,
+              "gen_ai.usage.output_tokens": roundtripUsage.completionTokens
             }
           })
         );
@@ -3623,9 +3694,15 @@ var DefaultStreamTextResult = class {
         selectTelemetryAttributes({
           telemetry,
           attributes: {
-            "ai.finishReason": roundtripFinishReason,
+            "ai.response.finishReason": roundtripFinishReason,
+            "ai.response.text": { output: () => roundtripText },
+            "ai.response.toolCalls": {
+              output: () => telemetryToolCalls
+            },
             "ai.usage.promptTokens": combinedUsage.promptTokens,
             "ai.usage.completionTokens": combinedUsage.completionTokens,
+            // deprecated
+            "ai.finishReason": roundtripFinishReason,
             "ai.result.text": { output: () => roundtripText },
             "ai.result.toolCalls": {
               output: () => telemetryToolCalls
             },