ai 3.3.23 → 3.3.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/dist/index.d.mts +25 -1
- package/dist/index.d.ts +25 -1
- package/dist/index.js +131 -45
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +131 -45
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,18 @@
 # ai
 
+## 3.3.25
+
+### Patch Changes
+
+- 4f1530f: feat (ai/core): add OpenTelemetry Semantic Conventions for GenAI operations to v1.27.0 of standard
+- dad775f: feat (ai/core): add finish event and avg output tokens per second (telemetry)
+
+## 3.3.24
+
+### Patch Changes
+
+- d87a655: fix (ai/core): provide fallback when globalThis.performance is not available
+
 ## 3.3.23
 
 ### Patch Changes
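Both 3.3.25 entries extend the SDK's experimental OpenTelemetry support, which is opt-in per call; none of the spans or attributes shown in the hunks below are recorded without it. A minimal sketch (model and prompt are placeholders):

```ts
import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";

const result = await streamText({
  model: openai("gpt-4o-mini"), // placeholder model
  prompt: "Hello!",
  experimental_telemetry: { isEnabled: true }, // required for the telemetry below
});
```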
package/dist/index.d.mts
CHANGED
@@ -924,6 +924,12 @@ Optional telemetry configuration (experimental).
     Callback that is called when the LLM response and the final object validation are finished.
      */
     onFinish?: OnFinishCallback<OBJECT>;
+    /**
+     * Internal. For test use only. May change without notice.
+     */
+    _internal?: {
+        now?: () => number;
+    };
 }): Promise<StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>>;
 /**
 Generate an array with structured, typed elements for a given prompt and element schema using a language model.
@@ -977,6 +983,12 @@ Optional telemetry configuration (experimental).
     Callback that is called when the LLM response and the final object validation are finished.
      */
     onFinish?: OnFinishCallback<Array<ELEMENT>>;
+    /**
+     * Internal. For test use only. May change without notice.
+     */
+    _internal?: {
+        now?: () => number;
+    };
 }): Promise<StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>>;
 /**
 Generate JSON with any schema for a given prompt using a language model.
@@ -1004,6 +1016,12 @@ Optional telemetry configuration (experimental).
     Callback that is called when the LLM response and the final object validation are finished.
      */
     onFinish?: OnFinishCallback<JSONValue>;
+    /**
+     * Internal. For test use only. May change without notice.
+     */
+    _internal?: {
+        now?: () => number;
+    };
 }): Promise<StreamObjectResult<JSONValue, JSONValue, never>>;
 /**
  * @deprecated Use `streamObject` instead.
@@ -1562,7 +1580,7 @@ If set and supported by the model, calls will generate deterministic results.
 @return
 A result object for accessing different stream types and additional information.
  */
-declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, ...settings }: CallSettings & Prompt & {
+declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, _internal: { now }, ...settings }: CallSettings & Prompt & {
     /**
     The language model to use.
      */
@@ -1649,6 +1667,12 @@ Callback that is called when the LLM response and all request tool executions
      */
     readonly experimental_providerMetadata: ProviderMetadata | undefined;
     }) => Promise<void> | void;
+    /**
+     * Internal. For test use only. May change without notice.
+     */
+    _internal?: {
+        now?: () => number;
+    };
 }): Promise<StreamTextResult<TOOLS>>;
 
 /**
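The `_internal.now` hook declared above is how a deterministic clock can replace `performance.now()`/`Date.now()` for the timing attributes; the JSDoc marks it test-only and unstable. A hypothetical sketch:

```ts
import { streamText } from "ai";

// Hypothetical deterministic clock: each call advances 100 ms.
let t = 0;
const fakeNow = () => (t += 100);

const result = await streamText({
  model: mockModel, // assumed: any LanguageModelV1 test double
  prompt: "test",
  experimental_telemetry: { isEnabled: true },
  _internal: { now: fakeNow }, // msToFirstChunk / msToFinish become reproducible
});
```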
package/dist/index.d.ts
CHANGED
@@ -924,6 +924,12 @@ Optional telemetry configuration (experimental).
     Callback that is called when the LLM response and the final object validation are finished.
      */
     onFinish?: OnFinishCallback<OBJECT>;
+    /**
+     * Internal. For test use only. May change without notice.
+     */
+    _internal?: {
+        now?: () => number;
+    };
 }): Promise<StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>>;
 /**
 Generate an array with structured, typed elements for a given prompt and element schema using a language model.
@@ -977,6 +983,12 @@ Optional telemetry configuration (experimental).
     Callback that is called when the LLM response and the final object validation are finished.
      */
     onFinish?: OnFinishCallback<Array<ELEMENT>>;
+    /**
+     * Internal. For test use only. May change without notice.
+     */
+    _internal?: {
+        now?: () => number;
+    };
 }): Promise<StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>>;
 /**
 Generate JSON with any schema for a given prompt using a language model.
@@ -1004,6 +1016,12 @@ Optional telemetry configuration (experimental).
     Callback that is called when the LLM response and the final object validation are finished.
      */
     onFinish?: OnFinishCallback<JSONValue>;
+    /**
+     * Internal. For test use only. May change without notice.
+     */
+    _internal?: {
+        now?: () => number;
+    };
 }): Promise<StreamObjectResult<JSONValue, JSONValue, never>>;
 /**
  * @deprecated Use `streamObject` instead.
@@ -1562,7 +1580,7 @@ If set and supported by the model, calls will generate deterministic results.
 @return
 A result object for accessing different stream types and additional information.
  */
-declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, ...settings }: CallSettings & Prompt & {
+declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxToolRoundtrips, experimental_telemetry: telemetry, experimental_toolCallStreaming: toolCallStreaming, onChunk, onFinish, _internal: { now }, ...settings }: CallSettings & Prompt & {
    /**
     The language model to use.
      */
@@ -1649,6 +1667,12 @@ Callback that is called when the LLM response and all request tool executions
      */
     readonly experimental_providerMetadata: ProviderMetadata | undefined;
     }) => Promise<void> | void;
+    /**
+     * Internal. For test use only. May change without notice.
+     */
+    _internal?: {
+        now?: () => number;
+    };
 }): Promise<StreamTextResult<TOOLS>>;
 
 /**
package/dist/index.js
CHANGED
@@ -1819,10 +1819,13 @@ async function generateObject({
           },
           "ai.settings.mode": mode,
           // standardized gen-ai llm span attributes:
-          "gen_ai.request.model": model.modelId,
           "gen_ai.system": model.provider,
+          "gen_ai.request.model": model.modelId,
+          "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
           "gen_ai.request.max_tokens": settings.maxTokens,
+          "gen_ai.request.presence_penalty": settings.presencePenalty,
           "gen_ai.request.temperature": settings.temperature,
+          "gen_ai.request.top_k": settings.topK,
           "gen_ai.request.top_p": settings.topP
         }
       }),
@@ -1848,9 +1851,12 @@ async function generateObject({
       selectTelemetryAttributes({
         telemetry,
         attributes: {
-          "ai.finishReason": result2.finishReason,
+          "ai.response.finishReason": result2.finishReason,
+          "ai.response.object": { output: () => result2.text },
           "ai.usage.promptTokens": result2.usage.promptTokens,
           "ai.usage.completionTokens": result2.usage.completionTokens,
+          // deprecated:
+          "ai.finishReason": result2.finishReason,
           "ai.result.object": { output: () => result2.text },
           // standardized gen-ai llm span attributes:
           "gen_ai.response.finish_reasons": [result2.finishReason],
@@ -1902,10 +1908,13 @@ async function generateObject({
           },
           "ai.settings.mode": mode,
           // standardized gen-ai llm span attributes:
-          "gen_ai.request.model": model.modelId,
           "gen_ai.system": model.provider,
+          "gen_ai.request.model": model.modelId,
+          "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
           "gen_ai.request.max_tokens": settings.maxTokens,
+          "gen_ai.request.presence_penalty": settings.presencePenalty,
           "gen_ai.request.temperature": settings.temperature,
+          "gen_ai.request.top_k": settings.topK,
           "gen_ai.request.top_p": settings.topP
         }
       }),
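The pattern in the three hunks above recurs throughout this file: `gen_ai.request.model` moves below `gen_ai.system`, the request attributes gain `frequency_penalty`, `presence_penalty`, and `top_k` (plus `stop_sequences` in the `generateText`/`streamText` hunks), and the finish-span attributes get `ai.response.*` names while the old `ai.finishReason`/`ai.result.*` spellings are kept as deprecated duplicates. The token-usage rename from v1.27.0 of the GenAI semantic conventions, summarized as a sketch:

```ts
// Old attribute name -> name used from ai@3.3.25 onward (semconv v1.27.0):
const renamedUsageAttributes = {
  "gen_ai.usage.prompt_tokens": "gen_ai.usage.input_tokens",
  "gen_ai.usage.completion_tokens": "gen_ai.usage.output_tokens",
} as const;
```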
@@ -1936,14 +1945,17 @@ async function generateObject({
       selectTelemetryAttributes({
         telemetry,
         attributes: {
-          "ai.finishReason": result2.finishReason,
+          "ai.response.finishReason": result2.finishReason,
+          "ai.response.object": { output: () => objectText },
           "ai.usage.promptTokens": result2.usage.promptTokens,
           "ai.usage.completionTokens": result2.usage.completionTokens,
+          // deprecated:
+          "ai.finishReason": result2.finishReason,
           "ai.result.object": { output: () => objectText },
           // standardized gen-ai llm span attributes:
           "gen_ai.response.finish_reasons": [result2.finishReason],
-          "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
-          "gen_ai.usage.completion_tokens": result2.usage.completionTokens
+          "gen_ai.usage.input_tokens": result2.usage.promptTokens,
+          "gen_ai.usage.output_tokens": result2.usage.completionTokens
         }
       })
     );
@@ -1984,9 +1996,14 @@ async function generateObject({
       selectTelemetryAttributes({
         telemetry,
         attributes: {
-          "ai.finishReason": finishReason,
+          "ai.response.finishReason": finishReason,
+          "ai.response.object": {
+            output: () => JSON.stringify(validationResult.value)
+          },
           "ai.usage.promptTokens": usage.promptTokens,
           "ai.usage.completionTokens": usage.completionTokens,
+          // deprecated:
+          "ai.finishReason": finishReason,
           "ai.result.object": {
             output: () => JSON.stringify(validationResult.value)
           }
@@ -2083,6 +2100,12 @@ var DelayedPromise = class {
   }
 };
 
+// core/util/now.ts
+function now() {
+  var _a11, _b;
+  return (_b = (_a11 = globalThis == null ? void 0 : globalThis.performance) == null ? void 0 : _a11.now()) != null ? _b : Date.now();
+}
+
 // core/generate-object/stream-object.ts
 async function streamObject({
   model,
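The `now` helper above is the 3.3.24 fix (`d87a655`) as it lands in the bundle: the `== null` chains are down-leveled optional chaining. Its likely source shape, reconstructed from the compiled output:

```ts
// core/util/now.ts (reconstruction of the compiled helper above)
export function now(): number {
  // Some runtimes (older Node setups, certain embedded/test environments)
  // do not expose globalThis.performance, hence the Date.now() fallback.
  return globalThis?.performance?.now() ?? Date.now();
}
```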
@@ -2099,6 +2122,7 @@ async function streamObject({
   headers,
   experimental_telemetry: telemetry,
   onFinish,
+  _internal: { now: now2 = now } = {},
   ...settings
 }) {
   var _a11;
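The `_internal: { now: now2 = now } = {}` parameter above applies two defaults at once: an omitted `_internal` becomes an empty object, and an omitted `now` falls back to the real clock. The same pattern in isolation (names hypothetical):

```ts
// Default to the real clock unless the caller injects one.
function timed({ _internal: { now: clock = Date.now } = {} } = {}): number {
  return clock();
}

timed();                                // real wall-clock time
timed({ _internal: { now: () => 0 } }); // deterministic: always 0
```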
@@ -2244,7 +2268,7 @@ async function streamObject({
   const {
     result: { stream, warnings, rawResponse },
     doStreamSpan,
-    startTimestamp
+    startTimestampMs
   } = await retry(
     () => recordSpan({
       name: "ai.streamObject.doStream",
@@ -2264,18 +2288,20 @@ async function streamObject({
           },
           "ai.settings.mode": mode,
           // standardized gen-ai llm span attributes:
-          "gen_ai.request.model": model.modelId,
           "gen_ai.system": model.provider,
+          "gen_ai.request.model": model.modelId,
+          "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
           "gen_ai.request.max_tokens": settings.maxTokens,
+          "gen_ai.request.presence_penalty": settings.presencePenalty,
           "gen_ai.request.temperature": settings.temperature,
+          "gen_ai.request.top_k": settings.topK,
           "gen_ai.request.top_p": settings.topP
         }
       }),
       tracer,
       endWhenDone: false,
       fn: async (doStreamSpan2) => ({
-        startTimestamp: performance.now(),
-        // get before the call
+        startTimestampMs: now2(),
         doStreamSpan: doStreamSpan2,
         result: await model.doStream(callOptions)
       })
@@ -2290,7 +2316,8 @@ async function streamObject({
     rootSpan,
     doStreamSpan,
     telemetry,
-    startTimestamp
+    startTimestampMs,
+    now: now2
   });
 }
 });
@@ -2305,7 +2332,8 @@ var DefaultStreamObjectResult = class {
     rootSpan,
     doStreamSpan,
     telemetry,
-    startTimestamp
+    startTimestampMs,
+    now: now2
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -2334,7 +2362,7 @@ var DefaultStreamObjectResult = class {
       new TransformStream({
         async transform(chunk, controller) {
           if (isFirstChunk) {
-            const msToFirstChunk = performance.now() - startTimestamp;
+            const msToFirstChunk = now2() - startTimestampMs;
             isFirstChunk = false;
             doStreamSpan.addEvent("ai.stream.firstChunk", {
               "ai.stream.msToFirstChunk": msToFirstChunk
@@ -2411,15 +2439,18 @@ var DefaultStreamObjectResult = class {
       selectTelemetryAttributes({
         telemetry,
         attributes: {
-          "ai.finishReason": finishReason,
-          "ai.usage.promptTokens": finalUsage.promptTokens,
-          "ai.usage.completionTokens": finalUsage.completionTokens,
-          "ai.result.object": {
+          "ai.response.finishReason": finishReason,
+          "ai.response.object": {
             output: () => JSON.stringify(object)
           },
+          "ai.usage.promptTokens": finalUsage.promptTokens,
+          "ai.usage.completionTokens": finalUsage.completionTokens,
+          // deprecated
+          "ai.finishReason": finishReason,
+          "ai.result.object": { output: () => JSON.stringify(object) },
           // standardized gen-ai llm span attributes:
-          "gen_ai.usage.prompt_tokens": finalUsage.promptTokens,
-          "gen_ai.usage.completion_tokens": finalUsage.completionTokens,
+          "gen_ai.usage.input_tokens": finalUsage.promptTokens,
+          "gen_ai.usage.output_tokens": finalUsage.completionTokens,
           "gen_ai.response.finish_reasons": [finishReason]
         }
       })
@@ -2431,9 +2462,11 @@ var DefaultStreamObjectResult = class {
         attributes: {
           "ai.usage.promptTokens": finalUsage.promptTokens,
           "ai.usage.completionTokens": finalUsage.completionTokens,
-          "ai.result.object": {
+          "ai.response.object": {
             output: () => JSON.stringify(object)
-          }
+          },
+          // deprecated
+          "ai.result.object": { output: () => JSON.stringify(object) }
         }
       })
     );
@@ -2812,10 +2845,14 @@ async function generateText({
             input: () => JSON.stringify(promptMessages)
           },
           // standardized gen-ai llm span attributes:
-          "gen_ai.request.model": model.modelId,
           "gen_ai.system": model.provider,
+          "gen_ai.request.model": model.modelId,
+          "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
           "gen_ai.request.max_tokens": settings.maxTokens,
+          "gen_ai.request.presence_penalty": settings.presencePenalty,
+          "gen_ai.request.stop_sequences": settings.stopSequences,
           "gen_ai.request.temperature": settings.temperature,
+          "gen_ai.request.top_k": settings.topK,
           "gen_ai.request.top_p": settings.topP
         }
       }),
@@ -2833,9 +2870,17 @@ async function generateText({
       selectTelemetryAttributes({
         telemetry,
         attributes: {
-          "ai.finishReason": result.finishReason,
+          "ai.response.finishReason": result.finishReason,
+          "ai.response.text": {
+            output: () => result.text
+          },
+          "ai.response.toolCalls": {
+            output: () => JSON.stringify(result.toolCalls)
+          },
           "ai.usage.promptTokens": result.usage.promptTokens,
           "ai.usage.completionTokens": result.usage.completionTokens,
+          // deprecated:
+          "ai.finishReason": result.finishReason,
           "ai.result.text": {
             output: () => result.text
           },
@@ -2844,8 +2889,8 @@ async function generateText({
           },
           // standardized gen-ai llm span attributes:
           "gen_ai.response.finish_reasons": [result.finishReason],
-          "gen_ai.usage.prompt_tokens": result.usage.promptTokens,
-          "gen_ai.usage.completion_tokens": result.usage.completionTokens
+          "gen_ai.usage.input_tokens": result.usage.promptTokens,
+          "gen_ai.usage.output_tokens": result.usage.completionTokens
         }
       })
     );
@@ -2898,9 +2943,17 @@ async function generateText({
       selectTelemetryAttributes({
         telemetry,
         attributes: {
-          "ai.finishReason": currentModelResponse.finishReason,
+          "ai.response.finishReason": currentModelResponse.finishReason,
+          "ai.response.text": {
+            output: () => currentModelResponse.text
+          },
+          "ai.response.toolCalls": {
+            output: () => JSON.stringify(currentModelResponse.toolCalls)
+          },
           "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
           "ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
+          // deprecated:
+          "ai.finishReason": currentModelResponse.finishReason,
           "ai.result.text": {
             output: () => currentModelResponse.text
           },
@@ -3358,6 +3411,7 @@ async function streamText({
   experimental_toolCallStreaming: toolCallStreaming = false,
   onChunk,
   onFinish,
+  _internal: { now: now2 = now } = {},
   ...settings
 }) {
   var _a11;
@@ -3392,7 +3446,7 @@ async function streamText({
       const {
         result: { stream: stream2, warnings: warnings2, rawResponse: rawResponse2 },
         doStreamSpan: doStreamSpan2,
-        startTimestamp: startTimestamp2
+        startTimestampMs: startTimestampMs2
       } = await retry(
         () => recordSpan({
           name: "ai.streamText.doStream",
@@ -3411,17 +3465,21 @@ async function streamText({
               input: () => JSON.stringify(promptMessages2)
             },
             // standardized gen-ai llm span attributes:
-            "gen_ai.request.model": model.modelId,
             "gen_ai.system": model.provider,
+            "gen_ai.request.model": model.modelId,
+            "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
             "gen_ai.request.max_tokens": settings.maxTokens,
+            "gen_ai.request.presence_penalty": settings.presencePenalty,
+            "gen_ai.request.stop_sequences": settings.stopSequences,
             "gen_ai.request.temperature": settings.temperature,
+            "gen_ai.request.top_k": settings.topK,
             "gen_ai.request.top_p": settings.topP
           }
         }),
         tracer,
         endWhenDone: false,
         fn: async (doStreamSpan3) => ({
-          startTimestamp: performance.now(),
+          startTimestampMs: now2(),
           // get before the call
           doStreamSpan: doStreamSpan3,
           result: await model.doStream({
@@ -3451,7 +3509,7 @@ async function streamText({
           rawResponse: rawResponse2
         },
         doStreamSpan: doStreamSpan2,
-        startTimestamp: startTimestamp2
+        startTimestampMs: startTimestampMs2
       };
     };
     const promptMessages = await convertToLanguageModelPrompt({
@@ -3461,7 +3519,7 @@ async function streamText({
     const {
       result: { stream, warnings, rawResponse },
       doStreamSpan,
-      startTimestamp
+      startTimestampMs
     } = await startRoundtrip({
       promptType: validatePrompt({ system, prompt, messages }).type,
       promptMessages
@@ -3475,10 +3533,11 @@ async function streamText({
       rootSpan,
       doStreamSpan,
       telemetry,
-      startTimestamp,
+      startTimestampMs,
       maxToolRoundtrips,
       startRoundtrip,
-      promptMessages
+      promptMessages,
+      now: now2
     });
   }
 });
@@ -3493,10 +3552,11 @@ var DefaultStreamTextResult = class {
     rootSpan,
     doStreamSpan,
     telemetry,
-    startTimestamp,
+    startTimestampMs,
     maxToolRoundtrips,
     startRoundtrip,
-    promptMessages
+    promptMessages,
+    now: now2
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -3524,7 +3584,7 @@ var DefaultStreamTextResult = class {
     const self = this;
     function addRoundtripStream({
       stream: stream2,
-      startTimestamp
+      startTimestamp,
       doStreamSpan: doStreamSpan2,
       currentToolRoundtrip,
       promptMessages: promptMessages2,
@@ -3551,12 +3611,16 @@ var DefaultStreamTextResult = class {
         new TransformStream({
           async transform(chunk, controller) {
             if (roundtripFirstChunk) {
-              const msToFirstChunk = performance.now() - startTimestamp;
+              const msToFirstChunk = now2() - startTimestamp;
               roundtripFirstChunk = false;
               doStreamSpan2.addEvent("ai.stream.firstChunk", {
+                "ai.response.msToFirstChunk": msToFirstChunk,
+                // deprecated:
                 "ai.stream.msToFirstChunk": msToFirstChunk
               });
               doStreamSpan2.setAttributes({
+                "ai.response.msToFirstChunk": msToFirstChunk,
+                // deprecated:
                 "ai.stream.msToFirstChunk": msToFirstChunk
               });
             }
@@ -3585,6 +3649,12 @@ var DefaultStreamTextResult = class {
                 roundtripFinishReason = chunk.finishReason;
                 roundtripProviderMetadata = chunk.experimental_providerMetadata;
                 roundtripLogProbs = chunk.logprobs;
+                const msToFinish = now2() - startTimestamp;
+                doStreamSpan2.addEvent("ai.stream.finish");
+                doStreamSpan2.setAttributes({
+                  "ai.response.msToFinish": msToFinish,
+                  "ai.response.avgCompletionTokensPerSecond": 1e3 * roundtripUsage.completionTokens / msToFinish
+                });
                 break;
               case "tool-call-streaming-start":
               case "tool-call-delta": {
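This hunk is the `dad775f` changelog entry: on the `finish` chunk the span gets an `ai.stream.finish` event plus two attributes. The `1e3 *` factor converts tokens per millisecond into tokens per second; with hypothetical numbers:

```ts
// Hypothetical roundtrip: 120 completion tokens streamed over 2000 ms.
const completionTokens = 120;
const msToFinish = 2000;
const avgCompletionTokensPerSecond = (1e3 * completionTokens) / msToFinish; // 60 tokens/s
```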
@@ -3617,17 +3687,23 @@ var DefaultStreamTextResult = class {
             selectTelemetryAttributes({
               telemetry,
               attributes: {
-                "ai.finishReason": roundtripFinishReason,
+                "ai.response.finishReason": roundtripFinishReason,
+                "ai.response.text": { output: () => roundtripText },
+                "ai.response.toolCalls": {
+                  output: () => telemetryToolCalls
+                },
                 "ai.usage.promptTokens": roundtripUsage.promptTokens,
                 "ai.usage.completionTokens": roundtripUsage.completionTokens,
+                // deprecated
+                "ai.finishReason": roundtripFinishReason,
                 "ai.result.text": { output: () => roundtripText },
                 "ai.result.toolCalls": {
                   output: () => telemetryToolCalls
                 },
                 // standardized gen-ai llm span attributes:
                 "gen_ai.response.finish_reasons": [roundtripFinishReason],
-                "gen_ai.usage.prompt_tokens": roundtripUsage.promptTokens,
-                "gen_ai.usage.completion_tokens": roundtripUsage.completionTokens
+                "gen_ai.usage.input_tokens": roundtripUsage.promptTokens,
+                "gen_ai.usage.output_tokens": roundtripUsage.completionTokens
               }
             })
           );
@@ -3655,7 +3731,11 @@ var DefaultStreamTextResult = class {
               (message) => convertToLanguageModelMessage(message, null)
             )
           );
-          const { result, doStreamSpan: doStreamSpan3 } = await startRoundtrip({
+          const {
+            result,
+            doStreamSpan: doStreamSpan3,
+            startTimestampMs: startTimestamp2
+          } = await startRoundtrip({
             promptType: "messages",
             promptMessages: promptMessages2
           });
@@ -3663,7 +3743,7 @@ var DefaultStreamTextResult = class {
           self.rawResponse = result.rawResponse;
           addRoundtripStream({
             stream: result.stream,
-            startTimestamp: performance.now(),
+            startTimestamp: startTimestamp2,
             doStreamSpan: doStreamSpan3,
             currentToolRoundtrip: currentToolRoundtrip + 1,
             promptMessages: promptMessages2,
@@ -3684,9 +3764,15 @@ var DefaultStreamTextResult = class {
             selectTelemetryAttributes({
               telemetry,
               attributes: {
-                "ai.finishReason": roundtripFinishReason,
+                "ai.response.finishReason": roundtripFinishReason,
+                "ai.response.text": { output: () => roundtripText },
+                "ai.response.toolCalls": {
+                  output: () => telemetryToolCalls
+                },
                 "ai.usage.promptTokens": combinedUsage.promptTokens,
                 "ai.usage.completionTokens": combinedUsage.completionTokens,
+                // deprecated
+                "ai.finishReason": roundtripFinishReason,
                 "ai.result.text": { output: () => roundtripText },
                 "ai.result.toolCalls": {
                   output: () => telemetryToolCalls
@@ -3726,7 +3812,7 @@ var DefaultStreamTextResult = class {
     }
     addRoundtripStream({
       stream,
-      startTimestamp,
+      startTimestamp: startTimestampMs,
       doStreamSpan,
       currentToolRoundtrip: 0,
       promptMessages,
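Taken together, a tracing setup can observe the new attributes end to end. A minimal sketch, assuming `@opentelemetry/sdk-trace-node` 1.x (where `NodeTracerProvider` still exposes `addSpanProcessor`) and an in-memory exporter:

```ts
import {
  InMemorySpanExporter,
  NodeTracerProvider,
  SimpleSpanProcessor,
} from "@opentelemetry/sdk-trace-node";

const exporter = new InMemorySpanExporter();
const provider = new NodeTracerProvider();
provider.addSpanProcessor(new SimpleSpanProcessor(exporter));
provider.register();

// ...run streamText({ ..., experimental_telemetry: { isEnabled: true } })
// and consume its stream, then inspect the finished spans:
for (const span of exporter.getFinishedSpans()) {
  if (span.name === "ai.streamText.doStream") {
    console.log(span.attributes["ai.response.msToFinish"]);
    console.log(span.attributes["ai.response.avgCompletionTokensPerSecond"]);
    console.log(span.attributes["gen_ai.usage.output_tokens"]);
  }
}
```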