ai 3.2.41 → 3.2.42
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +63 -12
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +63 -12
- package/dist/index.mjs.map +1 -1
- package/package.json +6 -6
package/dist/index.js
CHANGED
@@ -1208,7 +1208,13 @@ async function generateObject({
           "ai.prompt.messages": {
             input: () => JSON.stringify(promptMessages)
           },
-          "ai.settings.mode": mode
+          "ai.settings.mode": mode,
+          // standardized gen-ai llm span attributes:
+          "gen_ai.request.model": model.modelId,
+          "gen_ai.system": model.provider,
+          "gen_ai.request.max_tokens": settings.maxTokens,
+          "gen_ai.request.temperature": settings.temperature,
+          "gen_ai.request.top_p": settings.topP
         }
       }),
       tracer,
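The added `gen_ai.*` keys mirror the OpenTelemetry generative-AI semantic conventions alongside the SDK's own `ai.*` attributes. A minimal sketch of a call that would emit them, assuming the `experimental_telemetry` opt-in from this release line and the `@ai-sdk/openai` provider (model id, schema, and prompt are illustrative):

```ts
import { generateObject } from "ai";
import { openai } from "@ai-sdk/openai"; // assumed provider package
import { z } from "zod";

// With telemetry enabled, the doGenerate span for this call now carries
// gen_ai.request.model, gen_ai.system, gen_ai.request.max_tokens,
// gen_ai.request.temperature and gen_ai.request.top_p in addition to
// the pre-existing ai.* attributes.
const { object } = await generateObject({
  model: openai("gpt-4o"),
  schema: z.object({ city: z.string() }),
  prompt: "Name a city in France.",
  maxTokens: 256, // -> gen_ai.request.max_tokens
  temperature: 0.2, // -> gen_ai.request.temperature
  topP: 0.9, // -> gen_ai.request.top_p
  experimental_telemetry: { isEnabled: true },
});
```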
@@ -1231,7 +1237,11 @@ async function generateObject({
             "ai.finishReason": result2.finishReason,
             "ai.usage.promptTokens": result2.usage.promptTokens,
             "ai.usage.completionTokens": result2.usage.completionTokens,
-            "ai.result.object": { output: () => result2.text }
+            "ai.result.object": { output: () => result2.text },
+            // standardized gen-ai llm span attributes:
+            "gen_ai.response.finish_reasons": [result2.finishReason],
+            "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
+            "gen_ai.usage.completion_tokens": result2.usage.completionTokens
           }
         })
       );
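To see the response-side attributes (`gen_ai.response.finish_reasons`, `gen_ai.usage.prompt_tokens`, `gen_ai.usage.completion_tokens`), a tracer provider has to be registered; a console exporter is the quickest way to inspect the attribute map. A sketch, assuming the standard `@opentelemetry/sdk-trace-node` setup of this era:

```ts
import {
  NodeTracerProvider,
  SimpleSpanProcessor,
  ConsoleSpanExporter,
} from "@opentelemetry/sdk-trace-node";

// Exported spans print their attributes, so the gen_ai.usage.* token
// counts and gen_ai.response.finish_reasons show up next to ai.usage.*.
const provider = new NodeTracerProvider();
provider.addSpanProcessor(new SimpleSpanProcessor(new ConsoleSpanExporter()));
provider.register();
```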
@@ -1271,7 +1281,13 @@ async function generateObject({
           "ai.prompt.messages": {
             input: () => JSON.stringify(promptMessages)
           },
-          "ai.settings.mode": mode
+          "ai.settings.mode": mode,
+          // standardized gen-ai llm span attributes:
+          "gen_ai.request.model": model.modelId,
+          "gen_ai.system": model.provider,
+          "gen_ai.request.max_tokens": settings.maxTokens,
+          "gen_ai.request.temperature": settings.temperature,
+          "gen_ai.request.top_p": settings.topP
         }
       }),
       tracer,
@@ -1304,7 +1320,11 @@ async function generateObject({
             "ai.finishReason": result2.finishReason,
             "ai.usage.promptTokens": result2.usage.promptTokens,
             "ai.usage.completionTokens": result2.usage.completionTokens,
-            "ai.result.object": { output: () => objectText }
+            "ai.result.object": { output: () => objectText },
+            // standardized gen-ai llm span attributes:
+            "gen_ai.response.finish_reasons": [result2.finishReason],
+            "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
+            "gen_ai.usage.completion_tokens": result2.usage.completionTokens
           }
         })
       );
@@ -1588,7 +1608,13 @@ async function streamObject({
           "ai.prompt.messages": {
             input: () => JSON.stringify(callOptions.prompt)
           },
-          "ai.settings.mode": mode
+          "ai.settings.mode": mode,
+          // standardized gen-ai llm span attributes:
+          "gen_ai.request.model": model.modelId,
+          "gen_ai.system": model.provider,
+          "gen_ai.request.max_tokens": settings.maxTokens,
+          "gen_ai.request.temperature": settings.temperature,
+          "gen_ai.request.top_p": settings.topP
         }
       }),
       tracer,
@@ -1631,6 +1657,7 @@ var DefaultStreamObjectResult = class {
       resolveUsage = resolve;
     });
     let usage;
+    let finishReason;
    let object;
    let error;
    let accumulatedText = "";
@@ -1673,6 +1700,7 @@ var DefaultStreamObjectResult = class {
               textDelta: delta
             });
           }
+          finishReason = chunk.finishReason;
           usage = calculateCompletionTokenUsage(chunk.usage);
           controller.enqueue({ ...chunk, usage });
           resolveUsage(usage);
@@ -1707,11 +1735,16 @@ var DefaultStreamObjectResult = class {
           selectTelemetryAttributes({
             telemetry,
             attributes: {
+              "ai.finishReason": finishReason,
               "ai.usage.promptTokens": finalUsage.promptTokens,
               "ai.usage.completionTokens": finalUsage.completionTokens,
               "ai.result.object": {
                 output: () => JSON.stringify(object)
-              }
+              },
+              // standardized gen-ai llm span attributes:
+              "gen_ai.usage.prompt_tokens": finalUsage.promptTokens,
+              "gen_ai.usage.completion_tokens": finalUsage.completionTokens,
+              "gen_ai.response.finish_reasons": [finishReason]
             }
           })
         );
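Because a stream's finish reason is only known once the final chunk arrives, the diff threads a `finishReason` variable from the transform step into the span recorded at the end of the stream. A standalone sketch of that pattern (names simplified, not the SDK's actual types):

```ts
type Chunk = { type: string; finishReason?: string };

let finishReason: string | undefined;

const capture = new TransformStream<Chunk, Chunk>({
  transform(chunk, controller) {
    if (chunk.type === "finish") {
      // only the last chunk carries the finish reason
      finishReason = chunk.finishReason;
    }
    controller.enqueue(chunk);
  },
  flush() {
    // by now finishReason is set (unless the stream errored early),
    // so it can be reported as gen_ai.response.finish_reasons
    console.log("finish_reasons:", [finishReason]);
  },
});
```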
@@ -1974,7 +2007,13 @@ async function generateText({
             "ai.prompt.format": { input: () => currentInputFormat },
             "ai.prompt.messages": {
               input: () => JSON.stringify(promptMessages)
-            }
+            },
+            // standardized gen-ai llm span attributes:
+            "gen_ai.request.model": model.modelId,
+            "gen_ai.system": model.provider,
+            "gen_ai.request.max_tokens": settings.maxTokens,
+            "gen_ai.request.temperature": settings.temperature,
+            "gen_ai.request.top_p": settings.topP
           }
         }),
         tracer,
@@ -1999,7 +2038,11 @@ async function generateText({
             },
             "ai.result.toolCalls": {
               output: () => JSON.stringify(result.toolCalls)
-            }
+            },
+            // standardized gen-ai llm span attributes:
+            "gen_ai.response.finish_reasons": [result.finishReason],
+            "gen_ai.usage.prompt_tokens": result.usage.promptTokens,
+            "gen_ai.usage.completion_tokens": result.usage.completionTokens
           }
         })
       );
@@ -2509,7 +2552,13 @@ async function streamText({
           },
           "ai.prompt.messages": {
             input: () => JSON.stringify(promptMessages)
-          }
+          },
+          // standardized gen-ai llm span attributes:
+          "gen_ai.request.model": model.modelId,
+          "gen_ai.system": model.provider,
+          "gen_ai.request.max_tokens": settings.maxTokens,
+          "gen_ai.request.temperature": settings.temperature,
+          "gen_ai.request.top_p": settings.topP
         }
       }),
       tracer,
@@ -2646,7 +2695,11 @@ var DefaultStreamTextResult = class {
               "ai.usage.promptTokens": finalUsage.promptTokens,
               "ai.usage.completionTokens": finalUsage.completionTokens,
               "ai.result.text": { output: () => text },
-              "ai.result.toolCalls": { output: () => telemetryToolCalls }
+              "ai.result.toolCalls": { output: () => telemetryToolCalls },
+              // standardized gen-ai llm span attributes:
+              "gen_ai.response.finish_reasons": [finalFinishReason],
+              "gen_ai.usage.prompt_tokens": finalUsage.promptTokens,
+              "gen_ai.usage.completion_tokens": finalUsage.completionTokens
             }
           })
         );
@@ -2786,8 +2839,6 @@ var DefaultStreamTextResult = class {
           controller.enqueue(
             (0, import_ui_utils6.formatStreamPart)("tool_result", {
               toolCallId: chunk.toolCallId,
-              toolName: chunk.toolName,
-              args: chunk.args,
               result: chunk.result
             })
           );
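The final hunk trims the serialized `tool_result` stream part down to the fields the client cannot reconstruct: `toolName` and `args` already travel with the preceding tool-call part for the same `toolCallId`, so resending them with the result is redundant. The resulting shape (a sketch inferred from the diff; the type name is hypothetical):

```ts
// before this change: { toolCallId, toolName, args, result }
// after this change: only the id and the result travel over the wire;
// the client joins them back to the tool call by toolCallId
type ToolResultStreamPart = {
  toolCallId: string;
  result: unknown;
};
```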