ai 3.2.41 → 3.2.43

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
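In summary, the release adds the standardized OpenTelemetry gen_ai.* span attributes (gen_ai.request.model, gen_ai.system, gen_ai.request.max_tokens, gen_ai.request.temperature, gen_ai.request.top_p on call spans; gen_ai.response.finish_reasons, gen_ai.usage.prompt_tokens, gen_ai.usage.completion_tokens on result spans) alongside the existing ai.* attributes in generateObject, streamObject, generateText, and streamText, records the finish reason on the streamObject result span, and drops the toolName and args fields from the tool_result stream part. As a rough consumer-side sketch only (assuming an OpenTelemetry tracer is already registered in the host application, and assuming the SDK's experimental_telemetry opt-in and the @ai-sdk/openai provider, neither of which appears in this diff), the new attributes would land on the spans emitted by a call such as:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai"; // illustrative provider choice, not part of this diff

const { text } = await generateText({
  model: openai("gpt-4o"),      // modelId -> gen_ai.request.model, provider -> gen_ai.system
  maxTokens: 64,                // -> gen_ai.request.max_tokens
  temperature: 0.7,             // -> gen_ai.request.temperature
  prompt: "Write a haiku about tracing.",
  experimental_telemetry: { isEnabled: true }, // opt in to span emission
});
// Token usage and the finish reason are recorded as gen_ai.usage.prompt_tokens,
// gen_ai.usage.completion_tokens, and gen_ai.response.finish_reasons on the result span.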
package/dist/index.mjs CHANGED
@@ -1117,7 +1117,13 @@ async function generateObject({
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
  },
- "ai.settings.mode": mode
+ "ai.settings.mode": mode,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.system": model.provider,
+ "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_p": settings.topP
  }
  }),
  tracer,
@@ -1140,7 +1146,11 @@ async function generateObject({
  "ai.finishReason": result2.finishReason,
  "ai.usage.promptTokens": result2.usage.promptTokens,
  "ai.usage.completionTokens": result2.usage.completionTokens,
- "ai.result.object": { output: () => result2.text }
+ "ai.result.object": { output: () => result2.text },
+ // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [result2.finishReason],
+ "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
+ "gen_ai.usage.completion_tokens": result2.usage.completionTokens
  }
  })
  );
@@ -1180,7 +1190,13 @@ async function generateObject({
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
  },
- "ai.settings.mode": mode
+ "ai.settings.mode": mode,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.system": model.provider,
+ "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_p": settings.topP
  }
  }),
  tracer,
@@ -1213,7 +1229,11 @@ async function generateObject({
  "ai.finishReason": result2.finishReason,
  "ai.usage.promptTokens": result2.usage.promptTokens,
  "ai.usage.completionTokens": result2.usage.completionTokens,
- "ai.result.object": { output: () => objectText }
+ "ai.result.object": { output: () => objectText },
+ // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [result2.finishReason],
+ "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
+ "gen_ai.usage.completion_tokens": result2.usage.completionTokens
  }
  })
  );
@@ -1500,7 +1520,13 @@ async function streamObject({
  "ai.prompt.messages": {
  input: () => JSON.stringify(callOptions.prompt)
  },
- "ai.settings.mode": mode
+ "ai.settings.mode": mode,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.system": model.provider,
+ "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_p": settings.topP
  }
  }),
  tracer,
@@ -1543,6 +1569,7 @@ var DefaultStreamObjectResult = class {
  resolveUsage = resolve;
  });
  let usage;
+ let finishReason;
  let object;
  let error;
  let accumulatedText = "";
@@ -1585,6 +1612,7 @@ var DefaultStreamObjectResult = class {
  textDelta: delta
  });
  }
+ finishReason = chunk.finishReason;
  usage = calculateCompletionTokenUsage(chunk.usage);
  controller.enqueue({ ...chunk, usage });
  resolveUsage(usage);
@@ -1619,11 +1647,16 @@ var DefaultStreamObjectResult = class {
  selectTelemetryAttributes({
  telemetry,
  attributes: {
+ "ai.finishReason": finishReason,
  "ai.usage.promptTokens": finalUsage.promptTokens,
  "ai.usage.completionTokens": finalUsage.completionTokens,
  "ai.result.object": {
  output: () => JSON.stringify(object)
- }
+ },
+ // standardized gen-ai llm span attributes:
+ "gen_ai.usage.prompt_tokens": finalUsage.promptTokens,
+ "gen_ai.usage.completion_tokens": finalUsage.completionTokens,
+ "gen_ai.response.finish_reasons": [finishReason]
  }
  })
  );
@@ -1889,7 +1922,13 @@ async function generateText({
  "ai.prompt.format": { input: () => currentInputFormat },
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
- }
+ },
+ // standardized gen-ai llm span attributes:
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.system": model.provider,
+ "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_p": settings.topP
  }
  }),
  tracer,
@@ -1914,7 +1953,11 @@ async function generateText({
  },
  "ai.result.toolCalls": {
  output: () => JSON.stringify(result.toolCalls)
- }
+ },
+ // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [result.finishReason],
+ "gen_ai.usage.prompt_tokens": result.usage.promptTokens,
+ "gen_ai.usage.completion_tokens": result.usage.completionTokens
  }
  })
  );
@@ -2424,7 +2467,13 @@ async function streamText({
  },
  "ai.prompt.messages": {
  input: () => JSON.stringify(promptMessages)
- }
+ },
+ // standardized gen-ai llm span attributes:
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.system": model.provider,
+ "gen_ai.request.max_tokens": settings.maxTokens,
+ "gen_ai.request.temperature": settings.temperature,
+ "gen_ai.request.top_p": settings.topP
  }
  }),
  tracer,
@@ -2561,7 +2610,11 @@ var DefaultStreamTextResult = class {
  "ai.usage.promptTokens": finalUsage.promptTokens,
  "ai.usage.completionTokens": finalUsage.completionTokens,
  "ai.result.text": { output: () => text },
- "ai.result.toolCalls": { output: () => telemetryToolCalls }
+ "ai.result.toolCalls": { output: () => telemetryToolCalls },
+ // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [finalFinishReason],
+ "gen_ai.usage.prompt_tokens": finalUsage.promptTokens,
+ "gen_ai.usage.completion_tokens": finalUsage.completionTokens
  }
  })
  );
@@ -2701,8 +2754,6 @@ var DefaultStreamTextResult = class {
  controller.enqueue(
  formatStreamPart("tool_result", {
  toolCallId: chunk.toolCallId,
- toolName: chunk.toolName,
- args: chunk.args,
  result: chunk.result
  })
  );