ai 4.3.15 → 4.3.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
 # ai
 
+## 4.3.17
+
+### Patch Changes
+
+- a288694: Expose provider metadata as an attribute on exported OTEL spans
+
+## 4.3.16
+
+### Patch Changes
+
+- ed0ebeb: Avoid JSON.stringify on Uint8Arrays for telemetry
+
 ## 4.3.15
 
 ### Patch Changes
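Both patches only matter when telemetry is switched on. A minimal sketch of a call that would produce the affected spans, assuming the @ai-sdk/openai provider and an ESM context (the provider and model choice are illustrative, not part of this diff):

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai"; // assumed provider, not part of this diff

// Telemetry is opt-in; with it enabled, the span attributes added in
// 4.3.16/4.3.17 (e.g. "ai.response.providerMetadata") appear on the
// exported OTEL spans.
const result = await generateText({
  model: openai("gpt-4o"),
  prompt: "Hello!",
  experimental_telemetry: { isEnabled: true },
});
```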
package/dist/index.js CHANGED
@@ -2691,6 +2691,26 @@ function validateObjectGenerationInput({
 }
 }
 
+// core/prompt/stringify-for-telemetry.ts
+function stringifyForTelemetry(prompt) {
+  const processedPrompt = prompt.map((message) => {
+    return {
+      ...message,
+      content: typeof message.content === "string" ? message.content : message.content.map(processPart)
+    };
+  });
+  return JSON.stringify(processedPrompt);
+}
+function processPart(part) {
+  if (part.type === "image") {
+    return {
+      ...part,
+      image: part.image instanceof Uint8Array ? convertDataContentToBase64String(part.image) : part.image
+    };
+  }
+  return part;
+}
+
 // core/generate-object/generate-object.ts
 var originalGenerateId = (0, import_provider_utils7.createIdGenerator)({ prefix: "aiobj", size: 24 });
 async function generateObject({
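The helper above is the compiled output of core/prompt/stringify-for-telemetry.ts. The motivation: JSON.stringify serializes a Uint8Array as an index-keyed object, one entry per byte, which bloats span payloads for image prompts. A minimal sketch of the difference (the prompt shape is illustrative):

```ts
// JSON.stringify serializes typed arrays as index-keyed objects:
const bytes = new Uint8Array([137, 80, 78, 71]); // PNG magic bytes
JSON.stringify(bytes); // '{"0":137,"1":80,"2":78,"3":71}'

// The new helper converts image parts to base64 before stringifying,
// so the same bytes serialize compactly. Illustrative prompt shape:
const prompt = [
  {
    role: "user",
    content: [
      { type: "text", text: "Describe this image" },
      { type: "image", image: bytes },
    ],
  },
];
// stringifyForTelemetry(prompt) embeds "iVBORw==" instead of one
// numeric entry per byte; the saving grows with image size.
```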
@@ -2865,6 +2885,9 @@ async function generateObject({
 "ai.response.id": responseData.id,
 "ai.response.model": responseData.modelId,
 "ai.response.timestamp": responseData.timestamp.toISOString(),
+"ai.response.providerMetadata": JSON.stringify(
+  result2.providerMetadata
+),
 "ai.usage.promptTokens": result2.usage.promptTokens,
 "ai.usage.completionTokens": result2.usage.completionTokens,
 // standardized gen-ai llm span attributes:
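The same attribute is added at every span-recording site touched below (generateObject, streamObject, generateText, streamText). A sketch of what lands on the span, using the OpenTelemetry API directly; the inner key names come from the provider and are purely illustrative:

```ts
import { trace } from "@opentelemetry/api";

// Sketch only: the shape of the new attribute on a recorded span.
const tracer = trace.getTracer("example");
const span = tracer.startSpan("ai.generateObject");
span.setAttribute(
  "ai.response.providerMetadata",
  JSON.stringify({ openai: { cachedPromptTokens: 128 } }) // illustrative keys
);
span.end();
```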
@@ -2918,7 +2941,7 @@ async function generateObject({
 input: () => inputFormat
 },
 "ai.prompt.messages": {
-input: () => JSON.stringify(promptMessages)
+input: () => stringifyForTelemetry(promptMessages)
 },
 "ai.settings.mode": mode,
 // standardized gen-ai llm span attributes:
@@ -2975,6 +2998,9 @@ async function generateObject({
 "ai.response.id": responseData.id,
 "ai.response.model": responseData.modelId,
 "ai.response.timestamp": responseData.timestamp.toISOString(),
+"ai.response.providerMetadata": JSON.stringify(
+  result2.providerMetadata
+),
 "ai.usage.promptTokens": result2.usage.promptTokens,
 "ai.usage.completionTokens": result2.usage.completionTokens,
 // standardized gen-ai llm span attributes:
@@ -3517,7 +3543,7 @@ var DefaultStreamObjectResult = class {
 input: () => callOptions.inputFormat
 },
 "ai.prompt.messages": {
-input: () => JSON.stringify(callOptions.prompt)
+input: () => stringifyForTelemetry(callOptions.prompt)
 },
 "ai.settings.mode": mode,
 // standardized gen-ai llm span attributes:
@@ -3675,6 +3701,7 @@ var DefaultStreamObjectResult = class {
 "ai.response.id": response.id,
 "ai.response.model": response.modelId,
 "ai.response.timestamp": response.timestamp.toISOString(),
+"ai.response.providerMetadata": JSON.stringify(providerMetadata),
 "ai.usage.promptTokens": finalUsage.promptTokens,
 "ai.usage.completionTokens": finalUsage.completionTokens,
 // standardized gen-ai llm span attributes:
@@ -3695,7 +3722,8 @@ var DefaultStreamObjectResult = class {
 "ai.usage.completionTokens": finalUsage.completionTokens,
 "ai.response.object": {
 output: () => JSON.stringify(object2)
-}
+},
+"ai.response.providerMetadata": JSON.stringify(providerMetadata)
 }
 })
 );
@@ -4308,7 +4336,7 @@ async function generateText({
 // prompt:
 "ai.prompt.format": { input: () => promptFormat },
 "ai.prompt.messages": {
-input: () => JSON.stringify(promptMessages)
+input: () => stringifyForTelemetry(promptMessages)
 },
 "ai.prompt.tools": {
 // convert the language model level tools:
@@ -4364,6 +4392,9 @@ async function generateText({
 "ai.response.id": responseData.id,
 "ai.response.model": responseData.modelId,
 "ai.response.timestamp": responseData.timestamp.toISOString(),
+"ai.response.providerMetadata": JSON.stringify(
+  result.providerMetadata
+),
 "ai.usage.promptTokens": result.usage.promptTokens,
 "ai.usage.completionTokens": result.usage.completionTokens,
 // standardized gen-ai llm span attributes:
@@ -4490,7 +4521,10 @@ async function generateText({
 output: () => JSON.stringify(currentModelResponse.toolCalls)
 },
 "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
-"ai.usage.completionTokens": currentModelResponse.usage.completionTokens
+"ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
+"ai.response.providerMetadata": JSON.stringify(
+  currentModelResponse.providerMetadata
+)
 }
 })
 );
@@ -5543,7 +5577,10 @@ var DefaultStreamTextResult = class {
 }
 },
 "ai.usage.promptTokens": usage.promptTokens,
-"ai.usage.completionTokens": usage.completionTokens
+"ai.usage.completionTokens": usage.completionTokens,
+"ai.response.providerMetadata": JSON.stringify(
+  lastStep.providerMetadata
+)
 }
 })
 );
@@ -5654,7 +5691,7 @@ var DefaultStreamTextResult = class {
 input: () => promptFormat
 },
 "ai.prompt.messages": {
-input: () => JSON.stringify(promptMessages)
+input: () => stringifyForTelemetry(promptMessages)
 },
 "ai.prompt.tools": {
 // convert the language model level tools:
@@ -5920,6 +5957,7 @@ var DefaultStreamTextResult = class {
 "ai.response.id": stepResponse.id,
 "ai.response.model": stepResponse.modelId,
 "ai.response.timestamp": stepResponse.timestamp.toISOString(),
+"ai.response.providerMetadata": JSON.stringify(stepProviderMetadata),
 "ai.usage.promptTokens": stepUsage.promptTokens,
 "ai.usage.completionTokens": stepUsage.completionTokens,
 // standardized gen-ai llm span attributes:
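For completeness, a hedged sketch of how the new attribute could be observed in a test, assuming the OpenTelemetry Node SDK 1.x (addSpanProcessor was removed in 2.x in favor of a constructor option); none of this is part of the package itself:

```ts
import {
  InMemorySpanExporter,
  SimpleSpanProcessor,
} from "@opentelemetry/sdk-trace-base";
import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";

// Collect spans in memory (1.x-style registration).
const exporter = new InMemorySpanExporter();
const provider = new NodeTracerProvider();
provider.addSpanProcessor(new SimpleSpanProcessor(exporter));
provider.register();

// ...run a generateText/generateObject call with telemetry enabled,
// then inspect the exported attributes:
for (const span of exporter.getFinishedSpans()) {
  console.log(span.name, span.attributes["ai.response.providerMetadata"]);
}
```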