ai 4.3.15 → 4.3.17

--- a/package/dist/index.mjs
+++ b/package/dist/index.mjs
@@ -2619,6 +2619,26 @@ function validateObjectGenerationInput({
   }
 }
 
+// core/prompt/stringify-for-telemetry.ts
+function stringifyForTelemetry(prompt) {
+  const processedPrompt = prompt.map((message) => {
+    return {
+      ...message,
+      content: typeof message.content === "string" ? message.content : message.content.map(processPart)
+    };
+  });
+  return JSON.stringify(processedPrompt);
+}
+function processPart(part) {
+  if (part.type === "image") {
+    return {
+      ...part,
+      image: part.image instanceof Uint8Array ? convertDataContentToBase64String(part.image) : part.image
+    };
+  }
+  return part;
+}
+
 // core/generate-object/generate-object.ts
 var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
 async function generateObject({
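For context: the new `stringifyForTelemetry` helper keeps prompt telemetry JSON-serializable by converting binary image parts to base64 before stringifying, instead of letting `JSON.stringify` dump a raw `Uint8Array` into the span attribute. A minimal standalone sketch of that behavior — the sample prompt is hypothetical, and Node's `Buffer` stands in for the bundle's `convertDataContentToBase64String`:

```js
// Stand-in for the bundle's convertDataContentToBase64String.
const toBase64 = (bytes) => Buffer.from(bytes).toString("base64");

const processPart = (part) =>
  part.type === "image" && part.image instanceof Uint8Array
    ? { ...part, image: toBase64(part.image) }
    : part;

const stringifyForTelemetry = (prompt) =>
  JSON.stringify(
    prompt.map((message) => ({
      ...message,
      content:
        typeof message.content === "string"
          ? message.content
          : message.content.map(processPart),
    }))
  );

// Hypothetical multi-part prompt with a binary image part.
const prompt = [
  {
    role: "user",
    content: [
      { type: "text", text: "Describe this image" },
      { type: "image", image: new Uint8Array([137, 80, 78, 71]) },
    ],
  },
];

console.log(stringifyForTelemetry(prompt));
// => [{"role":"user","content":[{"type":"text","text":"Describe this image"},
//      {"type":"image","image":"iVBORw=="}]}]
```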
@@ -2793,6 +2813,9 @@ async function generateObject({
 "ai.response.id": responseData.id,
 "ai.response.model": responseData.modelId,
 "ai.response.timestamp": responseData.timestamp.toISOString(),
+"ai.response.providerMetadata": JSON.stringify(
+  result2.providerMetadata
+),
 "ai.usage.promptTokens": result2.usage.promptTokens,
 "ai.usage.completionTokens": result2.usage.completionTokens,
 // standardized gen-ai llm span attributes:
@@ -2846,7 +2869,7 @@ async function generateObject({
   input: () => inputFormat
 },
 "ai.prompt.messages": {
-  input: () => JSON.stringify(promptMessages)
+  input: () => stringifyForTelemetry(promptMessages)
 },
 "ai.settings.mode": mode,
 // standardized gen-ai llm span attributes:
@@ -2903,6 +2926,9 @@ async function generateObject({
 "ai.response.id": responseData.id,
 "ai.response.model": responseData.modelId,
 "ai.response.timestamp": responseData.timestamp.toISOString(),
+"ai.response.providerMetadata": JSON.stringify(
+  result2.providerMetadata
+),
 "ai.usage.promptTokens": result2.usage.promptTokens,
 "ai.usage.completionTokens": result2.usage.completionTokens,
 // standardized gen-ai llm span attributes:
@@ -3448,7 +3474,7 @@ var DefaultStreamObjectResult = class {
   input: () => callOptions.inputFormat
 },
 "ai.prompt.messages": {
-  input: () => JSON.stringify(callOptions.prompt)
+  input: () => stringifyForTelemetry(callOptions.prompt)
 },
 "ai.settings.mode": mode,
 // standardized gen-ai llm span attributes:
@@ -3606,6 +3632,7 @@ var DefaultStreamObjectResult = class {
 "ai.response.id": response.id,
 "ai.response.model": response.modelId,
 "ai.response.timestamp": response.timestamp.toISOString(),
+"ai.response.providerMetadata": JSON.stringify(providerMetadata),
 "ai.usage.promptTokens": finalUsage.promptTokens,
 "ai.usage.completionTokens": finalUsage.completionTokens,
 // standardized gen-ai llm span attributes:
@@ -3626,7 +3653,8 @@ var DefaultStreamObjectResult = class {
 "ai.usage.completionTokens": finalUsage.completionTokens,
 "ai.response.object": {
   output: () => JSON.stringify(object2)
-}
+},
+"ai.response.providerMetadata": JSON.stringify(providerMetadata)
 }
 })
 );
@@ -4239,7 +4267,7 @@ async function generateText({
 // prompt:
 "ai.prompt.format": { input: () => promptFormat },
 "ai.prompt.messages": {
-  input: () => JSON.stringify(promptMessages)
+  input: () => stringifyForTelemetry(promptMessages)
 },
 "ai.prompt.tools": {
 // convert the language model level tools:
@@ -4295,6 +4323,9 @@ async function generateText({
 "ai.response.id": responseData.id,
 "ai.response.model": responseData.modelId,
 "ai.response.timestamp": responseData.timestamp.toISOString(),
+"ai.response.providerMetadata": JSON.stringify(
+  result.providerMetadata
+),
 "ai.usage.promptTokens": result.usage.promptTokens,
 "ai.usage.completionTokens": result.usage.completionTokens,
 // standardized gen-ai llm span attributes:
@@ -4421,7 +4452,10 @@ async function generateText({
   output: () => JSON.stringify(currentModelResponse.toolCalls)
 },
 "ai.usage.promptTokens": currentModelResponse.usage.promptTokens,
-"ai.usage.completionTokens": currentModelResponse.usage.completionTokens
+"ai.usage.completionTokens": currentModelResponse.usage.completionTokens,
+"ai.response.providerMetadata": JSON.stringify(
+  currentModelResponse.providerMetadata
+)
 }
 })
 );
@@ -5489,7 +5523,10 @@ var DefaultStreamTextResult = class {
 }
 },
 "ai.usage.promptTokens": usage.promptTokens,
-"ai.usage.completionTokens": usage.completionTokens
+"ai.usage.completionTokens": usage.completionTokens,
+"ai.response.providerMetadata": JSON.stringify(
+  lastStep.providerMetadata
+)
 }
 })
 );
@@ -5600,7 +5637,7 @@ var DefaultStreamTextResult = class {
   input: () => promptFormat
 },
 "ai.prompt.messages": {
-  input: () => JSON.stringify(promptMessages)
+  input: () => stringifyForTelemetry(promptMessages)
 },
 "ai.prompt.tools": {
 // convert the language model level tools:
@@ -5866,6 +5903,7 @@ var DefaultStreamTextResult = class {
 "ai.response.id": stepResponse.id,
 "ai.response.model": stepResponse.modelId,
 "ai.response.timestamp": stepResponse.timestamp.toISOString(),
+"ai.response.providerMetadata": JSON.stringify(stepProviderMetadata),
 "ai.usage.promptTokens": stepUsage.promptTokens,
 "ai.usage.completionTokens": stepUsage.completionTokens,
 // standardized gen-ai llm span attributes:
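The other recurring change above records the provider's response metadata as a new `ai.response.providerMetadata` span attribute on generate and stream spans. A minimal sketch of observing it, assuming the SDK's `experimental_telemetry` option and the `MockLanguageModelV1` test helper from `ai/test`, plus the OpenTelemetry JS 1.x tracing SDK; the metadata payload and provider name are made up for illustration:

```js
import { generateText } from "ai";
import { MockLanguageModelV1 } from "ai/test";
import {
  BasicTracerProvider,
  InMemorySpanExporter,
  SimpleSpanProcessor,
} from "@opentelemetry/sdk-trace-base";

// Collect spans in memory so the new attribute can be inspected directly.
const exporter = new InMemorySpanExporter();
const provider = new BasicTracerProvider();
provider.addSpanProcessor(new SimpleSpanProcessor(exporter));

await generateText({
  model: new MockLanguageModelV1({
    doGenerate: async () => ({
      text: "Hello!",
      finishReason: "stop",
      usage: { promptTokens: 3, completionTokens: 2 },
      rawCall: { rawPrompt: null, rawSettings: {} },
      // Hypothetical provider metadata echoed back by the mock model.
      providerMetadata: { myProvider: { cacheHit: true } },
    }),
  }),
  prompt: "Say hello",
  experimental_telemetry: {
    isEnabled: true,
    tracer: provider.getTracer("example"),
  },
});

await provider.forceFlush();
for (const span of exporter.getFinishedSpans()) {
  // After 4.3.17, the spans should carry the serialized metadata, e.g.
  // "ai.response.providerMetadata": '{"myProvider":{"cacheHit":true}}'
  console.log(span.name, span.attributes["ai.response.providerMetadata"]);
}
```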