@brizz/sdk 0.1.2 → 0.1.3-rc.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -33,8 +33,7 @@ __export(src_exports, {
  Brizz: () => Brizz,
  DEFAULT_PII_PATTERNS: () => DEFAULT_PII_PATTERNS,
  LogLevel: () => LogLevel,
- SeverityNumber: () => import_api_logs3.SeverityNumber,
- VercelAIInstrumentation: () => VercelAIInstrumentation,
+ SeverityNumber: () => import_api_logs2.SeverityNumber,
  WithSessionId: () => WithSessionId,
  detectRuntime: () => detectRuntime,
  emitEvent: () => emitEvent,
@@ -53,7 +52,7 @@ module.exports = __toCommonJS(src_exports);
 
  // src/internal/instrumentation/auto-init.ts
  var import_auto_instrumentations_node = require("@opentelemetry/auto-instrumentations-node");
- var import_instrumentation3 = require("@opentelemetry/instrumentation");
+ var import_instrumentation = require("@opentelemetry/instrumentation");
  var import_instrumentation_anthropic = require("@traceloop/instrumentation-anthropic");
  var import_instrumentation_bedrock = require("@traceloop/instrumentation-bedrock");
  var import_instrumentation_chromadb = require("@traceloop/instrumentation-chromadb");
@@ -210,936 +209,6 @@ function getLogLevel() {
  return logger.getLevel();
  }
 
- // src/internal/instrumentation/vercel-ai/instrumentation.ts
- var import_instrumentation = require("@opentelemetry/instrumentation");
-
- // src/internal/instrumentation/vercel-ai/patchers/base-patcher.ts
- var import_api2 = require("@opentelemetry/api");
-
- // src/internal/instrumentation/vercel-ai/semconv.ts
- var ATTR_GEN_AI_SYSTEM = "gen_ai.system";
- var ATTR_GEN_AI_OPERATION_NAME = "gen_ai.operation.name";
- var ATTR_GEN_AI_REQUEST_MODEL = "gen_ai.request.model";
- var ATTR_GEN_AI_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens";
- var ATTR_GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature";
- var ATTR_GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p";
- var ATTR_GEN_AI_REQUEST_TOP_K = "gen_ai.request.top_k";
- var ATTR_GEN_AI_REQUEST_STOP_SEQUENCES = "gen_ai.request.stop_sequences";
- var ATTR_GEN_AI_REQUEST_FREQUENCY_PENALTY = "gen_ai.request.frequency_penalty";
- var ATTR_GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty";
- var ATTR_GEN_AI_RESPONSE_ID = "gen_ai.response.id";
- var ATTR_GEN_AI_RESPONSE_MODEL = "gen_ai.response.model";
- var ATTR_GEN_AI_RESPONSE_FINISH_REASONS = "gen_ai.response.finish_reasons";
- var ATTR_GEN_AI_TOKEN_TYPE = "gen_ai.token.type";
- var ATTR_GEN_AI_PROMPT = "gen_ai.prompt";
- var ATTR_GEN_AI_COMPLETION = "gen_ai.completion";
- var ATTR_GEN_AI_OPENAI_API_BASE = "gen_ai.openai.api_base";
- var ATTR_EVENT_NAME = "event.name";
- var EVENT_GEN_AI_USER_MESSAGE = "gen_ai.user.message";
- var EVENT_GEN_AI_ASSISTANT_MESSAGE = "gen_ai.assistant.message";
- var EVENT_GEN_AI_SYSTEM_MESSAGE = "gen_ai.system.message";
- var EVENT_GEN_AI_TOOL_MESSAGE = "gen_ai.tool.message";
- var METRIC_GEN_AI_CLIENT_OPERATION_DURATION = "gen_ai.client.operation.duration";
- var METRIC_GEN_AI_CLIENT_TOKEN_USAGE = "gen_ai.client.token.usage";
- var OPERATION_NAME_CHAT = "chat";
- var OPERATION_NAME_EMBEDDINGS = "embeddings";
- var TOKEN_TYPE_INPUT = "input";
- var TOKEN_TYPE_OUTPUT = "output";
- var PROVIDER_OPENAI = "openai";
- var PROVIDER_ANTHROPIC = "anthropic";
- var PROVIDER_GOOGLE = "google";
- var PROVIDER_AMAZON = "amazon";
- var PROVIDER_AZURE = "azure";
- var PROVIDER_VERCEL = "vercel";
- var PROVIDER_UNKNOWN = "unknown";
- var SPAN_NAME_GEN_AI_CHAT = "gen_ai.chat";
- var SPAN_NAME_GEN_AI_EMBEDDINGS = "gen_ai.embeddings";
-
- // src/internal/instrumentation/vercel-ai/utils.ts
- function detectProvider(model) {
- if (typeof model === "object" && model !== null) {
- const modelObj = model;
- if (modelObj.provider) {
- return {
- system: normalizeProviderName(modelObj.provider),
- apiBase: extractApiBase(modelObj)
- };
- }
- if (modelObj.modelId) {
- return detectProviderFromModelId(modelObj.modelId);
- }
- }
- if (typeof model === "string") {
- return detectProviderFromModelId(model);
- }
- return { system: PROVIDER_UNKNOWN };
- }
- function detectProviderFromModelId(modelId) {
- const lowerModel = modelId.toLowerCase();
- if (lowerModel.startsWith("gpt-") || lowerModel.startsWith("text-davinci-") || lowerModel.startsWith("text-embedding-") || lowerModel.startsWith("dall-e") || lowerModel.startsWith("whisper-") || lowerModel.startsWith("tts-")) {
- return { system: PROVIDER_OPENAI };
- }
- if (lowerModel.startsWith("claude-")) {
- return { system: PROVIDER_ANTHROPIC };
- }
- if (lowerModel.startsWith("gemini-") || lowerModel.startsWith("palm-") || lowerModel.includes("bison") || lowerModel.includes("gecko")) {
- return { system: PROVIDER_GOOGLE };
- }
- if (lowerModel.startsWith("amazon.") || lowerModel.startsWith("anthropic.claude-") || lowerModel.startsWith("ai21.") || lowerModel.startsWith("cohere.") || lowerModel.startsWith("meta.llama")) {
- return { system: PROVIDER_AMAZON };
- }
- if (lowerModel.includes("azure") || lowerModel.includes(".openai.azure.com")) {
- return { system: PROVIDER_AZURE };
- }
- const parts = modelId.split(/[-._/]/);
- if (parts.length > 0 && parts[0]) {
- return { system: normalizeProviderName(parts[0]) };
- }
- return { system: PROVIDER_UNKNOWN };
- }
- function normalizeProviderName(provider) {
- const normalized = provider.toLowerCase().trim();
- switch (normalized) {
- case "openai":
- case "open-ai":
- case "open_ai": {
- return PROVIDER_OPENAI;
- }
- case "anthropic":
- case "claude": {
- return PROVIDER_ANTHROPIC;
- }
- case "google":
- case "vertex":
- case "vertexai":
- case "vertex-ai":
- case "gemini": {
- return PROVIDER_GOOGLE;
- }
- case "amazon":
- case "aws":
- case "bedrock":
- case "amazon-bedrock": {
- return PROVIDER_AMAZON;
- }
- case "azure":
- case "azure-openai":
- case "microsoft": {
- return PROVIDER_AZURE;
- }
- case "vercel":
- case "vercel-ai": {
- return PROVIDER_VERCEL;
- }
- default: {
- return normalized;
- }
- }
- }
- function extractApiBase(model) {
- if (typeof model === "object" && model !== null) {
- const anyModel = model;
- return anyModel.apiBase || anyModel.baseURL || anyModel.endpoint || void 0;
- }
- return void 0;
- }
- function extractModelId(model) {
- if (typeof model === "string") {
- return model;
- }
- if (typeof model === "object" && model !== null) {
- return model.modelId || "unknown";
- }
- return "unknown";
- }
- function messagesToAttributes(messages, prefix, captureContent) {
- const attributes = {};
- for (const [index, msg] of messages.entries()) {
- const baseKey = `${prefix}.${index}`;
- attributes[`${baseKey}.role`] = msg.role;
- if (captureContent && msg.content) {
- if (typeof msg.content === "string") {
- attributes[`${baseKey}.content`] = msg.content;
- } else if (Array.isArray(msg.content)) {
- const textParts = msg.content.filter((part) => part.type === "text" && part.text).map((part) => part.text).join(" ");
- if (textParts) {
- attributes[`${baseKey}.content`] = textParts;
- }
- }
- }
- if (msg.toolInvocations && msg.toolInvocations.length > 0) {
- attributes[`${baseKey}.tool_calls`] = msg.toolInvocations.length;
- }
- }
- return attributes;
- }
- function promptToAttributes(prompt, captureContent) {
- const attributes = {};
- attributes[`${ATTR_GEN_AI_PROMPT}.0.role`] = "user";
- if (captureContent) {
- attributes[`${ATTR_GEN_AI_PROMPT}.0.content`] = prompt;
- }
- return attributes;
- }
- function completionToAttributes(text, finishReason, captureContent) {
- const attributes = {};
- attributes[`${ATTR_GEN_AI_COMPLETION}.0.role`] = "assistant";
- if (captureContent) {
- attributes[`${ATTR_GEN_AI_COMPLETION}.0.content`] = text;
- }
- if (finishReason) {
- attributes[`${ATTR_GEN_AI_COMPLETION}.0.finish_reason`] = finishReason;
- }
- return attributes;
- }
- function tokenUsageToAttributes(usage) {
- if (!usage) {
- return {};
- }
- const attributes = {};
- if (usage.inputTokens !== void 0) {
- attributes["gen_ai.usage.prompt_tokens"] = usage.inputTokens;
- attributes["gen_ai.usage.input_tokens"] = usage.inputTokens;
- attributes["llm.usage.prompt_tokens"] = usage.inputTokens;
- } else if (usage.promptTokens !== void 0) {
- attributes["gen_ai.usage.prompt_tokens"] = usage.promptTokens;
- attributes["gen_ai.usage.input_tokens"] = usage.promptTokens;
- attributes["llm.usage.prompt_tokens"] = usage.promptTokens;
- }
- if (usage.outputTokens !== void 0) {
- attributes["gen_ai.usage.completion_tokens"] = usage.outputTokens;
- attributes["gen_ai.usage.output_tokens"] = usage.outputTokens;
- attributes["llm.usage.completion_tokens"] = usage.outputTokens;
- } else if (usage.completionTokens !== void 0) {
- attributes["gen_ai.usage.completion_tokens"] = usage.completionTokens;
- attributes["gen_ai.usage.output_tokens"] = usage.completionTokens;
- attributes["llm.usage.completion_tokens"] = usage.completionTokens;
- }
- if (usage.totalTokens === void 0) {
- const inputTokens = usage.inputTokens || usage.promptTokens;
- const outputTokens = usage.outputTokens || usage.completionTokens;
- if (inputTokens !== void 0 && outputTokens !== void 0) {
- const totalTokens = inputTokens + outputTokens;
- attributes["gen_ai.usage.total_tokens"] = totalTokens;
- attributes["llm.usage.total_tokens"] = totalTokens;
- }
- } else {
- attributes["gen_ai.usage.total_tokens"] = usage.totalTokens;
- attributes["llm.usage.total_tokens"] = usage.totalTokens;
- }
- return attributes;
- }
- function shouldRecordError(error) {
- if (error instanceof Error) {
- const message = error.message.toLowerCase();
- if (message.includes("abort") || message.includes("cancel")) {
- return false;
- }
- }
- return true;
- }
- function getEnvBool(name) {
- const value = process.env[name];
- if (value === void 0) {
- return void 0;
- }
- return value.toLowerCase() === "true" || value === "1";
- }
-
- // src/internal/instrumentation/vercel-ai/patchers/base-patcher.ts
- var BasePatcher = class {
- constructor(context8) {
- this.context = context8;
- }
- createSpan(spanName, params, operationName, additionalAttributes) {
- const provider = detectProvider(params.model);
- const modelId = extractModelId(params.model);
- const span = this.context.tracer.startSpan(spanName, {
- kind: import_api2.SpanKind.CLIENT,
- attributes: {
- [ATTR_GEN_AI_SYSTEM]: provider.system,
- [ATTR_GEN_AI_OPERATION_NAME]: operationName,
- [ATTR_GEN_AI_REQUEST_MODEL]: modelId,
- ...params.maxTokens && { [ATTR_GEN_AI_REQUEST_MAX_TOKENS]: params.maxTokens },
- ...params.temperature !== void 0 && {
- [ATTR_GEN_AI_REQUEST_TEMPERATURE]: params.temperature
- },
- ...params.topP !== void 0 && { [ATTR_GEN_AI_REQUEST_TOP_P]: params.topP },
- ...params.topK !== void 0 && { [ATTR_GEN_AI_REQUEST_TOP_K]: params.topK },
- ...params.frequencyPenalty !== void 0 && {
- [ATTR_GEN_AI_REQUEST_FREQUENCY_PENALTY]: params.frequencyPenalty
- },
- ...params.presencePenalty !== void 0 && {
- [ATTR_GEN_AI_REQUEST_PRESENCE_PENALTY]: params.presencePenalty
- },
- ...params.stopSequences && {
- [ATTR_GEN_AI_REQUEST_STOP_SEQUENCES]: params.stopSequences
- },
- ...provider.apiBase && { [ATTR_GEN_AI_OPENAI_API_BASE]: provider.apiBase },
- ...additionalAttributes
- }
- });
- return { span, provider, modelId };
- }
- handleError(error, span) {
- if (shouldRecordError(error)) {
- span.recordException(error);
- span.setStatus({ code: import_api2.SpanStatusCode.ERROR, message: error.message });
- }
- }
- finalizeDuration(span, startTime, config, provider, modelId, operationName) {
- if (config.enableMetrics) {
- const duration = (globalThis.performance.now() - startTime) / 1e3;
- this.context.recordDurationMetric(duration, provider.system, modelId, operationName);
- }
- span.end();
- }
- };
-
- // src/internal/instrumentation/vercel-ai/patchers/generate-text-patcher.ts
- var import_api3 = require("@opentelemetry/api");
- var GenerateTextPatcher = class extends BasePatcher {
- patch(original) {
- return async (params) => {
- const config = this.context.getConfig();
- const startTime = globalThis.performance.now();
- const { span, provider, modelId } = this.createSpan(
- SPAN_NAME_GEN_AI_CHAT,
- params,
- OPERATION_NAME_CHAT
- );
- if (params.prompt) {
- span.setAttributes(
- promptToAttributes(params.prompt, config.captureMessageContent || false)
- );
- } else if (params.messages) {
- span.setAttributes(
- messagesToAttributes(
- params.messages,
- "gen_ai.prompt",
- config.captureMessageContent || false
- )
- );
- if (config.emitEvents) {
- this.context.emitMessageEvents(params.messages, provider.system, span);
- }
- }
- try {
- const result = await import_api3.context.with(
- import_api3.trace.setSpan(import_api3.context.active(), span),
- () => original(params)
- );
- if (result.response) {
- span.setAttributes({
- ...result.response.id && { [ATTR_GEN_AI_RESPONSE_ID]: result.response.id },
- ...result.response.model && { [ATTR_GEN_AI_RESPONSE_MODEL]: result.response.model }
- });
- }
- if (result.finishReason) {
- span.setAttribute(ATTR_GEN_AI_RESPONSE_FINISH_REASONS, [result.finishReason]);
- }
- span.setAttributes(
- completionToAttributes(
- result.text,
- result.finishReason,
- config.captureMessageContent || false
- )
- );
- const usage = result.usage || result.totalUsage || result.steps?.[0]?.usage;
- if (usage) {
- span.setAttributes(tokenUsageToAttributes(usage));
- if (config.enableMetrics) {
- this.context.recordTokenMetrics(usage, provider.system, modelId);
- }
- }
- if (config.emitEvents) {
- this.context.emitAssistantMessageEvent(result.text, provider.system, span);
- }
- span.setStatus({ code: import_api3.SpanStatusCode.OK });
- return result;
- } catch (error) {
- this.handleError(error, span);
- throw error;
- } finally {
- this.finalizeDuration(span, startTime, config, provider, modelId, OPERATION_NAME_CHAT);
- }
- };
- }
- };
-
- // src/internal/instrumentation/vercel-ai/patchers/stream-text-patcher.ts
- var import_api4 = require("@opentelemetry/api");
- var StreamTextPatcher = class extends BasePatcher {
- constructor(context8, streamHandler) {
- super(context8);
- this.streamHandler = streamHandler;
- }
- patch(original) {
- return async (params) => {
- const config = this.context.getConfig();
- const startTime = globalThis.performance.now();
- const { span, provider, modelId } = this.createSpan(
- SPAN_NAME_GEN_AI_CHAT,
- params,
- OPERATION_NAME_CHAT,
- { "gen_ai.streaming": true }
- );
- if (params.prompt) {
- span.setAttributes(
- promptToAttributes(params.prompt, config.captureMessageContent || false)
- );
- } else if (params.messages) {
- span.setAttributes(
- messagesToAttributes(
- params.messages,
- "gen_ai.prompt",
- config.captureMessageContent || false
- )
- );
- if (config.emitEvents) {
- this.context.emitMessageEvents(params.messages, provider.system, span);
- }
- }
- try {
- const stream = await import_api4.context.with(
- import_api4.trace.setSpan(import_api4.context.active(), span),
- () => original(params)
- );
- return this.streamHandler.wrapStream(stream, span, config, provider, modelId, startTime);
- } catch (error) {
- this.handleError(error, span);
- span.end();
- throw error;
- }
- };
- }
- };
-
- // src/internal/instrumentation/vercel-ai/patchers/embeddings-patcher.ts
- var import_api5 = require("@opentelemetry/api");
- var EmbeddingsPatcher = class extends BasePatcher {
- patch(original, isMany = false) {
- return async (params) => {
- const config = this.context.getConfig();
- const startTime = globalThis.performance.now();
- const additionalAttributes = isMany ? { "gen_ai.embeddings.count": params.values ? params.values.length : 0 } : {};
- const { span, provider, modelId } = this.createSpan(
- SPAN_NAME_GEN_AI_EMBEDDINGS,
- params,
- OPERATION_NAME_EMBEDDINGS,
- additionalAttributes
- );
- if (!isMany && config.captureMessageContent && params.value) {
- span.setAttribute("gen_ai.prompt.0.content", params.value);
- }
- try {
- const result = await import_api5.context.with(
- import_api5.trace.setSpan(import_api5.context.active(), span),
- () => original(params)
- );
- if (result.response) {
- span.setAttributes({
- ...result.response.id && { [ATTR_GEN_AI_RESPONSE_ID]: result.response.id },
- ...result.response.model && { [ATTR_GEN_AI_RESPONSE_MODEL]: result.response.model }
- });
- }
- if (isMany) {
- if (result.embeddings && result.embeddings.length > 0 && result.embeddings[0]) {
- span.setAttribute("gen_ai.response.embedding_dimensions", result.embeddings[0].length);
- }
- } else {
- if (result.embedding) {
- span.setAttribute("gen_ai.response.embedding_dimensions", result.embedding.length);
- }
- }
- if (result.usage) {
- span.setAttributes(tokenUsageToAttributes(result.usage));
- if (config.enableMetrics) {
- this.context.recordTokenMetrics(result.usage, provider.system, modelId);
- }
- }
- span.setStatus({ code: import_api5.SpanStatusCode.OK });
- return result;
- } catch (error) {
- this.handleError(error, span);
- throw error;
- } finally {
- this.finalizeDuration(span, startTime, config, provider, modelId, OPERATION_NAME_EMBEDDINGS);
- }
- };
- }
- };
-
- // src/internal/instrumentation/vercel-ai/stream-handler.ts
- var import_api6 = require("@opentelemetry/api");
- var StreamHandler = class {
- constructor(context8) {
- this.context = context8;
- }
- wrapStream(stream, span, config, provider, modelId, startTime) {
- const self = this;
- let fullText = "";
- let finishReason;
- let usage;
- let response;
- const wrappedStream = new Proxy(stream, {
- get(target, prop) {
- if (prop === Symbol.asyncIterator) {
- return async function* () {
- try {
- for await (const chunk of target) {
- if (chunk.type === "text-delta" && chunk.textDelta) {
- fullText += chunk.textDelta;
- } else if (chunk.type === "finish") {
- finishReason = chunk.finishReason;
- usage = chunk.usage;
- } else if (chunk.type === "response-metadata") {
- response = chunk.response;
- }
- yield chunk;
- }
- } finally {
- self.finalizeStream(
- span,
- config,
- provider,
- modelId,
- startTime,
- fullText,
- finishReason,
- usage,
- response
- );
- }
- };
- }
- if (prop === "textStream" || prop === "fullStream") {
- const originalStream = target[prop];
- return {
- [Symbol.asyncIterator]: async function* () {
- try {
- for await (const chunk of originalStream) {
- if (prop === "textStream") {
- fullText += chunk;
- }
- yield chunk;
- }
- } finally {
- const streamUsage = await target.usage.catch(() => null);
- if (streamUsage) {
- usage = streamUsage;
- }
- self.finalizeStream(
- span,
- config,
- provider,
- modelId,
- startTime,
- fullText,
- finishReason,
- usage,
- response
- );
- }
- }
- };
- }
- const value = target[prop];
- if (typeof value === "function") {
- return value.bind(target);
- }
- return value;
- }
- });
- return wrappedStream;
- }
- finalizeStream(span, config, provider, modelId, startTime, fullText, finishReason, usage, response) {
- if (response) {
- span.setAttributes({
- ...response.id && { [ATTR_GEN_AI_RESPONSE_ID]: response.id },
- ...response.model && { [ATTR_GEN_AI_RESPONSE_MODEL]: response.model }
- });
- }
- if (finishReason) {
- span.setAttribute(ATTR_GEN_AI_RESPONSE_FINISH_REASONS, [finishReason]);
- }
- if (fullText) {
- span.setAttributes(
- completionToAttributes(
- fullText,
- finishReason,
- config.captureMessageContent || false
- )
- );
- }
- if (usage) {
- span.setAttributes(tokenUsageToAttributes(usage));
- if (config.enableMetrics) {
- this.context.recordTokenMetrics(usage, provider.system, modelId);
- }
- }
- if (config.enableMetrics) {
- const duration = (performance.now() - startTime) / 1e3;
- this.context.recordDurationMetric(duration, provider.system, modelId, OPERATION_NAME_CHAT);
- }
- span.setStatus({ code: import_api6.SpanStatusCode.OK });
- span.end();
- }
- };
-
- // src/internal/instrumentation/vercel-ai/telemetry-recorder.ts
- var import_api7 = require("@opentelemetry/api");
- var import_api_logs = require("@opentelemetry/api-logs");
- var TelemetryRecorder = class {
- constructor(genaiClientOperationDuration, genaiClientTokenUsage, logger2) {
- this.genaiClientOperationDuration = genaiClientOperationDuration;
- this.genaiClientTokenUsage = genaiClientTokenUsage;
- this.logger = logger2;
- }
- /**
- * Record token usage metrics
- */
- recordTokenMetrics(usage, system, model) {
- if (!this.genaiClientTokenUsage) {
- return;
- }
- const commonAttrs = {
- [ATTR_GEN_AI_SYSTEM]: system,
- [ATTR_GEN_AI_REQUEST_MODEL]: model
- };
- const inputTokens = usage.inputTokens || usage.promptTokens;
- const outputTokens = usage.outputTokens || usage.completionTokens;
- if (inputTokens !== void 0) {
- this.genaiClientTokenUsage.record(inputTokens, {
- ...commonAttrs,
- [ATTR_GEN_AI_TOKEN_TYPE]: TOKEN_TYPE_INPUT
- });
- }
- if (outputTokens !== void 0) {
- this.genaiClientTokenUsage.record(outputTokens, {
- ...commonAttrs,
- [ATTR_GEN_AI_TOKEN_TYPE]: TOKEN_TYPE_OUTPUT
- });
- }
- }
- /**
- * Record operation duration metric
- */
- recordDurationMetric(duration, system, model, operation) {
- if (!this.genaiClientOperationDuration) {
- return;
- }
- this.genaiClientOperationDuration.record(duration, {
- [ATTR_GEN_AI_SYSTEM]: system,
- [ATTR_GEN_AI_REQUEST_MODEL]: model,
- [ATTR_GEN_AI_OPERATION_NAME]: operation
- });
- }
- /**
- * Emit message events
- */
- emitMessageEvents(messages, system, span) {
- if (!this.logger) {
- return;
- }
- const ctx = import_api7.trace.setSpan(import_api7.context.active(), span);
- for (const msg of messages) {
- let eventName;
- switch (msg.role) {
- case "system": {
- eventName = EVENT_GEN_AI_SYSTEM_MESSAGE;
- break;
- }
- case "user": {
- eventName = EVENT_GEN_AI_USER_MESSAGE;
- break;
- }
- case "assistant": {
- eventName = EVENT_GEN_AI_ASSISTANT_MESSAGE;
- break;
- }
- case "tool":
- case "function": {
- eventName = EVENT_GEN_AI_TOOL_MESSAGE;
- break;
- }
- default: {
- continue;
- }
- }
- this.logger.emit({
- timestamp: Date.now(),
- context: ctx,
- severityNumber: import_api_logs.SeverityNumber.INFO,
- attributes: {
- [ATTR_EVENT_NAME]: eventName,
- [ATTR_GEN_AI_SYSTEM]: system
- },
- body: {
- role: msg.role,
- content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content),
- name: msg.name
- }
- });
- }
- }
- /**
- * Emit assistant message event
- */
- emitAssistantMessageEvent(text, system, span) {
- if (!this.logger) {
- return;
- }
- const ctx = import_api7.trace.setSpan(import_api7.context.active(), span);
- this.logger.emit({
- timestamp: Date.now(),
- context: ctx,
- severityNumber: import_api_logs.SeverityNumber.INFO,
- attributes: {
- [ATTR_EVENT_NAME]: EVENT_GEN_AI_ASSISTANT_MESSAGE,
- [ATTR_GEN_AI_SYSTEM]: system
- },
- body: {
- role: "assistant",
- content: text
- }
- });
- }
- };
-
- // src/internal/instrumentation/vercel-ai/instrumentation.ts
- var PACKAGE_NAME = "@brizz/vercel-ai-instrumentation";
- var PACKAGE_VERSION = "0.1.0";
- var VercelAIInstrumentation = class _VercelAIInstrumentation extends import_instrumentation.InstrumentationBase {
- _genaiClientOperationDuration;
- _genaiClientTokenUsage;
- _telemetryRecorder;
- _streamHandler;
- _patchers = /* @__PURE__ */ new Map();
- // Holds last patched namespace when available (reserved for future factory wrapping)
- _vercelAiNamespace = null;
- static _WRAPPED_SYMBOL = Symbol.for("brizz.vercel-ai.patched");
- constructor(config = {}) {
- super(PACKAGE_NAME, PACKAGE_VERSION, config);
- const cfg = this.getConfig();
- const envCC = getEnvBool("OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT");
- if (envCC !== void 0) {
- cfg.captureMessageContent = envCC;
- }
- this._initializeComponents();
- }
- setConfig(config = {}) {
- const {
- captureMessageContent = true,
- enableMetrics = true,
- emitEvents = true,
- ...validConfig
- } = config;
- const fullConfig = {
- ...validConfig,
- captureMessageContent,
- enableMetrics,
- emitEvents
- };
- super.setConfig(fullConfig);
- }
- _initializeComponents() {
- this._telemetryRecorder = new TelemetryRecorder(
- this._genaiClientOperationDuration,
- this._genaiClientTokenUsage,
- this.logger
- );
- this._streamHandler = new StreamHandler({
- recordTokenMetrics: this._telemetryRecorder.recordTokenMetrics.bind(this._telemetryRecorder),
- recordDurationMetric: this._telemetryRecorder.recordDurationMetric.bind(
- this._telemetryRecorder
- )
- });
- const patcherContext = {
- tracer: this.tracer,
- getConfig: this.getConfig.bind(this),
- recordTokenMetrics: this._telemetryRecorder.recordTokenMetrics.bind(this._telemetryRecorder),
- recordDurationMetric: this._telemetryRecorder.recordDurationMetric.bind(
- this._telemetryRecorder
- ),
- emitMessageEvents: this._telemetryRecorder.emitMessageEvents.bind(this._telemetryRecorder),
- emitAssistantMessageEvent: this._telemetryRecorder.emitAssistantMessageEvent.bind(
- this._telemetryRecorder
- )
- };
- this._patchers.set("generateText", new GenerateTextPatcher(patcherContext));
- this._patchers.set("streamText", new StreamTextPatcher(patcherContext, this._streamHandler));
- this._patchers.set("embed", new EmbeddingsPatcher(patcherContext));
- this._patchers.set("embedMany", new EmbeddingsPatcher(patcherContext));
- }
- init() {
- return [
- new import_instrumentation.InstrumentationNodeModuleDefinition(
- "ai",
- [">=4.0.0 <6"],
- (moduleExports) => {
- logger.info("Starting instrumentation of Vercel AI SDK module");
- this._vercelAiNamespace = moduleExports;
- const patched = this._patchModuleExports(moduleExports);
- return patched ?? moduleExports;
- },
- (moduleExports) => {
- logger.debug("Uninstrumenting @vercel/ai module");
- return moduleExports;
- }
- )
- ];
- }
- _updateMetricInstruments() {
- const config = this.getConfig();
- if (!config.enableMetrics) {
- return;
- }
- this._genaiClientOperationDuration = this.meter.createHistogram(
- METRIC_GEN_AI_CLIENT_OPERATION_DURATION,
- {
- description: "GenAI operation duration",
- unit: "s",
- advice: {
- explicitBucketBoundaries: [
- 0.01,
- 0.02,
- 0.04,
- 0.08,
- 0.16,
- 0.32,
- 0.64,
- 1.28,
- 2.56,
- 5.12,
- 10.24,
- 20.48,
- 40.96,
- 81.92
- ]
- }
- }
- );
- this._genaiClientTokenUsage = this.meter.createHistogram(METRIC_GEN_AI_CLIENT_TOKEN_USAGE, {
- description: "Measures number of input and output tokens used",
- unit: "{token}",
- advice: {
- explicitBucketBoundaries: [
- 1,
- 4,
- 16,
- 64,
- 256,
- 1024,
- 4096,
- 16384,
- 65536,
- 262144,
- 1048576,
- 4194304,
- 16777216,
- 67108864
- ]
- }
- });
- this._telemetryRecorder = new TelemetryRecorder(
- this._genaiClientOperationDuration,
- this._genaiClientTokenUsage,
- this.logger
- );
- }
- /**
- * Patch known AI SDK functions in-place on the provided module exports object.
- * This approach is compatible with both CJS and ESM module loaders.
- */
- _patchModuleExports(moduleExports) {
- if (!moduleExports || typeof moduleExports !== "object") {
- return null;
- }
- let inPlacePatched = true;
- const wrapFunction = (name, isEmbedMany = false) => {
- const current = moduleExports[name];
- if (typeof current !== "function") {
- return;
- }
- const currentFn = current;
- if (currentFn[_VercelAIInstrumentation._WRAPPED_SYMBOL]) {
- return;
- }
- const descriptor = Object.getOwnPropertyDescriptor(moduleExports, name);
- if (descriptor && (!descriptor.writable || !descriptor.configurable) && !descriptor.set) {
- inPlacePatched = false;
- return;
- }
- const patcher = this._patchers.get(name);
- if (!patcher) {
- return;
- }
- const patched = isEmbedMany ? patcher.patch(currentFn, true) : patcher.patch(currentFn);
- try {
- Object.defineProperty(patched, _VercelAIInstrumentation._WRAPPED_SYMBOL, {
- value: true,
- enumerable: false,
- configurable: false
- });
- } catch {
- }
- try {
- moduleExports[name] = patched;
- } catch {
- inPlacePatched = false;
- }
- };
- wrapFunction("generateText");
- wrapFunction("streamText");
- wrapFunction("embed");
- wrapFunction("embedMany", true);
- if (!inPlacePatched) {
- const proxiedModule = new Proxy(moduleExports, {
- get: (target, prop, receiver) => {
- const originalValue = Reflect.get(target, prop, receiver);
- if (typeof originalValue === "function" && typeof prop === "string" && this._patchers.has(prop)) {
- const patcher = this._patchers.get(prop);
- const isEmbedMany = prop === "embedMany";
- const wrapped = isEmbedMany ? patcher.patch(originalValue, true) : patcher.patch(originalValue);
- return wrapped;
- }
- return originalValue;
- }
- });
- return proxiedModule;
- }
- return moduleExports;
- }
- /**
- * Manual instrumentation hook for bundlers/Next.js. Applies in-place wrapping
- * on the provided module namespace.
- */
- manuallyInstrument(module2) {
- try {
- const result = this._patchModuleExports(module2);
- if (result !== null) {
- logger.debug("Applied manual Vercel AI instrumentation");
- this._vercelAiNamespace = result;
- return result;
- }
- logger.warn("Manual Vercel AI instrumentation received invalid module");
- return module2;
- } catch (error) {
- logger.error(`Failed manual Vercel AI instrumentation: ${String(error)}`);
- return this._vercelAiNamespace || module2;
- }
- }
- /**
- * Wrap a created provider/client instance (factory return) when possible.
- * Call this from wrappers that construct provider clients (e.g., OpenAI SDK).
- */
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- wrapFactoryReturn(instance) {
- return instance;
- }
- };
-
  // src/internal/instrumentation/auto-init.ts
  var autoInstrumentationsLoaded = false;
  var exceptionLogger = (error) => {
@@ -1148,7 +217,7 @@ var exceptionLogger = (error) => {
  function loadNodeAutoInstrumentations() {
  try {
  const nodeInstrumentations = (0, import_auto_instrumentations_node.getNodeAutoInstrumentations)();
- (0, import_instrumentation3.registerInstrumentations)({ instrumentations: nodeInstrumentations });
+ (0, import_instrumentation.registerInstrumentations)({ instrumentations: nodeInstrumentations });
  return nodeInstrumentations;
  } catch (error) {
  logger.error(`Failed to load Node.js auto-instrumentations: ${String(error)}`);
@@ -1158,8 +227,6 @@ function loadNodeAutoInstrumentations() {
  function loadGenAIInstrumentations() {
  const instrumentations = [];
  const genAIInstrumentationClasses = [
- { class: VercelAIInstrumentation, name: "Vercel AI" },
- // Load first to avoid conflicts
  { class: import_instrumentation_openai.OpenAIInstrumentation, name: "OpenAI" },
  { class: import_instrumentation_anthropic.AnthropicInstrumentation, name: "Anthropic" },
  { class: import_instrumentation_cohere.CohereInstrumentation, name: "Cohere" },
@@ -1182,7 +249,7 @@ function loadGenAIInstrumentations() {
  }
  }
  try {
- (0, import_instrumentation3.registerInstrumentations)({ instrumentations });
+ (0, import_instrumentation.registerInstrumentations)({ instrumentations });
  logger.info(`Auto-registered ${instrumentations.length} GenAI instrumentations`);
  } catch (error) {
  logger.error(`Failed to register GenAI instrumentations: ${String(error)}`);
@@ -1378,8 +445,7 @@ var InstrumentationRegistry = class _InstrumentationRegistry {
  },
  { class: import_instrumentation_chromadb2.ChromaDBInstrumentation, name: "ChromaDB", module: this.manualModules?.chromadb },
  { class: import_instrumentation_qdrant2.QdrantInstrumentation, name: "Qdrant", module: this.manualModules?.qdrant },
- { class: import_instrumentation_together2.TogetherInstrumentation, name: "Together", module: this.manualModules?.together },
- { class: VercelAIInstrumentation, name: "Vercel AI", module: this.manualModules?.vercelAI }
+ { class: import_instrumentation_together2.TogetherInstrumentation, name: "Together", module: this.manualModules?.together }
  ];
  for (const config of instrumentationConfigs) {
  if (config.module) {
@@ -1398,13 +464,13 @@ var InstrumentationRegistry = class _InstrumentationRegistry {
  };
 
  // src/internal/log/logging.ts
- var import_api_logs2 = require("@opentelemetry/api-logs");
+ var import_api_logs = require("@opentelemetry/api-logs");
  var import_exporter_logs_otlp_http = require("@opentelemetry/exporter-logs-otlp-http");
  var import_resources = require("@opentelemetry/resources");
  var import_sdk_logs2 = require("@opentelemetry/sdk-logs");
 
  // src/internal/log/processors/log-processor.ts
- var import_api9 = require("@opentelemetry/api");
+ var import_api3 = require("@opentelemetry/api");
  var import_sdk_logs = require("@opentelemetry/sdk-logs");
 
  // src/internal/masking/patterns.ts
@@ -2030,11 +1096,11 @@ function maskAttributes(attributes, rules, outputOriginalValue = false) {
  }
 
  // src/internal/semantic-conventions.ts
- var import_api8 = require("@opentelemetry/api");
+ var import_api2 = require("@opentelemetry/api");
  var BRIZZ = "brizz";
  var PROPERTIES = "properties";
  var SESSION_ID = "session.id";
- var PROPERTIES_CONTEXT_KEY = (0, import_api8.createContextKey)(PROPERTIES);
+ var PROPERTIES_CONTEXT_KEY = (0, import_api2.createContextKey)(PROPERTIES);
 
  // src/internal/log/processors/log-processor.ts
  var DEFAULT_LOG_MASKING_RULES = [
@@ -2055,7 +1121,7 @@ var BrizzSimpleLogRecordProcessor = class extends import_sdk_logs.SimpleLogRecor
  if (maskingConfig) {
  maskLog(logRecord, maskingConfig);
  }
- const associationProperties = import_api9.context.active().getValue(PROPERTIES_CONTEXT_KEY);
+ const associationProperties = import_api3.context.active().getValue(PROPERTIES_CONTEXT_KEY);
  if (associationProperties) {
  for (const [key, value] of Object.entries(associationProperties)) {
  logRecord.setAttribute(`${BRIZZ}.${key}`, value);
@@ -2075,7 +1141,7 @@ var BrizzBatchLogRecordProcessor = class extends import_sdk_logs.BatchLogRecordP
  if (maskingConfig) {
  maskLog(logRecord, maskingConfig);
  }
- const associationProperties = import_api9.context.active().getValue(PROPERTIES_CONTEXT_KEY);
+ const associationProperties = import_api3.context.active().getValue(PROPERTIES_CONTEXT_KEY);
  if (associationProperties) {
  for (const [key, value] of Object.entries(associationProperties)) {
  logRecord.setAttribute(`${BRIZZ}.${key}`, value);
@@ -2212,7 +1278,7 @@ var LoggingModule = class _LoggingModule {
  /**
  * Emit a custom event to the telemetry pipeline
  */
- emitEvent(name, attributes, body, severityNumber = import_api_logs2.SeverityNumber.INFO) {
+ emitEvent(name, attributes, body, severityNumber = import_api_logs.SeverityNumber.INFO) {
  logger.debug("Attempting to emit event", {
  name,
  hasAttributes: !!attributes,
@@ -2280,7 +1346,7 @@ var LoggingModule = class _LoggingModule {
  logger.debug("Logging module shutdown completed");
  }
  };
- function emitEvent(name, attributes, body, severityNumber = import_api_logs2.SeverityNumber.INFO) {
+ function emitEvent(name, attributes, body, severityNumber = import_api_logs.SeverityNumber.INFO) {
  return LoggingModule.getInstance().emitEvent(name, attributes, body, severityNumber);
  }
 
@@ -2390,8 +1456,122 @@ function getMetricsReader() {
  var import_exporter_trace_otlp_http = require("@opentelemetry/exporter-trace-otlp-http");
 
  // src/internal/trace/processors/span-processor.ts
- var import_api10 = require("@opentelemetry/api");
+ var import_api4 = require("@opentelemetry/api");
  var import_sdk_trace_base = require("@opentelemetry/sdk-trace-base");
+
+ // src/internal/trace/transformations/vercel-ai.ts
+ var import_ai_semantic_conventions = require("@traceloop/ai-semantic-conventions");
+ var AI_GENERATE_TEXT_DO_GENERATE = "ai.generateText.doGenerate";
+ var AI_STREAM_TEXT_DO_STREAM = "ai.streamText.doStream";
+ var HANDLED_SPAN_NAMES = {
+ [AI_GENERATE_TEXT_DO_GENERATE]: "gen_ai.chat",
+ [AI_STREAM_TEXT_DO_STREAM]: "gen_ai.chat",
+ "ai.streamText": "ai.streamText",
+ "ai.toolCall": (span) => {
+ const toolName = span.attributes["ai.toolCall.name"];
+ return `${toolName}.tool`;
+ }
+ };
+ var AI_RESPONSE_TEXT = "ai.response.text";
+ var AI_PROMPT_MESSAGES = "ai.prompt.messages";
+ var AI_USAGE_PROMPT_TOKENS = "ai.usage.promptTokens";
+ var AI_USAGE_COMPLETION_TOKENS = "ai.usage.completionTokens";
+ var AI_MODEL_PROVIDER = "ai.model.provider";
+ var transformAiSdkSpanName = (span) => {
+ if (span.name in HANDLED_SPAN_NAMES) {
+ if (typeof HANDLED_SPAN_NAMES[span.name] === "function") {
+ span.name = HANDLED_SPAN_NAMES[span.name](span);
+ } else {
+ span.name = HANDLED_SPAN_NAMES[span.name];
+ }
+ }
+ };
+ var transformResponseText = (attributes) => {
+ if (AI_RESPONSE_TEXT in attributes) {
+ attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_COMPLETIONS}.0.content`] = attributes[AI_RESPONSE_TEXT];
+ attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_COMPLETIONS}.0.role`] = "assistant";
+ delete attributes[AI_RESPONSE_TEXT];
+ }
+ };
+ var transformPromptMessages = (attributes) => {
+ if (AI_PROMPT_MESSAGES in attributes) {
+ try {
+ const messages = JSON.parse(attributes[AI_PROMPT_MESSAGES]);
+ messages.forEach((msg, index) => {
+ logger.debug("Transforming prompt message", { msg, type: typeof msg.content });
+ if (typeof msg.content === "string") {
+ attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_PROMPTS}.${index}.content`] = msg.content;
+ } else {
+ if (Array.isArray(msg.content) && msg.content.length > 0) {
+ const lastContent = msg.content[msg.content.length - 1];
+ if (lastContent.text) {
+ attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_PROMPTS}.${index}.content`] = lastContent.text;
+ }
+ } else {
+ attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_PROMPTS}.${index}.content`] = JSON.stringify(
+ msg.content
+ );
+ }
+ }
+ attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_PROMPTS}.${index}.role`] = msg.role;
+ });
+ delete attributes[AI_PROMPT_MESSAGES];
+ } catch {
+ }
+ }
+ };
+ var transformPromptTokens = (attributes) => {
+ if (AI_USAGE_PROMPT_TOKENS in attributes) {
+ attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_USAGE_PROMPT_TOKENS}`] = attributes[AI_USAGE_PROMPT_TOKENS];
+ delete attributes[AI_USAGE_PROMPT_TOKENS];
+ }
+ };
+ var transformCompletionTokens = (attributes) => {
+ if (AI_USAGE_COMPLETION_TOKENS in attributes) {
+ attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_USAGE_COMPLETION_TOKENS}`] = attributes[AI_USAGE_COMPLETION_TOKENS];
+ delete attributes[AI_USAGE_COMPLETION_TOKENS];
+ }
+ };
+ var calculateTotalTokens = (attributes) => {
+ const promptTokens = attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_USAGE_PROMPT_TOKENS}`];
+ const completionTokens = attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_USAGE_COMPLETION_TOKENS}`];
+ if (promptTokens && completionTokens) {
+ attributes[`${import_ai_semantic_conventions.SpanAttributes.LLM_USAGE_TOTAL_TOKENS}`] = Number(promptTokens) + Number(completionTokens);
+ }
+ };
+ var transformVendor = (attributes) => {
+ if (AI_MODEL_PROVIDER in attributes) {
+ const vendor = attributes[AI_MODEL_PROVIDER];
+ if (vendor && vendor.startsWith("openai")) {
+ attributes[import_ai_semantic_conventions.SpanAttributes.LLM_SYSTEM] = "OpenAI";
+ } else {
+ attributes[import_ai_semantic_conventions.SpanAttributes.LLM_SYSTEM] = vendor;
+ }
+ delete attributes[AI_MODEL_PROVIDER];
+ }
+ };
+ var transformAiSdkAttributes = (attributes) => {
+ transformResponseText(attributes);
+ transformPromptMessages(attributes);
+ transformPromptTokens(attributes);
+ transformCompletionTokens(attributes);
+ calculateTotalTokens(attributes);
+ transformVendor(attributes);
+ };
+ var shouldHandleSpan = (span) => {
+ return span.name in HANDLED_SPAN_NAMES;
+ };
+ var transformAiSdkSpan = (span) => {
+ logger.debug("Transforming AI SDK span", { spanName: span.name });
+ if (!shouldHandleSpan(span)) {
+ logger.debug("Skipping span transformation", { spanName: span.name });
+ return;
+ }
+ transformAiSdkSpanName(span);
+ transformAiSdkAttributes(span.attributes);
+ };
+
+ // src/internal/trace/processors/span-processor.ts
  var DEFAULT_MASKING_RULES = [
  {
  mode: "partial",
@@ -2402,16 +1582,6 @@ var DEFAULT_MASKING_RULES = [
  mode: "partial",
  attributePattern: "gen_ai.completion",
  patterns: DEFAULT_PII_PATTERNS
- },
- {
- mode: "partial",
- attributePattern: "traceloop.entity.input",
- patterns: DEFAULT_PII_PATTERNS
- },
- {
- mode: "partial",
- attributePattern: "traceloop.entity.output",
- patterns: DEFAULT_PII_PATTERNS
  }
  ];
  var BrizzSimpleSpanProcessor = class extends import_sdk_trace_base.SimpleSpanProcessor {
@@ -2436,7 +1606,7 @@ var BrizzSimpleSpanProcessor = class extends import_sdk_trace_base.SimpleSpanPro
  if (maskingConfig) {
  maskSpan(span, maskingConfig);
  }
- const associationProperties = import_api10.context.active().getValue(PROPERTIES_CONTEXT_KEY);
+ const associationProperties = import_api4.context.active().getValue(PROPERTIES_CONTEXT_KEY);
  if (associationProperties) {
  for (const [key, value] of Object.entries(associationProperties)) {
  span.setAttribute(`${BRIZZ}.${key}`, value);
@@ -2444,6 +1614,10 @@ var BrizzSimpleSpanProcessor = class extends import_sdk_trace_base.SimpleSpanPro
  }
  super.onStart(span, parentContext);
  }
+ onEnd(span) {
+ transformAiSdkSpan(span);
+ super.onEnd(span);
+ }
  };
  var BrizzBatchSpanProcessor = class extends import_sdk_trace_base.BatchSpanProcessor {
  config;
  var BrizzBatchSpanProcessor = class extends import_sdk_trace_base.BatchSpanProcessor {
2449
1623
  config;
@@ -2456,7 +1630,7 @@ var BrizzBatchSpanProcessor = class extends import_sdk_trace_base.BatchSpanProce
2456
1630
  if (maskingConfig) {
2457
1631
  maskSpan(span, maskingConfig);
2458
1632
  }
2459
- const associationProperties = import_api10.context.active().getValue(PROPERTIES_CONTEXT_KEY);
1633
+ const associationProperties = import_api4.context.active().getValue(PROPERTIES_CONTEXT_KEY);
2460
1634
  if (associationProperties) {
2461
1635
  for (const [key, value] of Object.entries(associationProperties)) {
2462
1636
  span.setAttribute(`${BRIZZ}.${key}`, value);
@@ -2464,6 +1638,10 @@ var BrizzBatchSpanProcessor = class extends import_sdk_trace_base.BatchSpanProce
  }
  super.onStart(span, parentContext);
  }
+ onEnd(span) {
+ transformAiSdkSpan(span);
+ super.onEnd(span);
+ }
  };
  function maskSpan(span, config) {
  if (!span.attributes || Object.keys(span.attributes).length === 0) {
@@ -2551,8 +1729,9 @@ var TracingModule = class _TracingModule {
  disableBatch: config.disableBatch,
  hasMasking: !!config.masking?.spanMasking
  });
- this.spanProcessor = config.disableBatch ? new BrizzSimpleSpanProcessor(this.spanExporter, config) : new BrizzBatchSpanProcessor(this.spanExporter, config);
+ const spanProcessor = config.disableBatch ? new BrizzSimpleSpanProcessor(this.spanExporter, config) : new BrizzBatchSpanProcessor(this.spanExporter, config);
  logger.debug("Span processor initialized successfully");
+ this.spanProcessor = spanProcessor;
  }
  /**
  * Get the span exporter
@@ -2594,13 +1773,13 @@ function getSpanProcessor() {
  }
 
  // src/internal/trace/session.ts
- var import_api11 = require("@opentelemetry/api");
+ var import_api5 = require("@opentelemetry/api");
  function withProperties(properties, fn, thisArg, ...args) {
  if (Object.keys(properties).length === 0) {
  return fn.apply(thisArg, args);
  }
- const newContext = import_api11.context.active().setValue(PROPERTIES_CONTEXT_KEY, properties);
- return import_api11.context.with(newContext, fn, thisArg, ...args);
+ const newContext = import_api5.context.active().setValue(PROPERTIES_CONTEXT_KEY, properties);
+ return import_api5.context.with(newContext, fn, thisArg, ...args);
  }
  function WithSessionId(sessionId, fn, thisArg, ...args) {
  return withProperties({ [SESSION_ID]: sessionId }, fn, thisArg, ...args);
@@ -2794,7 +1973,7 @@ var _Brizz = class __Brizz {
  var Brizz = new _Brizz();
 
  // src/index.ts
- var import_api_logs3 = require("@opentelemetry/api-logs");
+ var import_api_logs2 = require("@opentelemetry/api-logs");
 
  // src/node/runtime.ts
  function detectRuntime() {
@@ -2846,7 +2025,6 @@ var init_exports = {};
  DEFAULT_PII_PATTERNS,
  LogLevel,
  SeverityNumber,
- VercelAIInstrumentation,
  WithSessionId,
  detectRuntime,
  emitEvent,
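
Net effect of this diff: the bundled VercelAIInstrumentation (which monkey-patched generateText, streamText, embed, and embedMany and was exported from the package) is removed. Instead, BrizzSimpleSpanProcessor and BrizzBatchSpanProcessor now rewrite the Vercel AI SDK's own telemetry spans in onEnd via transformAiSdkSpan, renaming spans such as "ai.generateText.doGenerate" and "ai.streamText.doStream" to "gen_ai.chat" and remapping ai.usage.* and ai.prompt.* attributes to the @traceloop/ai-semantic-conventions keys. A minimal sketch of what feeds that path, assuming the AI SDK's built-in experimental_telemetry option (part of the "ai" package, not of @brizz/sdk; the model and prompt below are illustrative):

// sketch.ts — hedged example, not shipped with @brizz/sdk
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

async function main(): Promise<void> {
  const result = await generateText({
    model: openai("gpt-4o-mini"), // assumed provider/model for illustration
    prompt: "Say hello.",
    // The AI SDK emits its own spans (e.g. "ai.generateText.doGenerate");
    // with 0.1.3-rc.0 those spans are transformed by the Brizz span
    // processors in onEnd rather than created by a patching instrumentation.
    experimental_telemetry: { isEnabled: true },
  });
  console.log(result.text);
}

main();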