@brizz/sdk 0.1.2 → 0.1.3-rc.0

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/dist/preload.js CHANGED
@@ -234,941 +234,6 @@ import { PineconeInstrumentation } from "@traceloop/instrumentation-pinecone";
  import { QdrantInstrumentation } from "@traceloop/instrumentation-qdrant";
  import { TogetherInstrumentation } from "@traceloop/instrumentation-together";
  import { VertexAIInstrumentation } from "@traceloop/instrumentation-vertexai";
-
- // src/internal/instrumentation/vercel-ai/instrumentation.ts
- import {
-   InstrumentationBase,
-   InstrumentationNodeModuleDefinition
- } from "@opentelemetry/instrumentation";
-
- // src/internal/instrumentation/vercel-ai/patchers/base-patcher.ts
- import { SpanKind, SpanStatusCode } from "@opentelemetry/api";
-
- // src/internal/instrumentation/vercel-ai/semconv.ts
- var ATTR_GEN_AI_SYSTEM = "gen_ai.system";
- var ATTR_GEN_AI_OPERATION_NAME = "gen_ai.operation.name";
- var ATTR_GEN_AI_REQUEST_MODEL = "gen_ai.request.model";
- var ATTR_GEN_AI_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens";
- var ATTR_GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature";
- var ATTR_GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p";
- var ATTR_GEN_AI_REQUEST_TOP_K = "gen_ai.request.top_k";
- var ATTR_GEN_AI_REQUEST_STOP_SEQUENCES = "gen_ai.request.stop_sequences";
- var ATTR_GEN_AI_REQUEST_FREQUENCY_PENALTY = "gen_ai.request.frequency_penalty";
- var ATTR_GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty";
- var ATTR_GEN_AI_RESPONSE_ID = "gen_ai.response.id";
- var ATTR_GEN_AI_RESPONSE_MODEL = "gen_ai.response.model";
- var ATTR_GEN_AI_RESPONSE_FINISH_REASONS = "gen_ai.response.finish_reasons";
- var ATTR_GEN_AI_TOKEN_TYPE = "gen_ai.token.type";
- var ATTR_GEN_AI_PROMPT = "gen_ai.prompt";
- var ATTR_GEN_AI_COMPLETION = "gen_ai.completion";
- var ATTR_GEN_AI_OPENAI_API_BASE = "gen_ai.openai.api_base";
- var ATTR_EVENT_NAME = "event.name";
- var EVENT_GEN_AI_USER_MESSAGE = "gen_ai.user.message";
- var EVENT_GEN_AI_ASSISTANT_MESSAGE = "gen_ai.assistant.message";
- var EVENT_GEN_AI_SYSTEM_MESSAGE = "gen_ai.system.message";
- var EVENT_GEN_AI_TOOL_MESSAGE = "gen_ai.tool.message";
- var METRIC_GEN_AI_CLIENT_OPERATION_DURATION = "gen_ai.client.operation.duration";
- var METRIC_GEN_AI_CLIENT_TOKEN_USAGE = "gen_ai.client.token.usage";
- var OPERATION_NAME_CHAT = "chat";
- var OPERATION_NAME_EMBEDDINGS = "embeddings";
- var TOKEN_TYPE_INPUT = "input";
- var TOKEN_TYPE_OUTPUT = "output";
- var PROVIDER_OPENAI = "openai";
- var PROVIDER_ANTHROPIC = "anthropic";
- var PROVIDER_GOOGLE = "google";
- var PROVIDER_AMAZON = "amazon";
- var PROVIDER_AZURE = "azure";
- var PROVIDER_VERCEL = "vercel";
- var PROVIDER_UNKNOWN = "unknown";
- var SPAN_NAME_GEN_AI_CHAT = "gen_ai.chat";
- var SPAN_NAME_GEN_AI_EMBEDDINGS = "gen_ai.embeddings";
-
- // src/internal/instrumentation/vercel-ai/utils.ts
- function detectProvider(model) {
-   if (typeof model === "object" && model !== null) {
-     const modelObj = model;
-     if (modelObj.provider) {
-       return {
-         system: normalizeProviderName(modelObj.provider),
-         apiBase: extractApiBase(modelObj)
-       };
-     }
-     if (modelObj.modelId) {
-       return detectProviderFromModelId(modelObj.modelId);
-     }
-   }
-   if (typeof model === "string") {
-     return detectProviderFromModelId(model);
-   }
-   return { system: PROVIDER_UNKNOWN };
- }
- function detectProviderFromModelId(modelId) {
-   const lowerModel = modelId.toLowerCase();
-   if (lowerModel.startsWith("gpt-") || lowerModel.startsWith("text-davinci-") || lowerModel.startsWith("text-embedding-") || lowerModel.startsWith("dall-e") || lowerModel.startsWith("whisper-") || lowerModel.startsWith("tts-")) {
-     return { system: PROVIDER_OPENAI };
-   }
-   if (lowerModel.startsWith("claude-")) {
-     return { system: PROVIDER_ANTHROPIC };
-   }
-   if (lowerModel.startsWith("gemini-") || lowerModel.startsWith("palm-") || lowerModel.includes("bison") || lowerModel.includes("gecko")) {
-     return { system: PROVIDER_GOOGLE };
-   }
-   if (lowerModel.startsWith("amazon.") || lowerModel.startsWith("anthropic.claude-") || lowerModel.startsWith("ai21.") || lowerModel.startsWith("cohere.") || lowerModel.startsWith("meta.llama")) {
-     return { system: PROVIDER_AMAZON };
-   }
-   if (lowerModel.includes("azure") || lowerModel.includes(".openai.azure.com")) {
-     return { system: PROVIDER_AZURE };
-   }
-   const parts = modelId.split(/[-._/]/);
-   if (parts.length > 0 && parts[0]) {
-     return { system: normalizeProviderName(parts[0]) };
-   }
-   return { system: PROVIDER_UNKNOWN };
- }
- function normalizeProviderName(provider) {
-   const normalized = provider.toLowerCase().trim();
-   switch (normalized) {
-     case "openai":
-     case "open-ai":
-     case "open_ai": {
-       return PROVIDER_OPENAI;
-     }
-     case "anthropic":
-     case "claude": {
-       return PROVIDER_ANTHROPIC;
-     }
-     case "google":
-     case "vertex":
-     case "vertexai":
-     case "vertex-ai":
-     case "gemini": {
-       return PROVIDER_GOOGLE;
-     }
-     case "amazon":
-     case "aws":
-     case "bedrock":
-     case "amazon-bedrock": {
-       return PROVIDER_AMAZON;
-     }
-     case "azure":
-     case "azure-openai":
-     case "microsoft": {
-       return PROVIDER_AZURE;
-     }
-     case "vercel":
-     case "vercel-ai": {
-       return PROVIDER_VERCEL;
-     }
-     default: {
-       return normalized;
-     }
-   }
- }
- function extractApiBase(model) {
-   if (typeof model === "object" && model !== null) {
-     const anyModel = model;
-     return anyModel.apiBase || anyModel.baseURL || anyModel.endpoint || void 0;
-   }
-   return void 0;
- }
- function extractModelId(model) {
-   if (typeof model === "string") {
-     return model;
-   }
-   if (typeof model === "object" && model !== null) {
-     return model.modelId || "unknown";
-   }
-   return "unknown";
- }
- function messagesToAttributes(messages, prefix, captureContent) {
-   const attributes = {};
-   for (const [index, msg] of messages.entries()) {
-     const baseKey = `${prefix}.${index}`;
-     attributes[`${baseKey}.role`] = msg.role;
-     if (captureContent && msg.content) {
-       if (typeof msg.content === "string") {
-         attributes[`${baseKey}.content`] = msg.content;
-       } else if (Array.isArray(msg.content)) {
-         const textParts = msg.content.filter((part) => part.type === "text" && part.text).map((part) => part.text).join(" ");
-         if (textParts) {
-           attributes[`${baseKey}.content`] = textParts;
-         }
-       }
-     }
-     if (msg.toolInvocations && msg.toolInvocations.length > 0) {
-       attributes[`${baseKey}.tool_calls`] = msg.toolInvocations.length;
-     }
-   }
-   return attributes;
- }
- function promptToAttributes(prompt, captureContent) {
-   const attributes = {};
-   attributes[`${ATTR_GEN_AI_PROMPT}.0.role`] = "user";
-   if (captureContent) {
-     attributes[`${ATTR_GEN_AI_PROMPT}.0.content`] = prompt;
-   }
-   return attributes;
- }
- function completionToAttributes(text, finishReason, captureContent) {
-   const attributes = {};
-   attributes[`${ATTR_GEN_AI_COMPLETION}.0.role`] = "assistant";
-   if (captureContent) {
-     attributes[`${ATTR_GEN_AI_COMPLETION}.0.content`] = text;
-   }
-   if (finishReason) {
-     attributes[`${ATTR_GEN_AI_COMPLETION}.0.finish_reason`] = finishReason;
-   }
-   return attributes;
- }
- function tokenUsageToAttributes(usage) {
-   if (!usage) {
-     return {};
-   }
-   const attributes = {};
-   if (usage.inputTokens !== void 0) {
-     attributes["gen_ai.usage.prompt_tokens"] = usage.inputTokens;
-     attributes["gen_ai.usage.input_tokens"] = usage.inputTokens;
-     attributes["llm.usage.prompt_tokens"] = usage.inputTokens;
-   } else if (usage.promptTokens !== void 0) {
-     attributes["gen_ai.usage.prompt_tokens"] = usage.promptTokens;
-     attributes["gen_ai.usage.input_tokens"] = usage.promptTokens;
-     attributes["llm.usage.prompt_tokens"] = usage.promptTokens;
-   }
-   if (usage.outputTokens !== void 0) {
-     attributes["gen_ai.usage.completion_tokens"] = usage.outputTokens;
-     attributes["gen_ai.usage.output_tokens"] = usage.outputTokens;
-     attributes["llm.usage.completion_tokens"] = usage.outputTokens;
-   } else if (usage.completionTokens !== void 0) {
-     attributes["gen_ai.usage.completion_tokens"] = usage.completionTokens;
-     attributes["gen_ai.usage.output_tokens"] = usage.completionTokens;
-     attributes["llm.usage.completion_tokens"] = usage.completionTokens;
-   }
-   if (usage.totalTokens === void 0) {
-     const inputTokens = usage.inputTokens || usage.promptTokens;
-     const outputTokens = usage.outputTokens || usage.completionTokens;
-     if (inputTokens !== void 0 && outputTokens !== void 0) {
-       const totalTokens = inputTokens + outputTokens;
-       attributes["gen_ai.usage.total_tokens"] = totalTokens;
-       attributes["llm.usage.total_tokens"] = totalTokens;
-     }
-   } else {
-     attributes["gen_ai.usage.total_tokens"] = usage.totalTokens;
-     attributes["llm.usage.total_tokens"] = usage.totalTokens;
-   }
-   return attributes;
- }
- function shouldRecordError(error) {
-   if (error instanceof Error) {
-     const message = error.message.toLowerCase();
-     if (message.includes("abort") || message.includes("cancel")) {
-       return false;
-     }
-   }
-   return true;
- }
- function getEnvBool(name) {
-   const value = process.env[name];
-   if (value === void 0) {
-     return void 0;
-   }
-   return value.toLowerCase() === "true" || value === "1";
- }
-
- // src/internal/instrumentation/vercel-ai/patchers/base-patcher.ts
- var BasePatcher = class {
-   constructor(context8) {
-     this.context = context8;
-   }
-   createSpan(spanName, params, operationName, additionalAttributes) {
-     const provider = detectProvider(params.model);
-     const modelId = extractModelId(params.model);
-     const span = this.context.tracer.startSpan(spanName, {
-       kind: SpanKind.CLIENT,
-       attributes: {
-         [ATTR_GEN_AI_SYSTEM]: provider.system,
-         [ATTR_GEN_AI_OPERATION_NAME]: operationName,
-         [ATTR_GEN_AI_REQUEST_MODEL]: modelId,
-         ...params.maxTokens && { [ATTR_GEN_AI_REQUEST_MAX_TOKENS]: params.maxTokens },
-         ...params.temperature !== void 0 && {
-           [ATTR_GEN_AI_REQUEST_TEMPERATURE]: params.temperature
-         },
-         ...params.topP !== void 0 && { [ATTR_GEN_AI_REQUEST_TOP_P]: params.topP },
-         ...params.topK !== void 0 && { [ATTR_GEN_AI_REQUEST_TOP_K]: params.topK },
-         ...params.frequencyPenalty !== void 0 && {
-           [ATTR_GEN_AI_REQUEST_FREQUENCY_PENALTY]: params.frequencyPenalty
-         },
-         ...params.presencePenalty !== void 0 && {
-           [ATTR_GEN_AI_REQUEST_PRESENCE_PENALTY]: params.presencePenalty
-         },
-         ...params.stopSequences && {
-           [ATTR_GEN_AI_REQUEST_STOP_SEQUENCES]: params.stopSequences
-         },
-         ...provider.apiBase && { [ATTR_GEN_AI_OPENAI_API_BASE]: provider.apiBase },
-         ...additionalAttributes
-       }
-     });
-     return { span, provider, modelId };
-   }
-   handleError(error, span) {
-     if (shouldRecordError(error)) {
-       span.recordException(error);
-       span.setStatus({ code: SpanStatusCode.ERROR, message: error.message });
-     }
-   }
-   finalizeDuration(span, startTime, config, provider, modelId, operationName) {
-     if (config.enableMetrics) {
-       const duration = (globalThis.performance.now() - startTime) / 1e3;
-       this.context.recordDurationMetric(duration, provider.system, modelId, operationName);
-     }
-     span.end();
-   }
- };
-
- // src/internal/instrumentation/vercel-ai/patchers/generate-text-patcher.ts
- import { context, SpanStatusCode as SpanStatusCode2, trace } from "@opentelemetry/api";
- var GenerateTextPatcher = class extends BasePatcher {
-   patch(original) {
-     return async (params) => {
-       const config = this.context.getConfig();
-       const startTime = globalThis.performance.now();
-       const { span, provider, modelId } = this.createSpan(
-         SPAN_NAME_GEN_AI_CHAT,
-         params,
-         OPERATION_NAME_CHAT
-       );
-       if (params.prompt) {
-         span.setAttributes(
-           promptToAttributes(params.prompt, config.captureMessageContent || false)
-         );
-       } else if (params.messages) {
-         span.setAttributes(
-           messagesToAttributes(
-             params.messages,
-             "gen_ai.prompt",
-             config.captureMessageContent || false
-           )
-         );
-         if (config.emitEvents) {
-           this.context.emitMessageEvents(params.messages, provider.system, span);
-         }
-       }
-       try {
-         const result = await context.with(
-           trace.setSpan(context.active(), span),
-           () => original(params)
-         );
-         if (result.response) {
-           span.setAttributes({
-             ...result.response.id && { [ATTR_GEN_AI_RESPONSE_ID]: result.response.id },
-             ...result.response.model && { [ATTR_GEN_AI_RESPONSE_MODEL]: result.response.model }
-           });
-         }
-         if (result.finishReason) {
-           span.setAttribute(ATTR_GEN_AI_RESPONSE_FINISH_REASONS, [result.finishReason]);
-         }
-         span.setAttributes(
-           completionToAttributes(
-             result.text,
-             result.finishReason,
-             config.captureMessageContent || false
-           )
-         );
-         const usage = result.usage || result.totalUsage || result.steps?.[0]?.usage;
-         if (usage) {
-           span.setAttributes(tokenUsageToAttributes(usage));
-           if (config.enableMetrics) {
-             this.context.recordTokenMetrics(usage, provider.system, modelId);
-           }
-         }
-         if (config.emitEvents) {
-           this.context.emitAssistantMessageEvent(result.text, provider.system, span);
-         }
-         span.setStatus({ code: SpanStatusCode2.OK });
-         return result;
-       } catch (error) {
-         this.handleError(error, span);
-         throw error;
-       } finally {
-         this.finalizeDuration(span, startTime, config, provider, modelId, OPERATION_NAME_CHAT);
-       }
-     };
-   }
- };
-
- // src/internal/instrumentation/vercel-ai/patchers/stream-text-patcher.ts
- import { context as context2, trace as trace2 } from "@opentelemetry/api";
- var StreamTextPatcher = class extends BasePatcher {
-   constructor(context8, streamHandler) {
-     super(context8);
-     this.streamHandler = streamHandler;
-   }
-   patch(original) {
-     return async (params) => {
-       const config = this.context.getConfig();
-       const startTime = globalThis.performance.now();
-       const { span, provider, modelId } = this.createSpan(
-         SPAN_NAME_GEN_AI_CHAT,
-         params,
-         OPERATION_NAME_CHAT,
-         { "gen_ai.streaming": true }
-       );
-       if (params.prompt) {
-         span.setAttributes(
-           promptToAttributes(params.prompt, config.captureMessageContent || false)
-         );
-       } else if (params.messages) {
-         span.setAttributes(
-           messagesToAttributes(
-             params.messages,
-             "gen_ai.prompt",
-             config.captureMessageContent || false
-           )
-         );
-         if (config.emitEvents) {
-           this.context.emitMessageEvents(params.messages, provider.system, span);
-         }
-       }
-       try {
-         const stream = await context2.with(
-           trace2.setSpan(context2.active(), span),
-           () => original(params)
-         );
-         return this.streamHandler.wrapStream(stream, span, config, provider, modelId, startTime);
-       } catch (error) {
-         this.handleError(error, span);
-         span.end();
-         throw error;
-       }
-     };
-   }
- };
-
- // src/internal/instrumentation/vercel-ai/patchers/embeddings-patcher.ts
- import { context as context3, SpanStatusCode as SpanStatusCode3, trace as trace3 } from "@opentelemetry/api";
- var EmbeddingsPatcher = class extends BasePatcher {
-   patch(original, isMany = false) {
-     return async (params) => {
-       const config = this.context.getConfig();
-       const startTime = globalThis.performance.now();
-       const additionalAttributes = isMany ? { "gen_ai.embeddings.count": params.values ? params.values.length : 0 } : {};
-       const { span, provider, modelId } = this.createSpan(
-         SPAN_NAME_GEN_AI_EMBEDDINGS,
-         params,
-         OPERATION_NAME_EMBEDDINGS,
-         additionalAttributes
-       );
-       if (!isMany && config.captureMessageContent && params.value) {
-         span.setAttribute("gen_ai.prompt.0.content", params.value);
-       }
-       try {
-         const result = await context3.with(
-           trace3.setSpan(context3.active(), span),
-           () => original(params)
-         );
-         if (result.response) {
-           span.setAttributes({
-             ...result.response.id && { [ATTR_GEN_AI_RESPONSE_ID]: result.response.id },
-             ...result.response.model && { [ATTR_GEN_AI_RESPONSE_MODEL]: result.response.model }
-           });
-         }
-         if (isMany) {
-           if (result.embeddings && result.embeddings.length > 0 && result.embeddings[0]) {
-             span.setAttribute("gen_ai.response.embedding_dimensions", result.embeddings[0].length);
-           }
-         } else {
-           if (result.embedding) {
-             span.setAttribute("gen_ai.response.embedding_dimensions", result.embedding.length);
-           }
-         }
-         if (result.usage) {
-           span.setAttributes(tokenUsageToAttributes(result.usage));
-           if (config.enableMetrics) {
-             this.context.recordTokenMetrics(result.usage, provider.system, modelId);
-           }
-         }
-         span.setStatus({ code: SpanStatusCode3.OK });
-         return result;
-       } catch (error) {
-         this.handleError(error, span);
-         throw error;
-       } finally {
-         this.finalizeDuration(span, startTime, config, provider, modelId, OPERATION_NAME_EMBEDDINGS);
-       }
-     };
-   }
- };
-
- // src/internal/instrumentation/vercel-ai/stream-handler.ts
- import { SpanStatusCode as SpanStatusCode4 } from "@opentelemetry/api";
- var StreamHandler = class {
-   constructor(context8) {
-     this.context = context8;
-   }
-   wrapStream(stream, span, config, provider, modelId, startTime) {
-     const self = this;
-     let fullText = "";
-     let finishReason;
-     let usage;
-     let response;
-     const wrappedStream = new Proxy(stream, {
-       get(target, prop) {
-         if (prop === Symbol.asyncIterator) {
-           return async function* () {
-             try {
-               for await (const chunk of target) {
-                 if (chunk.type === "text-delta" && chunk.textDelta) {
-                   fullText += chunk.textDelta;
-                 } else if (chunk.type === "finish") {
-                   finishReason = chunk.finishReason;
-                   usage = chunk.usage;
-                 } else if (chunk.type === "response-metadata") {
-                   response = chunk.response;
-                 }
-                 yield chunk;
-               }
-             } finally {
-               self.finalizeStream(
-                 span,
-                 config,
-                 provider,
-                 modelId,
-                 startTime,
-                 fullText,
-                 finishReason,
-                 usage,
-                 response
-               );
-             }
-           };
-         }
-         if (prop === "textStream" || prop === "fullStream") {
-           const originalStream = target[prop];
-           return {
-             [Symbol.asyncIterator]: async function* () {
-               try {
-                 for await (const chunk of originalStream) {
-                   if (prop === "textStream") {
-                     fullText += chunk;
-                   }
-                   yield chunk;
-                 }
-               } finally {
-                 const streamUsage = await target.usage.catch(() => null);
-                 if (streamUsage) {
-                   usage = streamUsage;
-                 }
-                 self.finalizeStream(
-                   span,
-                   config,
-                   provider,
-                   modelId,
-                   startTime,
-                   fullText,
-                   finishReason,
-                   usage,
-                   response
-                 );
-               }
-             }
-           };
-         }
-         const value = target[prop];
-         if (typeof value === "function") {
-           return value.bind(target);
-         }
-         return value;
-       }
-     });
-     return wrappedStream;
-   }
-   finalizeStream(span, config, provider, modelId, startTime, fullText, finishReason, usage, response) {
-     if (response) {
-       span.setAttributes({
-         ...response.id && { [ATTR_GEN_AI_RESPONSE_ID]: response.id },
-         ...response.model && { [ATTR_GEN_AI_RESPONSE_MODEL]: response.model }
-       });
-     }
-     if (finishReason) {
-       span.setAttribute(ATTR_GEN_AI_RESPONSE_FINISH_REASONS, [finishReason]);
-     }
-     if (fullText) {
-       span.setAttributes(
-         completionToAttributes(
-           fullText,
-           finishReason,
-           config.captureMessageContent || false
-         )
-       );
-     }
-     if (usage) {
-       span.setAttributes(tokenUsageToAttributes(usage));
-       if (config.enableMetrics) {
-         this.context.recordTokenMetrics(usage, provider.system, modelId);
-       }
-     }
-     if (config.enableMetrics) {
-       const duration = (performance.now() - startTime) / 1e3;
-       this.context.recordDurationMetric(duration, provider.system, modelId, OPERATION_NAME_CHAT);
-     }
-     span.setStatus({ code: SpanStatusCode4.OK });
-     span.end();
-   }
- };
-
- // src/internal/instrumentation/vercel-ai/telemetry-recorder.ts
- import { context as context4, trace as trace4 } from "@opentelemetry/api";
- import { SeverityNumber } from "@opentelemetry/api-logs";
- var TelemetryRecorder = class {
-   constructor(genaiClientOperationDuration, genaiClientTokenUsage, logger2) {
-     this.genaiClientOperationDuration = genaiClientOperationDuration;
-     this.genaiClientTokenUsage = genaiClientTokenUsage;
-     this.logger = logger2;
-   }
-   /**
-    * Record token usage metrics
-    */
-   recordTokenMetrics(usage, system, model) {
-     if (!this.genaiClientTokenUsage) {
-       return;
-     }
-     const commonAttrs = {
-       [ATTR_GEN_AI_SYSTEM]: system,
-       [ATTR_GEN_AI_REQUEST_MODEL]: model
-     };
-     const inputTokens = usage.inputTokens || usage.promptTokens;
-     const outputTokens = usage.outputTokens || usage.completionTokens;
-     if (inputTokens !== void 0) {
-       this.genaiClientTokenUsage.record(inputTokens, {
-         ...commonAttrs,
-         [ATTR_GEN_AI_TOKEN_TYPE]: TOKEN_TYPE_INPUT
-       });
-     }
-     if (outputTokens !== void 0) {
-       this.genaiClientTokenUsage.record(outputTokens, {
-         ...commonAttrs,
-         [ATTR_GEN_AI_TOKEN_TYPE]: TOKEN_TYPE_OUTPUT
-       });
-     }
-   }
-   /**
-    * Record operation duration metric
-    */
-   recordDurationMetric(duration, system, model, operation) {
-     if (!this.genaiClientOperationDuration) {
-       return;
-     }
-     this.genaiClientOperationDuration.record(duration, {
-       [ATTR_GEN_AI_SYSTEM]: system,
-       [ATTR_GEN_AI_REQUEST_MODEL]: model,
-       [ATTR_GEN_AI_OPERATION_NAME]: operation
-     });
-   }
-   /**
-    * Emit message events
-    */
-   emitMessageEvents(messages, system, span) {
-     if (!this.logger) {
-       return;
-     }
-     const ctx = trace4.setSpan(context4.active(), span);
-     for (const msg of messages) {
-       let eventName;
-       switch (msg.role) {
-         case "system": {
-           eventName = EVENT_GEN_AI_SYSTEM_MESSAGE;
-           break;
-         }
-         case "user": {
-           eventName = EVENT_GEN_AI_USER_MESSAGE;
-           break;
-         }
-         case "assistant": {
-           eventName = EVENT_GEN_AI_ASSISTANT_MESSAGE;
-           break;
-         }
-         case "tool":
-         case "function": {
-           eventName = EVENT_GEN_AI_TOOL_MESSAGE;
-           break;
-         }
-         default: {
-           continue;
-         }
-       }
-       this.logger.emit({
-         timestamp: Date.now(),
-         context: ctx,
-         severityNumber: SeverityNumber.INFO,
-         attributes: {
-           [ATTR_EVENT_NAME]: eventName,
-           [ATTR_GEN_AI_SYSTEM]: system
-         },
-         body: {
-           role: msg.role,
-           content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content),
-           name: msg.name
-         }
-       });
-     }
-   }
-   /**
-    * Emit assistant message event
-    */
-   emitAssistantMessageEvent(text, system, span) {
-     if (!this.logger) {
-       return;
-     }
-     const ctx = trace4.setSpan(context4.active(), span);
-     this.logger.emit({
-       timestamp: Date.now(),
-       context: ctx,
-       severityNumber: SeverityNumber.INFO,
-       attributes: {
-         [ATTR_EVENT_NAME]: EVENT_GEN_AI_ASSISTANT_MESSAGE,
-         [ATTR_GEN_AI_SYSTEM]: system
-       },
-       body: {
-         role: "assistant",
-         content: text
-       }
-     });
-   }
- };
-
- // src/internal/instrumentation/vercel-ai/instrumentation.ts
- var PACKAGE_NAME = "@brizz/vercel-ai-instrumentation";
- var PACKAGE_VERSION = "0.1.0";
- var VercelAIInstrumentation = class _VercelAIInstrumentation extends InstrumentationBase {
-   _genaiClientOperationDuration;
-   _genaiClientTokenUsage;
-   _telemetryRecorder;
-   _streamHandler;
-   _patchers = /* @__PURE__ */ new Map();
-   // Holds last patched namespace when available (reserved for future factory wrapping)
-   _vercelAiNamespace = null;
-   static _WRAPPED_SYMBOL = Symbol.for("brizz.vercel-ai.patched");
-   constructor(config = {}) {
-     super(PACKAGE_NAME, PACKAGE_VERSION, config);
-     const cfg = this.getConfig();
-     const envCC = getEnvBool("OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT");
-     if (envCC !== void 0) {
-       cfg.captureMessageContent = envCC;
-     }
-     this._initializeComponents();
-   }
-   setConfig(config = {}) {
-     const {
-       captureMessageContent = true,
-       enableMetrics = true,
-       emitEvents = true,
-       ...validConfig
-     } = config;
-     const fullConfig = {
-       ...validConfig,
-       captureMessageContent,
-       enableMetrics,
-       emitEvents
-     };
-     super.setConfig(fullConfig);
-   }
-   _initializeComponents() {
-     this._telemetryRecorder = new TelemetryRecorder(
-       this._genaiClientOperationDuration,
-       this._genaiClientTokenUsage,
-       this.logger
-     );
-     this._streamHandler = new StreamHandler({
-       recordTokenMetrics: this._telemetryRecorder.recordTokenMetrics.bind(this._telemetryRecorder),
-       recordDurationMetric: this._telemetryRecorder.recordDurationMetric.bind(
-         this._telemetryRecorder
-       )
-     });
-     const patcherContext = {
-       tracer: this.tracer,
-       getConfig: this.getConfig.bind(this),
-       recordTokenMetrics: this._telemetryRecorder.recordTokenMetrics.bind(this._telemetryRecorder),
-       recordDurationMetric: this._telemetryRecorder.recordDurationMetric.bind(
-         this._telemetryRecorder
-       ),
-       emitMessageEvents: this._telemetryRecorder.emitMessageEvents.bind(this._telemetryRecorder),
-       emitAssistantMessageEvent: this._telemetryRecorder.emitAssistantMessageEvent.bind(
-         this._telemetryRecorder
-       )
-     };
-     this._patchers.set("generateText", new GenerateTextPatcher(patcherContext));
-     this._patchers.set("streamText", new StreamTextPatcher(patcherContext, this._streamHandler));
-     this._patchers.set("embed", new EmbeddingsPatcher(patcherContext));
-     this._patchers.set("embedMany", new EmbeddingsPatcher(patcherContext));
-   }
-   init() {
-     return [
-       new InstrumentationNodeModuleDefinition(
-         "ai",
-         [">=4.0.0 <6"],
-         (moduleExports) => {
-           logger.info("Starting instrumentation of Vercel AI SDK module");
-           this._vercelAiNamespace = moduleExports;
-           const patched = this._patchModuleExports(moduleExports);
-           return patched ?? moduleExports;
-         },
-         (moduleExports) => {
-           logger.debug("Uninstrumenting @vercel/ai module");
-           return moduleExports;
-         }
-       )
-     ];
-   }
-   _updateMetricInstruments() {
-     const config = this.getConfig();
-     if (!config.enableMetrics) {
-       return;
-     }
-     this._genaiClientOperationDuration = this.meter.createHistogram(
-       METRIC_GEN_AI_CLIENT_OPERATION_DURATION,
-       {
-         description: "GenAI operation duration",
-         unit: "s",
-         advice: {
-           explicitBucketBoundaries: [
-             0.01,
-             0.02,
-             0.04,
-             0.08,
-             0.16,
-             0.32,
-             0.64,
-             1.28,
-             2.56,
-             5.12,
-             10.24,
-             20.48,
-             40.96,
-             81.92
-           ]
-         }
-       }
-     );
-     this._genaiClientTokenUsage = this.meter.createHistogram(METRIC_GEN_AI_CLIENT_TOKEN_USAGE, {
-       description: "Measures number of input and output tokens used",
-       unit: "{token}",
-       advice: {
-         explicitBucketBoundaries: [
-           1,
-           4,
-           16,
-           64,
-           256,
-           1024,
-           4096,
-           16384,
-           65536,
-           262144,
-           1048576,
-           4194304,
-           16777216,
-           67108864
-         ]
-       }
-     });
-     this._telemetryRecorder = new TelemetryRecorder(
-       this._genaiClientOperationDuration,
-       this._genaiClientTokenUsage,
-       this.logger
-     );
-   }
-   /**
-    * Patch known AI SDK functions in-place on the provided module exports object.
-    * This approach is compatible with both CJS and ESM module loaders.
-    */
-   _patchModuleExports(moduleExports) {
-     if (!moduleExports || typeof moduleExports !== "object") {
-       return null;
-     }
-     let inPlacePatched = true;
-     const wrapFunction = (name, isEmbedMany = false) => {
-       const current = moduleExports[name];
-       if (typeof current !== "function") {
-         return;
-       }
-       const currentFn = current;
-       if (currentFn[_VercelAIInstrumentation._WRAPPED_SYMBOL]) {
-         return;
-       }
-       const descriptor = Object.getOwnPropertyDescriptor(moduleExports, name);
-       if (descriptor && (!descriptor.writable || !descriptor.configurable) && !descriptor.set) {
-         inPlacePatched = false;
-         return;
-       }
-       const patcher = this._patchers.get(name);
-       if (!patcher) {
-         return;
-       }
-       const patched = isEmbedMany ? patcher.patch(currentFn, true) : patcher.patch(currentFn);
-       try {
-         Object.defineProperty(patched, _VercelAIInstrumentation._WRAPPED_SYMBOL, {
-           value: true,
-           enumerable: false,
-           configurable: false
-         });
-       } catch {
-       }
-       try {
-         moduleExports[name] = patched;
-       } catch {
-         inPlacePatched = false;
-       }
-     };
-     wrapFunction("generateText");
-     wrapFunction("streamText");
-     wrapFunction("embed");
-     wrapFunction("embedMany", true);
-     if (!inPlacePatched) {
-       const proxiedModule = new Proxy(moduleExports, {
-         get: (target, prop, receiver) => {
-           const originalValue = Reflect.get(target, prop, receiver);
-           if (typeof originalValue === "function" && typeof prop === "string" && this._patchers.has(prop)) {
-             const patcher = this._patchers.get(prop);
-             const isEmbedMany = prop === "embedMany";
-             const wrapped = isEmbedMany ? patcher.patch(originalValue, true) : patcher.patch(originalValue);
-             return wrapped;
-           }
-           return originalValue;
-         }
-       });
-       return proxiedModule;
-     }
-     return moduleExports;
-   }
-   /**
-    * Manual instrumentation hook for bundlers/Next.js. Applies in-place wrapping
-    * on the provided module namespace.
-    */
-   manuallyInstrument(module3) {
-     try {
-       const result = this._patchModuleExports(module3);
-       if (result !== null) {
-         logger.debug("Applied manual Vercel AI instrumentation");
-         this._vercelAiNamespace = result;
-         return result;
-       }
-       logger.warn("Manual Vercel AI instrumentation received invalid module");
-       return module3;
-     } catch (error) {
-       logger.error(`Failed manual Vercel AI instrumentation: ${String(error)}`);
-       return this._vercelAiNamespace || module3;
-     }
-   }
-   /**
-    * Wrap a created provider/client instance (factory return) when possible.
-    * Call this from wrappers that construct provider clients (e.g., OpenAI SDK).
-    */
-   // eslint-disable-next-line @typescript-eslint/no-explicit-any
-   wrapFactoryReturn(instance) {
-     return instance;
-   }
- };
-
- // src/internal/instrumentation/registry.ts
  var InstrumentationRegistry = class _InstrumentationRegistry {
    static instance;
    manualModules = null;
@@ -1244,8 +309,7 @@ var InstrumentationRegistry = class _InstrumentationRegistry {
        },
        { class: ChromaDBInstrumentation, name: "ChromaDB", module: this.manualModules?.chromadb },
        { class: QdrantInstrumentation, name: "Qdrant", module: this.manualModules?.qdrant },
-       { class: TogetherInstrumentation, name: "Together", module: this.manualModules?.together },
-       { class: VercelAIInstrumentation, name: "Vercel AI", module: this.manualModules?.vercelAI }
+       { class: TogetherInstrumentation, name: "Together", module: this.manualModules?.together }
      ];
      for (const config of instrumentationConfigs) {
        if (config.module) {
@@ -1264,7 +328,7 @@ var InstrumentationRegistry = class _InstrumentationRegistry {
  };

  // src/internal/log/logging.ts
- import { SeverityNumber as SeverityNumber2 } from "@opentelemetry/api-logs";
+ import { SeverityNumber } from "@opentelemetry/api-logs";
  import { OTLPLogExporter } from "@opentelemetry/exporter-logs-otlp-http";
  import { resourceFromAttributes } from "@opentelemetry/resources";
  import {
@@ -1272,7 +336,7 @@ import {
  } from "@opentelemetry/sdk-logs";

  // src/internal/log/processors/log-processor.ts
- import { context as context5 } from "@opentelemetry/api";
+ import { context } from "@opentelemetry/api";
  import { BatchLogRecordProcessor, SimpleLogRecordProcessor } from "@opentelemetry/sdk-logs";

  // src/internal/masking/patterns.ts
@@ -1922,7 +986,7 @@ var BrizzSimpleLogRecordProcessor = class extends SimpleLogRecordProcessor {
      if (maskingConfig) {
        maskLog(logRecord, maskingConfig);
      }
-     const associationProperties = context5.active().getValue(PROPERTIES_CONTEXT_KEY);
+     const associationProperties = context.active().getValue(PROPERTIES_CONTEXT_KEY);
      if (associationProperties) {
        for (const [key, value] of Object.entries(associationProperties)) {
          logRecord.setAttribute(`${BRIZZ}.${key}`, value);
@@ -1942,7 +1006,7 @@ var BrizzBatchLogRecordProcessor = class extends BatchLogRecordProcessor {
      if (maskingConfig) {
        maskLog(logRecord, maskingConfig);
      }
-     const associationProperties = context5.active().getValue(PROPERTIES_CONTEXT_KEY);
+     const associationProperties = context.active().getValue(PROPERTIES_CONTEXT_KEY);
      if (associationProperties) {
        for (const [key, value] of Object.entries(associationProperties)) {
          logRecord.setAttribute(`${BRIZZ}.${key}`, value);
@@ -2079,7 +1143,7 @@ var LoggingModule = class _LoggingModule {
    /**
     * Emit a custom event to the telemetry pipeline
     */
-   emitEvent(name, attributes, body, severityNumber = SeverityNumber2.INFO) {
+   emitEvent(name, attributes, body, severityNumber = SeverityNumber.INFO) {
      logger.debug("Attempting to emit event", {
        name,
        hasAttributes: !!attributes,
@@ -2251,11 +1315,125 @@ function getMetricsReader() {
  import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http";

  // src/internal/trace/processors/span-processor.ts
- import { context as context6 } from "@opentelemetry/api";
+ import { context as context2 } from "@opentelemetry/api";
  import {
    BatchSpanProcessor,
    SimpleSpanProcessor
  } from "@opentelemetry/sdk-trace-base";
+
+ // src/internal/trace/transformations/vercel-ai.ts
+ import { SpanAttributes } from "@traceloop/ai-semantic-conventions";
+ var AI_GENERATE_TEXT_DO_GENERATE = "ai.generateText.doGenerate";
+ var AI_STREAM_TEXT_DO_STREAM = "ai.streamText.doStream";
+ var HANDLED_SPAN_NAMES = {
+   [AI_GENERATE_TEXT_DO_GENERATE]: "gen_ai.chat",
+   [AI_STREAM_TEXT_DO_STREAM]: "gen_ai.chat",
+   "ai.streamText": "ai.streamText",
+   "ai.toolCall": (span) => {
+     const toolName = span.attributes["ai.toolCall.name"];
+     return `${toolName}.tool`;
+   }
+ };
+ var AI_RESPONSE_TEXT = "ai.response.text";
+ var AI_PROMPT_MESSAGES = "ai.prompt.messages";
+ var AI_USAGE_PROMPT_TOKENS = "ai.usage.promptTokens";
+ var AI_USAGE_COMPLETION_TOKENS = "ai.usage.completionTokens";
+ var AI_MODEL_PROVIDER = "ai.model.provider";
+ var transformAiSdkSpanName = (span) => {
+   if (span.name in HANDLED_SPAN_NAMES) {
+     if (typeof HANDLED_SPAN_NAMES[span.name] === "function") {
+       span.name = HANDLED_SPAN_NAMES[span.name](span);
+     } else {
+       span.name = HANDLED_SPAN_NAMES[span.name];
+     }
+   }
+ };
+ var transformResponseText = (attributes) => {
+   if (AI_RESPONSE_TEXT in attributes) {
+     attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`] = attributes[AI_RESPONSE_TEXT];
+     attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`] = "assistant";
+     delete attributes[AI_RESPONSE_TEXT];
+   }
+ };
+ var transformPromptMessages = (attributes) => {
+   if (AI_PROMPT_MESSAGES in attributes) {
+     try {
+       const messages = JSON.parse(attributes[AI_PROMPT_MESSAGES]);
+       messages.forEach((msg, index) => {
+         logger.debug("Transforming prompt message", { msg, type: typeof msg.content });
+         if (typeof msg.content === "string") {
+           attributes[`${SpanAttributes.LLM_PROMPTS}.${index}.content`] = msg.content;
+         } else {
+           if (Array.isArray(msg.content) && msg.content.length > 0) {
+             const lastContent = msg.content[msg.content.length - 1];
+             if (lastContent.text) {
+               attributes[`${SpanAttributes.LLM_PROMPTS}.${index}.content`] = lastContent.text;
+             }
+           } else {
+             attributes[`${SpanAttributes.LLM_PROMPTS}.${index}.content`] = JSON.stringify(
+               msg.content
+             );
+           }
+         }
+         attributes[`${SpanAttributes.LLM_PROMPTS}.${index}.role`] = msg.role;
+       });
+       delete attributes[AI_PROMPT_MESSAGES];
+     } catch {
+     }
+   }
+ };
+ var transformPromptTokens = (attributes) => {
+   if (AI_USAGE_PROMPT_TOKENS in attributes) {
+     attributes[`${SpanAttributes.LLM_USAGE_PROMPT_TOKENS}`] = attributes[AI_USAGE_PROMPT_TOKENS];
+     delete attributes[AI_USAGE_PROMPT_TOKENS];
+   }
+ };
+ var transformCompletionTokens = (attributes) => {
+   if (AI_USAGE_COMPLETION_TOKENS in attributes) {
+     attributes[`${SpanAttributes.LLM_USAGE_COMPLETION_TOKENS}`] = attributes[AI_USAGE_COMPLETION_TOKENS];
+     delete attributes[AI_USAGE_COMPLETION_TOKENS];
+   }
+ };
+ var calculateTotalTokens = (attributes) => {
+   const promptTokens = attributes[`${SpanAttributes.LLM_USAGE_PROMPT_TOKENS}`];
+   const completionTokens = attributes[`${SpanAttributes.LLM_USAGE_COMPLETION_TOKENS}`];
+   if (promptTokens && completionTokens) {
+     attributes[`${SpanAttributes.LLM_USAGE_TOTAL_TOKENS}`] = Number(promptTokens) + Number(completionTokens);
+   }
+ };
+ var transformVendor = (attributes) => {
+   if (AI_MODEL_PROVIDER in attributes) {
+     const vendor = attributes[AI_MODEL_PROVIDER];
+     if (vendor && vendor.startsWith("openai")) {
+       attributes[SpanAttributes.LLM_SYSTEM] = "OpenAI";
+     } else {
+       attributes[SpanAttributes.LLM_SYSTEM] = vendor;
+     }
+     delete attributes[AI_MODEL_PROVIDER];
+   }
+ };
+ var transformAiSdkAttributes = (attributes) => {
+   transformResponseText(attributes);
+   transformPromptMessages(attributes);
+   transformPromptTokens(attributes);
+   transformCompletionTokens(attributes);
+   calculateTotalTokens(attributes);
+   transformVendor(attributes);
+ };
+ var shouldHandleSpan = (span) => {
+   return span.name in HANDLED_SPAN_NAMES;
+ };
+ var transformAiSdkSpan = (span) => {
+   logger.debug("Transforming AI SDK span", { spanName: span.name });
+   if (!shouldHandleSpan(span)) {
+     logger.debug("Skipping span transformation", { spanName: span.name });
+     return;
+   }
+   transformAiSdkSpanName(span);
+   transformAiSdkAttributes(span.attributes);
+ };
+
+ // src/internal/trace/processors/span-processor.ts
  var DEFAULT_MASKING_RULES = [
    {
      mode: "partial",
@@ -2266,16 +1444,6 @@ var DEFAULT_MASKING_RULES = [
      mode: "partial",
      attributePattern: "gen_ai.completion",
      patterns: DEFAULT_PII_PATTERNS
-   },
-   {
-     mode: "partial",
-     attributePattern: "traceloop.entity.input",
-     patterns: DEFAULT_PII_PATTERNS
-   },
-   {
-     mode: "partial",
-     attributePattern: "traceloop.entity.output",
-     patterns: DEFAULT_PII_PATTERNS
    }
  ];
  var BrizzSimpleSpanProcessor = class extends SimpleSpanProcessor {
@@ -2300,7 +1468,7 @@ var BrizzSimpleSpanProcessor = class extends SimpleSpanProcessor {
      if (maskingConfig) {
        maskSpan(span, maskingConfig);
      }
-     const associationProperties = context6.active().getValue(PROPERTIES_CONTEXT_KEY);
+     const associationProperties = context2.active().getValue(PROPERTIES_CONTEXT_KEY);
      if (associationProperties) {
        for (const [key, value] of Object.entries(associationProperties)) {
          span.setAttribute(`${BRIZZ}.${key}`, value);
@@ -2308,6 +1476,10 @@ var BrizzSimpleSpanProcessor = class extends SimpleSpanProcessor {
      }
      super.onStart(span, parentContext);
    }
+   onEnd(span) {
+     transformAiSdkSpan(span);
+     super.onEnd(span);
+   }
  };
  var BrizzBatchSpanProcessor = class extends BatchSpanProcessor {
    config;
@@ -2320,7 +1492,7 @@ var BrizzBatchSpanProcessor = class extends BatchSpanProcessor {
      if (maskingConfig) {
        maskSpan(span, maskingConfig);
      }
-     const associationProperties = context6.active().getValue(PROPERTIES_CONTEXT_KEY);
+     const associationProperties = context2.active().getValue(PROPERTIES_CONTEXT_KEY);
      if (associationProperties) {
        for (const [key, value] of Object.entries(associationProperties)) {
          span.setAttribute(`${BRIZZ}.${key}`, value);
@@ -2328,6 +1500,10 @@ var BrizzBatchSpanProcessor = class extends BatchSpanProcessor {
      }
      super.onStart(span, parentContext);
    }
+   onEnd(span) {
+     transformAiSdkSpan(span);
+     super.onEnd(span);
+   }
  };
  function maskSpan(span, config) {
    if (!span.attributes || Object.keys(span.attributes).length === 0) {
@@ -2415,8 +1591,9 @@ var TracingModule = class _TracingModule {
        disableBatch: config.disableBatch,
        hasMasking: !!config.masking?.spanMasking
      });
-     this.spanProcessor = config.disableBatch ? new BrizzSimpleSpanProcessor(this.spanExporter, config) : new BrizzBatchSpanProcessor(this.spanExporter, config);
+     const spanProcessor = config.disableBatch ? new BrizzSimpleSpanProcessor(this.spanExporter, config) : new BrizzBatchSpanProcessor(this.spanExporter, config);
      logger.debug("Span processor initialized successfully");
+     this.spanProcessor = spanProcessor;
    }
    /**
     * Get the span exporter
@@ -2455,7 +1632,7 @@ function getSpanProcessor() {
  }

  // src/internal/trace/session.ts
- import { context as context7 } from "@opentelemetry/api";
+ import { context as context3 } from "@opentelemetry/api";

  // src/internal/sdk.ts
  var _Brizz = class __Brizz {