@brizz/sdk 0.1.2 → 0.1.3-rc.1

package/dist/index.js CHANGED
@@ -164,939 +164,6 @@ function getLogLevel() {
  return logger.getLevel();
  }
 
- // src/internal/instrumentation/vercel-ai/instrumentation.ts
- import {
- InstrumentationBase,
- InstrumentationNodeModuleDefinition
- } from "@opentelemetry/instrumentation";
-
- // src/internal/instrumentation/vercel-ai/patchers/base-patcher.ts
- import { SpanKind, SpanStatusCode } from "@opentelemetry/api";
-
- // src/internal/instrumentation/vercel-ai/semconv.ts
- var ATTR_GEN_AI_SYSTEM = "gen_ai.system";
- var ATTR_GEN_AI_OPERATION_NAME = "gen_ai.operation.name";
- var ATTR_GEN_AI_REQUEST_MODEL = "gen_ai.request.model";
- var ATTR_GEN_AI_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens";
- var ATTR_GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature";
- var ATTR_GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p";
- var ATTR_GEN_AI_REQUEST_TOP_K = "gen_ai.request.top_k";
- var ATTR_GEN_AI_REQUEST_STOP_SEQUENCES = "gen_ai.request.stop_sequences";
- var ATTR_GEN_AI_REQUEST_FREQUENCY_PENALTY = "gen_ai.request.frequency_penalty";
- var ATTR_GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty";
- var ATTR_GEN_AI_RESPONSE_ID = "gen_ai.response.id";
- var ATTR_GEN_AI_RESPONSE_MODEL = "gen_ai.response.model";
- var ATTR_GEN_AI_RESPONSE_FINISH_REASONS = "gen_ai.response.finish_reasons";
- var ATTR_GEN_AI_TOKEN_TYPE = "gen_ai.token.type";
- var ATTR_GEN_AI_PROMPT = "gen_ai.prompt";
- var ATTR_GEN_AI_COMPLETION = "gen_ai.completion";
- var ATTR_GEN_AI_OPENAI_API_BASE = "gen_ai.openai.api_base";
- var ATTR_EVENT_NAME = "event.name";
- var EVENT_GEN_AI_USER_MESSAGE = "gen_ai.user.message";
- var EVENT_GEN_AI_ASSISTANT_MESSAGE = "gen_ai.assistant.message";
- var EVENT_GEN_AI_SYSTEM_MESSAGE = "gen_ai.system.message";
- var EVENT_GEN_AI_TOOL_MESSAGE = "gen_ai.tool.message";
- var METRIC_GEN_AI_CLIENT_OPERATION_DURATION = "gen_ai.client.operation.duration";
- var METRIC_GEN_AI_CLIENT_TOKEN_USAGE = "gen_ai.client.token.usage";
- var OPERATION_NAME_CHAT = "chat";
- var OPERATION_NAME_EMBEDDINGS = "embeddings";
- var TOKEN_TYPE_INPUT = "input";
- var TOKEN_TYPE_OUTPUT = "output";
- var PROVIDER_OPENAI = "openai";
- var PROVIDER_ANTHROPIC = "anthropic";
- var PROVIDER_GOOGLE = "google";
- var PROVIDER_AMAZON = "amazon";
- var PROVIDER_AZURE = "azure";
- var PROVIDER_VERCEL = "vercel";
- var PROVIDER_UNKNOWN = "unknown";
- var SPAN_NAME_GEN_AI_CHAT = "gen_ai.chat";
- var SPAN_NAME_GEN_AI_EMBEDDINGS = "gen_ai.embeddings";
-
- // src/internal/instrumentation/vercel-ai/utils.ts
- function detectProvider(model) {
- if (typeof model === "object" && model !== null) {
- const modelObj = model;
- if (modelObj.provider) {
- return {
- system: normalizeProviderName(modelObj.provider),
- apiBase: extractApiBase(modelObj)
- };
- }
- if (modelObj.modelId) {
- return detectProviderFromModelId(modelObj.modelId);
- }
- }
- if (typeof model === "string") {
- return detectProviderFromModelId(model);
- }
- return { system: PROVIDER_UNKNOWN };
- }
- function detectProviderFromModelId(modelId) {
- const lowerModel = modelId.toLowerCase();
- if (lowerModel.startsWith("gpt-") || lowerModel.startsWith("text-davinci-") || lowerModel.startsWith("text-embedding-") || lowerModel.startsWith("dall-e") || lowerModel.startsWith("whisper-") || lowerModel.startsWith("tts-")) {
- return { system: PROVIDER_OPENAI };
- }
- if (lowerModel.startsWith("claude-")) {
- return { system: PROVIDER_ANTHROPIC };
- }
- if (lowerModel.startsWith("gemini-") || lowerModel.startsWith("palm-") || lowerModel.includes("bison") || lowerModel.includes("gecko")) {
- return { system: PROVIDER_GOOGLE };
- }
- if (lowerModel.startsWith("amazon.") || lowerModel.startsWith("anthropic.claude-") || lowerModel.startsWith("ai21.") || lowerModel.startsWith("cohere.") || lowerModel.startsWith("meta.llama")) {
- return { system: PROVIDER_AMAZON };
- }
- if (lowerModel.includes("azure") || lowerModel.includes(".openai.azure.com")) {
- return { system: PROVIDER_AZURE };
- }
- const parts = modelId.split(/[-._/]/);
- if (parts.length > 0 && parts[0]) {
- return { system: normalizeProviderName(parts[0]) };
- }
- return { system: PROVIDER_UNKNOWN };
- }
- function normalizeProviderName(provider) {
- const normalized = provider.toLowerCase().trim();
- switch (normalized) {
- case "openai":
- case "open-ai":
- case "open_ai": {
- return PROVIDER_OPENAI;
- }
- case "anthropic":
- case "claude": {
- return PROVIDER_ANTHROPIC;
- }
- case "google":
- case "vertex":
- case "vertexai":
- case "vertex-ai":
- case "gemini": {
- return PROVIDER_GOOGLE;
- }
- case "amazon":
- case "aws":
- case "bedrock":
- case "amazon-bedrock": {
- return PROVIDER_AMAZON;
- }
- case "azure":
- case "azure-openai":
- case "microsoft": {
- return PROVIDER_AZURE;
- }
- case "vercel":
- case "vercel-ai": {
- return PROVIDER_VERCEL;
- }
- default: {
- return normalized;
- }
- }
- }
- function extractApiBase(model) {
- if (typeof model === "object" && model !== null) {
- const anyModel = model;
- return anyModel.apiBase || anyModel.baseURL || anyModel.endpoint || void 0;
- }
- return void 0;
- }
- function extractModelId(model) {
- if (typeof model === "string") {
- return model;
- }
- if (typeof model === "object" && model !== null) {
- return model.modelId || "unknown";
- }
- return "unknown";
- }
- function messagesToAttributes(messages, prefix, captureContent) {
- const attributes = {};
- for (const [index, msg] of messages.entries()) {
- const baseKey = `${prefix}.${index}`;
- attributes[`${baseKey}.role`] = msg.role;
- if (captureContent && msg.content) {
- if (typeof msg.content === "string") {
- attributes[`${baseKey}.content`] = msg.content;
- } else if (Array.isArray(msg.content)) {
- const textParts = msg.content.filter((part) => part.type === "text" && part.text).map((part) => part.text).join(" ");
- if (textParts) {
- attributes[`${baseKey}.content`] = textParts;
- }
- }
- }
- if (msg.toolInvocations && msg.toolInvocations.length > 0) {
- attributes[`${baseKey}.tool_calls`] = msg.toolInvocations.length;
- }
- }
- return attributes;
- }
- function promptToAttributes(prompt, captureContent) {
- const attributes = {};
- attributes[`${ATTR_GEN_AI_PROMPT}.0.role`] = "user";
- if (captureContent) {
- attributes[`${ATTR_GEN_AI_PROMPT}.0.content`] = prompt;
- }
- return attributes;
- }
- function completionToAttributes(text, finishReason, captureContent) {
- const attributes = {};
- attributes[`${ATTR_GEN_AI_COMPLETION}.0.role`] = "assistant";
- if (captureContent) {
- attributes[`${ATTR_GEN_AI_COMPLETION}.0.content`] = text;
- }
- if (finishReason) {
- attributes[`${ATTR_GEN_AI_COMPLETION}.0.finish_reason`] = finishReason;
- }
- return attributes;
- }
- function tokenUsageToAttributes(usage) {
- if (!usage) {
- return {};
- }
- const attributes = {};
- if (usage.inputTokens !== void 0) {
- attributes["gen_ai.usage.prompt_tokens"] = usage.inputTokens;
- attributes["gen_ai.usage.input_tokens"] = usage.inputTokens;
- attributes["llm.usage.prompt_tokens"] = usage.inputTokens;
- } else if (usage.promptTokens !== void 0) {
- attributes["gen_ai.usage.prompt_tokens"] = usage.promptTokens;
- attributes["gen_ai.usage.input_tokens"] = usage.promptTokens;
- attributes["llm.usage.prompt_tokens"] = usage.promptTokens;
- }
- if (usage.outputTokens !== void 0) {
- attributes["gen_ai.usage.completion_tokens"] = usage.outputTokens;
- attributes["gen_ai.usage.output_tokens"] = usage.outputTokens;
- attributes["llm.usage.completion_tokens"] = usage.outputTokens;
- } else if (usage.completionTokens !== void 0) {
- attributes["gen_ai.usage.completion_tokens"] = usage.completionTokens;
- attributes["gen_ai.usage.output_tokens"] = usage.completionTokens;
- attributes["llm.usage.completion_tokens"] = usage.completionTokens;
- }
- if (usage.totalTokens === void 0) {
- const inputTokens = usage.inputTokens || usage.promptTokens;
- const outputTokens = usage.outputTokens || usage.completionTokens;
- if (inputTokens !== void 0 && outputTokens !== void 0) {
- const totalTokens = inputTokens + outputTokens;
- attributes["gen_ai.usage.total_tokens"] = totalTokens;
- attributes["llm.usage.total_tokens"] = totalTokens;
- }
- } else {
- attributes["gen_ai.usage.total_tokens"] = usage.totalTokens;
- attributes["llm.usage.total_tokens"] = usage.totalTokens;
- }
- return attributes;
- }
- function shouldRecordError(error) {
- if (error instanceof Error) {
- const message = error.message.toLowerCase();
- if (message.includes("abort") || message.includes("cancel")) {
- return false;
- }
- }
- return true;
- }
- function getEnvBool(name) {
- const value = process.env[name];
- if (value === void 0) {
- return void 0;
- }
- return value.toLowerCase() === "true" || value === "1";
- }
-
- // src/internal/instrumentation/vercel-ai/patchers/base-patcher.ts
- var BasePatcher = class {
- constructor(context8) {
- this.context = context8;
- }
- createSpan(spanName, params, operationName, additionalAttributes) {
- const provider = detectProvider(params.model);
- const modelId = extractModelId(params.model);
- const span = this.context.tracer.startSpan(spanName, {
- kind: SpanKind.CLIENT,
- attributes: {
- [ATTR_GEN_AI_SYSTEM]: provider.system,
- [ATTR_GEN_AI_OPERATION_NAME]: operationName,
- [ATTR_GEN_AI_REQUEST_MODEL]: modelId,
- ...params.maxTokens && { [ATTR_GEN_AI_REQUEST_MAX_TOKENS]: params.maxTokens },
- ...params.temperature !== void 0 && {
- [ATTR_GEN_AI_REQUEST_TEMPERATURE]: params.temperature
- },
- ...params.topP !== void 0 && { [ATTR_GEN_AI_REQUEST_TOP_P]: params.topP },
- ...params.topK !== void 0 && { [ATTR_GEN_AI_REQUEST_TOP_K]: params.topK },
- ...params.frequencyPenalty !== void 0 && {
- [ATTR_GEN_AI_REQUEST_FREQUENCY_PENALTY]: params.frequencyPenalty
- },
- ...params.presencePenalty !== void 0 && {
- [ATTR_GEN_AI_REQUEST_PRESENCE_PENALTY]: params.presencePenalty
- },
- ...params.stopSequences && {
- [ATTR_GEN_AI_REQUEST_STOP_SEQUENCES]: params.stopSequences
- },
- ...provider.apiBase && { [ATTR_GEN_AI_OPENAI_API_BASE]: provider.apiBase },
- ...additionalAttributes
- }
- });
- return { span, provider, modelId };
- }
- handleError(error, span) {
- if (shouldRecordError(error)) {
- span.recordException(error);
- span.setStatus({ code: SpanStatusCode.ERROR, message: error.message });
- }
- }
- finalizeDuration(span, startTime, config, provider, modelId, operationName) {
- if (config.enableMetrics) {
- const duration = (globalThis.performance.now() - startTime) / 1e3;
- this.context.recordDurationMetric(duration, provider.system, modelId, operationName);
- }
- span.end();
- }
- };
-
- // src/internal/instrumentation/vercel-ai/patchers/generate-text-patcher.ts
- import { context, SpanStatusCode as SpanStatusCode2, trace } from "@opentelemetry/api";
- var GenerateTextPatcher = class extends BasePatcher {
- patch(original) {
- return async (params) => {
- const config = this.context.getConfig();
- const startTime = globalThis.performance.now();
- const { span, provider, modelId } = this.createSpan(
- SPAN_NAME_GEN_AI_CHAT,
- params,
- OPERATION_NAME_CHAT
- );
- if (params.prompt) {
- span.setAttributes(
- promptToAttributes(params.prompt, config.captureMessageContent || false)
- );
- } else if (params.messages) {
- span.setAttributes(
- messagesToAttributes(
- params.messages,
- "gen_ai.prompt",
- config.captureMessageContent || false
- )
- );
- if (config.emitEvents) {
- this.context.emitMessageEvents(params.messages, provider.system, span);
- }
- }
- try {
- const result = await context.with(
- trace.setSpan(context.active(), span),
- () => original(params)
- );
- if (result.response) {
- span.setAttributes({
- ...result.response.id && { [ATTR_GEN_AI_RESPONSE_ID]: result.response.id },
- ...result.response.model && { [ATTR_GEN_AI_RESPONSE_MODEL]: result.response.model }
- });
- }
- if (result.finishReason) {
- span.setAttribute(ATTR_GEN_AI_RESPONSE_FINISH_REASONS, [result.finishReason]);
- }
- span.setAttributes(
- completionToAttributes(
- result.text,
- result.finishReason,
- config.captureMessageContent || false
- )
- );
- const usage = result.usage || result.totalUsage || result.steps?.[0]?.usage;
- if (usage) {
- span.setAttributes(tokenUsageToAttributes(usage));
- if (config.enableMetrics) {
- this.context.recordTokenMetrics(usage, provider.system, modelId);
- }
- }
- if (config.emitEvents) {
- this.context.emitAssistantMessageEvent(result.text, provider.system, span);
- }
- span.setStatus({ code: SpanStatusCode2.OK });
- return result;
- } catch (error) {
- this.handleError(error, span);
- throw error;
- } finally {
- this.finalizeDuration(span, startTime, config, provider, modelId, OPERATION_NAME_CHAT);
- }
- };
- }
- };
-
- // src/internal/instrumentation/vercel-ai/patchers/stream-text-patcher.ts
- import { context as context2, trace as trace2 } from "@opentelemetry/api";
- var StreamTextPatcher = class extends BasePatcher {
- constructor(context8, streamHandler) {
- super(context8);
- this.streamHandler = streamHandler;
- }
- patch(original) {
- return async (params) => {
- const config = this.context.getConfig();
- const startTime = globalThis.performance.now();
- const { span, provider, modelId } = this.createSpan(
- SPAN_NAME_GEN_AI_CHAT,
- params,
- OPERATION_NAME_CHAT,
- { "gen_ai.streaming": true }
- );
- if (params.prompt) {
- span.setAttributes(
- promptToAttributes(params.prompt, config.captureMessageContent || false)
- );
- } else if (params.messages) {
- span.setAttributes(
- messagesToAttributes(
- params.messages,
- "gen_ai.prompt",
- config.captureMessageContent || false
- )
- );
- if (config.emitEvents) {
- this.context.emitMessageEvents(params.messages, provider.system, span);
- }
- }
- try {
- const stream = await context2.with(
- trace2.setSpan(context2.active(), span),
- () => original(params)
- );
- return this.streamHandler.wrapStream(stream, span, config, provider, modelId, startTime);
- } catch (error) {
- this.handleError(error, span);
- span.end();
- throw error;
- }
- };
- }
- };
-
- // src/internal/instrumentation/vercel-ai/patchers/embeddings-patcher.ts
- import { context as context3, SpanStatusCode as SpanStatusCode3, trace as trace3 } from "@opentelemetry/api";
- var EmbeddingsPatcher = class extends BasePatcher {
- patch(original, isMany = false) {
- return async (params) => {
- const config = this.context.getConfig();
- const startTime = globalThis.performance.now();
- const additionalAttributes = isMany ? { "gen_ai.embeddings.count": params.values ? params.values.length : 0 } : {};
- const { span, provider, modelId } = this.createSpan(
- SPAN_NAME_GEN_AI_EMBEDDINGS,
- params,
- OPERATION_NAME_EMBEDDINGS,
- additionalAttributes
- );
- if (!isMany && config.captureMessageContent && params.value) {
- span.setAttribute("gen_ai.prompt.0.content", params.value);
- }
- try {
- const result = await context3.with(
- trace3.setSpan(context3.active(), span),
- () => original(params)
- );
- if (result.response) {
- span.setAttributes({
- ...result.response.id && { [ATTR_GEN_AI_RESPONSE_ID]: result.response.id },
- ...result.response.model && { [ATTR_GEN_AI_RESPONSE_MODEL]: result.response.model }
- });
- }
- if (isMany) {
- if (result.embeddings && result.embeddings.length > 0 && result.embeddings[0]) {
- span.setAttribute("gen_ai.response.embedding_dimensions", result.embeddings[0].length);
- }
- } else {
- if (result.embedding) {
- span.setAttribute("gen_ai.response.embedding_dimensions", result.embedding.length);
- }
- }
- if (result.usage) {
- span.setAttributes(tokenUsageToAttributes(result.usage));
- if (config.enableMetrics) {
- this.context.recordTokenMetrics(result.usage, provider.system, modelId);
- }
- }
- span.setStatus({ code: SpanStatusCode3.OK });
- return result;
- } catch (error) {
- this.handleError(error, span);
- throw error;
- } finally {
- this.finalizeDuration(span, startTime, config, provider, modelId, OPERATION_NAME_EMBEDDINGS);
- }
- };
- }
- };
-
- // src/internal/instrumentation/vercel-ai/stream-handler.ts
- import { SpanStatusCode as SpanStatusCode4 } from "@opentelemetry/api";
- var StreamHandler = class {
- constructor(context8) {
- this.context = context8;
- }
- wrapStream(stream, span, config, provider, modelId, startTime) {
- const self = this;
- let fullText = "";
- let finishReason;
- let usage;
- let response;
- const wrappedStream = new Proxy(stream, {
- get(target, prop) {
- if (prop === Symbol.asyncIterator) {
- return async function* () {
- try {
- for await (const chunk of target) {
- if (chunk.type === "text-delta" && chunk.textDelta) {
- fullText += chunk.textDelta;
- } else if (chunk.type === "finish") {
- finishReason = chunk.finishReason;
- usage = chunk.usage;
- } else if (chunk.type === "response-metadata") {
- response = chunk.response;
- }
- yield chunk;
- }
- } finally {
- self.finalizeStream(
- span,
- config,
- provider,
- modelId,
- startTime,
- fullText,
- finishReason,
- usage,
- response
- );
- }
- };
- }
- if (prop === "textStream" || prop === "fullStream") {
- const originalStream = target[prop];
- return {
- [Symbol.asyncIterator]: async function* () {
- try {
- for await (const chunk of originalStream) {
- if (prop === "textStream") {
- fullText += chunk;
- }
- yield chunk;
- }
- } finally {
- const streamUsage = await target.usage.catch(() => null);
- if (streamUsage) {
- usage = streamUsage;
- }
- self.finalizeStream(
- span,
- config,
- provider,
- modelId,
- startTime,
- fullText,
- finishReason,
- usage,
- response
- );
- }
- }
- };
- }
- const value = target[prop];
- if (typeof value === "function") {
- return value.bind(target);
- }
- return value;
- }
- });
- return wrappedStream;
- }
- finalizeStream(span, config, provider, modelId, startTime, fullText, finishReason, usage, response) {
- if (response) {
- span.setAttributes({
- ...response.id && { [ATTR_GEN_AI_RESPONSE_ID]: response.id },
- ...response.model && { [ATTR_GEN_AI_RESPONSE_MODEL]: response.model }
- });
- }
- if (finishReason) {
- span.setAttribute(ATTR_GEN_AI_RESPONSE_FINISH_REASONS, [finishReason]);
- }
- if (fullText) {
- span.setAttributes(
- completionToAttributes(
- fullText,
- finishReason,
- config.captureMessageContent || false
- )
- );
- }
- if (usage) {
- span.setAttributes(tokenUsageToAttributes(usage));
- if (config.enableMetrics) {
- this.context.recordTokenMetrics(usage, provider.system, modelId);
- }
- }
- if (config.enableMetrics) {
- const duration = (performance.now() - startTime) / 1e3;
- this.context.recordDurationMetric(duration, provider.system, modelId, OPERATION_NAME_CHAT);
- }
- span.setStatus({ code: SpanStatusCode4.OK });
- span.end();
- }
- };
-
- // src/internal/instrumentation/vercel-ai/telemetry-recorder.ts
- import { context as context4, trace as trace4 } from "@opentelemetry/api";
- import { SeverityNumber } from "@opentelemetry/api-logs";
- var TelemetryRecorder = class {
- constructor(genaiClientOperationDuration, genaiClientTokenUsage, logger2) {
- this.genaiClientOperationDuration = genaiClientOperationDuration;
- this.genaiClientTokenUsage = genaiClientTokenUsage;
- this.logger = logger2;
- }
- /**
- * Record token usage metrics
- */
- recordTokenMetrics(usage, system, model) {
- if (!this.genaiClientTokenUsage) {
- return;
- }
- const commonAttrs = {
- [ATTR_GEN_AI_SYSTEM]: system,
- [ATTR_GEN_AI_REQUEST_MODEL]: model
- };
- const inputTokens = usage.inputTokens || usage.promptTokens;
- const outputTokens = usage.outputTokens || usage.completionTokens;
- if (inputTokens !== void 0) {
- this.genaiClientTokenUsage.record(inputTokens, {
- ...commonAttrs,
- [ATTR_GEN_AI_TOKEN_TYPE]: TOKEN_TYPE_INPUT
- });
- }
- if (outputTokens !== void 0) {
- this.genaiClientTokenUsage.record(outputTokens, {
- ...commonAttrs,
- [ATTR_GEN_AI_TOKEN_TYPE]: TOKEN_TYPE_OUTPUT
- });
- }
- }
- /**
- * Record operation duration metric
- */
- recordDurationMetric(duration, system, model, operation) {
- if (!this.genaiClientOperationDuration) {
- return;
- }
- this.genaiClientOperationDuration.record(duration, {
- [ATTR_GEN_AI_SYSTEM]: system,
- [ATTR_GEN_AI_REQUEST_MODEL]: model,
- [ATTR_GEN_AI_OPERATION_NAME]: operation
- });
- }
- /**
- * Emit message events
- */
- emitMessageEvents(messages, system, span) {
- if (!this.logger) {
- return;
- }
- const ctx = trace4.setSpan(context4.active(), span);
- for (const msg of messages) {
- let eventName;
- switch (msg.role) {
- case "system": {
- eventName = EVENT_GEN_AI_SYSTEM_MESSAGE;
- break;
- }
- case "user": {
- eventName = EVENT_GEN_AI_USER_MESSAGE;
- break;
- }
- case "assistant": {
- eventName = EVENT_GEN_AI_ASSISTANT_MESSAGE;
- break;
- }
- case "tool":
- case "function": {
- eventName = EVENT_GEN_AI_TOOL_MESSAGE;
- break;
- }
- default: {
- continue;
- }
- }
- this.logger.emit({
- timestamp: Date.now(),
- context: ctx,
- severityNumber: SeverityNumber.INFO,
- attributes: {
- [ATTR_EVENT_NAME]: eventName,
- [ATTR_GEN_AI_SYSTEM]: system
- },
- body: {
- role: msg.role,
- content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content),
- name: msg.name
- }
- });
- }
- }
- /**
- * Emit assistant message event
- */
- emitAssistantMessageEvent(text, system, span) {
- if (!this.logger) {
- return;
- }
- const ctx = trace4.setSpan(context4.active(), span);
- this.logger.emit({
- timestamp: Date.now(),
- context: ctx,
- severityNumber: SeverityNumber.INFO,
- attributes: {
- [ATTR_EVENT_NAME]: EVENT_GEN_AI_ASSISTANT_MESSAGE,
- [ATTR_GEN_AI_SYSTEM]: system
- },
- body: {
- role: "assistant",
- content: text
- }
- });
- }
- };
-
- // src/internal/instrumentation/vercel-ai/instrumentation.ts
- var PACKAGE_NAME = "@brizz/vercel-ai-instrumentation";
- var PACKAGE_VERSION = "0.1.0";
- var VercelAIInstrumentation = class _VercelAIInstrumentation extends InstrumentationBase {
- _genaiClientOperationDuration;
- _genaiClientTokenUsage;
- _telemetryRecorder;
- _streamHandler;
- _patchers = /* @__PURE__ */ new Map();
- // Holds last patched namespace when available (reserved for future factory wrapping)
- _vercelAiNamespace = null;
- static _WRAPPED_SYMBOL = Symbol.for("brizz.vercel-ai.patched");
- constructor(config = {}) {
- super(PACKAGE_NAME, PACKAGE_VERSION, config);
- const cfg = this.getConfig();
- const envCC = getEnvBool("OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT");
- if (envCC !== void 0) {
- cfg.captureMessageContent = envCC;
- }
- this._initializeComponents();
- }
- setConfig(config = {}) {
- const {
- captureMessageContent = true,
- enableMetrics = true,
- emitEvents = true,
- ...validConfig
- } = config;
- const fullConfig = {
- ...validConfig,
- captureMessageContent,
- enableMetrics,
- emitEvents
- };
- super.setConfig(fullConfig);
- }
- _initializeComponents() {
- this._telemetryRecorder = new TelemetryRecorder(
- this._genaiClientOperationDuration,
- this._genaiClientTokenUsage,
- this.logger
- );
- this._streamHandler = new StreamHandler({
- recordTokenMetrics: this._telemetryRecorder.recordTokenMetrics.bind(this._telemetryRecorder),
- recordDurationMetric: this._telemetryRecorder.recordDurationMetric.bind(
- this._telemetryRecorder
- )
- });
- const patcherContext = {
- tracer: this.tracer,
- getConfig: this.getConfig.bind(this),
- recordTokenMetrics: this._telemetryRecorder.recordTokenMetrics.bind(this._telemetryRecorder),
- recordDurationMetric: this._telemetryRecorder.recordDurationMetric.bind(
- this._telemetryRecorder
- ),
- emitMessageEvents: this._telemetryRecorder.emitMessageEvents.bind(this._telemetryRecorder),
- emitAssistantMessageEvent: this._telemetryRecorder.emitAssistantMessageEvent.bind(
- this._telemetryRecorder
- )
- };
- this._patchers.set("generateText", new GenerateTextPatcher(patcherContext));
- this._patchers.set("streamText", new StreamTextPatcher(patcherContext, this._streamHandler));
- this._patchers.set("embed", new EmbeddingsPatcher(patcherContext));
- this._patchers.set("embedMany", new EmbeddingsPatcher(patcherContext));
- }
- init() {
- return [
- new InstrumentationNodeModuleDefinition(
- "ai",
- [">=4.0.0 <6"],
- (moduleExports) => {
- logger.info("Starting instrumentation of Vercel AI SDK module");
- this._vercelAiNamespace = moduleExports;
- const patched = this._patchModuleExports(moduleExports);
- return patched ?? moduleExports;
- },
- (moduleExports) => {
- logger.debug("Uninstrumenting @vercel/ai module");
- return moduleExports;
- }
- )
- ];
- }
- _updateMetricInstruments() {
- const config = this.getConfig();
- if (!config.enableMetrics) {
- return;
- }
- this._genaiClientOperationDuration = this.meter.createHistogram(
- METRIC_GEN_AI_CLIENT_OPERATION_DURATION,
- {
- description: "GenAI operation duration",
- unit: "s",
- advice: {
- explicitBucketBoundaries: [
- 0.01,
- 0.02,
- 0.04,
- 0.08,
- 0.16,
- 0.32,
- 0.64,
- 1.28,
- 2.56,
- 5.12,
- 10.24,
- 20.48,
- 40.96,
- 81.92
- ]
- }
- }
- );
- this._genaiClientTokenUsage = this.meter.createHistogram(METRIC_GEN_AI_CLIENT_TOKEN_USAGE, {
- description: "Measures number of input and output tokens used",
- unit: "{token}",
- advice: {
- explicitBucketBoundaries: [
- 1,
- 4,
- 16,
- 64,
- 256,
- 1024,
- 4096,
- 16384,
- 65536,
- 262144,
- 1048576,
- 4194304,
- 16777216,
- 67108864
- ]
- }
- });
- this._telemetryRecorder = new TelemetryRecorder(
- this._genaiClientOperationDuration,
- this._genaiClientTokenUsage,
- this.logger
- );
- }
- /**
- * Patch known AI SDK functions in-place on the provided module exports object.
- * This approach is compatible with both CJS and ESM module loaders.
- */
- _patchModuleExports(moduleExports) {
- if (!moduleExports || typeof moduleExports !== "object") {
- return null;
- }
- let inPlacePatched = true;
- const wrapFunction = (name, isEmbedMany = false) => {
- const current = moduleExports[name];
- if (typeof current !== "function") {
- return;
- }
- const currentFn = current;
- if (currentFn[_VercelAIInstrumentation._WRAPPED_SYMBOL]) {
- return;
- }
- const descriptor = Object.getOwnPropertyDescriptor(moduleExports, name);
- if (descriptor && (!descriptor.writable || !descriptor.configurable) && !descriptor.set) {
- inPlacePatched = false;
- return;
- }
- const patcher = this._patchers.get(name);
- if (!patcher) {
- return;
- }
- const patched = isEmbedMany ? patcher.patch(currentFn, true) : patcher.patch(currentFn);
- try {
- Object.defineProperty(patched, _VercelAIInstrumentation._WRAPPED_SYMBOL, {
- value: true,
- enumerable: false,
- configurable: false
- });
- } catch {
- }
- try {
- moduleExports[name] = patched;
- } catch {
- inPlacePatched = false;
- }
- };
- wrapFunction("generateText");
- wrapFunction("streamText");
- wrapFunction("embed");
- wrapFunction("embedMany", true);
- if (!inPlacePatched) {
- const proxiedModule = new Proxy(moduleExports, {
- get: (target, prop, receiver) => {
- const originalValue = Reflect.get(target, prop, receiver);
- if (typeof originalValue === "function" && typeof prop === "string" && this._patchers.has(prop)) {
- const patcher = this._patchers.get(prop);
- const isEmbedMany = prop === "embedMany";
- const wrapped = isEmbedMany ? patcher.patch(originalValue, true) : patcher.patch(originalValue);
- return wrapped;
- }
- return originalValue;
- }
- });
- return proxiedModule;
- }
- return moduleExports;
- }
- /**
- * Manual instrumentation hook for bundlers/Next.js. Applies in-place wrapping
- * on the provided module namespace.
- */
- manuallyInstrument(module2) {
- try {
- const result = this._patchModuleExports(module2);
- if (result !== null) {
- logger.debug("Applied manual Vercel AI instrumentation");
- this._vercelAiNamespace = result;
- return result;
- }
- logger.warn("Manual Vercel AI instrumentation received invalid module");
- return module2;
- } catch (error) {
- logger.error(`Failed manual Vercel AI instrumentation: ${String(error)}`);
- return this._vercelAiNamespace || module2;
- }
- }
- /**
- * Wrap a created provider/client instance (factory return) when possible.
- * Call this from wrappers that construct provider clients (e.g., OpenAI SDK).
- */
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- wrapFactoryReturn(instance) {
- return instance;
- }
- };
-
  // src/internal/instrumentation/auto-init.ts
  var autoInstrumentationsLoaded = false;
  var exceptionLogger = (error) => {
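The block removed above was the SDK's in-process `VercelAIInstrumentation`: it wrapped `generateText`, `streamText`, `embed`, and `embedMany` on the `ai` module exports (falling back to a `Proxy` when the exports were not writable) and opened `gen_ai.chat` / `gen_ai.embeddings` client spans around each call. For readers skimming the deletion, here is a condensed TypeScript re-sketch of the provider heuristic from the deleted `utils.ts`; it illustrates the removed logic and is not code that ships in either version.

```ts
// Condensed re-sketch of the removed detectProviderFromModelId:
// classify a bare model id by its well-known prefix.
type ProviderInfo = { system: string };

function detectProviderFromModelId(modelId: string): ProviderInfo {
  const m = modelId.toLowerCase();
  if (/^(gpt-|text-davinci-|text-embedding-|dall-e|whisper-|tts-)/.test(m)) {
    return { system: "openai" };
  }
  if (m.startsWith("claude-")) return { system: "anthropic" };
  if (m.startsWith("gemini-") || m.startsWith("palm-") || m.includes("bison") || m.includes("gecko")) {
    return { system: "google" };
  }
  if (/^(amazon\.|anthropic\.claude-|ai21\.|cohere\.|meta\.llama)/.test(m)) {
    return { system: "amazon" }; // Bedrock-style ids
  }
  if (m.includes("azure")) return { system: "azure" };
  // Fallback in the original: first [-._/]-delimited token, normalized.
  const [head] = m.split(/[-._/]/);
  return { system: head || "unknown" };
}

// detectProviderFromModelId("claude-3-opus").system === "anthropic"
```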
@@ -1115,8 +182,6 @@ function loadNodeAutoInstrumentations() {
  function loadGenAIInstrumentations() {
  const instrumentations = [];
  const genAIInstrumentationClasses = [
- { class: VercelAIInstrumentation, name: "Vercel AI" },
- // Load first to avoid conflicts
  { class: OpenAIInstrumentation, name: "OpenAI" },
  { class: AnthropicInstrumentation, name: "Anthropic" },
  { class: CohereInstrumentation, name: "Cohere" },
@@ -1335,8 +400,7 @@ var InstrumentationRegistry = class _InstrumentationRegistry {
  },
  { class: ChromaDBInstrumentation2, name: "ChromaDB", module: this.manualModules?.chromadb },
  { class: QdrantInstrumentation2, name: "Qdrant", module: this.manualModules?.qdrant },
- { class: TogetherInstrumentation2, name: "Together", module: this.manualModules?.together },
- { class: VercelAIInstrumentation, name: "Vercel AI", module: this.manualModules?.vercelAI }
+ { class: TogetherInstrumentation2, name: "Together", module: this.manualModules?.together }
  ];
  for (const config of instrumentationConfigs) {
  if (config.module) {
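The registry rows above are plain `{ class, name, module }` triples; the hunk cuts off right after the `config.module` check, so the loop body is not shown. A hypothetical sketch of how such a table is typically consumed follows; the `manuallyInstrument` call is an assumption based on the method of the same name elsewhere in this file, not something this hunk shows.

```ts
// Hypothetical consumption of the { class, name, module } rows above:
// only rows with a manually supplied module are instantiated.
interface RegistryEntry {
  class: new () => { manuallyInstrument(mod: unknown): unknown };
  name: string;
  module?: unknown;
}

function applyManualInstrumentations(entries: RegistryEntry[]): void {
  for (const entry of entries) {
    if (!entry.module) continue; // mirrors the `if (config.module)` check
    new entry.class().manuallyInstrument(entry.module); // assumed continuation
  }
}
```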
@@ -1355,7 +419,7 @@ var InstrumentationRegistry = class _InstrumentationRegistry {
  };
 
  // src/internal/log/logging.ts
- import { SeverityNumber as SeverityNumber2 } from "@opentelemetry/api-logs";
+ import { SeverityNumber } from "@opentelemetry/api-logs";
  import { OTLPLogExporter } from "@opentelemetry/exporter-logs-otlp-http";
  import { resourceFromAttributes } from "@opentelemetry/resources";
  import {
@@ -1363,7 +427,7 @@ import {
  } from "@opentelemetry/sdk-logs";
 
  // src/internal/log/processors/log-processor.ts
- import { context as context5 } from "@opentelemetry/api";
+ import { context } from "@opentelemetry/api";
  import { BatchLogRecordProcessor, SimpleLogRecordProcessor } from "@opentelemetry/sdk-logs";
 
  // src/internal/masking/patterns.ts
@@ -2014,7 +1078,7 @@ var BrizzSimpleLogRecordProcessor = class extends SimpleLogRecordProcessor {
  if (maskingConfig) {
  maskLog(logRecord, maskingConfig);
  }
- const associationProperties = context5.active().getValue(PROPERTIES_CONTEXT_KEY);
+ const associationProperties = context.active().getValue(PROPERTIES_CONTEXT_KEY);
  if (associationProperties) {
  for (const [key, value] of Object.entries(associationProperties)) {
  logRecord.setAttribute(`${BRIZZ}.${key}`, value);
@@ -2034,7 +1098,7 @@ var BrizzBatchLogRecordProcessor = class extends BatchLogRecordProcessor {
  if (maskingConfig) {
  maskLog(logRecord, maskingConfig);
  }
- const associationProperties = context5.active().getValue(PROPERTIES_CONTEXT_KEY);
+ const associationProperties = context.active().getValue(PROPERTIES_CONTEXT_KEY);
  if (associationProperties) {
  for (const [key, value] of Object.entries(associationProperties)) {
  logRecord.setAttribute(`${BRIZZ}.${key}`, value);
@@ -2171,7 +1235,7 @@ var LoggingModule = class _LoggingModule {
  /**
  * Emit a custom event to the telemetry pipeline
  */
- emitEvent(name, attributes, body, severityNumber = SeverityNumber2.INFO) {
+ emitEvent(name, attributes, body, severityNumber = SeverityNumber.INFO) {
  logger.debug("Attempting to emit event", {
  name,
  hasAttributes: !!attributes,
@@ -2239,7 +1303,7 @@ var LoggingModule = class _LoggingModule {
  logger.debug("Logging module shutdown completed");
  }
  };
- function emitEvent(name, attributes, body, severityNumber = SeverityNumber2.INFO) {
+ function emitEvent(name, attributes, body, severityNumber = SeverityNumber.INFO) {
  return LoggingModule.getInstance().emitEvent(name, attributes, body, severityNumber);
  }
 
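Only the `SeverityNumber` alias changes in this hunk; the free `emitEvent` above still delegates to the `LoggingModule` singleton and is re-exported from `src/index.ts` (see the export hunk at the end). A hedged usage sketch against the signature visible here; the event name, attributes, and body are illustrative values, not part of the package.

```ts
import { emitEvent, SeverityNumber } from "@brizz/sdk";

// Signature per this hunk: (name, attributes, body, severityNumber = SeverityNumber.INFO)
emitEvent(
  "checkout.completed",    // illustrative event name
  { "cart.items": 3 },     // illustrative attributes
  { orderId: "ord_123" },  // illustrative body
  SeverityNumber.INFO,
);
```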
@@ -2349,11 +1413,134 @@ function getMetricsReader() {
  import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http";
 
  // src/internal/trace/processors/span-processor.ts
- import { context as context6 } from "@opentelemetry/api";
+ import { context as context2 } from "@opentelemetry/api";
  import {
  BatchSpanProcessor,
  SimpleSpanProcessor
  } from "@opentelemetry/sdk-trace-base";
+
+ // src/internal/trace/transformations/vercel-ai.ts
+ import { SpanAttributes } from "@traceloop/ai-semantic-conventions";
+ var AI_GENERATE_TEXT_DO_GENERATE = "ai.generateText.doGenerate";
+ var AI_STREAM_TEXT_DO_STREAM = "ai.streamText.doStream";
+ var HANDLED_SPAN_NAMES = {
+ [AI_GENERATE_TEXT_DO_GENERATE]: "gen_ai.chat",
+ [AI_STREAM_TEXT_DO_STREAM]: "gen_ai.chat",
+ "ai.streamText": "ai.streamText",
+ "ai.toolCall": (span) => {
+ const toolName = span.attributes["ai.toolCall.name"];
+ return `${toolName}.tool`;
+ }
+ };
+ var AI_RESPONSE_TEXT = "ai.response.text";
+ var AI_PROMPT_MESSAGES = "ai.prompt.messages";
+ var AI_USAGE_PROMPT_TOKENS = "ai.usage.promptTokens";
+ var AI_USAGE_COMPLETION_TOKENS = "ai.usage.completionTokens";
+ var AI_MODEL_PROVIDER = "ai.model.provider";
+ var transformAiSdkSpanName = (span) => {
+ if (span.name in HANDLED_SPAN_NAMES) {
+ if (typeof HANDLED_SPAN_NAMES[span.name] === "function") {
+ span.name = HANDLED_SPAN_NAMES[span.name](span);
+ } else {
+ span.name = HANDLED_SPAN_NAMES[span.name];
+ }
+ }
+ };
+ var transformResponseText = (attributes) => {
+ if (AI_RESPONSE_TEXT in attributes) {
+ attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`] = attributes[AI_RESPONSE_TEXT];
+ attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`] = "assistant";
+ delete attributes[AI_RESPONSE_TEXT];
+ }
+ };
+ var transformPromptMessages = (attributes) => {
+ if (AI_PROMPT_MESSAGES in attributes) {
+ try {
+ const messages = JSON.parse(attributes[AI_PROMPT_MESSAGES]);
+ messages.forEach((msg, index) => {
+ logger.debug("Transforming prompt message", { msg, type: typeof msg.content });
+ if (typeof msg.content === "string") {
+ attributes[`${SpanAttributes.LLM_PROMPTS}.${index}.content`] = msg.content;
+ } else {
+ if (Array.isArray(msg.content) && msg.content.length > 0) {
+ const lastContent = msg.content[msg.content.length - 1];
+ if (lastContent.text) {
+ attributes[`${SpanAttributes.LLM_PROMPTS}.${index}.content`] = lastContent.text;
+ }
+ } else {
+ attributes[`${SpanAttributes.LLM_PROMPTS}.${index}.content`] = JSON.stringify(
+ msg.content
+ );
+ }
+ }
+ attributes[`${SpanAttributes.LLM_PROMPTS}.${index}.role`] = msg.role;
+ });
+ delete attributes[AI_PROMPT_MESSAGES];
+ } catch {
+ }
+ }
+ };
+ var transformPromptTokens = (attributes) => {
+ if (AI_USAGE_PROMPT_TOKENS in attributes) {
+ attributes[`${SpanAttributes.LLM_USAGE_PROMPT_TOKENS}`] = attributes[AI_USAGE_PROMPT_TOKENS];
+ delete attributes[AI_USAGE_PROMPT_TOKENS];
+ }
+ };
+ var transformCompletionTokens = (attributes) => {
+ if (AI_USAGE_COMPLETION_TOKENS in attributes) {
+ attributes[`${SpanAttributes.LLM_USAGE_COMPLETION_TOKENS}`] = attributes[AI_USAGE_COMPLETION_TOKENS];
+ delete attributes[AI_USAGE_COMPLETION_TOKENS];
+ }
+ };
+ var calculateTotalTokens = (attributes) => {
+ const promptTokens = attributes[`${SpanAttributes.LLM_USAGE_PROMPT_TOKENS}`];
+ const completionTokens = attributes[`${SpanAttributes.LLM_USAGE_COMPLETION_TOKENS}`];
+ if (promptTokens && completionTokens) {
+ attributes[`${SpanAttributes.LLM_USAGE_TOTAL_TOKENS}`] = Number(promptTokens) + Number(completionTokens);
+ }
+ };
+ var transformVendor = (attributes) => {
+ if (AI_MODEL_PROVIDER in attributes) {
+ const vendor = attributes[AI_MODEL_PROVIDER];
+ if (vendor && vendor.startsWith("openai")) {
+ attributes[SpanAttributes.LLM_SYSTEM] = "OpenAI";
+ } else {
+ attributes[SpanAttributes.LLM_SYSTEM] = vendor;
+ }
+ delete attributes[AI_MODEL_PROVIDER];
+ }
+ };
+ var transformAiSdkAttributes = (attributes) => {
+ transformResponseText(attributes);
+ transformPromptMessages(attributes);
+ transformPromptTokens(attributes);
+ transformCompletionTokens(attributes);
+ calculateTotalTokens(attributes);
+ transformVendor(attributes);
+ };
+ var shouldHandleSpan = (span) => {
+ return span.name in HANDLED_SPAN_NAMES;
+ };
+ var transformAiSdkSpan = (span) => {
+ for (const key in span.attributes) {
+ if (Number.isNaN(span.attributes[key])) {
+ span.attributes[key] = 0;
+ }
+ }
+ logger.debug("Transforming AI SDK span", {
+ spanName: span.name,
+ spanContext: span.spanContext(),
+ attributes: span.attributes
+ });
+ if (!shouldHandleSpan(span)) {
+ logger.debug("Skipping span transformation", { spanName: span.name });
+ return;
+ }
+ transformAiSdkSpanName(span);
+ transformAiSdkAttributes(span.attributes);
+ };
+
+ // src/internal/trace/processors/span-processor.ts
  var DEFAULT_MASKING_RULES = [
  {
  mode: "partial",
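The `vercel-ai.ts` block added above is the replacement mechanism: rather than wrapping the `ai` exports, the SDK now rewrites the spans the AI SDK emits itself (`ai.generateText.doGenerate` and `ai.streamText.doStream` become `gen_ai.chat`; `ai.toolCall` becomes `<toolName>.tool`). A rough before/after of what `transformAiSdkAttributes` does to one span's attributes; the target key strings are assumptions about the values of the `SpanAttributes` constants in `@traceloop/ai-semantic-conventions`.

```ts
// Input: attributes as emitted by the AI SDK's built-in telemetry.
const before = {
  "ai.response.text": "Hi there!",
  "ai.usage.promptTokens": 12,
  "ai.usage.completionTokens": 4,
  "ai.model.provider": "openai.chat",
};

// Output after transformAiSdkAttributes, assuming the usual traceloop keys:
const after = {
  "gen_ai.completion.0.content": "Hi there!", // transformResponseText
  "gen_ai.completion.0.role": "assistant",
  "gen_ai.usage.prompt_tokens": 12,           // transformPromptTokens
  "gen_ai.usage.completion_tokens": 4,        // transformCompletionTokens
  "llm.usage.total_tokens": 16,               // calculateTotalTokens: 12 + 4
  "gen_ai.system": "OpenAI",                  // transformVendor: "openai*" -> "OpenAI"
};
```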
@@ -2364,16 +1551,6 @@ var DEFAULT_MASKING_RULES = [
  mode: "partial",
  attributePattern: "gen_ai.completion",
  patterns: DEFAULT_PII_PATTERNS
- },
- {
- mode: "partial",
- attributePattern: "traceloop.entity.input",
- patterns: DEFAULT_PII_PATTERNS
- },
- {
- mode: "partial",
- attributePattern: "traceloop.entity.output",
- patterns: DEFAULT_PII_PATTERNS
  }
  ];
  var BrizzSimpleSpanProcessor = class extends SimpleSpanProcessor {
@@ -2398,7 +1575,7 @@ var BrizzSimpleSpanProcessor = class extends SimpleSpanProcessor {
  if (maskingConfig) {
  maskSpan(span, maskingConfig);
  }
- const associationProperties = context6.active().getValue(PROPERTIES_CONTEXT_KEY);
+ const associationProperties = context2.active().getValue(PROPERTIES_CONTEXT_KEY);
  if (associationProperties) {
  for (const [key, value] of Object.entries(associationProperties)) {
  span.setAttribute(`${BRIZZ}.${key}`, value);
@@ -2406,6 +1583,10 @@ var BrizzSimpleSpanProcessor = class extends SimpleSpanProcessor {
  }
  super.onStart(span, parentContext);
  }
+ onEnd(span) {
+ transformAiSdkSpan(span);
+ super.onEnd(span);
+ }
  };
  var BrizzBatchSpanProcessor = class extends BatchSpanProcessor {
  config;
@@ -2418,7 +1599,7 @@ var BrizzBatchSpanProcessor = class extends BatchSpanProcessor {
  if (maskingConfig) {
  maskSpan(span, maskingConfig);
  }
- const associationProperties = context6.active().getValue(PROPERTIES_CONTEXT_KEY);
+ const associationProperties = context2.active().getValue(PROPERTIES_CONTEXT_KEY);
  if (associationProperties) {
  for (const [key, value] of Object.entries(associationProperties)) {
  span.setAttribute(`${BRIZZ}.${key}`, value);
@@ -2426,6 +1607,10 @@ var BrizzBatchSpanProcessor = class extends BatchSpanProcessor {
  }
  super.onStart(span, parentContext);
  }
+ onEnd(span) {
+ transformAiSdkSpan(span);
+ super.onEnd(span);
+ }
  };
  function maskSpan(span, config) {
  if (!span.attributes || Object.keys(span.attributes).length === 0) {
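Both processors gain the identical `onEnd` override, so every ended span passes through `transformAiSdkSpan` exactly once before the base class batches or exports it. A minimal standalone sketch of the hook pattern; the transform itself is only declared here, not reimplemented.

```ts
import { BatchSpanProcessor, type ReadableSpan } from "@opentelemetry/sdk-trace-base";

// Stand-in for the transformAiSdkSpan defined earlier in this file.
declare function transformAiSdkSpan(span: ReadableSpan): void;

class TransformingBatchSpanProcessor extends BatchSpanProcessor {
  onEnd(span: ReadableSpan): void {
    transformAiSdkSpan(span); // rewrite name/attributes in place
    super.onEnd(span);        // then hand the span to batching/export
  }
}
```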
@@ -2513,8 +1698,9 @@ var TracingModule = class _TracingModule {
  disableBatch: config.disableBatch,
  hasMasking: !!config.masking?.spanMasking
  });
- this.spanProcessor = config.disableBatch ? new BrizzSimpleSpanProcessor(this.spanExporter, config) : new BrizzBatchSpanProcessor(this.spanExporter, config);
+ const spanProcessor = config.disableBatch ? new BrizzSimpleSpanProcessor(this.spanExporter, config) : new BrizzBatchSpanProcessor(this.spanExporter, config);
  logger.debug("Span processor initialized successfully");
+ this.spanProcessor = spanProcessor;
  }
  /**
  * Get the span exporter
@@ -2556,13 +1742,13 @@ function getSpanProcessor() {
  }
 
  // src/internal/trace/session.ts
- import { context as context7 } from "@opentelemetry/api";
+ import { context as context3 } from "@opentelemetry/api";
  function withProperties(properties, fn, thisArg, ...args) {
  if (Object.keys(properties).length === 0) {
  return fn.apply(thisArg, args);
  }
- const newContext = context7.active().setValue(PROPERTIES_CONTEXT_KEY, properties);
- return context7.with(newContext, fn, thisArg, ...args);
+ const newContext = context3.active().setValue(PROPERTIES_CONTEXT_KEY, properties);
+ return context3.with(newContext, fn, thisArg, ...args);
  }
  function WithSessionId(sessionId, fn, thisArg, ...args) {
  return withProperties({ [SESSION_ID]: sessionId }, fn, thisArg, ...args);
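`withProperties` stashes the property bag on the active OpenTelemetry context, and the processors' `onStart` overrides earlier in this file copy each entry onto new spans as `brizz.*` attributes. A hedged usage sketch of the exported `WithSessionId` wrapper; the callback is a hypothetical application function.

```ts
import { WithSessionId } from "@brizz/sdk";

declare function handleUserTurn(): Promise<string>; // hypothetical app function

// Spans and logs created inside the callback carry the session id, because
// the wrapper runs fn under the enriched context and returns fn's result.
const reply = await WithSessionId("session-42", async () => {
  return await handleUserTurn();
});
```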
@@ -2756,7 +1942,7 @@ var _Brizz = class __Brizz {
  var Brizz = new _Brizz();
 
  // src/index.ts
- import { SeverityNumber as SeverityNumber3 } from "@opentelemetry/api-logs";
+ import { SeverityNumber as SeverityNumber2 } from "@opentelemetry/api-logs";
 
  // src/node/runtime.ts
  function detectRuntime() {
@@ -2806,8 +1992,7 @@ export {
  Brizz,
  DEFAULT_PII_PATTERNS,
  LogLevel,
- SeverityNumber3 as SeverityNumber,
- VercelAIInstrumentation,
+ SeverityNumber2 as SeverityNumber,
  WithSessionId,
  detectRuntime,
  emitEvent,
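With `VercelAIInstrumentation` gone from both the auto-loader and the exports, nothing in this version patches the `ai` package. The span names handled by the new transform match the ones the Vercel AI SDK emits when its built-in OpenTelemetry support is switched on, which suggests callers now opt in per call. A sketch of that flow, assuming the AI SDK's documented `experimental_telemetry` option (not something this diff shows):

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// The AI SDK emits spans such as "ai.generateText.doGenerate" only when its
// telemetry is enabled; the onEnd transform above then renames them to
// "gen_ai.chat" and normalizes their attributes.
const { text } = await generateText({
  model: openai("gpt-4o-mini"),
  prompt: "Say hello",
  experimental_telemetry: { isEnabled: true },
});
```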