@mastra/otel-exporter 0.0.0-unified-sidebar-20251010130811 → 0.0.0-vnext-20251104230439

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -1,7 +1,7 @@
 'use strict';
 
-var aiTracing = require('@mastra/core/ai-tracing');
-var logger = require('@mastra/core/logger');
+var observability$1 = require('@mastra/core/observability');
+var observability = require('@mastra/observability');
 var api = require('@opentelemetry/api');
 var resources = require('@opentelemetry/resources');
 var sdkTraceBase = require('@opentelemetry/sdk-trace-base');
@@ -209,6 +209,7 @@ var MastraReadableSpan = class {
   name;
   kind;
   spanContext;
+  parentSpanContext;
   parentSpanId;
   startTime;
   endTime;
@@ -276,6 +277,14 @@ var MastraReadableSpan = class {
       traceFlags: api.TraceFlags.SAMPLED,
       isRemote: false
     });
+    if (parentSpanId) {
+      this.parentSpanContext = {
+        traceId: aiSpan.traceId,
+        spanId: parentSpanId,
+        traceFlags: api.TraceFlags.SAMPLED,
+        isRemote: false
+      };
+    }
     this.resource = resource || {};
     this.instrumentationLibrary = instrumentationLibrary || {
       name: "@mastra/otel",
@@ -296,24 +305,14 @@ var MastraReadableSpan = class {
 
 // src/span-converter.ts
 var SPAN_KIND_MAPPING = {
-  // LLM operations are CLIENT spans (calling external AI services)
-  [aiTracing.AISpanType.LLM_GENERATION]: api.SpanKind.CLIENT,
-  [aiTracing.AISpanType.LLM_CHUNK]: api.SpanKind.CLIENT,
-  // Tool calls can be CLIENT (external) or INTERNAL based on context
-  [aiTracing.AISpanType.TOOL_CALL]: api.SpanKind.INTERNAL,
-  [aiTracing.AISpanType.MCP_TOOL_CALL]: api.SpanKind.CLIENT,
+  // Model operations are CLIENT spans (calling external AI services)
+  [observability$1.AISpanType.MODEL_GENERATION]: api.SpanKind.CLIENT,
+  [observability$1.AISpanType.MODEL_CHUNK]: api.SpanKind.CLIENT,
+  // MCP tool calls are CLIENT (external service calls)
+  [observability$1.AISpanType.MCP_TOOL_CALL]: api.SpanKind.CLIENT,
   // Root spans for agent/workflow are SERVER (entry points)
-  [aiTracing.AISpanType.AGENT_RUN]: api.SpanKind.SERVER,
-  [aiTracing.AISpanType.WORKFLOW_RUN]: api.SpanKind.SERVER,
-  // Internal workflow operations
-  [aiTracing.AISpanType.WORKFLOW_STEP]: api.SpanKind.INTERNAL,
-  [aiTracing.AISpanType.WORKFLOW_LOOP]: api.SpanKind.INTERNAL,
-  [aiTracing.AISpanType.WORKFLOW_PARALLEL]: api.SpanKind.INTERNAL,
-  [aiTracing.AISpanType.WORKFLOW_CONDITIONAL]: api.SpanKind.INTERNAL,
-  [aiTracing.AISpanType.WORKFLOW_CONDITIONAL_EVAL]: api.SpanKind.INTERNAL,
-  [aiTracing.AISpanType.WORKFLOW_SLEEP]: api.SpanKind.INTERNAL,
-  [aiTracing.AISpanType.WORKFLOW_WAIT_EVENT]: api.SpanKind.INTERNAL,
-  [aiTracing.AISpanType.GENERIC]: api.SpanKind.INTERNAL
+  [observability$1.AISpanType.AGENT_RUN]: api.SpanKind.SERVER,
+  [observability$1.AISpanType.WORKFLOW_RUN]: api.SpanKind.SERVER
 };
 var SpanConverter = class {
   resource;
@@ -349,7 +348,7 @@ var SpanConverter = class {
    */
   getSpanKind(aiSpan) {
     if (aiSpan.isRootSpan) {
-      if (aiSpan.type === aiTracing.AISpanType.AGENT_RUN || aiSpan.type === aiTracing.AISpanType.WORKFLOW_RUN) {
+      if (aiSpan.type === observability$1.AISpanType.AGENT_RUN || aiSpan.type === observability$1.AISpanType.WORKFLOW_RUN) {
         return api.SpanKind.SERVER;
       }
     }
@@ -360,29 +359,29 @@ var SpanConverter = class {
    */
   buildSpanName(aiSpan) {
     switch (aiSpan.type) {
-      case aiTracing.AISpanType.LLM_GENERATION: {
+      case observability$1.AISpanType.MODEL_GENERATION: {
        const attrs = aiSpan.attributes;
        const operation = attrs?.resultType === "tool_selection" ? "tool_selection" : "chat";
        const model = attrs?.model || "unknown";
        return `${operation} ${model}`;
      }
-      case aiTracing.AISpanType.TOOL_CALL:
-      case aiTracing.AISpanType.MCP_TOOL_CALL: {
+      case observability$1.AISpanType.TOOL_CALL:
+      case observability$1.AISpanType.MCP_TOOL_CALL: {
        const toolAttrs = aiSpan.attributes;
        const toolName = toolAttrs?.toolId || "unknown";
        return `tool.execute ${toolName}`;
      }
-      case aiTracing.AISpanType.AGENT_RUN: {
+      case observability$1.AISpanType.AGENT_RUN: {
        const agentAttrs = aiSpan.attributes;
        const agentId = agentAttrs?.agentId || "unknown";
        return `agent.${agentId}`;
      }
-      case aiTracing.AISpanType.WORKFLOW_RUN: {
+      case observability$1.AISpanType.WORKFLOW_RUN: {
        const workflowAttrs = aiSpan.attributes;
        const workflowId = workflowAttrs?.workflowId || "unknown";
        return `workflow.${workflowId}`;
      }
-      case aiTracing.AISpanType.WORKFLOW_STEP:
+      case observability$1.AISpanType.WORKFLOW_STEP:
        return aiSpan.name;
      default:
        return aiSpan.name;
@@ -405,81 +404,81 @@ var SpanConverter = class {
     if (aiSpan.input !== void 0) {
       const inputStr = typeof aiSpan.input === "string" ? aiSpan.input : JSON.stringify(aiSpan.input);
       attributes["input"] = inputStr;
-      if (aiSpan.type === aiTracing.AISpanType.LLM_GENERATION) {
+      if (aiSpan.type === observability$1.AISpanType.MODEL_GENERATION) {
        attributes["gen_ai.prompt"] = inputStr;
-      } else if (aiSpan.type === aiTracing.AISpanType.TOOL_CALL || aiSpan.type === aiTracing.AISpanType.MCP_TOOL_CALL) {
+      } else if (aiSpan.type === observability$1.AISpanType.TOOL_CALL || aiSpan.type === observability$1.AISpanType.MCP_TOOL_CALL) {
        attributes["gen_ai.tool.input"] = inputStr;
      }
     }
     if (aiSpan.output !== void 0) {
       const outputStr = typeof aiSpan.output === "string" ? aiSpan.output : JSON.stringify(aiSpan.output);
       attributes["output"] = outputStr;
-      if (aiSpan.type === aiTracing.AISpanType.LLM_GENERATION) {
+      if (aiSpan.type === observability$1.AISpanType.MODEL_GENERATION) {
        attributes["gen_ai.completion"] = outputStr;
-      } else if (aiSpan.type === aiTracing.AISpanType.TOOL_CALL || aiSpan.type === aiTracing.AISpanType.MCP_TOOL_CALL) {
+      } else if (aiSpan.type === observability$1.AISpanType.TOOL_CALL || aiSpan.type === observability$1.AISpanType.MCP_TOOL_CALL) {
        attributes["gen_ai.tool.output"] = outputStr;
      }
     }
-    if (aiSpan.type === aiTracing.AISpanType.LLM_GENERATION && aiSpan.attributes) {
-      const llmAttrs = aiSpan.attributes;
-      if (llmAttrs.model) {
-        attributes["gen_ai.request.model"] = llmAttrs.model;
+    if (aiSpan.type === observability$1.AISpanType.MODEL_GENERATION && aiSpan.attributes) {
+      const modelAttrs = aiSpan.attributes;
+      if (modelAttrs.model) {
+        attributes["gen_ai.request.model"] = modelAttrs.model;
      }
-      if (llmAttrs.provider) {
-        attributes["gen_ai.system"] = llmAttrs.provider;
+      if (modelAttrs.provider) {
+        attributes["gen_ai.system"] = modelAttrs.provider;
      }
-      if (llmAttrs.usage) {
-        const inputTokens = llmAttrs.usage.inputTokens ?? llmAttrs.usage.promptTokens;
-        const outputTokens = llmAttrs.usage.outputTokens ?? llmAttrs.usage.completionTokens;
+      if (modelAttrs.usage) {
+        const inputTokens = modelAttrs.usage.inputTokens ?? modelAttrs.usage.promptTokens;
+        const outputTokens = modelAttrs.usage.outputTokens ?? modelAttrs.usage.completionTokens;
        if (inputTokens !== void 0) {
          attributes["gen_ai.usage.input_tokens"] = inputTokens;
        }
        if (outputTokens !== void 0) {
          attributes["gen_ai.usage.output_tokens"] = outputTokens;
        }
-        if (llmAttrs.usage.totalTokens !== void 0) {
-          attributes["gen_ai.usage.total_tokens"] = llmAttrs.usage.totalTokens;
+        if (modelAttrs.usage.totalTokens !== void 0) {
+          attributes["gen_ai.usage.total_tokens"] = modelAttrs.usage.totalTokens;
        }
-        if (llmAttrs.usage.reasoningTokens !== void 0) {
-          attributes["gen_ai.usage.reasoning_tokens"] = llmAttrs.usage.reasoningTokens;
+        if (modelAttrs.usage.reasoningTokens !== void 0) {
+          attributes["gen_ai.usage.reasoning_tokens"] = modelAttrs.usage.reasoningTokens;
        }
-        if (llmAttrs.usage.cachedInputTokens !== void 0) {
-          attributes["gen_ai.usage.cached_input_tokens"] = llmAttrs.usage.cachedInputTokens;
+        if (modelAttrs.usage.cachedInputTokens !== void 0) {
+          attributes["gen_ai.usage.cached_input_tokens"] = modelAttrs.usage.cachedInputTokens;
        }
      }
-      if (llmAttrs.parameters) {
-        if (llmAttrs.parameters.temperature !== void 0) {
-          attributes["gen_ai.request.temperature"] = llmAttrs.parameters.temperature;
+      if (modelAttrs.parameters) {
+        if (modelAttrs.parameters.temperature !== void 0) {
+          attributes["gen_ai.request.temperature"] = modelAttrs.parameters.temperature;
        }
-        if (llmAttrs.parameters.maxOutputTokens !== void 0) {
-          attributes["gen_ai.request.max_tokens"] = llmAttrs.parameters.maxOutputTokens;
+        if (modelAttrs.parameters.maxOutputTokens !== void 0) {
+          attributes["gen_ai.request.max_tokens"] = modelAttrs.parameters.maxOutputTokens;
        }
-        if (llmAttrs.parameters.topP !== void 0) {
-          attributes["gen_ai.request.top_p"] = llmAttrs.parameters.topP;
+        if (modelAttrs.parameters.topP !== void 0) {
+          attributes["gen_ai.request.top_p"] = modelAttrs.parameters.topP;
        }
-        if (llmAttrs.parameters.topK !== void 0) {
-          attributes["gen_ai.request.top_k"] = llmAttrs.parameters.topK;
+        if (modelAttrs.parameters.topK !== void 0) {
+          attributes["gen_ai.request.top_k"] = modelAttrs.parameters.topK;
        }
-        if (llmAttrs.parameters.presencePenalty !== void 0) {
-          attributes["gen_ai.request.presence_penalty"] = llmAttrs.parameters.presencePenalty;
+        if (modelAttrs.parameters.presencePenalty !== void 0) {
+          attributes["gen_ai.request.presence_penalty"] = modelAttrs.parameters.presencePenalty;
        }
-        if (llmAttrs.parameters.frequencyPenalty !== void 0) {
-          attributes["gen_ai.request.frequency_penalty"] = llmAttrs.parameters.frequencyPenalty;
+        if (modelAttrs.parameters.frequencyPenalty !== void 0) {
+          attributes["gen_ai.request.frequency_penalty"] = modelAttrs.parameters.frequencyPenalty;
        }
-        if (llmAttrs.parameters.stopSequences) {
-          attributes["gen_ai.request.stop_sequences"] = JSON.stringify(llmAttrs.parameters.stopSequences);
+        if (modelAttrs.parameters.stopSequences) {
+          attributes["gen_ai.request.stop_sequences"] = JSON.stringify(modelAttrs.parameters.stopSequences);
        }
      }
-      if (llmAttrs.finishReason) {
-        attributes["gen_ai.response.finish_reasons"] = llmAttrs.finishReason;
+      if (modelAttrs.finishReason) {
+        attributes["gen_ai.response.finish_reasons"] = modelAttrs.finishReason;
      }
     }
-    if ((aiSpan.type === aiTracing.AISpanType.TOOL_CALL || aiSpan.type === aiTracing.AISpanType.MCP_TOOL_CALL) && aiSpan.attributes) {
+    if ((aiSpan.type === observability$1.AISpanType.TOOL_CALL || aiSpan.type === observability$1.AISpanType.MCP_TOOL_CALL) && aiSpan.attributes) {
       const toolAttrs = aiSpan.attributes;
       if (toolAttrs.toolId) {
         attributes["gen_ai.tool.name"] = toolAttrs.toolId;
      }
-      if (aiSpan.type === aiTracing.AISpanType.MCP_TOOL_CALL) {
+      if (aiSpan.type === observability$1.AISpanType.MCP_TOOL_CALL) {
        const mcpAttrs = toolAttrs;
        if (mcpAttrs.mcpServer) {
          attributes["mcp.server"] = mcpAttrs.mcpServer;
@@ -496,10 +495,11 @@ var SpanConverter = class {
         attributes["gen_ai.tool.success"] = toolAttrs.success;
       }
     }
-    if (aiSpan.type === aiTracing.AISpanType.AGENT_RUN && aiSpan.attributes) {
+    if (aiSpan.type === observability$1.AISpanType.AGENT_RUN && aiSpan.attributes) {
       const agentAttrs = aiSpan.attributes;
       if (agentAttrs.agentId) {
         attributes["agent.id"] = agentAttrs.agentId;
+        attributes["gen_ai.agent.id"] = agentAttrs.agentId;
      }
      if (agentAttrs.maxSteps) {
        attributes["agent.max_steps"] = agentAttrs.maxSteps;
@@ -508,7 +508,7 @@ var SpanConverter = class {
         attributes["agent.available_tools"] = JSON.stringify(agentAttrs.availableTools);
       }
     }
-    if (aiSpan.type === aiTracing.AISpanType.WORKFLOW_RUN && aiSpan.attributes) {
+    if (aiSpan.type === observability$1.AISpanType.WORKFLOW_RUN && aiSpan.attributes) {
      const workflowAttrs = aiSpan.attributes;
      if (workflowAttrs.workflowId) {
        attributes["workflow.id"] = workflowAttrs.workflowId;
@@ -557,16 +557,16 @@ var SpanConverter = class {
    */
   getOperationName(aiSpan) {
     switch (aiSpan.type) {
-      case aiTracing.AISpanType.LLM_GENERATION: {
+      case observability$1.AISpanType.MODEL_GENERATION: {
        const attrs = aiSpan.attributes;
        return attrs?.resultType === "tool_selection" ? "tool_selection" : "chat";
      }
-      case aiTracing.AISpanType.TOOL_CALL:
-      case aiTracing.AISpanType.MCP_TOOL_CALL:
+      case observability$1.AISpanType.TOOL_CALL:
+      case observability$1.AISpanType.MCP_TOOL_CALL:
        return "tool.execute";
-      case aiTracing.AISpanType.AGENT_RUN:
+      case observability$1.AISpanType.AGENT_RUN:
        return "agent.run";
-      case aiTracing.AISpanType.WORKFLOW_RUN:
+      case observability$1.AISpanType.WORKFLOW_RUN:
        return "workflow.run";
      default:
        return aiSpan.type.replace(/_/g, ".");
@@ -595,20 +595,18 @@ var SpanConverter = class {
 };
 
 // src/ai-tracing.ts
-var OtelExporter = class {
+var OtelExporter = class extends observability.BaseExporter {
   config;
   tracingConfig;
   spanConverter;
   processor;
   exporter;
   isSetup = false;
-  isDisabled = false;
-  logger;
   name = "opentelemetry";
   constructor(config) {
+    super(config);
     this.config = config;
     this.spanConverter = new SpanConverter();
-    this.logger = new logger.ConsoleLogger({ level: config.logLevel ?? "warn" });
     if (config.logLevel === "debug") {
       api.diag.setLogger(new api.DiagConsoleLogger(), api.DiagLogLevel.DEBUG);
     }
@@ -616,11 +614,11 @@ var OtelExporter = class {
   /**
    * Initialize with tracing configuration
    */
-  init(config) {
-    this.tracingConfig = config;
+  init(options) {
+    this.tracingConfig = options.config;
   }
   async setupExporter() {
-    if (this.isSetup) return;
+    if (this.isSetup || this.exporter) return;
     if (!this.config.provider) {
       this.logger.error(
         '[OtelExporter] Provider configuration is required. Use the "custom" provider for generic endpoints.'
@@ -635,6 +633,10 @@ var OtelExporter = class {
       this.isSetup = true;
       return;
     }
+    if (this.config.exporter) {
+      this.exporter = this.config.exporter;
+      return;
+    }
     const endpoint = resolved.endpoint;
     const headers = resolved.headers;
     const protocol = resolved.protocol;
@@ -688,7 +690,10 @@ var OtelExporter = class {
       this.isSetup = true;
       return;
     }
-    const resource = resources.resourceFromAttributes({
+  }
+  async setupProcessor() {
+    if (this.processor || this.isSetup) return;
+    let resource = resources.resourceFromAttributes({
       [semanticConventions.ATTR_SERVICE_NAME]: this.tracingConfig?.serviceName || "mastra-service",
       [semanticConventions.ATTR_SERVICE_VERSION]: "1.0.0",
       // Add telemetry SDK information
@@ -696,6 +701,12 @@ var OtelExporter = class {
       [semanticConventions.ATTR_TELEMETRY_SDK_VERSION]: "1.0.0",
       [semanticConventions.ATTR_TELEMETRY_SDK_LANGUAGE]: "nodejs"
     });
+    if (this.config.resourceAttributes) {
+      resource = resource.merge(
+        // Duplicate attributes from config will override defaults above
+        resources.resourceFromAttributes(this.config.resourceAttributes)
+      );
+    }
     this.spanConverter = new SpanConverter(resource);
     this.processor = new sdkTraceBase.BatchSpanProcessor(this.exporter, {
       maxExportBatchSize: this.config.batchSize || 512,
@@ -710,13 +721,15 @@ var OtelExporter = class {
     this.logger.debug(
       `[OtelExporter] Using BatchSpanProcessor (batch size: ${this.config.batchSize || 512}, delay: 5s)`
     );
+  }
+  async setup() {
+    if (this.isSetup) return;
+    await this.setupExporter();
+    await this.setupProcessor();
     this.isSetup = true;
   }
-  async exportEvent(event) {
-    if (this.isDisabled) {
-      return;
-    }
-    if (event.type !== aiTracing.AITracingEventType.SPAN_ENDED) {
+  async _exportEvent(event) {
+    if (event.type !== observability$1.AITracingEventType.SPAN_ENDED) {
       return;
     }
     const span = event.exportedSpan;
@@ -724,7 +737,7 @@ var OtelExporter = class {
   }
   async exportSpan(span) {
     if (!this.isSetup) {
-      await this.setupExporter();
+      await this.setup();
     }
     if (this.isDisabled || !this.processor) {
       return;
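
For orientation, a minimal usage sketch of the configuration surface touched by this diff follows. It is illustrative only: it assumes OtelExporter is the package's public export and that @opentelemetry/exporter-trace-otlp-http is installed; the option names used (exporter, resourceAttributes, batchSize, logLevel) all appear in the diff above, but the full config shape, including the provider block that setupExporter still checks for, is not shown here.

import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http';
import { OtelExporter } from '@mastra/otel-exporter';

// Sketch only: exercises the options added or changed in this version.
const otelExporter = new OtelExporter({
  // New in this version: a preconfigured OTLP exporter can be supplied via config.exporter.
  exporter: new OTLPTraceExporter({ url: 'http://localhost:4318/v1/traces' }),
  // New in this version: extra resource attributes, merged over the default service attributes.
  resourceAttributes: { 'deployment.environment': 'staging' },
  batchSize: 512,
  logLevel: 'warn',
});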