@iqai/adk 0.5.9 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -886,7 +886,7 @@ init_logger();
  import { generateId } from "ai";
 
  // src/runners.ts
- import { context as context3, SpanStatusCode, trace as trace3 } from "@opentelemetry/api";
+ import { context as context3, SpanStatusCode, trace as trace4 } from "@opentelemetry/api";
 
  // src/agents/invocation-context.ts
  var LlmCallsLimitExceededError = class extends Error {
@@ -971,6 +971,11 @@ var InvocationContext = class _InvocationContext {
  * Configurations for live agents under this invocation.
  */
  runConfig;
+ /**
+ * Transfer context for multi-agent workflows
+ * Tracks agent transfer chain for telemetry
+ */
+ transferContext;
  /**
  * A container to keep track of different kinds of costs incurred as a part
  * of this invocation.
@@ -994,6 +999,7 @@ var InvocationContext = class _InvocationContext {
  this.activeStreamingTools = options.activeStreamingTools;
  this.transcriptionCache = options.transcriptionCache;
  this.runConfig = options.runConfig;
+ this.transferContext = options.transferContext;
  }
  /**
  * App name from the session
@@ -1039,7 +1045,9 @@ var InvocationContext = class _InvocationContext {
  liveRequestQueue: this.liveRequestQueue,
  activeStreamingTools: this.activeStreamingTools,
  transcriptionCache: this.transcriptionCache,
- runConfig: this.runConfig
+ runConfig: this.runConfig,
+ transferContext: this.transferContext
+ // Propagate transfer context
  });
  }
  };
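
Taken together, the three preceding hunks thread a new transferContext field through InvocationContext: it is declared on the class, accepted through the constructor options, and copied forward in createChildContext, so the transfer chain survives agent-to-agent handoffs. A minimal TypeScript sketch of the idea, assuming a hypothetical TransferContext shape (the diff shows only that the field is stored and propagated, not its type):

// Hypothetical shape for illustration; not the package's declared type.
interface TransferContext {
  chain: string[];   // agents visited so far, e.g. ["root_agent", "billing_agent"]
  depth: number;     // number of hops taken
  reason?: string;   // optional transfer reason, recorded for telemetry
}

// Sketch: extend the chain before handing off to a sub-agent.
function extendTransferChain(
  current: TransferContext | undefined,
  targetAgent: string,
  reason?: string
): TransferContext {
  const chain = [...(current?.chain ?? []), targetAgent];
  return { chain, depth: chain.length, reason };
}
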
@@ -1259,288 +1267,1675 @@ var LlmResponse = class _LlmResponse {
 
  // src/models/base-llm.ts
  init_logger();
+ import { trace as trace2 } from "@opentelemetry/api";
+
+ // src/telemetry/metrics.ts
+ import { metrics } from "@opentelemetry/api";
+
+ // src/telemetry/constants.ts
+ var SEMCONV = {
+ // System identification (REQUIRED)
+ GEN_AI_PROVIDER_NAME: "gen_ai.provider.name",
+ // e.g., "openai", "anthropic", "aws.bedrock"
+ // Operation names (REQUIRED)
+ GEN_AI_OPERATION_NAME: "gen_ai.operation.name",
+ // Agent attributes
+ GEN_AI_AGENT_ID: "gen_ai.agent.id",
+ // Unique agent identifier
+ GEN_AI_AGENT_NAME: "gen_ai.agent.name",
+ GEN_AI_AGENT_DESCRIPTION: "gen_ai.agent.description",
+ GEN_AI_CONVERSATION_ID: "gen_ai.conversation.id",
+ // Tool attributes
+ GEN_AI_TOOL_NAME: "gen_ai.tool.name",
+ GEN_AI_TOOL_DESCRIPTION: "gen_ai.tool.description",
+ GEN_AI_TOOL_TYPE: "gen_ai.tool.type",
+ GEN_AI_TOOL_CALL_ID: "gen_ai.tool.call.id",
+ GEN_AI_TOOL_CALL_ARGUMENTS: "gen_ai.tool.call.arguments",
+ // Structured tool input (opt-in)
+ GEN_AI_TOOL_CALL_RESULT: "gen_ai.tool.call.result",
+ // Structured tool output (opt-in)
+ GEN_AI_TOOL_DEFINITIONS: "gen_ai.tool.definitions",
+ // Tool schemas (opt-in)
+ // LLM request attributes
+ GEN_AI_REQUEST_MODEL: "gen_ai.request.model",
+ // Conditionally required
+ GEN_AI_REQUEST_MAX_TOKENS: "gen_ai.request.max_tokens",
+ GEN_AI_REQUEST_TEMPERATURE: "gen_ai.request.temperature",
+ GEN_AI_REQUEST_TOP_P: "gen_ai.request.top_p",
+ GEN_AI_REQUEST_TOP_K: "gen_ai.request.top_k",
+ GEN_AI_REQUEST_FREQUENCY_PENALTY: "gen_ai.request.frequency_penalty",
+ GEN_AI_REQUEST_PRESENCE_PENALTY: "gen_ai.request.presence_penalty",
+ GEN_AI_REQUEST_STOP_SEQUENCES: "gen_ai.request.stop_sequences",
+ GEN_AI_REQUEST_CHOICE_COUNT: "gen_ai.request.choice.count",
+ // Conditionally required (if !=1)
+ GEN_AI_REQUEST_SEED: "gen_ai.request.seed",
+ // Conditionally required
+ // LLM response attributes
+ GEN_AI_RESPONSE_ID: "gen_ai.response.id",
+ // Recommended - completion ID
+ GEN_AI_RESPONSE_MODEL: "gen_ai.response.model",
+ // Recommended - actual model name returned
+ GEN_AI_RESPONSE_FINISH_REASONS: "gen_ai.response.finish_reasons",
+ GEN_AI_OUTPUT_TYPE: "gen_ai.output.type",
+ // Conditionally required - "text", "json", "image", "speech"
+ // Token usage (input/output only; total is computed client-side)
+ GEN_AI_USAGE_INPUT_TOKENS: "gen_ai.usage.input_tokens",
+ GEN_AI_USAGE_OUTPUT_TOKENS: "gen_ai.usage.output_tokens",
+ // Content attributes (opt-in, large/PII)
+ GEN_AI_SYSTEM_INSTRUCTIONS: "gen_ai.system_instructions",
+ // System prompt
+ GEN_AI_INPUT_MESSAGES: "gen_ai.input.messages",
+ // Full chat history input
+ GEN_AI_OUTPUT_MESSAGES: "gen_ai.output.messages",
+ // Model output messages
+ // Server attributes (Recommended)
+ SERVER_ADDRESS: "server.address",
+ // GenAI server host
+ SERVER_PORT: "server.port",
+ // Conditionally required if server.address is set
+ // Error attributes (Conditionally required on error)
+ ERROR_TYPE: "error.type",
+ // Low-cardinality error identifier
+ // Data source (for RAG/knowledge base)
+ GEN_AI_DATA_SOURCE_ID: "gen_ai.data_source.id",
+ // Embeddings (for future support)
+ GEN_AI_EMBEDDINGS_DIMENSION_COUNT: "gen_ai.embeddings.dimension.count",
+ GEN_AI_REQUEST_ENCODING_FORMATS: "gen_ai.request.encoding_formats",
+ // ============================================
+ // DEPRECATED ATTRIBUTES (kept for backward compatibility)
+ // These will be removed in a future release
+ // ============================================
+ /**
+ * @deprecated Use GEN_AI_PROVIDER_NAME instead
+ */
+ GEN_AI_SYSTEM: "gen_ai.system",
+ /**
+ * @deprecated Total tokens should be computed client-side from input + output
+ */
+ GEN_AI_USAGE_TOTAL_TOKENS: "gen_ai.usage.total_tokens",
+ /**
+ * @deprecated Use GEN_AI_INPUT_MESSAGES or structured events instead
+ */
+ GEN_AI_CONTENT_PROMPT: "gen_ai.content.prompt",
+ /**
+ * @deprecated Use GEN_AI_OUTPUT_MESSAGES or structured events instead
+ */
+ GEN_AI_CONTENT_COMPLETION: "gen_ai.content.completion"
+ };
+ var ADK_ATTRS = {
+ // ============================================
+ // TIER 1: Always Present (Core Identity)
+ // ============================================
+ // System identification
+ SYSTEM_NAME: "adk.system.name",
+ SYSTEM_VERSION: "adk.system.version",
+ // Session and context
+ SESSION_ID: "adk.session.id",
+ USER_ID: "adk.user.id",
+ INVOCATION_ID: "adk.invocation.id",
+ EVENT_ID: "adk.event.id",
+ // Environment
+ ENVIRONMENT: "adk.environment",
+ // ============================================
+ // TIER 2: Operation-Specific (Standard)
+ // ============================================
+ // Agent attributes
+ AGENT_NAME: "adk.agent.name",
+ AGENT_DESCRIPTION: "adk.agent.description",
+ // Transfer attributes (for multi-agent)
+ TRANSFER_SOURCE_AGENT: "adk.transfer.source_agent",
+ TRANSFER_TARGET_AGENT: "adk.transfer.target_agent",
+ TRANSFER_CHAIN: "adk.transfer.chain",
+ TRANSFER_DEPTH: "adk.transfer.depth",
+ TRANSFER_ROOT_AGENT: "adk.transfer.root_agent",
+ TRANSFER_REASON: "adk.transfer.reason",
+ // Tool attributes
+ TOOL_NAME: "adk.tool.name",
+ TOOL_ARGS: "adk.tool.args",
+ TOOL_RESPONSE: "adk.tool.response",
+ TOOL_EXECUTION_ORDER: "adk.tool.execution_order",
+ TOOL_PARALLEL_GROUP: "adk.tool.parallel_group",
+ TOOL_RETRY_COUNT: "adk.tool.retry_count",
+ TOOL_IS_CALLBACK_OVERRIDE: "adk.tool.is_callback_override",
+ // LLM attributes
+ LLM_MODEL: "adk.llm.model",
+ LLM_REQUEST: "adk.llm.request",
+ LLM_RESPONSE: "adk.llm.response",
+ LLM_STREAMING: "adk.llm.streaming",
+ LLM_TIME_TO_FIRST_TOKEN: "adk.llm.time_to_first_token_ms",
+ LLM_CHUNK_COUNT: "adk.llm.chunk_count",
+ LLM_CACHED_TOKENS: "adk.llm.cached_tokens",
+ LLM_CONTEXT_WINDOW_USED_PCT: "adk.llm.context_window_used_pct",
+ // Callback attributes
+ CALLBACK_TYPE: "adk.callback.type",
+ CALLBACK_NAME: "adk.callback.name",
+ CALLBACK_INDEX: "adk.callback.index",
+ // ============================================
+ // Error Categorization
+ // ============================================
+ ERROR_CATEGORY: "adk.error.category",
+ ERROR_RECOVERABLE: "adk.error.recoverable",
+ ERROR_RETRY_RECOMMENDED: "adk.error.retry_recommended",
+ // ============================================
+ // Memory & Session
+ // ============================================
+ MEMORY_QUERY: "adk.memory.query",
+ MEMORY_RESULTS_COUNT: "adk.memory.results_count",
+ // Plugin
+ PLUGIN_NAME: "adk.plugin.name",
+ PLUGIN_HOOK: "adk.plugin.hook"
+ };
+ var OPERATIONS = {
+ // ============================================
+ // Standard OpenTelemetry GenAI operations
+ // ============================================
+ CHAT: "chat",
+ // Chat completion (most common)
+ TEXT_COMPLETION: "text_completion",
+ // Legacy text completion
+ GENERATE_CONTENT: "generate_content",
+ // Generic content generation
+ // Agent operations
+ INVOKE_AGENT: "invoke_agent",
+ CREATE_AGENT: "create_agent",
+ // Tool operations
+ EXECUTE_TOOL: "execute_tool",
+ // ============================================
+ // ADK-specific operations (framework extensions)
+ // These are non-standard but useful for the ADK framework
+ // ============================================
+ TRANSFER_AGENT: "transfer_agent",
+ // Multi-agent transfer
+ EXECUTE_CALLBACK: "execute_callback",
+ // Callback execution
+ // Service operations
+ SEARCH_MEMORY: "search_memory",
+ // Memory search
+ INSERT_MEMORY: "insert_memory",
+ // Memory insert
+ EXECUTE_PLUGIN: "execute_plugin",
+ // Plugin execution
+ // ============================================
+ // DEPRECATED OPERATIONS (kept for backward compatibility)
+ // ============================================
+ /**
+ * @deprecated Use CHAT, TEXT_COMPLETION, or GENERATE_CONTENT instead
+ */
+ CALL_LLM: "call_llm"
+ };
+ var ADK_SYSTEM_NAME = "iqai-adk";
+ var ENV_VARS = {
+ // Privacy control
+ CAPTURE_MESSAGE_CONTENT: "ADK_CAPTURE_MESSAGE_CONTENT",
+ // OpenTelemetry standard env vars
+ OTEL_SERVICE_NAME: "OTEL_SERVICE_NAME",
+ OTEL_RESOURCE_ATTRIBUTES: "OTEL_RESOURCE_ATTRIBUTES",
+ OTEL_EXPORTER_OTLP_ENDPOINT: "OTEL_EXPORTER_OTLP_ENDPOINT",
+ OTEL_EXPORTER_OTLP_HEADERS: "OTEL_EXPORTER_OTLP_HEADERS",
+ // Node environment
+ NODE_ENV: "NODE_ENV"
+ };
+ var METRICS = {
+ // ============================================
+ // Standard OpenTelemetry GenAI Metrics
+ // Reference: https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-metrics/
+ // ============================================
+ // Required: Client operation duration (Histogram, seconds)
+ GEN_AI_CLIENT_OPERATION_DURATION: "gen_ai.client.operation.duration",
+ // Recommended: Token usage (Histogram, tokens)
+ // Use with gen_ai.token.type attribute: "input" or "output"
+ GEN_AI_CLIENT_TOKEN_USAGE: "gen_ai.client.token.usage",
+ // Server-side metrics (if hosting GenAI service)
+ GEN_AI_SERVER_REQUEST_DURATION: "gen_ai.server.request.duration",
+ GEN_AI_SERVER_TIME_TO_FIRST_TOKEN: "gen_ai.server.time_to_first_token",
+ GEN_AI_SERVER_TIME_PER_OUTPUT_TOKEN: "gen_ai.server.time_per_output_token",
+ // ============================================
+ // ADK-specific Metrics (framework extensions)
+ // ============================================
+ // Counters
+ AGENT_INVOCATIONS: "adk.agent.invocations",
+ TOOL_EXECUTIONS: "adk.tool.executions",
+ LLM_CALLS: "adk.llm.calls",
+ ERRORS: "adk.errors",
+ // Histograms
+ AGENT_DURATION: "adk.agent.duration",
+ TOOL_DURATION: "adk.tool.duration",
+ LLM_DURATION: "adk.llm.duration",
+ LLM_TOKENS: "adk.llm.tokens",
+ LLM_INPUT_TOKENS: "adk.llm.tokens.input",
+ LLM_OUTPUT_TOKENS: "adk.llm.tokens.output"
+ };
+ var SPAN_STATUS = {
+ UNSET: 0,
+ OK: 1,
+ ERROR: 2
+ };
+ var DEFAULTS = {
+ SAMPLING_RATIO: 1,
+ METRIC_EXPORT_INTERVAL_MS: 6e4,
+ // 1 minute
+ SHUTDOWN_TIMEOUT_MS: 5e3,
+ CAPTURE_MESSAGE_CONTENT: true,
+ ENABLE_TRACING: true,
+ ENABLE_METRICS: true,
+ ENABLE_AUTO_INSTRUMENTATION: true
+ };
+
+ // src/telemetry/metrics.ts
+ var MetricsService = class {
+ meter = null;
+ // Counters
+ agentInvocationsCounter = null;
+ toolExecutionsCounter = null;
+ llmCallsCounter = null;
+ errorsCounter = null;
+ // Histograms
+ agentDurationHistogram = null;
+ toolDurationHistogram = null;
+ llmDurationHistogram = null;
+ llmTokensHistogram = null;
+ llmInputTokensHistogram = null;
+ llmOutputTokensHistogram = null;
+ /**
+ * Initialize metrics with the provided meter
+ */
+ initialize(meterName, version) {
+ this.meter = metrics.getMeter(meterName, version);
+ this.createMetrics();
+ }
+ /**
+ * Check if metrics are initialized
+ */
+ get initialized() {
+ return this.meter !== null;
+ }
+ /**
+ * Create all metric instruments
+ */
+ createMetrics() {
+ if (!this.meter) return;
+ this.agentInvocationsCounter = this.meter.createCounter(
+ METRICS.AGENT_INVOCATIONS,
+ {
+ description: "Total number of agent invocations",
+ unit: "1"
+ }
+ );
+ this.toolExecutionsCounter = this.meter.createCounter(
+ METRICS.TOOL_EXECUTIONS,
+ {
+ description: "Total number of tool executions",
+ unit: "1"
+ }
+ );
+ this.llmCallsCounter = this.meter.createCounter(METRICS.LLM_CALLS, {
+ description: "Total number of LLM calls",
+ unit: "1"
+ });
+ this.errorsCounter = this.meter.createCounter(METRICS.ERRORS, {
+ description: "Total number of errors",
+ unit: "1"
+ });
+ this.agentDurationHistogram = this.meter.createHistogram(
+ METRICS.AGENT_DURATION,
+ {
+ description: "Duration of agent invocations",
+ unit: "ms"
+ }
+ );
+ this.toolDurationHistogram = this.meter.createHistogram(
+ METRICS.TOOL_DURATION,
+ {
+ description: "Duration of tool executions",
+ unit: "ms"
+ }
+ );
+ this.llmDurationHistogram = this.meter.createHistogram(
+ METRICS.LLM_DURATION,
+ {
+ description: "Duration of LLM calls",
+ unit: "ms"
+ }
+ );
+ this.llmTokensHistogram = this.meter.createHistogram(METRICS.LLM_TOKENS, {
+ description: "Total tokens used in LLM calls",
+ unit: "1"
+ });
+ this.llmInputTokensHistogram = this.meter.createHistogram(
+ METRICS.LLM_INPUT_TOKENS,
+ {
+ description: "Input tokens used in LLM calls",
+ unit: "1"
+ }
+ );
+ this.llmOutputTokensHistogram = this.meter.createHistogram(
+ METRICS.LLM_OUTPUT_TOKENS,
+ {
+ description: "Output tokens generated by LLM calls",
+ unit: "1"
+ }
+ );
+ }
+ /**
+ * Record an agent invocation
+ */
+ recordAgentInvocation(dimensions) {
+ if (!this.agentInvocationsCounter) return;
+ const attributes = {
+ "agent.name": dimensions.agentName,
+ status: dimensions.status
+ };
+ if (dimensions.environment) {
+ attributes.environment = dimensions.environment;
+ }
+ this.agentInvocationsCounter.add(1, attributes);
+ }
+ /**
+ * Record agent execution duration
+ */
+ recordAgentDuration(durationMs, dimensions) {
+ if (!this.agentDurationHistogram) return;
+ const attributes = {
+ "agent.name": dimensions.agentName,
+ status: dimensions.status
+ };
+ if (dimensions.environment) {
+ attributes.environment = dimensions.environment;
+ }
+ this.agentDurationHistogram.record(durationMs, attributes);
+ }
+ /**
+ * Record a tool execution
+ */
+ recordToolExecution(dimensions) {
+ if (!this.toolExecutionsCounter) return;
+ const attributes = {
+ "tool.name": dimensions.toolName,
+ status: dimensions.status
+ };
+ if (dimensions.agentName) {
+ attributes["agent.name"] = dimensions.agentName;
+ }
+ if (dimensions.environment) {
+ attributes.environment = dimensions.environment;
+ }
+ this.toolExecutionsCounter.add(1, attributes);
+ }
+ /**
+ * Record tool execution duration
+ */
+ recordToolDuration(durationMs, dimensions) {
+ if (!this.toolDurationHistogram) return;
+ const attributes = {
+ "tool.name": dimensions.toolName,
+ status: dimensions.status
+ };
+ if (dimensions.agentName) {
+ attributes["agent.name"] = dimensions.agentName;
+ }
+ if (dimensions.environment) {
+ attributes.environment = dimensions.environment;
+ }
+ this.toolDurationHistogram.record(durationMs, attributes);
+ }
+ /**
+ * Record an LLM call
+ */
+ recordLlmCall(dimensions) {
+ if (!this.llmCallsCounter) return;
+ const attributes = {
+ model: dimensions.model,
+ status: dimensions.status
+ };
+ if (dimensions.agentName) {
+ attributes["agent.name"] = dimensions.agentName;
+ }
+ if (dimensions.environment) {
+ attributes.environment = dimensions.environment;
+ }
+ this.llmCallsCounter.add(1, attributes);
+ }
+ /**
+ * Record LLM call duration
+ */
+ recordLlmDuration(durationMs, dimensions) {
+ if (!this.llmDurationHistogram) return;
+ const attributes = {
+ model: dimensions.model,
+ status: dimensions.status
+ };
+ if (dimensions.agentName) {
+ attributes["agent.name"] = dimensions.agentName;
+ }
+ if (dimensions.environment) {
+ attributes.environment = dimensions.environment;
+ }
+ this.llmDurationHistogram.record(durationMs, attributes);
+ }
+ /**
+ * Record LLM token usage
+ */
+ recordLlmTokens(inputTokens, outputTokens, dimensions) {
+ if (!this.llmTokensHistogram || !this.llmInputTokensHistogram || !this.llmOutputTokensHistogram) {
+ return;
+ }
+ const attributes = {
+ model: dimensions.model,
+ status: dimensions.status
+ };
+ if (dimensions.agentName) {
+ attributes["agent.name"] = dimensions.agentName;
+ }
+ if (dimensions.environment) {
+ attributes.environment = dimensions.environment;
+ }
+ const totalTokens = inputTokens + outputTokens;
+ this.llmTokensHistogram.record(totalTokens, attributes);
+ this.llmInputTokensHistogram.record(inputTokens, attributes);
+ this.llmOutputTokensHistogram.record(outputTokens, attributes);
+ }
+ /**
+ * Record an error
+ */
+ recordError(errorType, context4) {
+ if (!this.errorsCounter) return;
+ this.errorsCounter.add(1, {
+ error_type: errorType,
+ context: context4
+ });
+ }
+ };
+ var metricsService = new MetricsService();
 
- // src/telemetry.ts
+ // src/telemetry/setup.ts
  import {
- context,
  DiagConsoleLogger,
  DiagLogLevel,
  diag,
- trace
+ metrics as metrics2
  } from "@opentelemetry/api";
  import { getNodeAutoInstrumentations } from "@opentelemetry/auto-instrumentations-node";
+ import { OTLPMetricExporter } from "@opentelemetry/exporter-metrics-otlp-http";
  import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http";
- import { resourceFromAttributes } from "@opentelemetry/resources";
+ import {
+ detectResources,
+ envDetector,
+ processDetector,
+ resourceFromAttributes
+ } from "@opentelemetry/resources";
+ import {
+ MeterProvider,
+ PeriodicExportingMetricReader
+ } from "@opentelemetry/sdk-metrics";
  import { NodeSDK } from "@opentelemetry/sdk-node";
+ import {
+ BatchSpanProcessor,
+ NodeTracerProvider,
+ TraceIdRatioBasedSampler
+ } from "@opentelemetry/sdk-trace-node";
  import {
  ATTR_SERVICE_NAME,
  ATTR_SERVICE_VERSION
  } from "@opentelemetry/semantic-conventions";
- var TelemetryService = class {
+
+ // src/telemetry/utils.ts
+ function shouldCaptureContent() {
+ const value = process.env[ENV_VARS.CAPTURE_MESSAGE_CONTENT];
+ if (value === void 0) {
+ return DEFAULTS.CAPTURE_MESSAGE_CONTENT;
+ }
+ return value === "true" || value === "1" || value === "yes";
+ }
+ function extractTextFromContent(content) {
+ if (!content) return "";
+ const parts = "parts" in content ? content.parts : void 0;
+ if (!parts || !Array.isArray(parts)) return "";
+ return parts.map(
+ (part) => part && typeof part === "object" && "text" in part ? part.text : ""
+ ).join("").trim();
+ }
+ function safeJsonStringify(obj) {
+ try {
+ return JSON.stringify(obj);
+ } catch {
+ return "<serialization_failed>";
+ }
+ }
+ function parseResourceAttributes(envValue) {
+ if (!envValue) {
+ return {};
+ }
+ const attributes = {};
+ try {
+ const pairs = envValue.split(",");
+ for (const pair of pairs) {
+ const [key, value] = pair.split("=");
+ if (key && value) {
+ attributes[key.trim()] = value.trim();
+ }
+ }
+ } catch (error) {
+ console.warn("Failed to parse OTEL_RESOURCE_ATTRIBUTES:", error);
+ }
+ return attributes;
+ }
+ function getEnvironment() {
+ return process.env[ENV_VARS.NODE_ENV];
+ }
+ function detectProvider(model) {
+ const lowerModel = model.toLowerCase();
+ if (lowerModel.startsWith("gpt-") || lowerModel.startsWith("o1-") || lowerModel.startsWith("text-") || lowerModel.startsWith("davinci-") || lowerModel.startsWith("curie-") || lowerModel.startsWith("babbage-") || lowerModel.startsWith("ada-")) {
+ return "openai";
+ }
+ if (lowerModel.startsWith("claude-")) {
+ return "anthropic";
+ }
+ if (lowerModel.startsWith("gemini-") || lowerModel.startsWith("palm-") || lowerModel.startsWith("text-bison") || lowerModel.startsWith("chat-bison")) {
+ return "gcp.gemini";
+ }
+ if (lowerModel.includes("bedrock") || lowerModel.startsWith("amazon.") || lowerModel.startsWith("anthropic.claude") || lowerModel.startsWith("ai21.") || lowerModel.startsWith("cohere.") || lowerModel.startsWith("meta.llama")) {
+ return "aws.bedrock";
+ }
+ if (lowerModel.includes("azure") && !lowerModel.includes("openai")) {
+ return "azure.ai.inference";
+ }
+ if (lowerModel.startsWith("mistral-") || lowerModel.startsWith("mixtral-") || lowerModel.startsWith("codestral-")) {
+ return "mistral_ai";
+ }
+ if (lowerModel.includes("groq")) {
+ return "groq";
+ }
+ if (lowerModel.startsWith("command-") || lowerModel.startsWith("embed-") || lowerModel.includes("cohere")) {
+ return "cohere";
+ }
+ if (lowerModel.startsWith("deepseek-")) {
+ return "deepseek";
+ }
+ if (lowerModel.startsWith("grok-")) {
+ return "x_ai";
+ }
+ if (lowerModel.startsWith("pplx-") || lowerModel.startsWith("llama-3.1-sonar") || lowerModel.includes("perplexity")) {
+ return "perplexity";
+ }
+ if (lowerModel.includes("watsonx") || lowerModel.startsWith("ibm/")) {
+ return "ibm.watsonx.ai";
+ }
+ if (lowerModel.startsWith("llama-") || lowerModel.startsWith("meta-llama")) {
+ return "meta";
+ }
+ if (lowerModel.includes("ollama")) {
+ return "ollama";
+ }
+ if (lowerModel.includes("huggingface") || lowerModel.includes("hf/")) {
+ return "huggingface";
+ }
+ return "unknown";
+ }
+ function extractFinishReason(llmResponse) {
+ if (llmResponse.finishReason) {
+ return String(llmResponse.finishReason);
+ }
+ if (llmResponse.candidates?.[0]?.finishReason) {
+ return String(llmResponse.candidates[0].finishReason);
+ }
+ return void 0;
+ }
+ function buildLlmRequestForTrace(llmRequest, includeContent = true) {
+ const result = {
+ model: llmRequest.model,
+ config: excludeNonSerializableFromConfig(llmRequest.config || {})
+ };
+ if (includeContent && llmRequest.contents) {
+ result.contents = llmRequest.contents.map((content) => ({
+ role: content.role,
+ parts: content.parts?.filter((part) => !part.inlineData) || []
+ }));
+ }
+ return result;
+ }
+ function excludeNonSerializableFromConfig(config) {
+ const result = {};
+ for (const [key, value] of Object.entries(config)) {
+ if (key === "response_schema" || key === "responseSchema") {
+ continue;
+ }
+ if (value === void 0 || value === null) {
+ continue;
+ }
+ if (key === "functions" && Array.isArray(value)) {
+ result[key] = value.map((func) => ({
+ name: func.name,
+ description: func.description,
+ parameters: func.parameters
+ }));
+ } else if (typeof value !== "function") {
+ result[key] = value;
+ }
+ }
+ return result;
+ }
+ function buildLlmResponseForTrace(llmResponse, includeContent = true) {
+ const result = {};
+ if (llmResponse.usageMetadata) {
+ result.usageMetadata = llmResponse.usageMetadata;
+ }
+ const finishReason = extractFinishReason(llmResponse);
+ if (finishReason) {
+ result.finishReason = finishReason;
+ }
+ if (includeContent && llmResponse.content) {
+ result.content = llmResponse.content;
+ }
+ return result;
+ }
+ function formatSpanAttributes(attributes) {
+ const formatted = {};
+ for (const [key, value] of Object.entries(attributes)) {
+ if (value === void 0 || value === null) {
+ continue;
+ }
+ if (typeof value === "string" || typeof value === "number" || typeof value === "boolean") {
+ formatted[key] = value;
+ } else if (Array.isArray(value)) {
+ formatted[key] = value.map((v) => String(v));
+ } else if (typeof value === "object") {
+ formatted[key] = safeJsonStringify(value);
+ }
+ }
+ return formatted;
+ }
+ function getServiceName(configName) {
+ return process.env[ENV_VARS.OTEL_SERVICE_NAME] || configName || "iqai-adk-app";
+ }
+ function validateConfig(config) {
+ const errors = [];
+ if (!config.appName) {
+ errors.push("appName is required");
+ }
+ if (!config.otlpEndpoint) {
+ errors.push("otlpEndpoint is required");
+ }
+ if (config.samplingRatio !== void 0 && (config.samplingRatio < 0 || config.samplingRatio > 1)) {
+ errors.push("samplingRatio must be between 0.0 and 1.0");
+ }
+ if (config.metricExportIntervalMs !== void 0 && config.metricExportIntervalMs < 1e3) {
+ errors.push("metricExportIntervalMs must be at least 1000ms");
+ }
+ return errors;
+ }
+
+ // src/telemetry/setup.ts
+ var SetupService = class {
  sdk = null;
+ meterProvider = null;
+ tracerProvider = null;
  isInitialized = false;
- tracer;
  config = null;
- constructor() {
- this.tracer = trace.getTracer("iqai-adk", "0.1.0");
+ /**
+ * Initialize OpenTelemetry with comprehensive configuration
+ */
+ async initialize(config) {
+ if (this.isInitialized) {
+ diag.warn("Telemetry is already initialized. Skipping.");
+ return;
+ }
+ const errors = validateConfig(config);
+ if (errors.length > 0) {
+ throw new Error(`Invalid telemetry configuration: ${errors.join(", ")}`);
+ }
+ this.config = config;
+ diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.INFO);
+ const resource = this.createResource(config);
+ const enableTracing = config.enableTracing ?? DEFAULTS.ENABLE_TRACING;
+ const enableMetrics = config.enableMetrics ?? DEFAULTS.ENABLE_METRICS;
+ const enableAutoInstrumentation = config.enableAutoInstrumentation ?? DEFAULTS.ENABLE_AUTO_INSTRUMENTATION;
+ try {
+ if (enableAutoInstrumentation) {
+ await this.initializeAutoInstrumentation(config, resource);
+ } else {
+ if (enableTracing) {
+ this.initializeTracing(config, resource);
+ }
+ if (enableMetrics) {
+ this.initializeMetrics(config, resource);
+ }
+ }
+ this.isInitialized = true;
+ diag.info(
+ `Telemetry initialized successfully for ${config.appName} v${config.appVersion || "unknown"}`
+ );
+ } catch (error) {
+ diag.error("Error initializing telemetry:", error);
+ throw error;
+ }
+ }
+ /**
+ * Create OpenTelemetry resource with auto-detection
+ */
+ createResource(config) {
+ const detectedResource = detectResources({
+ detectors: [envDetector, processDetector]
+ });
+ const customAttributes = {
+ [ATTR_SERVICE_NAME]: getServiceName(config.appName),
+ [ATTR_SERVICE_VERSION]: config.appVersion || "unknown",
+ [ADK_ATTRS.SYSTEM_NAME]: ADK_SYSTEM_NAME,
+ [ADK_ATTRS.SYSTEM_VERSION]: config.appVersion || "unknown"
+ };
+ const environment = config.environment || getEnvironment();
+ if (environment) {
+ customAttributes[ADK_ATTRS.ENVIRONMENT] = environment;
+ customAttributes["deployment.environment.name"] = environment;
+ }
+ if (config.resourceAttributes) {
+ Object.assign(customAttributes, config.resourceAttributes);
+ }
+ const envAttributes = parseResourceAttributes(
+ process.env[ENV_VARS.OTEL_RESOURCE_ATTRIBUTES]
+ );
+ Object.assign(customAttributes, envAttributes);
+ const customResource = resourceFromAttributes(customAttributes);
+ return detectedResource.merge(customResource);
+ }
+ /**
+ * Initialize tracing provider
+ */
+ initializeTracing(config, resource) {
+ const traceExporter = new OTLPTraceExporter({
+ url: config.otlpEndpoint,
+ headers: config.otlpHeaders
+ });
+ const spanProcessor = new BatchSpanProcessor(traceExporter);
+ const sampler = config.samplingRatio !== void 0 ? new TraceIdRatioBasedSampler(config.samplingRatio) : void 0;
+ this.tracerProvider = new NodeTracerProvider({
+ resource,
+ sampler,
+ spanProcessors: [spanProcessor]
+ });
+ this.tracerProvider.register();
+ diag.debug("Tracing provider initialized");
+ }
+ /**
+ * Initialize metrics provider
+ */
+ initializeMetrics(config, resource) {
+ const metricsEndpoint = config.otlpEndpoint.replace(
+ "/v1/traces",
+ "/v1/metrics"
+ );
+ if (config.otlpEndpoint.includes("localhost:4318") || config.otlpEndpoint.includes("jaeger")) {
+ diag.warn(
+ "Jaeger typically only supports traces, not metrics. Consider using Prometheus or a full OTLP backend for metrics."
+ );
+ }
+ const metricExporter = new OTLPMetricExporter({
+ url: metricsEndpoint,
+ headers: config.otlpHeaders
+ });
+ const metricReader = new PeriodicExportingMetricReader({
+ exporter: metricExporter,
+ exportIntervalMillis: config.metricExportIntervalMs ?? DEFAULTS.METRIC_EXPORT_INTERVAL_MS
+ });
+ this.meterProvider = new MeterProvider({
+ resource,
+ readers: [metricReader]
+ });
+ metrics2.setGlobalMeterProvider(this.meterProvider);
+ diag.debug("Metrics provider initialized");
+ }
+ /**
+ * Initialize NodeSDK for auto-instrumentation
+ */
+ async initializeAutoInstrumentation(config, resource) {
+ const enableTracing = config.enableTracing ?? DEFAULTS.ENABLE_TRACING;
+ const enableMetrics = config.enableMetrics ?? DEFAULTS.ENABLE_METRICS;
+ const traceExporter = enableTracing ? new OTLPTraceExporter({
+ url: config.otlpEndpoint,
+ headers: config.otlpHeaders
+ }) : void 0;
+ const metricsEndpoint = config.otlpEndpoint.replace(
+ "/v1/traces",
+ "/v1/metrics"
+ );
+ const metricReader = enableMetrics ? new PeriodicExportingMetricReader({
+ exporter: new OTLPMetricExporter({
+ url: metricsEndpoint,
+ headers: config.otlpHeaders
+ }),
+ exportIntervalMillis: config.metricExportIntervalMs ?? DEFAULTS.METRIC_EXPORT_INTERVAL_MS
+ }) : void 0;
+ const sampler = config.samplingRatio !== void 0 ? new TraceIdRatioBasedSampler(config.samplingRatio) : void 0;
+ this.sdk = new NodeSDK({
+ resource,
+ traceExporter,
+ metricReader,
+ sampler,
+ instrumentations: [
+ getNodeAutoInstrumentations({
+ // Ignore incoming HTTP requests (we're usually making outgoing calls)
+ "@opentelemetry/instrumentation-http": {
+ ignoreIncomingRequestHook: () => true
+ }
+ })
+ ]
+ });
+ await this.sdk.start();
+ diag.debug("Auto-instrumentation initialized with NodeSDK");
+ }
+ /**
+ * Check if telemetry is initialized
+ */
+ get initialized() {
+ return this.isInitialized;
+ }
+ /**
+ * Get current configuration
+ */
+ getConfig() {
+ return this.config;
+ }
+ /**
+ * Shutdown telemetry with timeout
+ */
+ async shutdown(timeoutMs) {
+ const timeout = timeoutMs ?? DEFAULTS.SHUTDOWN_TIMEOUT_MS;
+ if (!this.isInitialized) {
+ diag.warn("Telemetry is not initialized or already shut down.");
+ return;
+ }
+ try {
+ const shutdownPromises = [];
+ if (this.sdk) {
+ shutdownPromises.push(this.sdk.shutdown());
+ }
+ if (this.tracerProvider) {
+ shutdownPromises.push(this.tracerProvider.shutdown());
+ }
+ if (this.meterProvider) {
+ shutdownPromises.push(this.meterProvider.shutdown());
+ }
+ const timeoutPromise = new Promise((_, reject) => {
+ setTimeout(
+ () => reject(new Error(`Telemetry shutdown timeout after ${timeout}ms`)),
+ timeout
+ );
+ });
+ await Promise.race([Promise.all(shutdownPromises), timeoutPromise]);
+ this.isInitialized = false;
+ this.sdk = null;
+ this.tracerProvider = null;
+ this.meterProvider = null;
+ diag.info("Telemetry shut down successfully");
+ } catch (error) {
+ if (error instanceof Error && error.message.includes("timeout")) {
+ diag.warn("Telemetry shutdown timed out, some data may be lost");
+ } else {
+ diag.error("Error shutting down telemetry:", error);
+ }
+ throw error;
+ }
+ }
+ /**
+ * Force flush all pending telemetry data
+ */
+ async flush(timeoutMs = 5e3) {
+ const flushPromises = [];
+ if (this.tracerProvider) {
+ flushPromises.push(this.tracerProvider.forceFlush());
+ }
+ if (this.meterProvider) {
+ flushPromises.push(this.meterProvider.forceFlush());
+ }
+ const timeoutPromise = new Promise((_, reject) => {
+ setTimeout(
+ () => reject(new Error(`Flush timeout after ${timeoutMs}ms`)),
+ timeoutMs
+ );
+ });
+ await Promise.race([Promise.all(flushPromises), timeoutPromise]);
+ }
+ };
+ var setupService = new SetupService();
+
+ // src/telemetry/tracing.ts
+ import { context, trace } from "@opentelemetry/api";
+ var TracingService = class {
+ tracer = null;
+ /**
+ * Initialize tracing with the provided tracer
+ */
+ initialize(tracerName, version) {
+ this.tracer = trace.getTracer(tracerName, version);
+ }
+ /**
+ * Get the current tracer instance
+ */
+ getTracer() {
+ if (!this.tracer) {
+ return trace.getTracer("iqai-adk", "0.0.0");
+ }
+ return this.tracer;
+ }
+ /**
+ * Check if tracing is initialized
+ */
+ get initialized() {
+ return this.tracer !== null;
+ }
+ /**
+ * Get the currently active span, or undefined if none
+ * Use this for conditional span operations
+ */
+ getActiveSpan() {
+ return trace.getActiveSpan();
+ }
+ /**
+ * Build common invocation context attributes
+ * Reduces duplication across tracing methods
+ */
+ buildInvocationContextAttrs(invocationContext) {
+ if (!invocationContext) return {};
+ return {
+ [ADK_ATTRS.SESSION_ID]: invocationContext.session.id,
+ [ADK_ATTRS.USER_ID]: invocationContext.userId || "",
+ [ADK_ATTRS.INVOCATION_ID]: invocationContext.invocationId
+ };
+ }
+ /**
+ * Set attributes on span with content capture check
+ * Helper to reduce boilerplate for optional content attributes
+ */
+ setContentAttributes(span, inputKey, inputValue, outputKey, outputValue, eventPrefix) {
+ if (!shouldCaptureContent()) return;
+ if (inputValue !== void 0) {
+ const inputStr = typeof inputValue === "string" ? inputValue : safeJsonStringify(inputValue);
+ span.setAttribute(inputKey, inputStr);
+ span.addEvent(`${eventPrefix}.input`, { "gen_ai.input": inputStr });
+ }
+ if (outputValue !== void 0) {
+ const outputStr = typeof outputValue === "string" ? outputValue : safeJsonStringify(outputValue);
+ span.setAttribute(outputKey, outputStr);
+ span.addEvent(`${eventPrefix}.output`, { "gen_ai.output": outputStr });
+ }
+ }
+ /**
+ * Trace an agent invocation
+ * Sets standard OpenTelemetry GenAI attributes for agents
+ */
+ traceAgentInvocation(agent, invocationContext, input, output) {
+ const span = this.getActiveSpan();
+ if (!span) return;
+ const agentId = `${agent.name}-${invocationContext.session.id}`;
+ const attributes = formatSpanAttributes({
+ // Standard GenAI attributes
+ [SEMCONV.GEN_AI_PROVIDER_NAME]: "iqai-adk",
+ [SEMCONV.GEN_AI_OPERATION_NAME]: OPERATIONS.INVOKE_AGENT,
+ [SEMCONV.GEN_AI_AGENT_ID]: agentId,
+ [SEMCONV.GEN_AI_AGENT_NAME]: agent.name,
+ [SEMCONV.GEN_AI_AGENT_DESCRIPTION]: agent.description || "",
+ [SEMCONV.GEN_AI_CONVERSATION_ID]: invocationContext.session.id,
+ // ADK-specific attributes
+ [ADK_ATTRS.AGENT_NAME]: agent.name,
+ [ADK_ATTRS.ENVIRONMENT]: getEnvironment() || "",
+ ...this.buildInvocationContextAttrs(invocationContext)
+ });
+ span.setAttributes(attributes);
+ this.setContentAttributes(
+ span,
+ SEMCONV.GEN_AI_INPUT_MESSAGES,
+ input,
+ SEMCONV.GEN_AI_OUTPUT_MESSAGES,
+ output,
+ "gen_ai.agent"
+ );
+ }
+ /**
+ * Trace a tool call
+ * Sets standard OpenTelemetry GenAI attributes for tool execution
+ */
+ traceToolCall(tool, args, functionResponseEvent, llmRequest, invocationContext) {
+ const span = this.getActiveSpan();
+ if (!span) return;
+ const functionResponse = functionResponseEvent.content?.parts?.[0]?.functionResponse;
+ const toolCallId = functionResponse?.id || "<not_specified>";
+ const toolResponse = functionResponse?.response || "<not_specified>";
+ const captureContent = shouldCaptureContent();
+ const argsJson = safeJsonStringify(args);
+ const responseJson = safeJsonStringify(toolResponse);
+ const attributes = formatSpanAttributes({
+ // Standard GenAI attributes
+ [SEMCONV.GEN_AI_PROVIDER_NAME]: "iqai-adk",
+ [SEMCONV.GEN_AI_OPERATION_NAME]: OPERATIONS.EXECUTE_TOOL,
+ [SEMCONV.GEN_AI_TOOL_NAME]: tool.name,
+ [SEMCONV.GEN_AI_TOOL_DESCRIPTION]: tool.description || "",
+ [SEMCONV.GEN_AI_TOOL_TYPE]: tool.constructor.name,
+ [SEMCONV.GEN_AI_TOOL_CALL_ID]: toolCallId,
+ // ADK-specific attributes
+ [ADK_ATTRS.TOOL_NAME]: tool.name,
+ [ADK_ATTRS.EVENT_ID]: functionResponseEvent.invocationId,
+ [ADK_ATTRS.ENVIRONMENT]: getEnvironment() || "",
+ ...this.buildInvocationContextAttrs(invocationContext)
+ });
+ span.setAttributes(attributes);
+ if (captureContent) {
+ span.setAttribute(SEMCONV.GEN_AI_TOOL_CALL_ARGUMENTS, argsJson);
+ span.setAttribute(SEMCONV.GEN_AI_TOOL_CALL_RESULT, responseJson);
+ span.setAttribute(ADK_ATTRS.TOOL_ARGS, argsJson);
+ span.setAttribute(ADK_ATTRS.TOOL_RESPONSE, responseJson);
+ span.addEvent("gen_ai.tool.input", { "gen_ai.tool.input": argsJson });
+ span.addEvent("gen_ai.tool.output", {
+ "gen_ai.tool.output": responseJson
+ });
+ if (llmRequest) {
+ const llmRequestData = buildLlmRequestForTrace(llmRequest, true);
+ span.setAttribute(
+ ADK_ATTRS.LLM_REQUEST,
+ safeJsonStringify(llmRequestData)
+ );
+ }
+ }
+ }
+ /**
+ * Trace an LLM call
+ * Sets standard OpenTelemetry GenAI attributes for LLM requests/responses
+ */
+ traceLlmCall(invocationContext, eventId, llmRequest, llmResponse) {
+ const span = trace.getActiveSpan();
+ if (!span) return;
+ const captureContent = shouldCaptureContent();
+ const llmRequestData = buildLlmRequestForTrace(llmRequest, captureContent);
+ const llmResponseData = buildLlmResponseForTrace(
+ llmResponse,
+ captureContent
+ );
+ const finishReason = extractFinishReason(llmResponse);
+ const provider = detectProvider(llmRequest.model || "");
+ const responseId = llmResponse.id;
+ let outputType = "text";
+ if (llmRequest.config?.responseSchema) {
+ outputType = "json";
+ }
+ const attributes = formatSpanAttributes({
+ // Standard GenAI attributes (v1.38.0)
+ [SEMCONV.GEN_AI_PROVIDER_NAME]: provider,
+ [SEMCONV.GEN_AI_OPERATION_NAME]: OPERATIONS.CHAT,
+ // Most common operation
+ [SEMCONV.GEN_AI_REQUEST_MODEL]: llmRequest.model || "",
+ // Response metadata (Recommended)
+ ...responseId && {
+ [SEMCONV.GEN_AI_RESPONSE_ID]: String(responseId)
+ },
+ // Response model same as request model in most cases
+ [SEMCONV.GEN_AI_RESPONSE_MODEL]: llmRequest.model || "",
+ // Model parameters
+ [SEMCONV.GEN_AI_REQUEST_MAX_TOKENS]: llmRequest.config?.maxOutputTokens || 0,
+ [SEMCONV.GEN_AI_REQUEST_TEMPERATURE]: llmRequest.config?.temperature || 0,
+ [SEMCONV.GEN_AI_REQUEST_TOP_P]: llmRequest.config?.topP || 0,
+ // Additional model parameters (if present)
+ ...llmRequest.config?.topK !== void 0 && {
+ [SEMCONV.GEN_AI_REQUEST_TOP_K]: llmRequest.config.topK
+ },
+ ...llmRequest.config?.frequencyPenalty !== void 0 && {
+ [SEMCONV.GEN_AI_REQUEST_FREQUENCY_PENALTY]: llmRequest.config.frequencyPenalty
+ },
+ ...llmRequest.config?.presencePenalty !== void 0 && {
+ [SEMCONV.GEN_AI_REQUEST_PRESENCE_PENALTY]: llmRequest.config.presencePenalty
+ },
+ ...llmRequest.config?.stopSequences !== void 0 && {
+ [SEMCONV.GEN_AI_REQUEST_STOP_SEQUENCES]: llmRequest.config.stopSequences
+ },
+ ...llmRequest.config?.candidateCount !== void 0 && llmRequest.config.candidateCount !== 1 && {
+ [SEMCONV.GEN_AI_REQUEST_CHOICE_COUNT]: llmRequest.config.candidateCount
+ },
+ // Output type
+ [SEMCONV.GEN_AI_OUTPUT_TYPE]: outputType,
+ // Response metadata
+ ...finishReason && {
+ [SEMCONV.GEN_AI_RESPONSE_FINISH_REASONS]: [finishReason]
+ },
+ // Token usage (input and output only; total computed client-side)
+ ...llmResponse.usageMetadata && {
+ [SEMCONV.GEN_AI_USAGE_INPUT_TOKENS]: llmResponse.usageMetadata.promptTokenCount || 0,
+ [SEMCONV.GEN_AI_USAGE_OUTPUT_TOKENS]: llmResponse.usageMetadata.candidatesTokenCount || 0
+ },
+ // ADK-specific attributes
+ [ADK_ATTRS.LLM_MODEL]: llmRequest.model || "",
+ [ADK_ATTRS.SESSION_ID]: invocationContext.session.id,
+ [ADK_ATTRS.USER_ID]: invocationContext.userId || "",
+ [ADK_ATTRS.INVOCATION_ID]: invocationContext.invocationId,
+ [ADK_ATTRS.EVENT_ID]: eventId,
+ [ADK_ATTRS.ENVIRONMENT]: getEnvironment() || "",
+ // Content attributes (only if capture is enabled) - ADK namespace for backward compat
+ [ADK_ATTRS.LLM_REQUEST]: captureContent ? safeJsonStringify(llmRequestData) : "{}",
+ [ADK_ATTRS.LLM_RESPONSE]: captureContent ? safeJsonStringify(llmResponseData) : "{}"
+ });
+ span.setAttributes(attributes);
+ if (captureContent) {
+ if (llmRequest.config?.systemInstruction) {
+ span.setAttribute(
+ SEMCONV.GEN_AI_SYSTEM_INSTRUCTIONS,
+ safeJsonStringify(llmRequest.config.systemInstruction)
+ );
+ }
+ span.setAttribute(
+ SEMCONV.GEN_AI_INPUT_MESSAGES,
+ safeJsonStringify(llmRequestData.contents || [])
+ );
+ span.setAttribute(
+ SEMCONV.GEN_AI_OUTPUT_MESSAGES,
+ safeJsonStringify(llmResponse.content || llmResponse.text || "")
+ );
+ if (llmRequest.config?.tools) {
+ span.setAttribute(
+ SEMCONV.GEN_AI_TOOL_DEFINITIONS,
+ safeJsonStringify(llmRequest.config.tools)
+ );
+ }
+ span.addEvent(SEMCONV.GEN_AI_CONTENT_PROMPT, {
+ "gen_ai.prompt": safeJsonStringify(llmRequestData.contents || [])
+ });
+ span.addEvent(SEMCONV.GEN_AI_CONTENT_COMPLETION, {
+ "gen_ai.completion": safeJsonStringify(
+ llmResponse.content || llmResponse.text || ""
+ )
+ });
+ }
+ }
+ /**
+ * Wrap an async generator with tracing
+ * Automatically handles span lifecycle, context propagation, and exceptions
+ */
+ async *traceAsyncGenerator(spanName, generator, attributes) {
+ const tracer2 = this.getTracer();
+ const span = tracer2.startSpan(spanName);
+ const spanContext = trace.setSpan(context.active(), span);
+ if (attributes) {
+ span.setAttributes(formatSpanAttributes(attributes));
+ }
+ try {
+ while (true) {
+ const result = await context.with(spanContext, () => generator.next());
+ if (result.done) {
+ span.setStatus({ code: SPAN_STATUS.OK });
+ break;
+ }
+ yield result.value;
+ }
+ } catch (error) {
+ span.recordException(error);
+ span.setStatus({
+ code: SPAN_STATUS.ERROR,
+ message: error.message
+ });
+ throw error;
+ } finally {
+ span.end();
+ }
+ }
+ /**
+ * Create a new span and execute a function within it
+ */
+ async withSpan(spanName, fn, attributes) {
+ const tracer2 = this.getTracer();
+ return tracer2.startActiveSpan(spanName, async (span) => {
+ try {
+ if (attributes) {
+ span.setAttributes(formatSpanAttributes(attributes));
+ }
+ const result = await fn(span);
+ span.setStatus({ code: SPAN_STATUS.OK });
+ return result;
+ } catch (error) {
+ span.recordException(error);
+ span.setStatus({
+ code: SPAN_STATUS.ERROR,
+ message: error.message
+ });
+ throw error;
+ } finally {
+ span.end();
+ }
+ });
+ }
+ /**
+ * Set attributes on the currently active span
+ */
+ setActiveSpanAttributes(attributes) {
+ const span = this.getActiveSpan();
+ if (span) {
+ span.setAttributes(formatSpanAttributes(attributes));
+ }
+ }
+ /**
+ * Record an exception on the currently active span
+ */
+ recordException(error, attributes) {
+ const span = this.getActiveSpan();
+ if (span) {
+ span.recordException(error);
+ if (attributes) {
+ span.setAttributes(formatSpanAttributes(attributes));
+ }
+ }
+ }
+ /**
+ * Add an event to the currently active span
+ */
+ addEvent(name, attributes) {
+ const span = this.getActiveSpan();
+ if (span && attributes) {
+ span.addEvent(name, formatSpanAttributes(attributes));
+ }
+ }
+ /**
+ * Trace a callback execution
+ * Wraps callback execution in a span with appropriate attributes
+ */
+ traceCallback(callbackType, callbackName, callbackIndex, invocationContext) {
+ const span = this.getActiveSpan();
+ if (!span) return;
+ const attributes = formatSpanAttributes({
+ [SEMCONV.GEN_AI_PROVIDER_NAME]: "iqai-adk",
+ [SEMCONV.GEN_AI_OPERATION_NAME]: OPERATIONS.EXECUTE_CALLBACK,
+ [ADK_ATTRS.CALLBACK_TYPE]: callbackType,
+ [ADK_ATTRS.CALLBACK_NAME]: callbackName || "<anonymous>",
+ [ADK_ATTRS.CALLBACK_INDEX]: callbackIndex,
+ [ADK_ATTRS.ENVIRONMENT]: getEnvironment() || "",
+ ...this.buildInvocationContextAttrs(invocationContext)
+ });
+ span.setAttributes(attributes);
+ }
+ /**
+ * Trace an agent transfer
+ * Records transfer events and attributes for multi-agent workflows
+ */
+ traceAgentTransfer(sourceAgent, targetAgent, transferChain, transferDepth, reason, invocationContext) {
+ const span = this.getActiveSpan();
+ if (!span) return;
+ const attributes = formatSpanAttributes({
+ [SEMCONV.GEN_AI_PROVIDER_NAME]: "iqai-adk",
+ [SEMCONV.GEN_AI_OPERATION_NAME]: OPERATIONS.TRANSFER_AGENT,
+ [ADK_ATTRS.TRANSFER_SOURCE_AGENT]: sourceAgent,
+ [ADK_ATTRS.TRANSFER_TARGET_AGENT]: targetAgent,
+ [ADK_ATTRS.TRANSFER_CHAIN]: JSON.stringify(transferChain),
+ [ADK_ATTRS.TRANSFER_DEPTH]: transferDepth,
+ [ADK_ATTRS.TRANSFER_ROOT_AGENT]: transferChain[0] || sourceAgent,
+ [ADK_ATTRS.ENVIRONMENT]: getEnvironment() || "",
+ ...reason && { [ADK_ATTRS.TRANSFER_REASON]: reason },
+ ...this.buildInvocationContextAttrs(invocationContext)
+ });
+ span.setAttributes(attributes);
+ span.addEvent("agent_transfer_initiated", {
+ target_agent: targetAgent,
+ transfer_depth: transferDepth
+ });
+ }
+ /**
+ * Record enhanced tool execution attributes
+ * Extends the basic tool tracing with execution order and parallel tracking
+ */
+ traceEnhancedTool(executionOrder, parallelGroup, retryCount, isCallbackOverride) {
+ this.setActiveSpanAttributes({
+ ...executionOrder !== void 0 && {
+ [ADK_ATTRS.TOOL_EXECUTION_ORDER]: executionOrder
+ },
+ ...parallelGroup !== void 0 && {
+ [ADK_ATTRS.TOOL_PARALLEL_GROUP]: parallelGroup
+ },
+ ...retryCount !== void 0 && {
+ [ADK_ATTRS.TOOL_RETRY_COUNT]: retryCount
+ },
+ ...isCallbackOverride !== void 0 && {
+ [ADK_ATTRS.TOOL_IS_CALLBACK_OVERRIDE]: isCallbackOverride
+ }
+ });
+ }
+ /**
+ * Record enhanced LLM attributes
+ * Extends basic LLM tracing with streaming metrics
+ */
+ traceEnhancedLlm(streaming, timeToFirstTokenMs, chunkCount, cachedTokens, contextWindowUsedPct) {
+ this.setActiveSpanAttributes({
+ ...streaming !== void 0 && {
+ [ADK_ATTRS.LLM_STREAMING]: streaming
+ },
+ ...timeToFirstTokenMs !== void 0 && {
+ [ADK_ATTRS.LLM_TIME_TO_FIRST_TOKEN]: timeToFirstTokenMs
+ },
+ ...chunkCount !== void 0 && {
+ [ADK_ATTRS.LLM_CHUNK_COUNT]: chunkCount
+ },
+ ...cachedTokens !== void 0 && {
+ [ADK_ATTRS.LLM_CACHED_TOKENS]: cachedTokens
+ },
+ ...contextWindowUsedPct !== void 0 && {
+ [ADK_ATTRS.LLM_CONTEXT_WINDOW_USED_PCT]: contextWindowUsedPct
+ }
+ });
+ }
+ /**
+ * Record standardized error information
+ * Provides consistent error handling across all operations
+ */
+ traceError(error, category, recoverable = false, retryRecommended = false) {
+ const span = this.getActiveSpan();
+ if (!span) return;
+ span.recordException(error);
+ span.setStatus({ code: SPAN_STATUS.ERROR, message: error.message });
+ span.setAttributes(
+ formatSpanAttributes({
+ [SEMCONV.ERROR_TYPE]: error.constructor.name,
+ "error.message": error.message,
+ "error.stack": error.stack?.substring(0, 1e3) || "",
+ [ADK_ATTRS.ERROR_CATEGORY]: category,
+ [ADK_ATTRS.ERROR_RECOVERABLE]: recoverable,
+ [ADK_ATTRS.ERROR_RETRY_RECOMMENDED]: retryRecommended
+ })
+ );
+ }
+ /**
+ * Trace memory operations
+ * Records memory search, insert, and delete operations
+ */
+ traceMemoryOperation(operation, sessionId, query, resultsCount, invocationContext) {
+ const span = this.getActiveSpan();
+ if (!span) return;
+ span.setAttributes(
+ formatSpanAttributes({
+ [SEMCONV.GEN_AI_PROVIDER_NAME]: "iqai-adk",
+ [SEMCONV.GEN_AI_OPERATION_NAME]: operation === "search" ? OPERATIONS.SEARCH_MEMORY : OPERATIONS.INSERT_MEMORY,
+ [ADK_ATTRS.SESSION_ID]: sessionId,
+ [ADK_ATTRS.ENVIRONMENT]: getEnvironment() || "",
+ ...query && { [ADK_ATTRS.MEMORY_QUERY]: query },
+ ...resultsCount !== void 0 && {
+ [ADK_ATTRS.MEMORY_RESULTS_COUNT]: resultsCount
+ },
+ ...invocationContext && {
+ [ADK_ATTRS.USER_ID]: invocationContext.userId || "",
+ [ADK_ATTRS.INVOCATION_ID]: invocationContext.invocationId
+ }
+ })
+ );
+ }
+ /**
+ * Trace plugin hook execution
+ * Records plugin lifecycle hooks
+ */
+ tracePluginHook(pluginName, hook, agentName, invocationContext) {
+ const span = this.getActiveSpan();
+ if (!span) return;
+ span.setAttributes(
+ formatSpanAttributes({
+ [SEMCONV.GEN_AI_PROVIDER_NAME]: "iqai-adk",
+ [SEMCONV.GEN_AI_OPERATION_NAME]: OPERATIONS.EXECUTE_PLUGIN,
+ [ADK_ATTRS.PLUGIN_NAME]: pluginName,
+ [ADK_ATTRS.PLUGIN_HOOK]: hook,
+ [ADK_ATTRS.ENVIRONMENT]: getEnvironment() || "",
+ ...agentName && { [ADK_ATTRS.AGENT_NAME]: agentName },
+ ...this.buildInvocationContextAttrs(invocationContext)
+ })
+ );
+ }
+ };
+ var tracingService = new TracingService();
+
+ // src/telemetry/index.ts
+ var TelemetryService = class {
+ /**
+ * Initialize telemetry system
+ */
+ async initialize(config) {
+ await setupService.initialize(config);
+ const appVersion = config.appVersion || "0.0.0";
+ tracingService.initialize("iqai-adk", appVersion);
+ if (config.enableMetrics !== false) {
+ metricsService.initialize("iqai-adk", appVersion);
+ }
+ }
+ /**
+ * Check if telemetry is initialized
+ */
+ get initialized() {
+ return setupService.initialized;
+ }
+ /**
+ * Get current configuration
+ */
+ getConfig() {
+ return setupService.getConfig();
+ }
+ /**
+ * Get tracer instance
+ */
+ getTracer() {
+ return tracingService.getTracer();
+ }
+ /**
+ * Get currently active span
+ */
+ getActiveSpan() {
+ return tracingService.getActiveSpan();
+ }
+ /**
+ * Check if content capture is enabled
+ */
+ shouldCaptureContent() {
+ return shouldCaptureContent();
+ }
+ // --- Tracing Methods ---
+ /**
+ * Trace an agent invocation
+ */
+ traceAgentInvocation(agent, invocationContext, input, output) {
+ tracingService.traceAgentInvocation(
+ agent,
+ invocationContext,
+ input,
+ output
+ );
+ }
+ /**
+ * Trace a tool call
+ */
+ traceToolCall(tool, args, functionResponseEvent, llmRequest, invocationContext) {
+ tracingService.traceToolCall(
+ tool,
+ args,
+ functionResponseEvent,
+ llmRequest,
+ invocationContext
+ );
+ }
+ /**
+ * Trace an LLM call
+ */
+ traceLlmCall(invocationContext, eventId, llmRequest, llmResponse) {
+ tracingService.traceLlmCall(
+ invocationContext,
+ eventId,
+ llmRequest,
+ llmResponse
+ );
+ }
+ /**
+ * Wrap an async generator with tracing
+ */
+ traceAsyncGenerator(spanName, generator, attributes) {
+ return tracingService.traceAsyncGenerator(spanName, generator, attributes);
+ }
+ /**
+ * Execute a function within a traced span
+ */
+ async withSpan(spanName, fn, attributes) {
+ return tracingService.withSpan(spanName, fn, attributes);
+ }
+ /**
+ * Set attributes on the active span
+ */
+ setActiveSpanAttributes(attributes) {
+ tracingService.setActiveSpanAttributes(attributes);
+ }
+ /**
+ * Record an exception on the active span
+ */
+ recordException(error, attributes) {
+ tracingService.recordException(error, attributes);
+ }
+ /**
+ * Add an event to the active span
+ */
+ addEvent(name, attributes) {
+ tracingService.addEvent(name, attributes);
+ }
+ /**
+ * Trace a callback execution
+ */
+ traceCallback(callbackType, callbackName, callbackIndex, invocationContext) {
+ tracingService.traceCallback(
+ callbackType,
+ callbackName,
+ callbackIndex,
+ invocationContext
+ );
+ }
+ /**
+ * Trace an agent transfer
+ */
+ traceAgentTransfer(sourceAgent, targetAgent, transferChain, transferDepth, reason, invocationContext) {
+ tracingService.traceAgentTransfer(
+ sourceAgent,
+ targetAgent,
+ transferChain,
+ transferDepth,
+ reason,
+ invocationContext
+ );
+ }
+ /**
+ * Record enhanced tool execution attributes
+ */
+ traceEnhancedTool(executionOrder, parallelGroup, retryCount, isCallbackOverride) {
+ tracingService.traceEnhancedTool(
+ executionOrder,
+ parallelGroup,
+ retryCount,
+ isCallbackOverride
+ );
+ }
+ /**
+ * Record enhanced LLM attributes
+ */
+ traceEnhancedLlm(streaming, timeToFirstTokenMs, chunkCount, cachedTokens, contextWindowUsedPct) {
+ tracingService.traceEnhancedLlm(
+ streaming,
+ timeToFirstTokenMs,
+ chunkCount,
+ cachedTokens,
+ contextWindowUsedPct
+ );
+ }
+ /**
+ * Record standardized error information
+ */
+ traceError(error, category, recoverable = false, retryRecommended = false) {
+ tracingService.traceError(error, category, recoverable, retryRecommended);
+ }
+ /**
+ * Trace memory operations
+ */
+ traceMemoryOperation(operation, sessionId, query, resultsCount, invocationContext) {
+ tracingService.traceMemoryOperation(
+ operation,
+ sessionId,
+ query,
+ resultsCount,
+ invocationContext
+ );
+ }
+ /**
+ * Trace plugin hook execution
+ */
+ tracePluginHook(pluginName, hook, agentName, invocationContext) {
+ tracingService.tracePluginHook(
+ pluginName,
+ hook,
+ agentName,
+ invocationContext
+ );
  }
+ // --- Metrics Methods ---
  /**
- * Initialize telemetry with the provided configuration
2868
+ * Record an agent invocation
1289
2869
  */
1290
- initialize(config) {
1291
- if (this.isInitialized) {
1292
- diag.warn("Telemetry is already initialized. Skipping.");
1293
- return;
1294
- }
1295
- this.config = config;
1296
- diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.INFO);
1297
- const resource = resourceFromAttributes({
1298
- [ATTR_SERVICE_NAME]: config.appName,
1299
- [ATTR_SERVICE_VERSION]: config.appVersion
1300
- });
1301
- const traceExporter = new OTLPTraceExporter({
1302
- url: config.otlpEndpoint,
1303
- headers: config.otlpHeaders
1304
- });
1305
- this.sdk = new NodeSDK({
1306
- resource,
1307
- traceExporter,
1308
- instrumentations: [
1309
- getNodeAutoInstrumentations({
1310
- // Follow Python ADK approach: let all HTTP instrumentation through.
1311
- // This provides transparency and aligns with standard OpenTelemetry behavior.
1312
- // High-level LLM tracing is provided through dedicated ADK spans.
1313
- "@opentelemetry/instrumentation-http": {
1314
- ignoreIncomingRequestHook: (req) => {
1315
- return true;
1316
- }
1317
- }
1318
- })
1319
- ]
1320
- });
1321
- try {
1322
- this.sdk.start();
1323
- this.isInitialized = true;
1324
- this.tracer = trace.getTracer("iqai-adk", config.appVersion || "0.1.0");
1325
- diag.debug("OpenTelemetry SDK started successfully.");
1326
- } catch (error) {
1327
- diag.error("Error starting OpenTelemetry SDK:", error);
1328
- throw error;
1329
- }
2870
+ recordAgentInvocation(dimensions) {
2871
+ metricsService.recordAgentInvocation(dimensions);
1330
2872
  }
1331
2873
  /**
1332
- * Get the tracer instance
2874
+ * Record agent duration
1333
2875
  */
1334
- getTracer() {
1335
- return this.tracer;
2876
+ recordAgentDuration(durationMs, dimensions) {
2877
+ metricsService.recordAgentDuration(durationMs, dimensions);
1336
2878
  }
1337
2879
  /**
1338
- * Check if telemetry is initialized
2880
+ * Record a tool execution
1339
2881
  */
1340
- get initialized() {
1341
- return this.isInitialized;
2882
+ recordToolExecution(dimensions) {
2883
+ metricsService.recordToolExecution(dimensions);
1342
2884
  }
1343
2885
  /**
1344
- * Get the current configuration
2886
+ * Record tool duration
1345
2887
  */
1346
- getConfig() {
1347
- return this.config;
2888
+ recordToolDuration(durationMs, dimensions) {
2889
+ metricsService.recordToolDuration(durationMs, dimensions);
1348
2890
  }
1349
2891
  /**
1350
- * Shutdown telemetry with optional timeout
2892
+ * Record an LLM call
1351
2893
  */
1352
- async shutdown(timeoutMs = 5e3) {
1353
- if (!this.sdk || !this.isInitialized) {
1354
- diag.warn("Telemetry is not initialized or already shut down.");
1355
- return;
1356
- }
1357
- try {
1358
- const timeoutPromise = new Promise((_, reject) => {
1359
- setTimeout(
1360
- () => reject(
1361
- new Error(`Telemetry shutdown timeout after ${timeoutMs}ms`)
1362
- ),
1363
- timeoutMs
1364
- );
1365
- });
1366
- await Promise.race([this.sdk.shutdown(), timeoutPromise]);
1367
- this.isInitialized = false;
1368
- diag.debug("Telemetry terminated successfully.");
1369
- } catch (error) {
1370
- if (error instanceof Error && error.message.includes("timeout")) {
1371
- diag.warn("Telemetry shutdown timed out, some traces may be lost");
1372
- } else {
1373
- diag.error("Error terminating telemetry:", error);
1374
- }
1375
- throw error;
1376
- } finally {
1377
- this.sdk = null;
1378
- }
2894
+ recordLlmCall(dimensions) {
2895
+ metricsService.recordLlmCall(dimensions);
1379
2896
  }
1380
2897
  /**
1381
- * Traces a tool call by adding detailed attributes to the current span.
2898
+ * Record LLM duration
1382
2899
  */
1383
- traceToolCall(tool, args, functionResponseEvent, llmRequest, invocationContext) {
1384
- const span = trace.getActiveSpan();
1385
- if (!span) return;
1386
- let toolCallId = "<not specified>";
1387
- let toolResponse = "<not specified>";
1388
- if (functionResponseEvent.content?.parts && functionResponseEvent.content.parts.length > 0) {
1389
- const functionResponse = functionResponseEvent.content.parts[0].functionResponse;
1390
- if (functionResponse) {
1391
- toolCallId = functionResponse.id || "<not specified>";
1392
- toolResponse = JSON.stringify(functionResponse.response) || "<not specified>";
1393
- }
1394
- }
1395
- span.setAttributes({
1396
- "gen_ai.system": "iqai-adk",
1397
- "gen_ai.operation.name": "execute_tool",
1398
- "gen_ai.tool.name": tool.name,
1399
- "gen_ai.tool.description": tool.description,
1400
- "gen_ai.tool.call.id": toolCallId,
1401
- // Session and user tracking
1402
- ...invocationContext && {
1403
- "session.id": invocationContext.session.id,
1404
- "user.id": invocationContext.userId
1405
- },
1406
- // Environment
1407
- ...process.env.NODE_ENV && {
1408
- "deployment.environment.name": process.env.NODE_ENV
1409
- },
1410
- // ADK-specific attributes (matching Python namespace pattern)
1411
- "adk.tool_call_args": this._safeJsonStringify(args),
1412
- "adk.event_id": functionResponseEvent.invocationId,
1413
- "adk.tool_response": this._safeJsonStringify(toolResponse),
1414
- "adk.llm_request": llmRequest ? this._safeJsonStringify(this._buildLlmRequestForTrace(llmRequest)) : "{}",
1415
- "adk.llm_response": "{}"
1416
- });
2900
+ recordLlmDuration(durationMs, dimensions) {
2901
+ metricsService.recordLlmDuration(durationMs, dimensions);
1417
2902
  }
1418
2903
  /**
1419
- * Traces a call to the LLM by adding detailed attributes to the current span.
2904
+ * Record LLM token usage
1420
2905
  */
1421
- traceLlmCall(invocationContext, eventId, llmRequest, llmResponse) {
1422
- const span = trace.getActiveSpan();
1423
- if (!span) return;
1424
- const requestData = this._buildLlmRequestForTrace(llmRequest);
1425
- span.setAttributes({
1426
- // Standard OpenTelemetry attributes (following Python pattern)
1427
- "gen_ai.system": "iqai-adk",
1428
- "gen_ai.request.model": llmRequest.model,
1429
- // Session and user tracking (maps to Langfuse sessionId, userId)
1430
- "session.id": invocationContext.session.id,
1431
- "user.id": invocationContext.userId,
1432
- // Environment (maps to Langfuse environment)
1433
- ...process.env.NODE_ENV && {
1434
- "deployment.environment.name": process.env.NODE_ENV
1435
- },
1436
- // Model parameters (maps to Langfuse modelParameters)
1437
- "gen_ai.request.max_tokens": llmRequest.config.maxOutputTokens || 0,
1438
- "gen_ai.request.temperature": llmRequest.config.temperature || 0,
1439
- "gen_ai.request.top_p": llmRequest.config.topP || 0,
1440
- "adk.system_name": "iqai-adk",
1441
- "adk.request_model": llmRequest.model,
1442
- // ADK-specific attributes (matching Python namespace pattern)
1443
- "adk.invocation_id": invocationContext.invocationId,
1444
- "adk.session_id": invocationContext.session.id,
1445
- "adk.event_id": eventId,
1446
- "adk.llm_request": this._safeJsonStringify(requestData),
1447
- "adk.llm_response": this._safeJsonStringify(llmResponse)
1448
- });
1449
- if (llmResponse.usageMetadata) {
1450
- span.setAttributes({
1451
- "gen_ai.usage.input_tokens": llmResponse.usageMetadata.promptTokenCount || 0,
1452
- "gen_ai.usage.output_tokens": llmResponse.usageMetadata.candidatesTokenCount || 0
1453
- });
1454
- }
1455
- span.addEvent("gen_ai.content.prompt", {
1456
- "gen_ai.prompt": this._safeJsonStringify(requestData.messages)
1457
- });
1458
- span.addEvent("gen_ai.content.completion", {
1459
- "gen_ai.completion": this._safeJsonStringify(llmResponse.content || "")
1460
- });
2906
+ recordLlmTokens(inputTokens, outputTokens, dimensions) {
2907
+ metricsService.recordLlmTokens(inputTokens, outputTokens, dimensions);
1461
2908
  }
1462
2909
  /**
1463
- * Wraps an async generator with tracing
2910
+ * Record an error
1464
2911
  */
1465
- async *traceAsyncGenerator(spanName, generator) {
1466
- const span = this.tracer.startSpan(spanName);
1467
- const spanContext = trace.setSpan(context.active(), span);
1468
- try {
1469
- while (true) {
1470
- const result = await context.with(spanContext, () => generator.next());
1471
- if (result.done) {
1472
- break;
1473
- }
1474
- yield result.value;
1475
- }
1476
- } catch (error) {
1477
- span.recordException(error);
1478
- span.setStatus({ code: 2, message: error.message });
1479
- throw error;
1480
- } finally {
1481
- span.end();
1482
- }
1483
- }
1484
- // --- Private Helper Methods ---
1485
- _safeJsonStringify(obj) {
1486
- try {
1487
- return JSON.stringify(obj);
1488
- } catch (e) {
1489
- return "<not serializable>";
1490
- }
2912
+ recordError(errorType, context4) {
2913
+ metricsService.recordError(errorType, context4);
1491
2914
  }
2915
+ // --- Lifecycle Methods ---
1492
2916
  /**
1493
- * Builds a dictionary representation of the LLM request for tracing.
1494
- *
1495
- * This function prepares a dictionary representation of the LlmRequest
1496
- * object, suitable for inclusion in a trace. It excludes fields that cannot
1497
- * be serialized (e.g., function pointers) and avoids sending bytes data.
1498
- */
1499
- _buildLlmRequestForTrace(llmRequest) {
1500
- const result = {
1501
- model: llmRequest.model,
1502
- config: this._excludeNonSerializableFromConfig(llmRequest.config),
1503
- contents: []
1504
- };
1505
- for (const content of llmRequest.contents || []) {
1506
- const parts = content.parts?.filter((part) => !part.inlineData) || [];
1507
- result.contents.push({
1508
- role: content.role,
1509
- parts
1510
- });
1511
- }
1512
- return result;
2917
+ * Flush all pending telemetry data
2918
+ */
2919
+ async flush(timeoutMs = 5e3) {
2920
+ await setupService.flush(timeoutMs);
1513
2921
  }
1514
2922
  /**
1515
- * Excludes non-serializable fields from config, similar to Python's exclude logic
2923
+ * Shutdown telemetry system
1516
2924
  */
1517
- _excludeNonSerializableFromConfig(config) {
1518
- const result = {};
1519
- for (const [key, value] of Object.entries(config)) {
1520
- if (key === "response_schema") {
1521
- continue;
1522
- }
1523
- if (value === void 0 || value === null) {
1524
- continue;
1525
- }
1526
- if (key === "functions" && Array.isArray(value)) {
1527
- result[key] = value.map((func) => ({
1528
- name: func.name,
1529
- description: func.description,
1530
- parameters: func.parameters
1531
- // Exclude actual function pointers
1532
- }));
1533
- } else {
1534
- result[key] = value;
1535
- }
1536
- }
1537
- return result;
2925
+ async shutdown(timeoutMs) {
2926
+ await setupService.shutdown(timeoutMs);
1538
2927
  }
1539
2928
  };
1540
2929
  var telemetryService = new TelemetryService();
1541
2930
  var tracer = telemetryService.getTracer();
1542
2931
  var initializeTelemetry = (config) => telemetryService.initialize(config);
1543
2932
  var shutdownTelemetry = (timeoutMs) => telemetryService.shutdown(timeoutMs);
2933
+ var traceAgentInvocation = (agent, invocationContext, input, output) => telemetryService.traceAgentInvocation(
2934
+ agent,
2935
+ invocationContext,
2936
+ input,
2937
+ output
2938
+ );
1544
2939
  var traceToolCall = (tool, args, functionResponseEvent, llmRequest, invocationContext) => telemetryService.traceToolCall(
1545
2940
  tool,
1546
2941
  args,
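
The tracing helpers above (traceMemoryOperation, tracePluginHook) assemble span attributes with conditional spreads, so optional fields are only attached when they carry a value. A minimal standalone sketch of the idiom, with illustrative attribute keys (the real keys live in the ADK_ATTRS constants):

  // Spreading `false` or `undefined` into an object literal is a no-op,
  // so `...(cond && { key: value })` attaches the key only when cond is truthy.
  function memoryAttrs(sessionId: string, query?: string, resultsCount?: number) {
    return {
      "adk.session_id": sessionId,
      ...(query && { "adk.memory.query": query }),
      ...(resultsCount !== undefined && { "adk.memory.results_count": resultsCount }),
    };
  }

  memoryAttrs("s-1");               // -> { "adk.session_id": "s-1" }
  memoryAttrs("s-1", "refunds", 3); // -> all three keys present

The `resultsCount !== undefined` guard matters here: a plain truthiness check would silently drop a legitimate result count of 0.
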
@@ -1554,6 +2949,9 @@ var traceLlmCall = (invocationContext, eventId, llmRequest, llmResponse) => tele
1554
2949
  llmRequest,
1555
2950
  llmResponse
1556
2951
  );
2952
+ var recordAgentInvocation = (dimensions) => telemetryService.recordAgentInvocation(dimensions);
2953
+ var recordToolExecution = (dimensions) => telemetryService.recordToolExecution(dimensions);
2954
+ var recordLlmCall = (dimensions) => telemetryService.recordLlmCall(dimensions);
1557
2955
 
1558
2956
  // src/models/base-llm.ts
1559
2957
  var BaseLlm = class {
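
TelemetryService above is a thin facade: setup and lifecycle go to setupService, span work to tracingService, and counters/histograms to metricsService (skipped when enableMetrics is false). A usage sketch against the exported singleton; appName, otlpEndpoint, and otlpHeaders are inferred from the removed 0.5.9 implementation shown later in this diff, so treat the exact config shape as an assumption:

  import { telemetryService } from "@iqai/adk";

  await telemetryService.initialize({
    appName: "my-agent-app",  // assumed field, carried over from the old NodeSDK setup
    appVersion: "1.2.3",      // read directly by initialize() above
    enableMetrics: true,      // metricsService.initialize runs unless this is false
  });

  // Metrics facade: the dimensions mirror the recordToolExecution calls further down.
  telemetryService.recordToolExecution({
    toolName: "search",
    agentName: "root-agent",
    environment: process.env.NODE_ENV,
    status: "success",
  });

  await telemetryService.flush();    // default timeout is 5000 ms
  await telemetryService.shutdown();
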
@@ -1589,66 +2987,119 @@ var BaseLlm = class {
1589
2987
  */
1590
2988
  async *generateContentAsync(llmRequest, stream) {
1591
2989
  this.maybeAppendUserContent(llmRequest);
1592
- yield* tracer.startActiveSpan(
1593
- `llm_generate [${this.model}]`,
1594
- async function* (span) {
1595
- try {
1596
- span.setAttributes({
1597
- "gen_ai.system.name": "iqai-adk",
1598
- "gen_ai.operation.name": "generate",
1599
- "gen_ai.request.model": this.model,
1600
- "gen_ai.request.max_tokens": llmRequest.config?.maxOutputTokens || 0,
1601
- "gen_ai.request.temperature": llmRequest.config?.temperature || 0,
1602
- "gen_ai.request.top_p": llmRequest.config?.topP || 0,
1603
- "adk.llm_request": JSON.stringify({
1604
- model: this.model,
1605
- contents: llmRequest.contents?.map((content) => ({
1606
- role: content.role,
1607
- parts: content.parts?.map((part) => ({
1608
- text: typeof part.text === "string" ? part.text.substring(0, 200) + (part.text.length > 200 ? "..." : "") : "[non_text_content]"
1609
- }))
1610
- })),
1611
- config: llmRequest.config
1612
- }),
1613
- "adk.streaming": stream || false
1614
- });
1615
- let responseCount = 0;
1616
- let totalTokens = 0;
1617
- for await (const response of this.generateContentAsyncImpl(
1618
- llmRequest,
1619
- stream
1620
- )) {
1621
- responseCount++;
1622
- if (response.usage) {
1623
- totalTokens += response.usage.total_tokens || 0;
1624
- span.setAttributes({
1625
- "gen_ai.response.finish_reasons": [
1626
- response.finish_reason || "unknown"
1627
- ],
1628
- "gen_ai.usage.input_tokens": response.usage.prompt_tokens || 0,
1629
- "gen_ai.usage.output_tokens": response.usage.completion_tokens || 0,
1630
- "gen_ai.usage.total_tokens": response.usage.total_tokens || 0
1631
- });
1632
- }
1633
- yield response;
2990
+ const span = trace2.getActiveSpan();
2991
+ const captureContent = shouldCaptureContent();
2992
+ if (span && captureContent) {
2993
+ span.setAttributes({
2994
+ "gen_ai.system": "iqai-adk",
2995
+ "gen_ai.operation.name": stream ? "stream" : "generate",
2996
+ "gen_ai.request.model": this.model,
2997
+ "gen_ai.request.max_tokens": llmRequest.config?.maxOutputTokens || 0,
2998
+ "gen_ai.request.temperature": llmRequest.config?.temperature || 0,
2999
+ "gen_ai.request.top_p": llmRequest.config?.topP || 0,
3000
+ "adk.llm.streaming": stream || false
3001
+ });
3002
+ if (llmRequest.contents) {
3003
+ span.addEvent("gen_ai.content.prompt", {
3004
+ "gen_ai.prompt": JSON.stringify(llmRequest.contents)
3005
+ });
3006
+ }
3007
+ }
3008
+ let responseCount = 0;
3009
+ let totalTokens = 0;
3010
+ let inputTokens = 0;
3011
+ let outputTokens = 0;
3012
+ let firstTokenTime;
3013
+ const startTime = Date.now();
3014
+ let chunkCount = 0;
3015
+ let accumulatedContent = null;
3016
+ try {
3017
+ for await (const response of this.generateContentAsyncImpl(
3018
+ llmRequest,
3019
+ stream
3020
+ )) {
3021
+ responseCount++;
3022
+ if (stream && !firstTokenTime) {
3023
+ firstTokenTime = Date.now();
3024
+ const timeToFirstToken = firstTokenTime - startTime;
3025
+ telemetryService.traceEnhancedLlm(
3026
+ true,
3027
+ // streaming
3028
+ timeToFirstToken
3029
+ );
3030
+ if (span) {
3031
+ span.addEvent("gen_ai.stream.first_token", {
3032
+ time_to_first_token_ms: timeToFirstToken
3033
+ });
3034
+ }
3035
+ }
3036
+ if (stream) {
3037
+ chunkCount++;
3038
+ if (span) {
3039
+ span.addEvent("gen_ai.stream.chunk", {
3040
+ chunk_index: chunkCount,
3041
+ timestamp: Date.now()
3042
+ });
3043
+ }
3044
+ }
3045
+ if (response.content) {
3046
+ accumulatedContent = response.content;
3047
+ } else if (response.text) {
3048
+ if (!accumulatedContent) {
3049
+ accumulatedContent = { role: "model", parts: [{ text: "" }] };
3050
+ }
3051
+ if (accumulatedContent.parts?.[0]) {
3052
+ accumulatedContent.parts[0].text = (accumulatedContent.parts[0].text || "") + response.text;
1634
3053
  }
3054
+ }
3055
+ if (response.usageMetadata && span) {
3056
+ inputTokens = response.usageMetadata.promptTokenCount || inputTokens;
3057
+ outputTokens = response.usageMetadata.candidatesTokenCount || outputTokens;
3058
+ totalTokens = response.usageMetadata.totalTokenCount || totalTokens;
1635
3059
  span.setAttributes({
1636
- "adk.response_count": responseCount,
1637
- "adk.total_tokens": totalTokens
3060
+ "gen_ai.usage.input_tokens": inputTokens,
3061
+ "gen_ai.usage.output_tokens": outputTokens,
3062
+ "gen_ai.usage.total_tokens": totalTokens
1638
3063
  });
1639
- } catch (error) {
1640
- span.recordException(error);
1641
- span.setStatus({ code: 2, message: error.message });
1642
- this.logger.error("\u274C ADK LLM Error:", {
1643
- model: this.model,
1644
- error: error.message
3064
+ }
3065
+ if (response.finishReason && span) {
3066
+ span.setAttribute("gen_ai.response.finish_reasons", [
3067
+ response.finishReason
3068
+ ]);
3069
+ }
3070
+ yield response;
3071
+ }
3072
+ if (span) {
3073
+ span.setAttributes({
3074
+ "adk.response_count": responseCount
3075
+ });
3076
+ if (captureContent && accumulatedContent) {
3077
+ span.addEvent("gen_ai.content.completion", {
3078
+ "gen_ai.completion": JSON.stringify(accumulatedContent)
1645
3079
  });
1646
- throw error;
1647
- } finally {
1648
- span.end();
1649
3080
  }
1650
- }.bind(this)
1651
- );
3081
+ }
3082
+ if (stream && chunkCount > 0) {
3083
+ telemetryService.traceEnhancedLlm(
3084
+ true,
3085
+ firstTokenTime ? firstTokenTime - startTime : void 0,
3086
+ chunkCount
3087
+ );
3088
+ }
3089
+ } catch (error) {
3090
+ telemetryService.traceError(
3091
+ error,
3092
+ "model_error",
3093
+ false,
3094
+ true
3095
+ // retry may be recommended for transient model errors
3096
+ );
3097
+ this.logger.error("\u274C ADK LLM Error:", {
3098
+ model: this.model,
3099
+ error: error.message
3100
+ });
3101
+ throw error;
3102
+ }
1652
3103
  }
1653
3104
  /**
1654
3105
  * Appends a user content, so that model can continue to output.
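
generateContentAsync now instruments streaming from inside the response loop: the first chunk stamps time-to-first-token (via traceEnhancedLlm and a gen_ai.stream.first_token event), each subsequent chunk adds a gen_ai.stream.chunk event, and usage attributes are rewritten with `|| previous` fallbacks so a final chunk without usageMetadata cannot zero out earlier counts. The timing core, reduced to a standalone sketch (the source generator here is a stand-in for generateContentAsyncImpl):

  async function* withFirstTokenTiming<T>(source: AsyncIterable<T>): AsyncGenerator<T> {
    const startTime = Date.now();
    let firstTokenTime: number | undefined;
    let chunkCount = 0;
    for await (const chunk of source) {
      if (firstTokenTime === undefined) {
        firstTokenTime = Date.now();
        // In the ADK code this value feeds traceEnhancedLlm(true, timeToFirstTokenMs).
        console.log("time_to_first_token_ms:", firstTokenTime - startTime);
      }
      chunkCount++;
      yield chunk;
    }
    console.log("chunk_count:", chunkCount);
  }
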
@@ -6413,7 +7864,7 @@ var ToolContext = class extends CallbackContext {
6413
7864
  };
6414
7865
 
6415
7866
  // src/flows/llm-flows/functions.ts
6416
- import { context as context2, trace as trace2 } from "@opentelemetry/api";
7867
+ import { context as context2, trace as trace3 } from "@opentelemetry/api";
6417
7868
  var AF_FUNCTION_CALL_ID_PREFIX = "adk-";
6418
7869
  var REQUEST_EUC_FUNCTION_CALL_NAME = "adk_request_credential";
6419
7870
  function generateClientFunctionCallId() {
@@ -6492,6 +7943,7 @@ async function handleFunctionCallsAsync(invocationContext, functionCallEvent, to
6492
7943
  return null;
6493
7944
  }
6494
7945
  const functionResponseEvents = [];
7946
+ let executionOrder = 0;
6495
7947
  for (const functionCall of functionCalls) {
6496
7948
  if (filters && functionCall.id && !filters.has(functionCall.id)) {
6497
7949
  continue;
@@ -6503,8 +7955,11 @@ async function handleFunctionCallsAsync(invocationContext, functionCallEvent, to
6503
7955
  );
6504
7956
  const functionArgs = functionCall.args || {};
6505
7957
  const tracer2 = telemetryService.getTracer();
6506
- const span = tracer2.startSpan(`execute_tool ${tool.name}`);
6507
- const spanContext = trace2.setSpan(context2.active(), span);
7958
+ const span = tracer2.startSpan(`execute_tool [${tool.name}]`);
7959
+ const spanContext = trace3.setSpan(context2.active(), span);
7960
+ const toolStartTime = Date.now();
7961
+ let toolStatus = "success";
7962
+ let wasOverridden = false;
6508
7963
  try {
6509
7964
  const functionResponse = await context2.with(spanContext, async () => {
6510
7965
  const argsForTool = { ...functionArgs };
@@ -6512,6 +7967,7 @@ async function handleFunctionCallsAsync(invocationContext, functionCallEvent, to
6512
7967
  for (const cb of agent.canonicalBeforeToolCallbacks) {
6513
7968
  const maybeOverride = await cb(tool, argsForTool, toolContext);
6514
7969
  if (maybeOverride !== null && maybeOverride !== void 0) {
7970
+ wasOverridden = true;
6515
7971
  const overriddenEvent = buildResponseEvent(
6516
7972
  tool,
6517
7973
  maybeOverride,
@@ -6521,7 +7977,19 @@ async function handleFunctionCallsAsync(invocationContext, functionCallEvent, to
6521
7977
  telemetryService.traceToolCall(
6522
7978
  tool,
6523
7979
  argsForTool,
6524
- overriddenEvent
7980
+ overriddenEvent,
7981
+ void 0,
7982
+ // llmRequest
7983
+ invocationContext
7984
+ );
7985
+ telemetryService.traceEnhancedTool(
7986
+ executionOrder,
7987
+ void 0,
7988
+ // parallelGroup not used in sequential execution
7989
+ 0,
7990
+ // no retry
7991
+ true
7992
+ // callback override
6525
7993
  );
6526
7994
  return { result: maybeOverride, event: overriddenEvent };
6527
7995
  }
@@ -6554,7 +8022,18 @@ async function handleFunctionCallsAsync(invocationContext, functionCallEvent, to
6554
8022
  telemetryService.traceToolCall(
6555
8023
  tool,
6556
8024
  argsForTool,
6557
- functionResponseEvent
8025
+ functionResponseEvent,
8026
+ void 0,
8027
+ // llmRequest
8028
+ invocationContext
8029
+ );
8030
+ telemetryService.traceEnhancedTool(
8031
+ executionOrder,
8032
+ void 0,
8033
+ // parallelGroup not used in sequential execution
8034
+ 0,
8035
+ // no retry
8036
+ wasOverridden
6558
8037
  );
6559
8038
  return { result, event: functionResponseEvent };
6560
8039
  });
@@ -6563,11 +8042,41 @@ async function handleFunctionCallsAsync(invocationContext, functionCallEvent, to
6563
8042
  }
6564
8043
  functionResponseEvents.push(functionResponse.event);
6565
8044
  span.setStatus({ code: 1 });
8045
+ telemetryService.recordToolExecution({
8046
+ toolName: tool.name,
8047
+ agentName: agent.name,
8048
+ environment: process.env.NODE_ENV,
8049
+ status: toolStatus
8050
+ });
8051
+ executionOrder++;
6566
8052
  } catch (error) {
8053
+ toolStatus = "error";
8054
+ telemetryService.traceError(
8055
+ error,
8056
+ "tool_error",
8057
+ false,
8058
+ // not recoverable by default
8059
+ false
8060
+ // retry not recommended by default
8061
+ );
6567
8062
  span.recordException(error);
6568
8063
  span.setStatus({ code: 2, message: error.message });
8064
+ telemetryService.recordToolExecution({
8065
+ toolName: tool.name,
8066
+ agentName: agent.name,
8067
+ environment: process.env.NODE_ENV,
8068
+ status: toolStatus
8069
+ });
8070
+ telemetryService.recordError("tool", tool.name);
6569
8071
  throw error;
6570
8072
  } finally {
8073
+ const toolDuration = Date.now() - toolStartTime;
8074
+ telemetryService.recordToolDuration(toolDuration, {
8075
+ toolName: tool.name,
8076
+ agentName: agent.name,
8077
+ environment: process.env.NODE_ENV,
8078
+ status: toolStatus
8079
+ });
6571
8080
  span.end();
6572
8081
  }
6573
8082
  }
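
Each tool call now runs under a status-plus-finally metrics pattern: toolStatus starts as "success", the catch block flips it to "error" (firing the error-path execution counter and recordError before rethrowing), and the finally block records duration exactly once with whichever status applies; executionOrder increments only on success, preserving each call's position in the batch. The shape, as a generic sketch:

  async function withDurationMetric<T>(
    record: (durationMs: number, status: "success" | "error") => void,
    run: () => Promise<T>,
  ): Promise<T> {
    const start = Date.now();
    let status: "success" | "error" = "success";
    try {
      return await run();
    } catch (err) {
      status = "error"; // error-path counters fire here, before the rethrow
      throw err;
    } finally {
      record(Date.now() - start, status); // runs on both paths, exactly once
    }
  }

Note that recordToolExecution fires on both paths above, so the per-status execution counts stay consistent with the duration histogram's status label.
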
@@ -6716,21 +8225,29 @@ var BaseLlmFlow = class {
6716
8225
  author: invocationContext.agent.name,
6717
8226
  branch: invocationContext.branch
6718
8227
  });
6719
- for await (const llmResponse of this._callLlmAsync(
6720
- invocationContext,
6721
- llmRequest,
6722
- modelResponseEvent
6723
- )) {
6724
- for await (const event of this._postprocessAsync(
8228
+ const llm = this.__getLlm(invocationContext);
8229
+ const isStreaming = invocationContext.runConfig.streamingMode === "sse" /* SSE */;
8230
+ const generator = async function* () {
8231
+ for await (const llmResponse of this._callLlmAsync(
6725
8232
  invocationContext,
6726
8233
  llmRequest,
6727
- llmResponse,
6728
8234
  modelResponseEvent
6729
8235
  )) {
6730
- modelResponseEvent.id = Event.newId();
6731
- yield event;
8236
+ for await (const event of this._postprocessAsync(
8237
+ invocationContext,
8238
+ llmRequest,
8239
+ llmResponse,
8240
+ modelResponseEvent
8241
+ )) {
8242
+ modelResponseEvent.id = Event.newId();
8243
+ yield event;
8244
+ }
6732
8245
  }
6733
- }
8246
+ }.bind(this)();
8247
+ yield* telemetryService.traceAsyncGenerator(
8248
+ isStreaming ? `llm_stream [${llm.model}]` : `llm_generate [${llm.model}]`,
8249
+ generator
8250
+ );
6734
8251
  }
6735
8252
  async *_preprocessAsync(invocationContext, llmRequest) {
6736
8253
  const agent = invocationContext.agent;
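
_runOneStepAsync now wraps its entire response/postprocess loop in telemetryService.traceAsyncGenerator, naming the span llm_stream [model] or llm_generate [model] based on runConfig.streamingMode. The wrapper (its removed 0.5.9 form appears earlier in this diff, and the new one is used the same way) pulls each value inside context.with so child spans parent correctly, and ends the span when iteration finishes. A reduced sketch:

  import { context, trace, type Tracer } from "@opentelemetry/api";

  async function* traced<T>(tracer: Tracer, name: string, gen: AsyncGenerator<T>): AsyncGenerator<T> {
    const span = tracer.startSpan(name);
    const ctx = trace.setSpan(context.active(), span);
    try {
      while (true) {
        // Each pull runs with the span active, so nested spans attach to it.
        const result = await context.with(ctx, () => gen.next());
        if (result.done) break;
        yield result.value;
      }
    } catch (err) {
      span.recordException(err as Error);
      throw err;
    } finally {
      span.end(); // also runs if the consumer abandons iteration early
    }
  }
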
@@ -6976,37 +8493,71 @@ var BaseLlmFlow = class {
6976
8493
  Streaming: isStreaming ? "Yes" : "No"
6977
8494
  });
6978
8495
  let responseCount = 0;
6979
- for await (const llmResponse of llm.generateContentAsync(
6980
- llmRequest,
6981
- isStreaming
6982
- )) {
6983
- responseCount++;
6984
- traceLlmCall(
6985
- invocationContext,
6986
- modelResponseEvent.id,
8496
+ const llmStartTime = Date.now();
8497
+ let llmStatus = "success";
8498
+ try {
8499
+ for await (const llmResponse of llm.generateContentAsync(
6987
8500
  llmRequest,
6988
- llmResponse
6989
- );
6990
- const tokenCount = llmResponse.usageMetadata?.totalTokenCount || "unknown";
6991
- const functionCalls = llmResponse.content?.parts?.filter((part) => part.functionCall) || [];
6992
- const functionCallsDisplay = LogFormatter.formatFunctionCalls(functionCalls);
6993
- const responsePreview = LogFormatter.formatResponsePreview(llmResponse);
6994
- this.logger.debugStructured("\u{1F4E5} LLM Response", {
6995
- Model: llm.model,
6996
- "Token Count": tokenCount,
6997
- "Function Calls": functionCallsDisplay,
6998
- "Response Preview": responsePreview,
6999
- "Finish Reason": llmResponse.finishReason || "unknown",
7000
- "Response #": responseCount,
7001
- Partial: llmResponse.partial ? "Yes" : "No",
7002
- Error: llmResponse.errorCode || "none"
8501
+ isStreaming
8502
+ )) {
8503
+ responseCount++;
8504
+ traceLlmCall(
8505
+ invocationContext,
8506
+ modelResponseEvent.id,
8507
+ llmRequest,
8508
+ llmResponse
8509
+ );
8510
+ if (llmResponse.usageMetadata) {
8511
+ telemetryService.recordLlmTokens(
8512
+ llmResponse.usageMetadata.promptTokenCount || 0,
8513
+ llmResponse.usageMetadata.candidatesTokenCount || 0,
8514
+ {
8515
+ model: llm.model,
8516
+ agentName: invocationContext.agent.name,
8517
+ environment: process.env.NODE_ENV,
8518
+ status: llmStatus
8519
+ }
8520
+ );
8521
+ }
8522
+ telemetryService.recordLlmCall({
8523
+ model: llm.model,
8524
+ agentName: invocationContext.agent.name,
8525
+ environment: process.env.NODE_ENV,
8526
+ status: llmStatus
8527
+ });
8528
+ const tokenCount = llmResponse.usageMetadata?.totalTokenCount || "unknown";
8529
+ const functionCalls = llmResponse.content?.parts?.filter((part) => part.functionCall) || [];
8530
+ const functionCallsDisplay = LogFormatter.formatFunctionCalls(functionCalls);
8531
+ const responsePreview = LogFormatter.formatResponsePreview(llmResponse);
8532
+ this.logger.debugStructured("\u{1F4E5} LLM Response", {
8533
+ Model: llm.model,
8534
+ "Token Count": tokenCount,
8535
+ "Function Calls": functionCallsDisplay,
8536
+ "Response Preview": responsePreview,
8537
+ "Finish Reason": llmResponse.finishReason || "unknown",
8538
+ "Response #": responseCount,
8539
+ Partial: llmResponse.partial ? "Yes" : "No",
8540
+ Error: llmResponse.errorCode || "none"
8541
+ });
8542
+ const alteredLlmResponse = await this._handleAfterModelCallback(
8543
+ invocationContext,
8544
+ llmResponse,
8545
+ modelResponseEvent
8546
+ );
8547
+ yield alteredLlmResponse || llmResponse;
8548
+ }
8549
+ } catch (error) {
8550
+ llmStatus = "error";
8551
+ telemetryService.recordError("llm", llm.model);
8552
+ throw error;
8553
+ } finally {
8554
+ const llmDuration = Date.now() - llmStartTime;
8555
+ telemetryService.recordLlmDuration(llmDuration, {
8556
+ model: llm.model,
8557
+ agentName: invocationContext.agent.name,
8558
+ environment: process.env.NODE_ENV,
8559
+ status: llmStatus
7003
8560
  });
7004
- const alteredLlmResponse = await this._handleAfterModelCallback(
7005
- invocationContext,
7006
- llmResponse,
7007
- modelResponseEvent
7008
- );
7009
- yield alteredLlmResponse || llmResponse;
7010
8561
  }
7011
8562
  }
7012
8563
  async _handleBeforeModelCallback(invocationContext, llmRequest, modelResponseEvent) {
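
Every LLM metric in _callLlmAsync shares one dimension set — model, agentName, environment, status — so call counts, token totals, and the duration histogram join on identical labels. A sketch of the sequence for one response (values are illustrative):

  import { telemetryService } from "@iqai/adk";

  const llmStartTime = Date.now();
  const dims = {
    model: "gpt-4.1",                  // llm.model in the flow code
    agentName: "root-agent",           // invocationContext.agent.name
    environment: process.env.NODE_ENV, // may be undefined outside managed setups
    status: "success" as const,
  };

  telemetryService.recordLlmTokens(1200, 350, dims); // input tokens, output tokens
  telemetryService.recordLlmCall(dims);
  // ...and once, in the finally block:
  telemetryService.recordLlmDuration(Date.now() - llmStartTime, dims);

One wrinkle worth knowing: the status field is read per call while the stream is still running, so responses recorded before a late stream failure are labeled "success" even though the final duration metric carries "error".
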
@@ -7303,6 +8854,7 @@ var requestProcessor = new AuthLlmRequestProcessor();
7303
8854
 
7304
8855
  // src/flows/llm-flows/basic.ts
7305
8856
  init_logger();
8857
+ import * as z2 from "zod";
7306
8858
  var BasicLlmRequestProcessor = class extends BaseLlmRequestProcessor {
7307
8859
  async *runAsync(invocationContext, llmRequest) {
7308
8860
  const agent = invocationContext.agent;
@@ -7321,7 +8873,13 @@ var BasicLlmRequestProcessor = class extends BaseLlmRequestProcessor {
7321
8873
  const hasTools = (await agent.canonicalTools?.(invocationContext))?.length > 0;
7322
8874
  const hasTransfers = !!("subAgents" in agent && agent.subAgents && agent.subAgents.length > 0 && !(agent.disallowTransferToParent && agent.disallowTransferToPeers));
7323
8875
  if (!hasTools && !hasTransfers) {
7324
- llmRequest.setOutputSchema(agent.outputSchema);
8876
+ try {
8877
+ const jsonSchema2 = z2.toJSONSchema(agent.outputSchema);
8878
+ const { $schema, ...cleanSchema } = jsonSchema2;
8879
+ llmRequest.setOutputSchema(cleanSchema);
8880
+ } catch {
8881
+ llmRequest.setOutputSchema(agent.outputSchema);
8882
+ }
7325
8883
  } else {
7326
8884
  (() => {
7327
8885
  try {
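
When the agent declares an output schema and has no tools or transfers, the basic processor now converts the Zod schema to JSON Schema up front (z.toJSONSchema is the Zod v4 API), strips the $schema marker key, and falls back to passing the raw Zod object through if conversion throws; the instructions processor further down applies the same conversion when the schema must be enforced via prompt instead. A standalone sketch:

  import * as z from "zod";

  const outputSchema = z.object({
    answer: z.string(),
    confidence: z.number().min(0).max(1),
  });

  const raw = z.toJSONSchema(outputSchema); // throws on Zod versions without this API
  const { $schema, ...cleanSchema } = raw;  // drop the "$schema" URI key
  // cleanSchema is a plain JSON Schema object, roughly:
  // { type: "object", properties: { answer: ..., confidence: ... }, required: [...] }
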
@@ -7329,7 +8887,7 @@ var BasicLlmRequestProcessor = class extends BaseLlmRequestProcessor {
7329
8887
  logger2.debug(
7330
8888
  `Skipping request-level output schema for agent ${agent.name} because tools/transfers are present. Schema will be validated during response processing.`
7331
8889
  );
7332
- } catch (e) {
8890
+ } catch {
7333
8891
  }
7334
8892
  })();
7335
8893
  }
@@ -8408,7 +9966,7 @@ var IdentityLlmRequestProcessor = class extends BaseLlmRequestProcessor {
8408
9966
  var requestProcessor5 = new IdentityLlmRequestProcessor();
8409
9967
 
8410
9968
  // src/flows/llm-flows/instructions.ts
8411
- import z2 from "zod";
9969
+ import z3 from "zod";
8412
9970
 
8413
9971
  // src/utils/instructions-utils.ts
8414
9972
  async function injectSessionState(template, readonlyContext) {
@@ -8596,7 +10154,7 @@ var InstructionsLlmRequestProcessor = class extends BaseLlmRequestProcessor {
8596
10154
  }
8597
10155
  if (agent.outputSchema) {
8598
10156
  try {
8599
- const raw = z2.toJSONSchema(agent.outputSchema);
10157
+ const raw = z3.toJSONSchema(agent.outputSchema);
8600
10158
  const { $schema, ...json } = raw || {};
8601
10159
  llmRequest.appendInstructions([
8602
10160
  "You must respond with application/json that validates against this JSON Schema (do NOT wrap the output in markdown or code fences):",
@@ -9260,22 +10818,88 @@ var BaseAgent = class {
9260
10818
  */
9261
10819
  async *runAsyncInternal(parentContext) {
9262
10820
  const ctx = this.createInvocationContext(parentContext);
9263
- const beforeEvent = await this.handleBeforeAgentCallback(ctx);
9264
- if (beforeEvent) {
9265
- yield beforeEvent;
9266
- }
9267
- if (ctx.endInvocation) {
9268
- return;
9269
- }
9270
- for await (const event of this.runAsyncImpl(ctx)) {
9271
- yield event;
9272
- }
9273
- if (ctx.endInvocation) {
9274
- return;
9275
- }
9276
- const afterEvent = await this.handleAfterAgentCallback(ctx);
9277
- if (afterEvent) {
9278
- yield afterEvent;
10821
+ const startTime = Date.now();
10822
+ let status = "success";
10823
+ try {
10824
+ if (!ctx.transferContext) {
10825
+ ctx.transferContext = {
10826
+ transferChain: [this.name],
10827
+ transferDepth: 0,
10828
+ rootAgentName: this.name
10829
+ };
10830
+ } else {
10831
+ const previousAgentName = ctx.transferContext.transferChain[ctx.transferContext.transferChain.length - 1];
10832
+ telemetryService.addEvent("agent_transfer_received", {
10833
+ source_agent: previousAgentName,
10834
+ transfer_chain: JSON.stringify(ctx.transferContext.transferChain),
10835
+ transfer_depth: ctx.transferContext.transferDepth
10836
+ });
10837
+ ctx.transferContext = {
10838
+ ...ctx.transferContext,
10839
+ transferChain: [...ctx.transferContext.transferChain, this.name],
10840
+ transferDepth: ctx.transferContext.transferDepth + 1
10841
+ };
10842
+ }
10843
+ const lastUserEvent = ctx.session.events.slice().reverse().find((e) => e.author === "user");
10844
+ const input = extractTextFromContent(lastUserEvent?.content);
10845
+ telemetryService.traceAgentInvocation(
10846
+ { name: this.name, description: this.description },
10847
+ ctx,
10848
+ input
10849
+ );
10850
+ if (ctx.transferContext.transferDepth > 0) {
10851
+ telemetryService.setActiveSpanAttributes({
10852
+ "adk.agent.depth": ctx.transferContext.transferDepth,
10853
+ "adk.transfer.chain": JSON.stringify(
10854
+ ctx.transferContext.transferChain
10855
+ ),
10856
+ "adk.transfer.depth": ctx.transferContext.transferDepth,
10857
+ "adk.transfer.root_agent": ctx.transferContext.rootAgentName
10858
+ });
10859
+ }
10860
+ telemetryService.recordAgentInvocation({
10861
+ agentName: this.name,
10862
+ environment: process.env.NODE_ENV,
10863
+ status: "success"
10864
+ });
10865
+ const beforeEvent = await this.handleBeforeAgentCallback(ctx);
10866
+ if (beforeEvent) {
10867
+ yield beforeEvent;
10868
+ }
10869
+ if (ctx.endInvocation) {
10870
+ return;
10871
+ }
10872
+ for await (const event of this.runAsyncImpl(ctx)) {
10873
+ yield event;
10874
+ }
10875
+ if (ctx.endInvocation) {
10876
+ return;
10877
+ }
10878
+ const afterEvent = await this.handleAfterAgentCallback(ctx);
10879
+ if (afterEvent) {
10880
+ yield afterEvent;
10881
+ }
10882
+ const lastAgentEvent = ctx.session.events.slice().reverse().find((e) => e.author === this.name);
10883
+ const output = extractTextFromContent(lastAgentEvent?.content);
10884
+ if (output) {
10885
+ telemetryService.setActiveSpanAttributes({
10886
+ ["gen_ai.output.messages"]: output
10887
+ });
10888
+ telemetryService.addEvent("gen_ai.agent.output", {
10889
+ "gen_ai.output": output
10890
+ });
10891
+ }
10892
+ } catch (error) {
10893
+ status = "error";
10894
+ telemetryService.recordError("agent", this.name);
10895
+ throw error;
10896
+ } finally {
10897
+ const durationMs = Date.now() - startTime;
10898
+ telemetryService.recordAgentDuration(durationMs, {
10899
+ agentName: this.name,
10900
+ environment: process.env.NODE_ENV,
10901
+ status
10902
+ });
9279
10903
  }
9280
10904
  }
9281
10905
  /**
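
runAsyncInternal now threads a transferContext through multi-agent runs: the root agent seeds it, and each transfer target appends its own name and bumps the depth before stamping adk.transfer.* attributes on its span. The structure and the update step, written out (field names come from the code above; agent names are illustrative):

  interface TransferContext {
    transferChain: string[]; // e.g. ["root-agent", "research-agent"]
    transferDepth: number;   // 0 at the root, +1 per hop
    rootAgentName: string;   // fixed at the first agent in the chain
  }

  // Seeded at the root:
  let tc: TransferContext = {
    transferChain: ["root-agent"],
    transferDepth: 0,
    rootAgentName: "root-agent",
  };

  // Extended on transfer — a copy, not a mutation:
  tc = {
    ...tc,
    transferChain: [...tc.transferChain, "research-agent"],
    transferDepth: tc.transferDepth + 1,
  };

Because the update copies the chain rather than pushing onto it, each transferred-to agent extends its parent's chain without mutating what sibling branches of the invocation see.
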
@@ -9404,11 +11028,29 @@ var BaseAgent = class {
9404
11028
  callbackContext
9405
11029
  });
9406
11030
  if (!beforeAgentCallbackContent && this.canonicalBeforeAgentCallbacks.length > 0) {
9407
- for (const callback of this.canonicalBeforeAgentCallbacks) {
9408
- let result = callback(callbackContext);
9409
- if (result instanceof Promise) {
9410
- result = await result;
9411
- }
11031
+ for (let i = 0; i < this.canonicalBeforeAgentCallbacks.length; i++) {
11032
+ const callback = this.canonicalBeforeAgentCallbacks[i];
11033
+ const result = await telemetryService.withSpan(
11034
+ `callback [before_agent] ${this.name}`,
11035
+ async (_span) => {
11036
+ telemetryService.traceCallback(
11037
+ "before_agent",
11038
+ callback.name,
11039
+ i,
11040
+ ctx
11041
+ );
11042
+ let callbackResult = callback(callbackContext);
11043
+ if (callbackResult instanceof Promise) {
11044
+ callbackResult = await callbackResult;
11045
+ }
11046
+ if (callbackResult) {
11047
+ telemetryService.setActiveSpanAttributes({
11048
+ "adk.callback.override_returned": true
11049
+ });
11050
+ }
11051
+ return callbackResult;
11052
+ }
11053
+ );
9412
11054
  if (result) {
9413
11055
  beforeAgentCallbackContent = result;
9414
11056
  break;
@@ -9449,11 +11091,29 @@ var BaseAgent = class {
9449
11091
  callbackContext
9450
11092
  });
9451
11093
  if (!afterAgentCallbackContent && this.canonicalAfterAgentCallbacks.length > 0) {
9452
- for (const callback of this.canonicalAfterAgentCallbacks) {
9453
- let result = callback(callbackContext);
9454
- if (result instanceof Promise) {
9455
- result = await result;
9456
- }
11094
+ for (let i = 0; i < this.canonicalAfterAgentCallbacks.length; i++) {
11095
+ const callback = this.canonicalAfterAgentCallbacks[i];
11096
+ const result = await telemetryService.withSpan(
11097
+ `callback [after_agent] ${this.name}`,
11098
+ async (_span) => {
11099
+ telemetryService.traceCallback(
11100
+ "after_agent",
11101
+ callback.name,
11102
+ i,
11103
+ invocationContext
11104
+ );
11105
+ let callbackResult = callback(callbackContext);
11106
+ if (callbackResult instanceof Promise) {
11107
+ callbackResult = await callbackResult;
11108
+ }
11109
+ if (callbackResult) {
11110
+ telemetryService.setActiveSpanAttributes({
11111
+ "adk.callback.override_returned": true
11112
+ });
11113
+ }
11114
+ return callbackResult;
11115
+ }
11116
+ );
9457
11117
  if (result) {
9458
11118
  afterAgentCallbackContent = result;
9459
11119
  break;
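
Agent lifecycle callbacks are now traced individually: each before/after callback runs inside telemetryService.withSpan, traceCallback stamps the callback type, name, and index on that span, and adk.callback.override_returned marks callbacks that short-circuit the run. A reduced sketch of the loop (withSpan is used as above: start a span, run the function, end the span):

  import { telemetryService } from "@iqai/adk";

  type AgentCallback = (ctx: unknown) => unknown | Promise<unknown>;

  async function runUntilOverride(agentName: string, callbacks: AgentCallback[], ctx: unknown) {
    for (let i = 0; i < callbacks.length; i++) {
      const cb = callbacks[i];
      const result = await telemetryService.withSpan(
        `callback [before_agent] ${agentName}`,
        async () => {
          let r = cb(ctx);                       // callbacks may be sync or async,
          if (r instanceof Promise) r = await r; // hence the Promise check
          return r;
        },
      );
      if (result) return result; // first truthy result wins, as in the code above
    }
    return undefined;
  }
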
@@ -10211,8 +11871,8 @@ var InMemoryMemoryService = class {
10211
11871
  };
10212
11872
 
10213
11873
  // src/plugins/plugin-manager.ts
10214
- import { z as z3 } from "zod";
10215
- var pluginCallbackNameSchema = z3.enum([
11874
+ import { z as z4 } from "zod";
11875
+ var pluginCallbackNameSchema = z4.enum([
10216
11876
  "onUserMessageCallback",
10217
11877
  "beforeRunCallback",
10218
11878
  "afterRunCallback",
@@ -10763,7 +12423,14 @@ var Runner = class {
10763
12423
  runConfig = new RunConfig()
10764
12424
  }) {
10765
12425
  const span = tracer.startSpan("invocation");
10766
- const spanContext = trace3.setSpan(context3.active(), span);
12426
+ const spanContext = trace4.setSpan(context3.active(), span);
12427
+ const inputText = extractTextFromContent(newMessage);
12428
+ if (inputText && telemetryService.shouldCaptureContent()) {
12429
+ span.setAttribute(SEMCONV.GEN_AI_INPUT_MESSAGES, inputText);
12430
+ span.addEvent("gen_ai.invocation.input", {
12431
+ "gen_ai.input": inputText
12432
+ });
12433
+ }
10767
12434
  try {
10768
12435
  const session = await context3.with(
10769
12436
  spanContext,
@@ -10812,6 +12479,14 @@ var Runner = class {
10812
12479
  }
10813
12480
  yield event;
10814
12481
  }
12482
+ const lastAgentEvent = session.events.slice().reverse().find((e) => e.author && e.author !== "user");
12483
+ const outputText = extractTextFromContent(lastAgentEvent?.content);
12484
+ if (outputText && telemetryService.shouldCaptureContent()) {
12485
+ span.setAttribute(SEMCONV.GEN_AI_OUTPUT_MESSAGES, outputText);
12486
+ span.addEvent("gen_ai.invocation.output", {
12487
+ "gen_ai.output": outputText
12488
+ });
12489
+ }
10815
12490
  await context3.with(
10816
12491
  spanContext,
10817
12492
  () => this._runCompaction(session, invocationContext)
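
Runner now captures invocation-level I/O on the root span, gated by telemetryService.shouldCaptureContent(): user text goes on via SEMCONV.GEN_AI_INPUT_MESSAGES plus a gen_ai.invocation.input event, and (just above) the last non-user event's text via SEMCONV.GEN_AI_OUTPUT_MESSAGES, so prompts and completions stay out of traces unless capture is explicitly enabled. A sketch of the gating, using the helpers and constants this release exports; the content literal is a stand-in for the real message shape:

  import { telemetryService, extractTextFromContent, SEMCONV } from "@iqai/adk";
  import { trace } from "@opentelemetry/api";

  const newMessage = { role: "user", parts: [{ text: "What changed in 0.6.0?" }] };
  const span = trace.getActiveSpan();
  const inputText = extractTextFromContent(newMessage); // undefined-safe text extraction

  if (span && inputText && telemetryService.shouldCaptureContent()) {
    span.setAttribute(SEMCONV.GEN_AI_INPUT_MESSAGES, inputText);
    span.addEvent("gen_ai.invocation.input", { "gen_ai.input": inputText });
  }
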
@@ -12612,7 +14287,7 @@ var VertexAiEvalFacade = class _VertexAiEvalFacade {
12612
14287
  }
12613
14288
  return 3 /* NOT_EVALUATED */;
12614
14289
  }
12615
- static async _performEval(dataset, metrics) {
14290
+ static async _performEval(dataset, metrics3) {
12616
14291
  const projectId = process.env.GOOGLE_CLOUD_PROJECT;
12617
14292
  const location = process.env.GOOGLE_CLOUD_LOCATION;
12618
14293
  if (!projectId) {
@@ -14139,10 +15814,10 @@ var LangfusePlugin = class extends BasePlugin {
14139
15814
  return `${invocationId}:gen:${model || "unknown"}`;
14140
15815
  }
14141
15816
  getOrCreateTrace(ctx) {
14142
- let trace4 = this.traces.get(ctx.invocationId);
14143
- if (trace4) return trace4;
15817
+ let trace5 = this.traces.get(ctx.invocationId);
15818
+ if (trace5) return trace5;
14144
15819
  const userInput = this.toPlainText(ctx.userContent);
14145
- trace4 = this.client.trace({
15820
+ trace5 = this.client.trace({
14146
15821
  id: ctx.invocationId,
14147
15822
  name: `${ctx.agent.name}-session`,
14148
15823
  userId: ctx.userId,
@@ -14155,14 +15830,14 @@ var LangfusePlugin = class extends BasePlugin {
14155
15830
  agentType: ctx.agent.constructor.name
14156
15831
  }
14157
15832
  });
14158
- this.traces.set(ctx.invocationId, trace4);
14159
- return trace4;
15833
+ this.traces.set(ctx.invocationId, trace5);
15834
+ return trace5;
14160
15835
  }
14161
15836
  async onUserMessageCallback(params) {
14162
- const trace4 = this.getOrCreateTrace(params.invocationContext);
15837
+ const trace5 = this.getOrCreateTrace(params.invocationContext);
14163
15838
  const userInput = this.toPlainText(params.userMessage);
14164
- trace4.update({ input: userInput });
14165
- trace4.event({
15839
+ trace5.update({ input: userInput });
15840
+ trace5.event({
14166
15841
  name: "user_message",
14167
15842
  input: this.serializeContent(params.userMessage),
14168
15843
  metadata: {
@@ -14172,8 +15847,8 @@ var LangfusePlugin = class extends BasePlugin {
14172
15847
  return void 0;
14173
15848
  }
14174
15849
  async beforeRunCallback(params) {
14175
- const trace4 = this.getOrCreateTrace(params.invocationContext);
14176
- trace4.event({
15850
+ const trace5 = this.getOrCreateTrace(params.invocationContext);
15851
+ trace5.event({
14177
15852
  name: "run_start",
14178
15853
  metadata: {
14179
15854
  agentName: params.invocationContext.agent.name,
@@ -14184,14 +15859,14 @@ var LangfusePlugin = class extends BasePlugin {
14184
15859
  return void 0;
14185
15860
  }
14186
15861
  async onEventCallback(params) {
14187
- const trace4 = this.getOrCreateTrace(params.invocationContext);
15862
+ const trace5 = this.getOrCreateTrace(params.invocationContext);
14188
15863
  const agentSpan = this.agentSpans.get(
14189
15864
  this.getAgentSpanKey(
14190
15865
  params.invocationContext.invocationId,
14191
15866
  params.event.author
14192
15867
  )
14193
15868
  );
14194
- const parent = agentSpan || trace4;
15869
+ const parent = agentSpan || trace5;
14195
15870
  const eventText = this.toPlainText(params.event.content);
14196
15871
  if (params.event.content?.parts?.some((p) => p.text || p.codeExecutionResult)) {
14197
15872
  this.lastEventByInvocation.set(
@@ -14219,8 +15894,8 @@ var LangfusePlugin = class extends BasePlugin {
14219
15894
  return void 0;
14220
15895
  }
14221
15896
  async afterRunCallback(params) {
14222
- const trace4 = this.traces.get(params.invocationContext.invocationId);
14223
- if (!trace4) return;
15897
+ const trace5 = this.traces.get(params.invocationContext.invocationId);
15898
+ if (!trace5) return;
14224
15899
  const lastEvent = this.lastEventByInvocation.get(
14225
15900
  params.invocationContext.invocationId
14226
15901
  );
@@ -14242,7 +15917,7 @@ var LangfusePlugin = class extends BasePlugin {
14242
15917
  }
14243
15918
  }
14244
15919
  if (output || outputText) {
14245
- trace4.update({
15920
+ trace5.update({
14246
15921
  output: outputText || output,
14247
15922
  metadata: {
14248
15923
  outputText,
@@ -14250,7 +15925,7 @@ var LangfusePlugin = class extends BasePlugin {
14250
15925
  resultType: lastEvent ? "event" : typeof params.result
14251
15926
  }
14252
15927
  });
14253
- trace4.event({
15928
+ trace5.event({
14254
15929
  name: "run_complete",
14255
15930
  output,
14256
15931
  metadata: {
@@ -14261,7 +15936,7 @@ var LangfusePlugin = class extends BasePlugin {
14261
15936
  }
14262
15937
  const usage = this.tokenUsage.get(params.invocationContext.invocationId);
14263
15938
  if (usage) {
14264
- trace4.update({
15939
+ trace5.update({
14265
15940
  metadata: {
14266
15941
  usage: {
14267
15942
  input: usage.inputTokens,
@@ -14289,7 +15964,7 @@ var LangfusePlugin = class extends BasePlugin {
14289
15964
  }
14290
15965
  }
14291
15966
  async beforeAgentCallback(params) {
14292
- const trace4 = this.getOrCreateTrace(
15967
+ const trace5 = this.getOrCreateTrace(
14293
15968
  params.callbackContext.invocationContext
14294
15969
  );
14295
15970
  const agentSpanKey = this.getAgentSpanKey(
@@ -14306,7 +15981,7 @@ var LangfusePlugin = class extends BasePlugin {
14306
15981
  parentAgent.name
14307
15982
  )
14308
15983
  ) : void 0;
14309
- const parent = parentSpan || trace4;
15984
+ const parent = parentSpan || trace5;
14310
15985
  const span = parent.span({
14311
15986
  name: params.agent.name,
14312
15987
  input,
@@ -15609,6 +17284,8 @@ function createDatabaseSessionService(databaseUrl, options) {
15609
17284
  // src/version.ts
15610
17285
  var VERSION = "0.1.0";
15611
17286
  export {
17287
+ ADK_ATTRS,
17288
+ ADK_SYSTEM_NAME,
15612
17289
  AF_FUNCTION_CALL_ID_PREFIX,
15613
17290
  LlmAgent as Agent,
15614
17291
  AgentBuilder,
@@ -15645,7 +17322,9 @@ export {
15645
17322
  CallbackContext,
15646
17323
  CodeExecutionUtils,
15647
17324
  CodeExecutorContext,
17325
+ DEFAULTS,
15648
17326
  DatabaseSessionService,
17327
+ ENV_VARS,
15649
17328
  EnhancedAuthConfig,
15650
17329
  EvalResult,
15651
17330
  EvalStatus,
@@ -15682,6 +17361,7 @@ export {
15682
17361
  LoadMemoryTool,
15683
17362
  LocalEvalService,
15684
17363
  LoopAgent,
17364
+ METRICS,
15685
17365
  McpAbi,
15686
17366
  McpAtp,
15687
17367
  McpBamm,
@@ -15707,6 +17387,7 @@ export {
15707
17387
  models_exports as Models,
15708
17388
  OAuth2Credential,
15709
17389
  OAuth2Scheme,
17390
+ OPERATIONS,
15710
17391
  OpenAiLlm,
15711
17392
  OpenIdConnectScheme,
15712
17393
  ParallelAgent,
@@ -15720,6 +17401,7 @@ export {
15720
17401
  RougeEvaluator,
15721
17402
  RunConfig,
15722
17403
  Runner,
17404
+ SEMCONV,
15723
17405
  SafetyEvaluatorV1,
15724
17406
  SequentialAgent,
15725
17407
  sessions_exports as Sessions,
@@ -15740,6 +17422,8 @@ export {
15740
17422
  requestProcessor8 as agentTransferRequestProcessor,
15741
17423
  requestProcessor2 as basicRequestProcessor,
15742
17424
  buildFunctionDeclaration,
17425
+ buildLlmRequestForTrace,
17426
+ buildLlmResponseForTrace,
15743
17427
  requestProcessor3 as codeExecutionRequestProcessor,
15744
17428
  responseProcessor as codeExecutionResponseProcessor,
15745
17429
  requestProcessor4 as contentRequestProcessor,
@@ -15753,9 +17437,12 @@ export {
15753
17437
  createSamplingHandler,
15754
17438
  createSqliteSessionService,
15755
17439
  createTool,
17440
+ extractTextFromContent,
17441
+ formatSpanAttributes,
15756
17442
  generateAuthEvent,
15757
17443
  generateClientFunctionCallId,
15758
17444
  getArtifactUri,
17445
+ getEnvironment,
15759
17446
  getLongRunningFunctionCalls,
15760
17447
  getMcpTools,
15761
17448
  handleFunctionCallsAsync,
@@ -15777,12 +17464,18 @@ export {
15777
17464
  parseArtifactUri,
15778
17465
  pluginCallbackNameSchema,
15779
17466
  populateClientFunctionCallId,
17467
+ recordAgentInvocation,
17468
+ recordLlmCall,
17469
+ recordToolExecution,
15780
17470
  registerProviders,
15781
17471
  removeClientFunctionCallId,
15782
17472
  requestProcessor,
15783
17473
  runCompactionForSlidingWindow,
17474
+ safeJsonStringify,
17475
+ shouldCaptureContent,
15784
17476
  shutdownTelemetry,
15785
17477
  telemetryService,
17478
+ traceAgentInvocation,
15786
17479
  traceLlmCall,
15787
17480
  traceToolCall,
15788
17481
  tracer