openlit 1.10.0 → 1.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (195)
  1. package/README.md +35 -1
  2. package/dist/config.d.ts +12 -4
  3. package/dist/config.js +7 -17
  4. package/dist/config.js.map +1 -1
  5. package/dist/evals/llm/anthropic.js +10 -6
  6. package/dist/evals/llm/anthropic.js.map +1 -1
  7. package/dist/evals/llm/openai.js +9 -5
  8. package/dist/evals/llm/openai.js.map +1 -1
  9. package/dist/features/__tests__/rule-engine.test.d.ts +1 -0
  10. package/dist/features/__tests__/rule-engine.test.js +146 -0
  11. package/dist/features/__tests__/rule-engine.test.js.map +1 -0
  12. package/dist/features/base.d.ts +2 -0
  13. package/dist/features/base.js +2 -0
  14. package/dist/features/base.js.map +1 -1
  15. package/dist/features/rule-engine.d.ts +6 -0
  16. package/dist/features/rule-engine.js +60 -0
  17. package/dist/features/rule-engine.js.map +1 -0
  18. package/dist/features/vault.js +1 -1
  19. package/dist/features/vault.js.map +1 -1
  20. package/dist/helpers.d.ts +93 -1
  21. package/dist/helpers.js +270 -8
  22. package/dist/helpers.js.map +1 -1
  23. package/dist/index.d.ts +6 -5
  24. package/dist/index.js +95 -50
  25. package/dist/index.js.map +1 -1
  26. package/dist/instrumentation/__tests__/anthropic-wrapper.test.js +215 -27
  27. package/dist/instrumentation/__tests__/anthropic-wrapper.test.js.map +1 -1
  28. package/dist/instrumentation/__tests__/base-wrapper.test.js +19 -23
  29. package/dist/instrumentation/__tests__/base-wrapper.test.js.map +1 -1
  30. package/dist/instrumentation/__tests__/bedrock-trace-comparison.test.d.ts +1 -0
  31. package/dist/instrumentation/__tests__/bedrock-trace-comparison.test.js +422 -0
  32. package/dist/instrumentation/__tests__/bedrock-trace-comparison.test.js.map +1 -0
  33. package/dist/instrumentation/__tests__/chroma-trace-comparison.test.js +1 -1
  34. package/dist/instrumentation/__tests__/chroma-trace-comparison.test.js.map +1 -1
  35. package/dist/instrumentation/__tests__/cohere-wrapper.test.js +150 -25
  36. package/dist/instrumentation/__tests__/cohere-wrapper.test.js.map +1 -1
  37. package/dist/instrumentation/__tests__/google-ai-trace-comparison.test.js +152 -33
  38. package/dist/instrumentation/__tests__/google-ai-trace-comparison.test.js.map +1 -1
  39. package/dist/instrumentation/__tests__/groq-trace-comparison.test.js +391 -45
  40. package/dist/instrumentation/__tests__/groq-trace-comparison.test.js.map +1 -1
  41. package/dist/instrumentation/__tests__/huggingface-trace-comparison.test.d.ts +2 -2
  42. package/dist/instrumentation/__tests__/huggingface-trace-comparison.test.js +323 -31
  43. package/dist/instrumentation/__tests__/huggingface-trace-comparison.test.js.map +1 -1
  44. package/dist/instrumentation/__tests__/langchain-wrapper.test.d.ts +1 -0
  45. package/dist/instrumentation/__tests__/langchain-wrapper.test.js +282 -0
  46. package/dist/instrumentation/__tests__/langchain-wrapper.test.js.map +1 -0
  47. package/dist/instrumentation/__tests__/milvus-trace-comparison.test.js +1 -1
  48. package/dist/instrumentation/__tests__/milvus-trace-comparison.test.js.map +1 -1
  49. package/dist/instrumentation/__tests__/mistral-trace-comparison.test.d.ts +0 -3
  50. package/dist/instrumentation/__tests__/mistral-trace-comparison.test.js +275 -68
  51. package/dist/instrumentation/__tests__/mistral-trace-comparison.test.js.map +1 -1
  52. package/dist/instrumentation/__tests__/openai-wrapper.test.js +7 -9
  53. package/dist/instrumentation/__tests__/openai-wrapper.test.js.map +1 -1
  54. package/dist/instrumentation/__tests__/qdrant-trace-comparison.test.js +1 -1
  55. package/dist/instrumentation/__tests__/qdrant-trace-comparison.test.js.map +1 -1
  56. package/dist/instrumentation/__tests__/replicate-trace-comparison.test.d.ts +2 -1
  57. package/dist/instrumentation/__tests__/replicate-trace-comparison.test.js +209 -21
  58. package/dist/instrumentation/__tests__/replicate-trace-comparison.test.js.map +1 -1
  59. package/dist/instrumentation/__tests__/together-trace-comparison.test.js +231 -51
  60. package/dist/instrumentation/__tests__/together-trace-comparison.test.js.map +1 -1
  61. package/dist/instrumentation/__tests__/vercel-ai-trace-comparison.test.d.ts +8 -0
  62. package/dist/instrumentation/__tests__/vercel-ai-trace-comparison.test.js +446 -0
  63. package/dist/instrumentation/__tests__/vercel-ai-trace-comparison.test.js.map +1 -0
  64. package/dist/instrumentation/anthropic/index.d.ts +2 -3
  65. package/dist/instrumentation/anthropic/index.js.map +1 -1
  66. package/dist/instrumentation/anthropic/wrapper.d.ts +1 -3
  67. package/dist/instrumentation/anthropic/wrapper.js +211 -91
  68. package/dist/instrumentation/anthropic/wrapper.js.map +1 -1
  69. package/dist/instrumentation/azure-ai-inference/index.d.ts +11 -0
  70. package/dist/instrumentation/azure-ai-inference/index.js +76 -0
  71. package/dist/instrumentation/azure-ai-inference/index.js.map +1 -0
  72. package/dist/instrumentation/azure-ai-inference/wrapper.d.ts +42 -0
  73. package/dist/instrumentation/azure-ai-inference/wrapper.js +515 -0
  74. package/dist/instrumentation/azure-ai-inference/wrapper.js.map +1 -0
  75. package/dist/instrumentation/base-wrapper.d.ts +2 -1
  76. package/dist/instrumentation/base-wrapper.js +35 -23
  77. package/dist/instrumentation/base-wrapper.js.map +1 -1
  78. package/dist/instrumentation/bedrock/wrapper.d.ts +21 -3
  79. package/dist/instrumentation/bedrock/wrapper.js +318 -265
  80. package/dist/instrumentation/bedrock/wrapper.js.map +1 -1
  81. package/dist/instrumentation/chroma/wrapper.js +1 -1
  82. package/dist/instrumentation/chroma/wrapper.js.map +1 -1
  83. package/dist/instrumentation/claude-agent-sdk/index.d.ts +23 -0
  84. package/dist/instrumentation/claude-agent-sdk/index.js +83 -0
  85. package/dist/instrumentation/claude-agent-sdk/index.js.map +1 -0
  86. package/dist/instrumentation/claude-agent-sdk/wrapper.d.ts +13 -0
  87. package/dist/instrumentation/claude-agent-sdk/wrapper.js +1031 -0
  88. package/dist/instrumentation/claude-agent-sdk/wrapper.js.map +1 -0
  89. package/dist/instrumentation/cohere/index.d.ts +2 -3
  90. package/dist/instrumentation/cohere/index.js.map +1 -1
  91. package/dist/instrumentation/cohere/wrapper.d.ts +1 -1
  92. package/dist/instrumentation/cohere/wrapper.js +215 -56
  93. package/dist/instrumentation/cohere/wrapper.js.map +1 -1
  94. package/dist/instrumentation/google-adk/index.d.ts +57 -0
  95. package/dist/instrumentation/google-adk/index.js +371 -0
  96. package/dist/instrumentation/google-adk/index.js.map +1 -0
  97. package/dist/instrumentation/google-adk/utils.d.ts +45 -0
  98. package/dist/instrumentation/google-adk/utils.js +663 -0
  99. package/dist/instrumentation/google-adk/utils.js.map +1 -0
  100. package/dist/instrumentation/google-adk/wrapper.d.ts +11 -0
  101. package/dist/instrumentation/google-adk/wrapper.js +391 -0
  102. package/dist/instrumentation/google-adk/wrapper.js.map +1 -0
  103. package/dist/instrumentation/google-ai/wrapper.d.ts +7 -4
  104. package/dist/instrumentation/google-ai/wrapper.js +197 -61
  105. package/dist/instrumentation/google-ai/wrapper.js.map +1 -1
  106. package/dist/instrumentation/groq/wrapper.js +137 -65
  107. package/dist/instrumentation/groq/wrapper.js.map +1 -1
  108. package/dist/instrumentation/huggingface/wrapper.js +241 -39
  109. package/dist/instrumentation/huggingface/wrapper.js.map +1 -1
  110. package/dist/instrumentation/index.d.ts +2 -2
  111. package/dist/instrumentation/index.js +64 -6
  112. package/dist/instrumentation/index.js.map +1 -1
  113. package/dist/instrumentation/langchain/index.d.ts +0 -7
  114. package/dist/instrumentation/langchain/index.js +2 -20
  115. package/dist/instrumentation/langchain/index.js.map +1 -1
  116. package/dist/instrumentation/langchain/wrapper.d.ts +35 -0
  117. package/dist/instrumentation/langchain/wrapper.js +1098 -184
  118. package/dist/instrumentation/langchain/wrapper.js.map +1 -1
  119. package/dist/instrumentation/langgraph/index.d.ts +12 -0
  120. package/dist/instrumentation/langgraph/index.js +99 -0
  121. package/dist/instrumentation/langgraph/index.js.map +1 -0
  122. package/dist/instrumentation/langgraph/wrapper.d.ts +20 -0
  123. package/dist/instrumentation/langgraph/wrapper.js +619 -0
  124. package/dist/instrumentation/langgraph/wrapper.js.map +1 -0
  125. package/dist/instrumentation/llamaindex/index.d.ts +31 -6
  126. package/dist/instrumentation/llamaindex/index.js +180 -61
  127. package/dist/instrumentation/llamaindex/index.js.map +1 -1
  128. package/dist/instrumentation/llamaindex/wrapper.d.ts +15 -3
  129. package/dist/instrumentation/llamaindex/wrapper.js +670 -179
  130. package/dist/instrumentation/llamaindex/wrapper.js.map +1 -1
  131. package/dist/instrumentation/milvus/wrapper.js +1 -1
  132. package/dist/instrumentation/milvus/wrapper.js.map +1 -1
  133. package/dist/instrumentation/mistral/wrapper.js +154 -79
  134. package/dist/instrumentation/mistral/wrapper.js.map +1 -1
  135. package/dist/instrumentation/ollama/index.js +33 -4
  136. package/dist/instrumentation/ollama/index.js.map +1 -1
  137. package/dist/instrumentation/ollama/wrapper.d.ts +28 -2
  138. package/dist/instrumentation/ollama/wrapper.js +432 -48
  139. package/dist/instrumentation/ollama/wrapper.js.map +1 -1
  140. package/dist/instrumentation/openai/index.d.ts +2 -3
  141. package/dist/instrumentation/openai/index.js.map +1 -1
  142. package/dist/instrumentation/openai/wrapper.js +293 -194
  143. package/dist/instrumentation/openai/wrapper.js.map +1 -1
  144. package/dist/instrumentation/openai-agents/index.d.ts +20 -0
  145. package/dist/instrumentation/openai-agents/index.js +174 -0
  146. package/dist/instrumentation/openai-agents/index.js.map +1 -0
  147. package/dist/instrumentation/openai-agents/processor.d.ts +35 -0
  148. package/dist/instrumentation/openai-agents/processor.js +249 -0
  149. package/dist/instrumentation/openai-agents/processor.js.map +1 -0
  150. package/dist/instrumentation/openai-agents/utils.d.ts +20 -0
  151. package/dist/instrumentation/openai-agents/utils.js +624 -0
  152. package/dist/instrumentation/openai-agents/utils.js.map +1 -0
  153. package/dist/instrumentation/pinecone/wrapper.js +2 -2
  154. package/dist/instrumentation/pinecone/wrapper.js.map +1 -1
  155. package/dist/instrumentation/qdrant/wrapper.js +1 -1
  156. package/dist/instrumentation/qdrant/wrapper.js.map +1 -1
  157. package/dist/instrumentation/replicate/wrapper.js +103 -21
  158. package/dist/instrumentation/replicate/wrapper.js.map +1 -1
  159. package/dist/instrumentation/strands/index.d.ts +21 -0
  160. package/dist/instrumentation/strands/index.js +83 -0
  161. package/dist/instrumentation/strands/index.js.map +1 -0
  162. package/dist/instrumentation/strands/processor.d.ts +45 -0
  163. package/dist/instrumentation/strands/processor.js +545 -0
  164. package/dist/instrumentation/strands/processor.js.map +1 -0
  165. package/dist/instrumentation/strands/utils.d.ts +24 -0
  166. package/dist/instrumentation/strands/utils.js +360 -0
  167. package/dist/instrumentation/strands/utils.js.map +1 -0
  168. package/dist/instrumentation/together/wrapper.js +125 -51
  169. package/dist/instrumentation/together/wrapper.js.map +1 -1
  170. package/dist/instrumentation/vercel-ai/wrapper.d.ts +28 -2
  171. package/dist/instrumentation/vercel-ai/wrapper.js +314 -164
  172. package/dist/instrumentation/vercel-ai/wrapper.js.map +1 -1
  173. package/dist/llm/anthropic.js +10 -6
  174. package/dist/llm/anthropic.js.map +1 -1
  175. package/dist/llm/openai.js +9 -5
  176. package/dist/llm/openai.js.map +1 -1
  177. package/dist/otel/__tests__/metrics.test.js +16 -27
  178. package/dist/otel/__tests__/metrics.test.js.map +1 -1
  179. package/dist/otel/events.d.ts +11 -0
  180. package/dist/otel/events.js +74 -0
  181. package/dist/otel/events.js.map +1 -0
  182. package/dist/otel/metrics.d.ts +5 -6
  183. package/dist/otel/metrics.js +66 -48
  184. package/dist/otel/metrics.js.map +1 -1
  185. package/dist/otel/tracing.d.ts +6 -2
  186. package/dist/otel/tracing.js +71 -24
  187. package/dist/otel/tracing.js.map +1 -1
  188. package/dist/otel/utils.d.ts +11 -0
  189. package/dist/otel/utils.js +34 -0
  190. package/dist/otel/utils.js.map +1 -0
  191. package/dist/semantic-convention.d.ts +44 -5
  192. package/dist/semantic-convention.js +51 -8
  193. package/dist/semantic-convention.js.map +1 -1
  194. package/dist/types.d.ts +74 -22
  195. package/package.json +41 -9
package/dist/instrumentation/bedrock/wrapper.js
@@ -1,15 +1,46 @@
  "use strict";
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() { return m[k]; } };
+ }
+ Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || (function () {
+ var ownKeys = function(o) {
+ ownKeys = Object.getOwnPropertyNames || function (o) {
+ var ar = [];
+ for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+ return ar;
+ };
+ return ownKeys(o);
+ };
+ return function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ })();
  var __importDefault = (this && this.__importDefault) || function (mod) {
  return (mod && mod.__esModule) ? mod : { "default": mod };
  };
  Object.defineProperty(exports, "__esModule", { value: true });
  const api_1 = require("@opentelemetry/api");
  const config_1 = __importDefault(require("../../config"));
- const helpers_1 = __importDefault(require("../../helpers"));
+ const helpers_1 = __importStar(require("../../helpers"));
  const semantic_convention_1 = __importDefault(require("../../semantic-convention"));
  const base_wrapper_1 = __importDefault(require("../base-wrapper"));
- const BEDROCK_SERVER_ADDRESS = 'bedrock-runtime.amazonaws.com';
- const BEDROCK_SERVER_PORT = 443;
  function mapFinishReason(stopReason) {
  const map = {
  end_turn: 'stop',
@@ -21,22 +52,58 @@ function mapFinishReason(stopReason) {
  };
  return map[stopReason] || stopReason;
  }
- function applyInferenceConfigAttributes(span, inferenceConfig) {
- if (inferenceConfig.maxTokens !== undefined)
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_MAX_TOKENS, inferenceConfig.maxTokens);
- if (inferenceConfig.temperature !== undefined)
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_TEMPERATURE, inferenceConfig.temperature);
- if (inferenceConfig.topP !== undefined)
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_TOP_P, inferenceConfig.topP);
- if (inferenceConfig.topK !== undefined)
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_TOP_K, inferenceConfig.topK);
- if (inferenceConfig.stopSequences !== undefined)
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_STOP_SEQUENCES, inferenceConfig.stopSequences);
+ function spanCreationAttrs(operationName, requestModel) {
+ return {
+ [semantic_convention_1.default.GEN_AI_OPERATION]: operationName,
+ [semantic_convention_1.default.GEN_AI_PROVIDER_NAME_OTEL]: semantic_convention_1.default.GEN_AI_SYSTEM_AWS_BEDROCK,
+ [semantic_convention_1.default.GEN_AI_REQUEST_MODEL]: requestModel,
+ [semantic_convention_1.default.SERVER_ADDRESS]: BedrockWrapper.serverAddress,
+ [semantic_convention_1.default.SERVER_PORT]: BedrockWrapper.serverPort,
+ };
+ }
+ /**
+ * Convert Bedrock message content blocks ({text: "..."}) to the format
+ * expected by OpenLitHelper.buildInputMessages ({type: "text", text: "..."}).
+ */
+ function convertBedrockMessages(messages) {
+ return (messages || []).map((m) => {
+ const role = m.role || 'user';
+ const content = m.content;
+ if (!Array.isArray(content)) {
+ return { role, content: typeof content === 'string' ? content : '' };
+ }
+ return {
+ role,
+ content: content.map((c) => {
+ if (c.text !== undefined)
+ return { type: 'text', text: c.text };
+ if (c.toolUse) {
+ return {
+ type: 'tool_use',
+ id: c.toolUse.toolUseId || '',
+ name: c.toolUse.name || '',
+ input: c.toolUse.input || {},
+ };
+ }
+ if (c.toolResult) {
+ const rc = c.toolResult.content;
+ return {
+ type: 'tool_result',
+ tool_use_id: c.toolResult.toolUseId || '',
+ content: typeof rc === 'string' ? rc : JSON.stringify(rc || ''),
+ };
+ }
+ return c;
+ }),
+ };
+ });
  }
  class BedrockWrapper extends base_wrapper_1.default {
  static _patchSend(tracer) {
  return (originalMethod) => {
  return async function (...args) {
+ if ((0, helpers_1.isFrameworkLlmActive)())
+ return originalMethod.apply(this, args);
  const command = args[0];
  if (!command)
  return originalMethod.apply(this, args);
@@ -47,9 +114,6 @@ class BedrockWrapper extends base_wrapper_1.default {
  if (commandName === 'ConverseStreamCommand') {
  return BedrockWrapper._handleConverseStreamCommand(tracer, originalMethod, this, args);
  }
- if (commandName === 'InvokeModelCommand' || commandName === 'InvokeModelWithResponseStreamCommand') {
- return BedrockWrapper._handleInvokeModelCommand(tracer, originalMethod, this, args, commandName);
- }
  return originalMethod.apply(this, args);
  };
  };
@@ -57,107 +121,100 @@ class BedrockWrapper extends base_wrapper_1.default {
  static async _handleConverseCommand(tracer, originalMethod, instance, args) {
  const command = args[0];
  const input = command.input || {};
- const modelId = input.modelId || 'unknown';
+ const modelId = input.modelId || 'amazon.titan-text-express-v1';
  const genAIEndpoint = 'bedrock.converse';
- const span = tracer.startSpan(genAIEndpoint, { kind: api_1.SpanKind.CLIENT });
- return api_1.context.with(api_1.trace.setSpan(api_1.context.active(), span), async () => {
- const startTime = Date.now();
- let metricParams;
- try {
- const response = await originalMethod.apply(instance, args);
- const duration = (Date.now() - startTime) / 1000;
- const usage = response.usage || {};
- const promptTokens = usage.inputTokens || 0;
- const completionTokens = usage.outputTokens || 0;
- const totalTokens = usage.totalTokens || promptTokens + completionTokens;
- const cacheReadTokens = usage.cacheReadInputTokens || 0;
- const cacheWriteTokens = usage.cacheWriteInputTokens || 0;
- const finishReason = mapFinishReason(response.stopReason || 'stop');
- const pricingInfo = await config_1.default.updatePricingJson(config_1.default.pricing_json);
- const cost = helpers_1.default.getChatModelCost(modelId, pricingInfo, promptTokens, completionTokens);
- BedrockWrapper.setBaseSpanAttributes(span, {
- genAIEndpoint,
- model: modelId,
- cost,
- aiSystem: BedrockWrapper.aiSystem,
- serverAddress: BEDROCK_SERVER_ADDRESS,
- serverPort: BEDROCK_SERVER_PORT,
- });
- span.setAttribute(semantic_convention_1.default.GEN_AI_OPERATION, semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT);
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_IS_STREAM, false);
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_MODEL, modelId);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_INPUT_TOKENS, promptTokens);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_OUTPUT_TOKENS, completionTokens);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_TOTAL_TOKENS, totalTokens);
- span.setAttribute(semantic_convention_1.default.GEN_AI_CLIENT_TOKEN_USAGE, totalTokens);
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_FINISH_REASON, [finishReason]);
- span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_TYPE, semantic_convention_1.default.GEN_AI_OUTPUT_TYPE_TEXT);
- span.setAttribute(semantic_convention_1.default.GEN_AI_CLIENT_OPERATION_DURATION, duration);
- span.setAttribute(semantic_convention_1.default.SERVER_ADDRESS, BEDROCK_SERVER_ADDRESS);
- span.setAttribute(semantic_convention_1.default.SERVER_PORT, BEDROCK_SERVER_PORT);
- if (cacheReadTokens > 0)
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, cacheReadTokens);
- if (cacheWriteTokens > 0)
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS, cacheWriteTokens);
- const requestId = response.$metadata?.requestId;
- if (requestId)
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_ID, requestId);
- applyInferenceConfigAttributes(span, input.inferenceConfig || {});
- if (config_1.default.traceContent) {
- const messages = (input.messages || []).map((m) => ({
- role: m.role,
- content: m.content?.map((c) => c.text || '').join('') || '',
- }));
- const systemText = input.system?.[0]?.text;
- span.setAttribute(semantic_convention_1.default.GEN_AI_INPUT_MESSAGES, helpers_1.default.buildInputMessages(messages, systemText));
- if (systemText) {
- const systemStr = JSON.stringify([{ type: 'text', content: systemText }]);
- span.setAttribute(semantic_convention_1.default.GEN_AI_SYSTEM_INSTRUCTIONS, systemStr);
- span.setAttribute(semantic_convention_1.default.GEN_AI_SYSTEM_INSTRUCTIONS_OTEL, systemStr);
- }
- const outputText = response.output?.message?.content?.map((c) => c.text || '').join('') || '';
- span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_MESSAGES, helpers_1.default.buildOutputMessages(outputText, finishReason));
- }
- metricParams = { genAIEndpoint, model: modelId, cost, aiSystem: BedrockWrapper.aiSystem };
- return response;
- }
- catch (e) {
- helpers_1.default.handleException(span, e);
- throw e;
- }
- finally {
- span.end();
- if (metricParams)
- base_wrapper_1.default.recordMetrics(span, metricParams);
- }
+ const spanName = `${semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT} ${modelId}`;
+ const effectiveCtx = (0, helpers_1.getFrameworkParentContext)() ?? api_1.context.active();
+ const span = tracer.startSpan(spanName, {
+ kind: api_1.SpanKind.CLIENT,
+ attributes: spanCreationAttrs(semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT, modelId),
+ }, effectiveCtx);
+ return api_1.context
+ .with(api_1.trace.setSpan(effectiveCtx, span), async () => {
+ return originalMethod.apply(instance, args);
+ })
+ .then((response) => {
+ return BedrockWrapper._converseComplete({ input, genAIEndpoint, response, span, modelId });
+ })
+ .catch((e) => {
+ helpers_1.default.handleException(span, e);
+ base_wrapper_1.default.recordMetrics(span, {
+ genAIEndpoint,
+ model: modelId,
+ aiSystem: BedrockWrapper.aiSystem,
+ serverAddress: BedrockWrapper.serverAddress,
+ serverPort: BedrockWrapper.serverPort,
+ errorType: e?.constructor?.name || '_OTHER',
+ });
+ span.end();
+ throw e;
  });
  }
+ static async _converseComplete({ input, genAIEndpoint, response, span, modelId, }) {
+ let metricParams;
+ try {
+ metricParams = BedrockWrapper._converseCommonSetter({
+ input,
+ genAIEndpoint,
+ result: response,
+ span,
+ modelId,
+ isStream: false,
+ });
+ return response;
+ }
+ catch (e) {
+ helpers_1.default.handleException(span, e);
+ throw e;
+ }
+ finally {
+ span.end();
+ if (metricParams) {
+ base_wrapper_1.default.recordMetrics(span, metricParams);
+ }
+ }
+ }
  static async _handleConverseStreamCommand(tracer, originalMethod, instance, args) {
  const command = args[0];
  const input = command.input || {};
- const modelId = input.modelId || 'unknown';
+ const modelId = input.modelId || 'amazon.titan-text-express-v1';
  const genAIEndpoint = 'bedrock.converse_stream';
  const startTime = Date.now();
- const span = tracer.startSpan(genAIEndpoint, { kind: api_1.SpanKind.CLIENT });
- // Call the original method to get the response object (with .stream async iterable)
- const response = await api_1.context.with(api_1.trace.setSpan(api_1.context.active(), span), () => originalMethod.apply(instance, args));
- // Accumulated state from stream events
+ const spanName = `${semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT} ${modelId}`;
+ const effectiveCtx = (0, helpers_1.getFrameworkParentContext)() ?? api_1.context.active();
+ const span = tracer.startSpan(spanName, {
+ kind: api_1.SpanKind.CLIENT,
+ attributes: spanCreationAttrs(semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT, modelId),
+ }, effectiveCtx);
+ let response;
+ try {
+ response = await api_1.context.with(api_1.trace.setSpan(effectiveCtx, span), () => originalMethod.apply(instance, args));
+ }
+ catch (e) {
+ helpers_1.default.handleException(span, e);
+ base_wrapper_1.default.recordMetrics(span, {
+ genAIEndpoint,
+ model: modelId,
+ aiSystem: BedrockWrapper.aiSystem,
+ serverAddress: BedrockWrapper.serverAddress,
+ serverPort: BedrockWrapper.serverPort,
+ errorType: e?.constructor?.name || '_OTHER',
+ });
+ span.end();
+ throw e;
+ }
  let llmResponse = '';
  let finishReason = 'stop';
  let inputTokens = 0;
  let outputTokens = 0;
  let cacheReadTokens = 0;
  let cacheWriteTokens = 0;
- let firstChunkTime = null;
- const chunkTimestamps = [];
+ const timestamps = [];
  const originalStream = response.stream;
  async function* wrappedStream() {
  try {
  for await (const event of originalStream) {
- const now = Date.now();
- if (firstChunkTime === null)
- firstChunkTime = now;
- chunkTimestamps.push(now);
+ timestamps.push(Date.now());
  if (event.contentBlockDelta?.delta?.text)
  llmResponse += event.contentBlockDelta.delta.text;
  if (event.messageStop?.stopReason)
@@ -172,63 +229,34 @@ class BedrockWrapper extends base_wrapper_1.default {
  }
  }
  finally {
- // Record telemetry once the stream is fully consumed
  try {
- const duration = (Date.now() - startTime) / 1000;
- const ttft = firstChunkTime !== null ? (firstChunkTime - startTime) / 1000 : 0;
+ const ttft = timestamps.length > 0 ? (timestamps[0] - startTime) / 1000 : 0;
  let tbt = 0;
- if (chunkTimestamps.length > 1) {
- const timeDiffs = chunkTimestamps.slice(1).map((t, i) => t - chunkTimestamps[i]);
+ if (timestamps.length > 1) {
+ const timeDiffs = timestamps.slice(1).map((t, i) => t - timestamps[i]);
  tbt = timeDiffs.reduce((a, b) => a + b, 0) / timeDiffs.length / 1000;
  }
- const totalTokens = inputTokens + outputTokens;
- const pricingInfo = await config_1.default.updatePricingJson(config_1.default.pricing_json);
- const cost = helpers_1.default.getChatModelCost(modelId, pricingInfo, inputTokens, outputTokens);
- BedrockWrapper.setBaseSpanAttributes(span, {
+ const result = {
+ output: { message: { content: [{ text: llmResponse }] } },
+ stopReason: finishReason,
+ usage: {
+ inputTokens,
+ outputTokens,
+ cacheReadInputTokens: cacheReadTokens,
+ cacheWriteInputTokens: cacheWriteTokens,
+ },
+ $metadata: response.$metadata,
+ };
+ const metricParams = BedrockWrapper._converseCommonSetter({
+ input,
  genAIEndpoint,
- model: modelId,
- cost,
- aiSystem: BedrockWrapper.aiSystem,
+ result,
+ span,
+ modelId,
+ isStream: true,
+ ttft,
+ tbt,
  });
- span.setAttribute(semantic_convention_1.default.GEN_AI_OPERATION, semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT);
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_IS_STREAM, true);
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_MODEL, modelId);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_INPUT_TOKENS, inputTokens);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_OUTPUT_TOKENS, outputTokens);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_TOTAL_TOKENS, totalTokens);
- span.setAttribute(semantic_convention_1.default.GEN_AI_CLIENT_TOKEN_USAGE, totalTokens);
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_FINISH_REASON, [finishReason]);
- span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_TYPE, semantic_convention_1.default.GEN_AI_OUTPUT_TYPE_TEXT);
- span.setAttribute(semantic_convention_1.default.GEN_AI_CLIENT_OPERATION_DURATION, duration);
- if (ttft > 0)
- span.setAttribute(semantic_convention_1.default.GEN_AI_SERVER_TTFT, ttft);
- if (tbt > 0)
- span.setAttribute(semantic_convention_1.default.GEN_AI_SERVER_TBT, tbt);
- span.setAttribute(semantic_convention_1.default.SERVER_ADDRESS, BEDROCK_SERVER_ADDRESS);
- span.setAttribute(semantic_convention_1.default.SERVER_PORT, BEDROCK_SERVER_PORT);
- if (cacheReadTokens > 0)
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, cacheReadTokens);
- if (cacheWriteTokens > 0)
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS, cacheWriteTokens);
- const requestId = response.$metadata?.requestId;
- if (requestId)
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_ID, requestId);
- applyInferenceConfigAttributes(span, input.inferenceConfig || {});
- if (config_1.default.traceContent) {
- const messages = (input.messages || []).map((m) => ({
- role: m.role,
- content: m.content?.map((c) => c.text || '').join('') || '',
- }));
- const systemText = input.system?.[0]?.text;
- span.setAttribute(semantic_convention_1.default.GEN_AI_INPUT_MESSAGES, helpers_1.default.buildInputMessages(messages, systemText));
- if (systemText) {
- const systemStr = JSON.stringify([{ type: 'text', content: systemText }]);
- span.setAttribute(semantic_convention_1.default.GEN_AI_SYSTEM_INSTRUCTIONS, systemStr);
- span.setAttribute(semantic_convention_1.default.GEN_AI_SYSTEM_INSTRUCTIONS_OTEL, systemStr);
- }
- span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_MESSAGES, helpers_1.default.buildOutputMessages(llmResponse, finishReason));
- }
- const metricParams = { genAIEndpoint, model: modelId, cost, aiSystem: BedrockWrapper.aiSystem };
  base_wrapper_1.default.recordMetrics(span, metricParams);
  }
  catch { /* ignore telemetry errors in finally */ }
@@ -239,126 +267,151 @@ class BedrockWrapper extends base_wrapper_1.default {
  }
  return { ...response, stream: wrappedStream() };
  }
- static async _handleInvokeModelCommand(tracer, originalMethod, instance, args, commandName) {
- const command = args[0];
- const input = command.input || {};
- const modelId = input.modelId || 'unknown';
- const isStream = commandName === 'InvokeModelWithResponseStreamCommand';
- const genAIEndpoint = isStream ? 'bedrock.invoke_model_stream' : 'bedrock.invoke_model';
- const span = tracer.startSpan(genAIEndpoint, { kind: api_1.SpanKind.CLIENT });
- return api_1.context.with(api_1.trace.setSpan(api_1.context.active(), span), async () => {
- const startTime = Date.now();
- let metricParams;
- try {
- const response = await originalMethod.apply(instance, args);
- const duration = (Date.now() - startTime) / 1000;
- // Parse response body
- let parsedBody = {};
- let promptTokens = 0;
- let completionTokens = 0;
- let outputText = '';
- let rawFinishReason = 'stop';
- try {
- const bodyBytes = response.body;
- if (bodyBytes) {
- const bodyStr = typeof bodyBytes === 'string'
- ? bodyBytes
- : Buffer.from(bodyBytes).toString('utf-8');
- parsedBody = JSON.parse(bodyStr);
- }
- }
- catch { /* ignore parse errors */ }
- // Handle different provider response formats
- if (modelId.startsWith('anthropic')) {
- promptTokens = parsedBody.usage?.input_tokens || 0;
- completionTokens = parsedBody.usage?.output_tokens || 0;
- outputText = parsedBody.content?.[0]?.text || '';
- rawFinishReason = parsedBody.stop_reason || 'stop';
- }
- else if (modelId.startsWith('amazon')) {
- promptTokens = parsedBody.inputTextTokenCount || 0;
- completionTokens = parsedBody.results?.[0]?.tokenCount || 0;
- outputText = parsedBody.results?.[0]?.outputText || '';
- rawFinishReason = parsedBody.results?.[0]?.completionReason || 'stop';
- }
- else if (modelId.startsWith('meta')) {
- promptTokens = parsedBody.prompt_token_count || 0;
- completionTokens = parsedBody.generation_token_count || 0;
- outputText = parsedBody.generation || '';
- rawFinishReason = parsedBody.stop_reason || 'stop';
- }
- else if (modelId.startsWith('mistral') || modelId.startsWith('mixtral')) {
- promptTokens = parsedBody.usage?.prompt_tokens || 0;
- completionTokens = parsedBody.usage?.completion_tokens || 0;
- outputText = parsedBody.outputs?.[0]?.text || '';
- rawFinishReason = parsedBody.outputs?.[0]?.stop_reason || 'stop';
- }
- else if (modelId.startsWith('ai21')) {
- promptTokens = parsedBody.prompt?.tokens?.length || 0;
- completionTokens = parsedBody.completions?.[0]?.data?.tokens?.length || 0;
- outputText = parsedBody.completions?.[0]?.data?.text || '';
- rawFinishReason = parsedBody.completions?.[0]?.finishReason?.reason || 'stop';
- }
- else {
- outputText = parsedBody.output || parsedBody.generation || parsedBody.text || '';
- }
- const finishReason = mapFinishReason(rawFinishReason);
- const totalTokens = promptTokens + completionTokens;
- const pricingInfo = await config_1.default.updatePricingJson(config_1.default.pricing_json);
- const cost = helpers_1.default.getChatModelCost(modelId, pricingInfo, promptTokens, completionTokens);
- BedrockWrapper.setBaseSpanAttributes(span, {
- genAIEndpoint,
- model: modelId,
- cost,
- aiSystem: BedrockWrapper.aiSystem,
- serverAddress: BEDROCK_SERVER_ADDRESS,
- serverPort: BEDROCK_SERVER_PORT,
- });
- span.setAttribute(semantic_convention_1.default.GEN_AI_OPERATION, semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT);
- span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_IS_STREAM, isStream);
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_MODEL, modelId);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_INPUT_TOKENS, promptTokens);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_OUTPUT_TOKENS, completionTokens);
- span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_TOTAL_TOKENS, totalTokens);
- span.setAttribute(semantic_convention_1.default.GEN_AI_CLIENT_TOKEN_USAGE, totalTokens);
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_FINISH_REASON, [finishReason]);
- span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_TYPE, semantic_convention_1.default.GEN_AI_OUTPUT_TYPE_TEXT);
- span.setAttribute(semantic_convention_1.default.GEN_AI_CLIENT_OPERATION_DURATION, duration);
- span.setAttribute(semantic_convention_1.default.SERVER_ADDRESS, BEDROCK_SERVER_ADDRESS);
- span.setAttribute(semantic_convention_1.default.SERVER_PORT, BEDROCK_SERVER_PORT);
- const requestId = response.$metadata?.requestId;
- if (requestId)
- span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_ID, requestId);
- if (config_1.default.traceContent) {
- try {
- const reqBody = input.body
- ? JSON.parse(typeof input.body === 'string' ? input.body : Buffer.from(input.body).toString())
- : {};
- const prompt = reqBody.prompt || reqBody.inputText || '';
- if (prompt) {
- span.setAttribute(semantic_convention_1.default.GEN_AI_INPUT_MESSAGES, helpers_1.default.buildInputMessages([{ role: 'user', content: prompt }]));
- }
- }
- catch { /* ignore */ }
- if (outputText) {
- span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_MESSAGES, helpers_1.default.buildOutputMessages(outputText, finishReason));
- }
+ static _converseCommonSetter({ input, genAIEndpoint, result, span, modelId, isStream, ttft = 0, tbt = 0, }) {
+ const captureContent = config_1.default.captureMessageContent;
+ const inferenceConfig = input.inferenceConfig || {};
+ if (inferenceConfig.temperature !== undefined) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_TEMPERATURE, inferenceConfig.temperature);
+ }
+ if (inferenceConfig.topP !== undefined) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_TOP_P, inferenceConfig.topP);
+ }
+ if (inferenceConfig.topK !== undefined) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_TOP_K, inferenceConfig.topK);
+ }
+ if (inferenceConfig.maxTokens != null) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_MAX_TOKENS, inferenceConfig.maxTokens);
+ }
+ if (inferenceConfig.stopSequences) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_STOP_SEQUENCES, inferenceConfig.stopSequences);
+ }
+ if (inferenceConfig.frequencyPenalty) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_FREQUENCY_PENALTY, inferenceConfig.frequencyPenalty);
+ }
+ if (inferenceConfig.presencePenalty) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_PRESENCE_PENALTY, inferenceConfig.presencePenalty);
+ }
+ span.setAttribute(semantic_convention_1.default.GEN_AI_REQUEST_IS_STREAM, isStream);
+ const usage = result.usage || {};
+ const inputTokens = usage.inputTokens || 0;
+ const outputTokens = usage.outputTokens || 0;
+ const cacheReadTokens = usage.cacheReadInputTokens || 0;
+ const cacheWriteTokens = usage.cacheWriteInputTokens || 0;
+ const responseModel = modelId;
+ const finishReason = mapFinishReason(result.stopReason || 'stop');
+ const pricingInfo = config_1.default.pricingInfo || {};
+ const cost = helpers_1.default.getChatModelCost(modelId, pricingInfo, inputTokens, outputTokens);
+ BedrockWrapper.setBaseSpanAttributes(span, {
+ genAIEndpoint,
+ model: modelId,
+ cost,
+ aiSystem: BedrockWrapper.aiSystem,
+ serverAddress: BedrockWrapper.serverAddress,
+ serverPort: BedrockWrapper.serverPort,
+ });
+ span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_MODEL, responseModel);
+ const requestId = result.$metadata?.requestId;
+ if (requestId) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_ID, requestId);
+ }
+ span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_INPUT_TOKENS, inputTokens);
+ span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_OUTPUT_TOKENS, outputTokens);
+ if (cacheReadTokens > 0) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS, cacheReadTokens);
+ }
+ if (cacheWriteTokens > 0) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS, cacheWriteTokens);
+ }
+ if (ttft > 0) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_SERVER_TTFT, ttft);
+ }
+ if (tbt > 0) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_SERVER_TBT, tbt);
+ }
+ span.setAttribute(semantic_convention_1.default.GEN_AI_RESPONSE_FINISH_REASON, [finishReason]);
+ const outputText = result.output?.message?.content?.map((c) => c.text || '').join('') || '';
+ const outputType = typeof outputText === 'string'
+ ? semantic_convention_1.default.GEN_AI_OUTPUT_TYPE_TEXT
+ : semantic_convention_1.default.GEN_AI_OUTPUT_TYPE_JSON;
+ span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_TYPE, outputType);
+ const contentBlocks = result.output?.message?.content || [];
+ const toolCalls = contentBlocks
+ .filter((c) => c.toolUse)
+ .map((c) => ({
+ id: c.toolUse.toolUseId || '',
+ name: c.toolUse.name || '',
+ arguments: c.toolUse.input || {},
+ }));
+ if (toolCalls.length > 0) {
+ const toolNames = toolCalls.map((t) => t.name).filter(Boolean);
+ const toolIds = toolCalls.map((t) => t.id).filter(Boolean);
+ const toolArgs = toolCalls
+ .map((t) => (typeof t.arguments === 'string' ? t.arguments : JSON.stringify(t.arguments)))
+ .filter(Boolean);
+ if (toolNames.length > 0) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_TOOL_NAME, toolNames.join(', '));
+ }
+ if (toolIds.length > 0) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_TOOL_CALL_ID, toolIds.join(', '));
+ }
+ if (toolArgs.length > 0) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_TOOL_ARGS, toolArgs.join(', '));
+ }
+ }
+ const messages = convertBedrockMessages(input.messages || []);
+ const systemBlock = input.system || [];
+ const systemParts = [];
+ if (Array.isArray(systemBlock)) {
+ for (const item of systemBlock) {
+ if (item?.text) {
+ systemParts.push({ type: 'text', content: item.text });
  }
- metricParams = { genAIEndpoint, model: modelId, cost, aiSystem: BedrockWrapper.aiSystem };
- return response;
  }
- catch (e) {
- helpers_1.default.handleException(span, e);
- throw e;
+ }
+ let inputMessagesJson;
+ let outputMessagesJson;
+ if (captureContent) {
+ inputMessagesJson = helpers_1.default.buildInputMessages(messages);
+ span.setAttribute(semantic_convention_1.default.GEN_AI_INPUT_MESSAGES, inputMessagesJson);
+ if (systemParts.length > 0) {
+ span.setAttribute(semantic_convention_1.default.GEN_AI_SYSTEM_INSTRUCTIONS, JSON.stringify(systemParts));
  }
- finally {
- span.end();
- if (metricParams)
- base_wrapper_1.default.recordMetrics(span, metricParams);
+ outputMessagesJson = helpers_1.default.buildOutputMessages(outputText, finishReason, toolCalls.length > 0 ? toolCalls : undefined);
+ span.setAttribute(semantic_convention_1.default.GEN_AI_OUTPUT_MESSAGES, outputMessagesJson);
+ }
+ if (!config_1.default.disableEvents) {
+ const eventAttrs = {
+ [semantic_convention_1.default.GEN_AI_OPERATION]: semantic_convention_1.default.GEN_AI_OPERATION_TYPE_CHAT,
+ [semantic_convention_1.default.GEN_AI_REQUEST_MODEL]: modelId,
+ [semantic_convention_1.default.GEN_AI_RESPONSE_MODEL]: responseModel,
+ [semantic_convention_1.default.SERVER_ADDRESS]: BedrockWrapper.serverAddress,
+ [semantic_convention_1.default.SERVER_PORT]: BedrockWrapper.serverPort,
+ [semantic_convention_1.default.GEN_AI_RESPONSE_FINISH_REASON]: [finishReason],
+ [semantic_convention_1.default.GEN_AI_OUTPUT_TYPE]: outputType,
+ [semantic_convention_1.default.GEN_AI_USAGE_INPUT_TOKENS]: inputTokens,
+ [semantic_convention_1.default.GEN_AI_USAGE_OUTPUT_TOKENS]: outputTokens,
+ };
+ if (requestId) {
+ eventAttrs[semantic_convention_1.default.GEN_AI_RESPONSE_ID] = requestId;
  }
- });
+ if (captureContent) {
+ if (inputMessagesJson)
+ eventAttrs[semantic_convention_1.default.GEN_AI_INPUT_MESSAGES] = inputMessagesJson;
+ if (outputMessagesJson)
+ eventAttrs[semantic_convention_1.default.GEN_AI_OUTPUT_MESSAGES] = outputMessagesJson;
+ }
+ helpers_1.default.emitInferenceEvent(span, eventAttrs);
+ }
+ return {
+ genAIEndpoint,
+ model: modelId,
+ cost,
+ aiSystem: BedrockWrapper.aiSystem,
+ };
  }
  }
  BedrockWrapper.aiSystem = semantic_convention_1.default.GEN_AI_SYSTEM_AWS_BEDROCK;
+ BedrockWrapper.serverAddress = 'bedrock-runtime.amazonaws.com';
+ BedrockWrapper.serverPort = 443;
  exports.default = BedrockWrapper;
  //# sourceMappingURL=wrapper.js.map
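
For orientation: the change set above replaces the former per-command handlers (including the removed InvokeModel path) with a shared _converseCommonSetter, but the wrapping technique itself is the standard OpenTelemetry method patch — swap the SDK client's send for an async function that opens a CLIENT span, runs the original call inside that span's context, records attributes, and ends the span. Below is a minimal, self-contained TypeScript sketch of that pattern, assuming only @opentelemetry/api; the function name patchSend, the tracer name, the span name, and the literal attribute keys are illustrative, not the package's API.

import { context, trace, SpanKind, SpanStatusCode } from '@opentelemetry/api';

// Illustrative sketch of the wrap-and-trace pattern used throughout this diff.
export function patchSend<T extends (...args: any[]) => Promise<any>>(original: T): T {
  const tracer = trace.getTracer('bedrock-sketch'); // hypothetical tracer name
  return async function (this: unknown, ...args: any[]) {
    // Span name follows the "{operation} {model}" convention seen in the diff.
    const span = tracer.startSpan('chat amazon.titan-text-express-v1', { kind: SpanKind.CLIENT });
    try {
      // Run the real call with the span active so nested spans parent correctly.
      const response = await context.with(
        trace.setSpan(context.active(), span),
        () => original.apply(this, args)
      );
      // The real wrapper records many more gen_ai.* attributes (cost, finish reason, ...).
      span.setAttribute('gen_ai.usage.input_tokens', response?.usage?.inputTokens ?? 0);
      span.setAttribute('gen_ai.usage.output_tokens', response?.usage?.outputTokens ?? 0);
      return response;
    } catch (e: any) {
      span.recordException(e);
      span.setStatus({ code: SpanStatusCode.ERROR, message: e?.message });
      throw e;
    } finally {
      span.end();
    }
  } as T;
}

The streaming handler differs only in where the span closes: token counts and the time-between-tokens average are known only after the async iterable is drained, which is why the diff moves the telemetry and span.end() into the wrapped generator's finally block.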