mseep-agentops 0.4.18__py3-none-any.whl → 0.4.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (153)
  1. agentops/__init__.py +0 -0
  2. agentops/client/api/base.py +28 -30
  3. agentops/client/api/versions/v3.py +29 -25
  4. agentops/client/api/versions/v4.py +87 -46
  5. agentops/client/client.py +98 -29
  6. agentops/client/http/README.md +87 -0
  7. agentops/client/http/http_client.py +126 -172
  8. agentops/config.py +8 -2
  9. agentops/instrumentation/OpenTelemetry.md +133 -0
  10. agentops/instrumentation/README.md +167 -0
  11. agentops/instrumentation/__init__.py +13 -1
  12. agentops/instrumentation/agentic/ag2/__init__.py +18 -0
  13. agentops/instrumentation/agentic/ag2/instrumentor.py +922 -0
  14. agentops/instrumentation/agentic/agno/__init__.py +19 -0
  15. agentops/instrumentation/agentic/agno/attributes/__init__.py +20 -0
  16. agentops/instrumentation/agentic/agno/attributes/agent.py +250 -0
  17. agentops/instrumentation/agentic/agno/attributes/metrics.py +214 -0
  18. agentops/instrumentation/agentic/agno/attributes/storage.py +158 -0
  19. agentops/instrumentation/agentic/agno/attributes/team.py +195 -0
  20. agentops/instrumentation/agentic/agno/attributes/tool.py +210 -0
  21. agentops/instrumentation/agentic/agno/attributes/workflow.py +254 -0
  22. agentops/instrumentation/agentic/agno/instrumentor.py +1313 -0
  23. agentops/instrumentation/agentic/crewai/LICENSE +201 -0
  24. agentops/instrumentation/agentic/crewai/NOTICE.md +10 -0
  25. agentops/instrumentation/agentic/crewai/__init__.py +6 -0
  26. agentops/instrumentation/agentic/crewai/crewai_span_attributes.py +335 -0
  27. agentops/instrumentation/agentic/crewai/instrumentation.py +535 -0
  28. agentops/instrumentation/agentic/crewai/version.py +1 -0
  29. agentops/instrumentation/agentic/google_adk/__init__.py +19 -0
  30. agentops/instrumentation/agentic/google_adk/instrumentor.py +68 -0
  31. agentops/instrumentation/agentic/google_adk/patch.py +767 -0
  32. agentops/instrumentation/agentic/haystack/__init__.py +1 -0
  33. agentops/instrumentation/agentic/haystack/instrumentor.py +186 -0
  34. agentops/instrumentation/agentic/langgraph/__init__.py +3 -0
  35. agentops/instrumentation/agentic/langgraph/attributes.py +54 -0
  36. agentops/instrumentation/agentic/langgraph/instrumentation.py +598 -0
  37. agentops/instrumentation/agentic/langgraph/version.py +1 -0
  38. agentops/instrumentation/agentic/openai_agents/README.md +156 -0
  39. agentops/instrumentation/agentic/openai_agents/SPANS.md +145 -0
  40. agentops/instrumentation/agentic/openai_agents/TRACING_API.md +144 -0
  41. agentops/instrumentation/agentic/openai_agents/__init__.py +30 -0
  42. agentops/instrumentation/agentic/openai_agents/attributes/common.py +549 -0
  43. agentops/instrumentation/agentic/openai_agents/attributes/completion.py +172 -0
  44. agentops/instrumentation/agentic/openai_agents/attributes/model.py +58 -0
  45. agentops/instrumentation/agentic/openai_agents/attributes/tokens.py +275 -0
  46. agentops/instrumentation/agentic/openai_agents/exporter.py +469 -0
  47. agentops/instrumentation/agentic/openai_agents/instrumentor.py +107 -0
  48. agentops/instrumentation/agentic/openai_agents/processor.py +58 -0
  49. agentops/instrumentation/agentic/smolagents/README.md +88 -0
  50. agentops/instrumentation/agentic/smolagents/__init__.py +12 -0
  51. agentops/instrumentation/agentic/smolagents/attributes/agent.py +354 -0
  52. agentops/instrumentation/agentic/smolagents/attributes/model.py +205 -0
  53. agentops/instrumentation/agentic/smolagents/instrumentor.py +286 -0
  54. agentops/instrumentation/agentic/smolagents/stream_wrapper.py +258 -0
  55. agentops/instrumentation/agentic/xpander/__init__.py +15 -0
  56. agentops/instrumentation/agentic/xpander/context.py +112 -0
  57. agentops/instrumentation/agentic/xpander/instrumentor.py +877 -0
  58. agentops/instrumentation/agentic/xpander/trace_probe.py +86 -0
  59. agentops/instrumentation/agentic/xpander/version.py +3 -0
  60. agentops/instrumentation/common/README.md +65 -0
  61. agentops/instrumentation/common/attributes.py +1 -2
  62. agentops/instrumentation/providers/anthropic/__init__.py +24 -0
  63. agentops/instrumentation/providers/anthropic/attributes/__init__.py +23 -0
  64. agentops/instrumentation/providers/anthropic/attributes/common.py +64 -0
  65. agentops/instrumentation/providers/anthropic/attributes/message.py +541 -0
  66. agentops/instrumentation/providers/anthropic/attributes/tools.py +231 -0
  67. agentops/instrumentation/providers/anthropic/event_handler_wrapper.py +90 -0
  68. agentops/instrumentation/providers/anthropic/instrumentor.py +146 -0
  69. agentops/instrumentation/providers/anthropic/stream_wrapper.py +436 -0
  70. agentops/instrumentation/providers/google_genai/README.md +33 -0
  71. agentops/instrumentation/providers/google_genai/__init__.py +24 -0
  72. agentops/instrumentation/providers/google_genai/attributes/__init__.py +25 -0
  73. agentops/instrumentation/providers/google_genai/attributes/chat.py +125 -0
  74. agentops/instrumentation/providers/google_genai/attributes/common.py +88 -0
  75. agentops/instrumentation/providers/google_genai/attributes/model.py +284 -0
  76. agentops/instrumentation/providers/google_genai/instrumentor.py +170 -0
  77. agentops/instrumentation/providers/google_genai/stream_wrapper.py +238 -0
  78. agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py +28 -0
  79. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/__init__.py +27 -0
  80. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/attributes.py +277 -0
  81. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/common.py +104 -0
  82. agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py +162 -0
  83. agentops/instrumentation/providers/ibm_watsonx_ai/stream_wrapper.py +302 -0
  84. agentops/instrumentation/providers/mem0/__init__.py +45 -0
  85. agentops/instrumentation/providers/mem0/common.py +377 -0
  86. agentops/instrumentation/providers/mem0/instrumentor.py +270 -0
  87. agentops/instrumentation/providers/mem0/memory.py +430 -0
  88. agentops/instrumentation/providers/openai/__init__.py +21 -0
  89. agentops/instrumentation/providers/openai/attributes/__init__.py +7 -0
  90. agentops/instrumentation/providers/openai/attributes/common.py +55 -0
  91. agentops/instrumentation/providers/openai/attributes/response.py +607 -0
  92. agentops/instrumentation/providers/openai/config.py +36 -0
  93. agentops/instrumentation/providers/openai/instrumentor.py +312 -0
  94. agentops/instrumentation/providers/openai/stream_wrapper.py +941 -0
  95. agentops/instrumentation/providers/openai/utils.py +44 -0
  96. agentops/instrumentation/providers/openai/v0.py +176 -0
  97. agentops/instrumentation/providers/openai/v0_wrappers.py +483 -0
  98. agentops/instrumentation/providers/openai/wrappers/__init__.py +30 -0
  99. agentops/instrumentation/providers/openai/wrappers/assistant.py +277 -0
  100. agentops/instrumentation/providers/openai/wrappers/chat.py +259 -0
  101. agentops/instrumentation/providers/openai/wrappers/completion.py +109 -0
  102. agentops/instrumentation/providers/openai/wrappers/embeddings.py +94 -0
  103. agentops/instrumentation/providers/openai/wrappers/image_gen.py +75 -0
  104. agentops/instrumentation/providers/openai/wrappers/responses.py +191 -0
  105. agentops/instrumentation/providers/openai/wrappers/shared.py +81 -0
  106. agentops/instrumentation/utilities/concurrent_futures/__init__.py +10 -0
  107. agentops/instrumentation/utilities/concurrent_futures/instrumentation.py +206 -0
  108. agentops/integration/callbacks/dspy/__init__.py +11 -0
  109. agentops/integration/callbacks/dspy/callback.py +471 -0
  110. agentops/integration/callbacks/langchain/README.md +59 -0
  111. agentops/integration/callbacks/langchain/__init__.py +15 -0
  112. agentops/integration/callbacks/langchain/callback.py +791 -0
  113. agentops/integration/callbacks/langchain/utils.py +54 -0
  114. agentops/legacy/crewai.md +121 -0
  115. agentops/logging/instrument_logging.py +4 -0
  116. agentops/sdk/README.md +220 -0
  117. agentops/sdk/core.py +75 -32
  118. agentops/sdk/descriptors/classproperty.py +28 -0
  119. agentops/sdk/exporters.py +152 -33
  120. agentops/semconv/README.md +125 -0
  121. agentops/semconv/span_kinds.py +0 -2
  122. agentops/validation.py +102 -63
  123. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.22.dist-info}/METADATA +30 -40
  124. mseep_agentops-0.4.22.dist-info/RECORD +178 -0
  125. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.22.dist-info}/WHEEL +1 -2
  126. mseep_agentops-0.4.18.dist-info/RECORD +0 -94
  127. mseep_agentops-0.4.18.dist-info/top_level.txt +0 -2
  128. tests/conftest.py +0 -10
  129. tests/unit/client/__init__.py +0 -1
  130. tests/unit/client/test_http_adapter.py +0 -221
  131. tests/unit/client/test_http_client.py +0 -206
  132. tests/unit/conftest.py +0 -54
  133. tests/unit/sdk/__init__.py +0 -1
  134. tests/unit/sdk/instrumentation_tester.py +0 -207
  135. tests/unit/sdk/test_attributes.py +0 -392
  136. tests/unit/sdk/test_concurrent_instrumentation.py +0 -468
  137. tests/unit/sdk/test_decorators.py +0 -763
  138. tests/unit/sdk/test_exporters.py +0 -241
  139. tests/unit/sdk/test_factory.py +0 -1188
  140. tests/unit/sdk/test_internal_span_processor.py +0 -397
  141. tests/unit/sdk/test_resource_attributes.py +0 -35
  142. tests/unit/test_config.py +0 -82
  143. tests/unit/test_context_manager.py +0 -777
  144. tests/unit/test_events.py +0 -27
  145. tests/unit/test_host_env.py +0 -54
  146. tests/unit/test_init_py.py +0 -501
  147. tests/unit/test_serialization.py +0 -433
  148. tests/unit/test_session.py +0 -676
  149. tests/unit/test_user_agent.py +0 -34
  150. tests/unit/test_validation.py +0 -405
  151. {tests → agentops/instrumentation/agentic/openai_agents/attributes}/__init__.py +0 -0
  152. /tests/unit/__init__.py → /agentops/instrumentation/providers/openai/attributes/tools.py +0 -0
  153. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.22.dist-info}/licenses/LICENSE +0 -0

agentops/instrumentation/agentic/openai_agents/attributes/completion.py
@@ -0,0 +1,172 @@
+"""Completion processing utilities for OpenAI Agents instrumentation.
+
+This module handles completion content processing from both the Chat Completions API
+and the OpenAI Response API formats, extracting messages, tool calls, function calls, etc.
+"""
+
+from typing import Any, Dict
+
+from agentops.instrumentation.common.attributes import AttributeMap
+
+from agentops.logging import logger
+from agentops.helpers.serialization import model_to_dict
+from agentops.semconv import (
+    SpanAttributes,
+    MessageAttributes,
+)
+from agentops.instrumentation.agentic.openai_agents.attributes.tokens import process_token_usage
+
+
+def get_generation_output_attributes(output: Any) -> Dict[str, Any]:
+    """Extract LLM response attributes from an `openai/completions` object.
+
+    Args:
+        output: The response object (can be dict, Response object, or other format)
+
+    Returns:
+        Dictionary of attributes extracted from the response in a consistent format
+    """
+    # Convert model to dictionary for easier processing
+    response_dict = model_to_dict(output)
+    result: AttributeMap = {}
+
+    if not response_dict:
+        # Handle output as string if it's not a dict
+        if isinstance(output, str):
+            # For string output, just return the minimal set of attributes
+            return {}
+        return result
+
+    # Check for OpenAI Agents SDK response format (has raw_responses array)
+    if "raw_responses" in response_dict and isinstance(response_dict["raw_responses"], list):
+        result.update(get_raw_response_attributes(response_dict))
+    else:
+        # TODO base attributes for completion type
+
+        # Get completions or response API output attributes first
+        if "choices" in response_dict:
+            result.update(get_chat_completions_attributes(response_dict))
+
+        # Extract token usage from dictionary for standard formats
+        usage_attributes: AttributeMap = {}
+        if "usage" in response_dict:
+            process_token_usage(response_dict["usage"], usage_attributes)
+            result.update(usage_attributes)
+
+        # Extract token usage from Response object directly if dict conversion didn't work
+        if hasattr(output, "usage") and output.usage:
+            direct_usage_attributes: AttributeMap = {}
+            process_token_usage(output.usage, direct_usage_attributes)
+            result.update(direct_usage_attributes)
+
+    return result
+
+
+def get_raw_response_attributes(response: Dict[str, Any]) -> Dict[str, Any]:
+    """Extract attributes from OpenAI Agents SDK response format (with raw_responses).
+
+    This function handles the specific structure of OpenAI Agents SDK responses,
+    which include a raw_responses array containing the actual API responses.
+    This is the format used specifically by the Agents SDK, not the standard OpenAI API.
+
+    Args:
+        response: The OpenAI Agents SDK response dictionary (containing raw_responses array)
+
+    Returns:
+        Dictionary of attributes extracted from the Agents SDK response
+    """
+    result: AttributeMap = {}
+
+    # Set the LLM system to OpenAI
+    result[SpanAttributes.LLM_SYSTEM] = "openai"
+
+    # Process raw responses
+    if "raw_responses" in response and isinstance(response["raw_responses"], list):
+        for i, raw_response in enumerate(response["raw_responses"]):
+            # Extract token usage from the first raw response
+            if "usage" in raw_response and isinstance(raw_response["usage"], dict):
+                usage_attrs: AttributeMap = {}
+                process_token_usage(raw_response["usage"], usage_attrs)
+                result.update(usage_attrs)
+                logger.debug(f"Extracted token usage from raw_responses[{i}]: {usage_attrs}")
+
+            # Extract output content
+            if "output" in raw_response and isinstance(raw_response["output"], list):
+                for j, output_item in enumerate(raw_response["output"]):
+                    # Process message content
+                    if "content" in output_item and isinstance(output_item["content"], list):
+                        for content_item in output_item["content"]:
+                            if content_item.get("type") == "output_text" and "text" in content_item:
+                                # Set message content attribute using the standard convention
+                                result[MessageAttributes.COMPLETION_CONTENT.format(i=j)] = content_item["text"]
+
+                    # Process role
+                    if "role" in output_item:
+                        result[MessageAttributes.COMPLETION_ROLE.format(i=j)] = output_item["role"]
+
+                    # Process tool calls
+                    if "tool_calls" in output_item and isinstance(output_item["tool_calls"], list):
+                        for k, tool_call in enumerate(output_item["tool_calls"]):
+                            tool_id = tool_call.get("id", "")
+                            # Handle function format
+                            if "function" in tool_call and isinstance(tool_call["function"], dict):
+                                function = tool_call["function"]
+                                result[MessageAttributes.COMPLETION_TOOL_CALL_ID.format(i=j, j=k)] = tool_id
+                                result[MessageAttributes.COMPLETION_TOOL_CALL_NAME.format(i=j, j=k)] = function.get(
+                                    "name", ""
+                                )
+                                result[MessageAttributes.COMPLETION_TOOL_CALL_ARGUMENTS.format(i=j, j=k)] = (
+                                    function.get("arguments", "")
+                                )
+
+    return result
+
+
+def get_chat_completions_attributes(response: Dict[str, Any]) -> Dict[str, Any]:
+    """Get attributes from OpenAI Chat Completions API format (with choices array).
+
+    This function specifically handles the original Chat Completions API format
+    that uses a 'choices' array with 'message' objects, as opposed to the newer
+    Response API format that uses an 'output' array.
+
+    Args:
+        response: The response dictionary containing chat completions (with choices array)
+
+    Returns:
+        Dictionary of chat completion attributes
+    """
+    result: AttributeMap = {}
+
+    if "choices" not in response:
+        return result
+
+    for i, choice in enumerate(response["choices"]):
+        if "finish_reason" in choice:
+            result[MessageAttributes.COMPLETION_FINISH_REASON.format(i=i)] = choice["finish_reason"]
+
+        message = choice.get("message", {})
+
+        if "role" in message:
+            result[MessageAttributes.COMPLETION_ROLE.format(i=i)] = message["role"]
+
+        if "content" in message:
+            content = message["content"] if message["content"] is not None else ""
+            result[MessageAttributes.COMPLETION_CONTENT.format(i=i)] = content
+
+        if "tool_calls" in message and message["tool_calls"] is not None:
+            tool_calls = message["tool_calls"]
+            for j, tool_call in enumerate(tool_calls):
+                if "function" in tool_call:
+                    function = tool_call["function"]
+                    result[MessageAttributes.COMPLETION_TOOL_CALL_ID.format(i=i, j=j)] = tool_call.get("id")
+                    result[MessageAttributes.COMPLETION_TOOL_CALL_NAME.format(i=i, j=j)] = function.get("name")
+                    result[MessageAttributes.COMPLETION_TOOL_CALL_ARGUMENTS.format(i=i, j=j)] = function.get(
+                        "arguments"
+                    )
+
+        if "function_call" in message and message["function_call"] is not None:
+            function_call = message["function_call"]
+            result[MessageAttributes.COMPLETION_TOOL_CALL_NAME.format(i=i)] = function_call.get("name")
+            result[MessageAttributes.COMPLETION_TOOL_CALL_ARGUMENTS.format(i=i)] = function_call.get("arguments")
+
+    return result
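
For orientation (not part of the released diff), a minimal sketch of how the extractor above could be exercised against a Chat Completions-style payload. The sample dict and the expected output are illustrative only, and the sketch assumes model_to_dict passes plain dicts through unchanged:

    from agentops.instrumentation.agentic.openai_agents.attributes.completion import (
        get_generation_output_attributes,
    )

    # Illustrative Chat Completions-style response; real payloads come from the OpenAI SDK.
    sample_response = {
        "choices": [
            {
                "finish_reason": "stop",
                "message": {"role": "assistant", "content": "Hello!"},
            }
        ],
        "usage": {"prompt_tokens": 12, "completion_tokens": 3, "total_tokens": 15},
    }

    attrs = get_generation_output_attributes(sample_response)
    # Roughly: indexed completion role/content/finish_reason attributes plus
    # gen_ai.usage.* token counts, per the MessageAttributes/SpanAttributes
    # constants used in the module above.
    print(attrs)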

agentops/instrumentation/agentic/openai_agents/attributes/model.py
@@ -0,0 +1,58 @@
+"""Model information extraction for OpenAI Agents instrumentation.
+
+This module provides utilities for extracting model information and parameters
+from various object types, centralizing model attribute handling logic.
+"""
+
+from typing import Any, Dict
+from agentops.semconv import SpanAttributes
+from agentops.instrumentation.common.attributes import AttributeMap, _extract_attributes_from_mapping
+
+
+# Parameter mapping dictionary for model parameters
+MODEL_CONFIG_ATTRIBUTES: AttributeMap = {
+    SpanAttributes.LLM_REQUEST_TEMPERATURE: "temperature",
+    SpanAttributes.LLM_REQUEST_TOP_P: "top_p",
+    SpanAttributes.LLM_REQUEST_FREQUENCY_PENALTY: "frequency_penalty",
+    SpanAttributes.LLM_REQUEST_PRESENCE_PENALTY: "presence_penalty",
+    SpanAttributes.LLM_REQUEST_MAX_TOKENS: "max_tokens",
+    # TODO we need to establish semantic conventions for the following:
+    # tool_choice
+    # parallel_tool_calls
+    # truncation
+    # store
+    # language
+    # prompt
+    # turn_detection
+    SpanAttributes.LLM_REQUEST_INSTRUCTIONS: "instructions",
+    SpanAttributes.LLM_REQUEST_VOICE: "voice",
+    SpanAttributes.LLM_REQUEST_SPEED: "speed",
+}
+
+
+def get_model_attributes(model_name: str) -> Dict[str, Any]:
+    """Get model name attributes for both request and response for consistency.
+
+    Args:
+        model_name: The model name to set
+
+    Returns:
+        Dictionary of model name attributes
+    """
+    return {
+        SpanAttributes.LLM_REQUEST_MODEL: model_name,
+        SpanAttributes.LLM_RESPONSE_MODEL: model_name,
+        SpanAttributes.LLM_SYSTEM: "openai",
+    }
+
+
+def get_model_config_attributes(model_config: Any) -> Dict[str, Any]:
+    """Extract model configuration attributes using the model parameter mapping.
+
+    Args:
+        model_config: The model configuration object
+
+    Returns:
+        Dictionary of extracted model configuration attributes
+    """
+    return _extract_attributes_from_mapping(model_config, MODEL_CONFIG_ATTRIBUTES)
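
A short, hypothetical usage sketch for the helpers above (not part of the diff). It assumes _extract_attributes_from_mapping reads each mapped field from the config by attribute or key lookup, following the orientation of MODEL_CONFIG_ATTRIBUTES (span attribute -> source field name); FakeModelSettings is a stand-in, not an Agents SDK type:

    from dataclasses import dataclass

    from agentops.instrumentation.agentic.openai_agents.attributes.model import (
        get_model_attributes,
        get_model_config_attributes,
    )


    @dataclass
    class FakeModelSettings:
        # Stand-in for a ModelSettings-like configuration object.
        temperature: float = 0.2
        top_p: float = 0.9
        max_tokens: int = 256


    span_attrs = {
        **get_model_attributes("gpt-4o-mini"),
        **get_model_config_attributes(FakeModelSettings()),
    }
    # span_attrs now carries request/response model names, the "openai" system tag,
    # and whichever sampling parameters the mapping found on the config object.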

agentops/instrumentation/agentic/openai_agents/attributes/tokens.py
@@ -0,0 +1,275 @@
+"""Token processing and metrics for the OpenAI Agents instrumentation.
+
+This module contains functions for processing token usage data from OpenAI responses,
+including standardized handling of different API formats (Chat Completions API vs Response API)
+and recording token usage metrics.
+"""
+
+import json
+from typing import Any, Dict, Optional
+
+from agentops.semconv import SpanAttributes
+from agentops.logging import logger
+
+
+def safe_parse(content: str) -> Optional[Dict[str, Any]]:
+    """Safely parse JSON content from a string.
+
+    Args:
+        content: String content that might contain JSON
+
+    Returns:
+        Parsed dictionary if content is valid JSON, None otherwise
+    """
+    if not isinstance(content, str):
+        return None
+
+    try:
+        # Try to parse the string as JSON
+        return json.loads(content)
+    except (json.JSONDecodeError, TypeError, ValueError):
+        # If parsing fails, log a debug message and return None
+        logger.debug(f"Failed to parse JSON content: {content[:100]}...")
+        return None
+
+
+def extract_nested_usage(content: Any) -> Optional[Dict[str, Any]]:
+    """Recursively extract usage data from potentially nested response structures.
+
+    Handles multiple nesting patterns:
+    1. Direct usage field at the top level
+    2. Usage nested in completion content JSON string
+    3. Usage nested in response.output[].content[].text
+
+    Args:
+        content: Any content object that might contain usage data
+
+    Returns:
+        Extracted usage dictionary or None if not found
+    """
+    # Case: direct dictionary with usage field
+    if isinstance(content, dict) and "usage" in content:
+        return content["usage"]
+
+    # Case: JSON string that might contain usage
+    if isinstance(content, str):
+        parsed_data = safe_parse(content)
+        if parsed_data:
+            # Direct usage field in parsed JSON
+            if "usage" in parsed_data and isinstance(parsed_data["usage"], dict):
+                return parsed_data["usage"]
+
+            # Response API format with nested output structure
+            if "output" in parsed_data and isinstance(parsed_data["output"], list):
+                # Usage at top level in Response format
+                if "usage" in parsed_data:
+                    return parsed_data["usage"]
+
+    # Case: complex nested structure with output array
+    # This handles the Response API format where usage is at the top level
+    if isinstance(content, dict):
+        if "output" in content and isinstance(content["output"], list):
+            if "usage" in content:
+                return content["usage"]
+
+    return None
+
+
+def process_token_usage(
+    usage: Dict[str, Any], attributes: Dict[str, Any], completion_content: Optional[str] = None
+) -> Dict[str, Any]:
+    """Process token usage data from OpenAI responses using standardized attribute naming.
+
+    Args:
+        usage: Dictionary containing token usage data
+        attributes: Dictionary where attributes will be set
+        completion_content: Optional JSON string that may contain token usage info
+
+    Returns:
+        Dictionary mapping token types to counts for metrics
+    """
+    # Result dictionary for metric recording
+    result = {}
+
+    # If usage is empty or None, use completion_content to find usage data
+    if not usage or (isinstance(usage, dict) and len(usage) == 0):
+        if completion_content:
+            logger.debug("TOKENS: Usage is empty, trying to extract from completion content")
+            extracted_usage = extract_nested_usage(completion_content)
+            if extracted_usage:
+                usage = extracted_usage
+
+    # Always set token usage attributes directly on the span to ensure they're captured
+    # For both Chat Completions API and Response API formats
+
+    # Helper to get an attribute from either a dict or an object
+    def get_value(obj, key):
+        if isinstance(obj, dict) and key in obj:
+            return obj[key]
+        elif hasattr(obj, key):
+            return getattr(obj, key)
+        return None
+
+    # Helper to check if an object has an attribute
+    def has_key(obj, key):
+        if isinstance(obj, dict):
+            return key in obj
+        return hasattr(obj, key)
+
+    # Process prompt/input tokens
+    if has_key(usage, "prompt_tokens"):
+        prompt_tokens = get_value(usage, "prompt_tokens")
+        attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = prompt_tokens
+        result["prompt_tokens"] = prompt_tokens
+    elif has_key(usage, "input_tokens"):
+        input_tokens = get_value(usage, "input_tokens")
+        attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = input_tokens
+        result["prompt_tokens"] = input_tokens
+
+    # Process completion/output tokens
+    if has_key(usage, "completion_tokens"):
+        completion_tokens = get_value(usage, "completion_tokens")
+        attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = completion_tokens
+        result["completion_tokens"] = completion_tokens
+    elif has_key(usage, "output_tokens"):
+        output_tokens = get_value(usage, "output_tokens")
+        attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = output_tokens
+        result["completion_tokens"] = output_tokens
+
+    # Process total tokens
+    if has_key(usage, "total_tokens"):
+        total_tokens = get_value(usage, "total_tokens")
+        attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = total_tokens
+        result["total_tokens"] = total_tokens
+
+    # Process Response API specific token details using defined semantic conventions
+
+    # Process reasoning tokens (from Response API output_tokens_details)
+    output_tokens_details = None
+    if has_key(usage, "output_tokens_details"):
+        output_tokens_details = get_value(usage, "output_tokens_details")
+
+    if output_tokens_details:
+        # Handle both dict and object types
+        if isinstance(output_tokens_details, dict):
+            details = output_tokens_details
+            if "reasoning_tokens" in details:
+                attributes[SpanAttributes.LLM_USAGE_REASONING_TOKENS] = details["reasoning_tokens"]
+                result["reasoning_tokens"] = details["reasoning_tokens"]
+        elif hasattr(output_tokens_details, "reasoning_tokens"):
+            reasoning_tokens = output_tokens_details.reasoning_tokens
+            attributes[SpanAttributes.LLM_USAGE_REASONING_TOKENS] = reasoning_tokens
+            result["reasoning_tokens"] = reasoning_tokens
+
+    # Process cached tokens (from Response API input_tokens_details)
+    input_tokens_details = None
+    if has_key(usage, "input_tokens_details"):
+        input_tokens_details = get_value(usage, "input_tokens_details")
+
+    if input_tokens_details:
+        # Handle both dict and object types
+        if isinstance(input_tokens_details, dict):
+            details = input_tokens_details
+            if "cached_tokens" in details:
+                attributes[SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS] = details["cached_tokens"]
+                result["cached_input_tokens"] = details["cached_tokens"]
+        # Handle object with cached_tokens attribute
+        elif hasattr(input_tokens_details, "cached_tokens"):
+            cached_tokens = input_tokens_details.cached_tokens
+            attributes[SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS] = cached_tokens
+            result["cached_input_tokens"] = cached_tokens
+
+    # Log all token-related attributes that were set
+    token_attrs = {k: v for k, v in attributes.items() if k.startswith("gen_ai.usage")}
+
+    # If we still have no token attributes, try one more approach - look for nested output structure
+    if not token_attrs and completion_content:
+        try:
+            # Parse the completion content to see if we can find more deeply nested usage data
+            parsed_content = safe_parse(completion_content)
+            if parsed_content and isinstance(parsed_content, dict):
+                # If this is a Response API format, check for nested output structure
+                if "output" in parsed_content and isinstance(parsed_content["output"], list):
+                    for output_item in parsed_content["output"]:
+                        # Check if this has nested content with usage
+                        if "content" in output_item and isinstance(output_item["content"], list):
+                            for content_item in output_item["content"]:
+                                if "text" in content_item:
+                                    # Try to parse this text for usage data
+                                    parsed_text = safe_parse(content_item["text"])
+                                    if parsed_text and "usage" in parsed_text:
+                                        logger.debug(f"Found deeply nested usage data: {parsed_text['usage']}")
+                                        # Process this usage data recursively
+                                        return process_token_usage(parsed_text["usage"], attributes)
+        except Exception as e:
+            logger.debug(f"Error during deep token extraction: {e}")
+
+    return result
+
+
+def map_token_type_to_metric_name(token_type: str) -> str:
+    """Maps token type names from SpanAttributes to simplified metric names.
+
+    Args:
+        token_type: Token type name, could be a full semantic convention or a simple name
+
+    Returns:
+        Simplified token type name for metrics
+    """
+    # If token_type is a semantic convention (contains a dot), extract the last part
+    if isinstance(token_type, str) and "." in token_type:
+        parts = token_type.split(".")
+        token_type = parts[-1]
+
+    # Map to simplified metric names
+    if token_type == "prompt_tokens":
+        return "input"
+    elif token_type == "completion_tokens":
+        return "output"
+    elif token_type == "reasoning_tokens":
+        return "reasoning"
+
+    # Return as-is if no mapping needed
+    return token_type
+
+
+def get_token_metric_attributes(usage: Dict[str, Any], model_name: str) -> Dict[str, Dict[str, Any]]:
+    """Get token usage metric attributes from usage data.
+
+    Args:
+        usage: Dictionary containing token usage data
+        model_name: Name of the model used
+
+    Returns:
+        Dictionary mapping token types to metric data including value and attributes
+    """
+    # Process all token types using our standardized processor
+    token_counts = process_token_usage(usage, {})
+
+    # Common attributes for all metrics
+    common_attributes = {
+        "model": model_name,
+        SpanAttributes.LLM_REQUEST_MODEL: model_name,
+        SpanAttributes.LLM_SYSTEM: "openai",
+    }
+
+    # Prepare metrics data for each token type
+    metrics_data = {}
+    for token_type, count in token_counts.items():
+        # Skip if no count
+        if not count:
+            continue
+
+        # Map token type to simplified metric name
+        metric_token_type = map_token_type_to_metric_name(token_type)
+
+        # Prepare the metric data
+        metrics_data[token_type] = {
+            "value": count,
+            "attributes": {
+                "token_type": metric_token_type,
+                **common_attributes,
+            },
+        }
+
+    return metrics_data
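
Finally, a hedged usage sketch for the token helpers (again, not part of the diff). The usage dict below mimics the Response API shape with input_tokens/output_tokens and an output_tokens_details block; the expected results follow directly from the logic above:

    from agentops.instrumentation.agentic.openai_agents.attributes.tokens import (
        get_token_metric_attributes,
        process_token_usage,
    )

    usage = {
        "input_tokens": 120,
        "output_tokens": 40,
        "total_tokens": 160,
        "output_tokens_details": {"reasoning_tokens": 8},
    }

    span_attributes: dict = {}
    counts = process_token_usage(usage, span_attributes)
    # counts == {"prompt_tokens": 120, "completion_tokens": 40,
    #            "total_tokens": 160, "reasoning_tokens": 8}
    # span_attributes now holds the corresponding gen_ai.usage.* entries.

    metrics = get_token_metric_attributes(usage, model_name="gpt-4o")
    # e.g. metrics["prompt_tokens"]["attributes"]["token_type"] == "input"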