mseep-agentops 0.4.18__py3-none-any.whl → 0.4.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (153)
  1. agentops/__init__.py +0 -0
  2. agentops/client/api/base.py +28 -30
  3. agentops/client/api/versions/v3.py +29 -25
  4. agentops/client/api/versions/v4.py +87 -46
  5. agentops/client/client.py +98 -29
  6. agentops/client/http/README.md +87 -0
  7. agentops/client/http/http_client.py +126 -172
  8. agentops/config.py +8 -2
  9. agentops/instrumentation/OpenTelemetry.md +133 -0
  10. agentops/instrumentation/README.md +167 -0
  11. agentops/instrumentation/__init__.py +13 -1
  12. agentops/instrumentation/agentic/ag2/__init__.py +18 -0
  13. agentops/instrumentation/agentic/ag2/instrumentor.py +922 -0
  14. agentops/instrumentation/agentic/agno/__init__.py +19 -0
  15. agentops/instrumentation/agentic/agno/attributes/__init__.py +20 -0
  16. agentops/instrumentation/agentic/agno/attributes/agent.py +250 -0
  17. agentops/instrumentation/agentic/agno/attributes/metrics.py +214 -0
  18. agentops/instrumentation/agentic/agno/attributes/storage.py +158 -0
  19. agentops/instrumentation/agentic/agno/attributes/team.py +195 -0
  20. agentops/instrumentation/agentic/agno/attributes/tool.py +210 -0
  21. agentops/instrumentation/agentic/agno/attributes/workflow.py +254 -0
  22. agentops/instrumentation/agentic/agno/instrumentor.py +1313 -0
  23. agentops/instrumentation/agentic/crewai/LICENSE +201 -0
  24. agentops/instrumentation/agentic/crewai/NOTICE.md +10 -0
  25. agentops/instrumentation/agentic/crewai/__init__.py +6 -0
  26. agentops/instrumentation/agentic/crewai/crewai_span_attributes.py +335 -0
  27. agentops/instrumentation/agentic/crewai/instrumentation.py +535 -0
  28. agentops/instrumentation/agentic/crewai/version.py +1 -0
  29. agentops/instrumentation/agentic/google_adk/__init__.py +19 -0
  30. agentops/instrumentation/agentic/google_adk/instrumentor.py +68 -0
  31. agentops/instrumentation/agentic/google_adk/patch.py +767 -0
  32. agentops/instrumentation/agentic/haystack/__init__.py +1 -0
  33. agentops/instrumentation/agentic/haystack/instrumentor.py +186 -0
  34. agentops/instrumentation/agentic/langgraph/__init__.py +3 -0
  35. agentops/instrumentation/agentic/langgraph/attributes.py +54 -0
  36. agentops/instrumentation/agentic/langgraph/instrumentation.py +598 -0
  37. agentops/instrumentation/agentic/langgraph/version.py +1 -0
  38. agentops/instrumentation/agentic/openai_agents/README.md +156 -0
  39. agentops/instrumentation/agentic/openai_agents/SPANS.md +145 -0
  40. agentops/instrumentation/agentic/openai_agents/TRACING_API.md +144 -0
  41. agentops/instrumentation/agentic/openai_agents/__init__.py +30 -0
  42. agentops/instrumentation/agentic/openai_agents/attributes/common.py +549 -0
  43. agentops/instrumentation/agentic/openai_agents/attributes/completion.py +172 -0
  44. agentops/instrumentation/agentic/openai_agents/attributes/model.py +58 -0
  45. agentops/instrumentation/agentic/openai_agents/attributes/tokens.py +275 -0
  46. agentops/instrumentation/agentic/openai_agents/exporter.py +469 -0
  47. agentops/instrumentation/agentic/openai_agents/instrumentor.py +107 -0
  48. agentops/instrumentation/agentic/openai_agents/processor.py +58 -0
  49. agentops/instrumentation/agentic/smolagents/README.md +88 -0
  50. agentops/instrumentation/agentic/smolagents/__init__.py +12 -0
  51. agentops/instrumentation/agentic/smolagents/attributes/agent.py +354 -0
  52. agentops/instrumentation/agentic/smolagents/attributes/model.py +205 -0
  53. agentops/instrumentation/agentic/smolagents/instrumentor.py +286 -0
  54. agentops/instrumentation/agentic/smolagents/stream_wrapper.py +258 -0
  55. agentops/instrumentation/agentic/xpander/__init__.py +15 -0
  56. agentops/instrumentation/agentic/xpander/context.py +112 -0
  57. agentops/instrumentation/agentic/xpander/instrumentor.py +877 -0
  58. agentops/instrumentation/agentic/xpander/trace_probe.py +86 -0
  59. agentops/instrumentation/agentic/xpander/version.py +3 -0
  60. agentops/instrumentation/common/README.md +65 -0
  61. agentops/instrumentation/common/attributes.py +1 -2
  62. agentops/instrumentation/providers/anthropic/__init__.py +24 -0
  63. agentops/instrumentation/providers/anthropic/attributes/__init__.py +23 -0
  64. agentops/instrumentation/providers/anthropic/attributes/common.py +64 -0
  65. agentops/instrumentation/providers/anthropic/attributes/message.py +541 -0
  66. agentops/instrumentation/providers/anthropic/attributes/tools.py +231 -0
  67. agentops/instrumentation/providers/anthropic/event_handler_wrapper.py +90 -0
  68. agentops/instrumentation/providers/anthropic/instrumentor.py +146 -0
  69. agentops/instrumentation/providers/anthropic/stream_wrapper.py +436 -0
  70. agentops/instrumentation/providers/google_genai/README.md +33 -0
  71. agentops/instrumentation/providers/google_genai/__init__.py +24 -0
  72. agentops/instrumentation/providers/google_genai/attributes/__init__.py +25 -0
  73. agentops/instrumentation/providers/google_genai/attributes/chat.py +125 -0
  74. agentops/instrumentation/providers/google_genai/attributes/common.py +88 -0
  75. agentops/instrumentation/providers/google_genai/attributes/model.py +284 -0
  76. agentops/instrumentation/providers/google_genai/instrumentor.py +170 -0
  77. agentops/instrumentation/providers/google_genai/stream_wrapper.py +238 -0
  78. agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py +28 -0
  79. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/__init__.py +27 -0
  80. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/attributes.py +277 -0
  81. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/common.py +104 -0
  82. agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py +162 -0
  83. agentops/instrumentation/providers/ibm_watsonx_ai/stream_wrapper.py +302 -0
  84. agentops/instrumentation/providers/mem0/__init__.py +45 -0
  85. agentops/instrumentation/providers/mem0/common.py +377 -0
  86. agentops/instrumentation/providers/mem0/instrumentor.py +270 -0
  87. agentops/instrumentation/providers/mem0/memory.py +430 -0
  88. agentops/instrumentation/providers/openai/__init__.py +21 -0
  89. agentops/instrumentation/providers/openai/attributes/__init__.py +7 -0
  90. agentops/instrumentation/providers/openai/attributes/common.py +55 -0
  91. agentops/instrumentation/providers/openai/attributes/response.py +607 -0
  92. agentops/instrumentation/providers/openai/config.py +36 -0
  93. agentops/instrumentation/providers/openai/instrumentor.py +312 -0
  94. agentops/instrumentation/providers/openai/stream_wrapper.py +941 -0
  95. agentops/instrumentation/providers/openai/utils.py +44 -0
  96. agentops/instrumentation/providers/openai/v0.py +176 -0
  97. agentops/instrumentation/providers/openai/v0_wrappers.py +483 -0
  98. agentops/instrumentation/providers/openai/wrappers/__init__.py +30 -0
  99. agentops/instrumentation/providers/openai/wrappers/assistant.py +277 -0
  100. agentops/instrumentation/providers/openai/wrappers/chat.py +259 -0
  101. agentops/instrumentation/providers/openai/wrappers/completion.py +109 -0
  102. agentops/instrumentation/providers/openai/wrappers/embeddings.py +94 -0
  103. agentops/instrumentation/providers/openai/wrappers/image_gen.py +75 -0
  104. agentops/instrumentation/providers/openai/wrappers/responses.py +191 -0
  105. agentops/instrumentation/providers/openai/wrappers/shared.py +81 -0
  106. agentops/instrumentation/utilities/concurrent_futures/__init__.py +10 -0
  107. agentops/instrumentation/utilities/concurrent_futures/instrumentation.py +206 -0
  108. agentops/integration/callbacks/dspy/__init__.py +11 -0
  109. agentops/integration/callbacks/dspy/callback.py +471 -0
  110. agentops/integration/callbacks/langchain/README.md +59 -0
  111. agentops/integration/callbacks/langchain/__init__.py +15 -0
  112. agentops/integration/callbacks/langchain/callback.py +791 -0
  113. agentops/integration/callbacks/langchain/utils.py +54 -0
  114. agentops/legacy/crewai.md +121 -0
  115. agentops/logging/instrument_logging.py +4 -0
  116. agentops/sdk/README.md +220 -0
  117. agentops/sdk/core.py +75 -32
  118. agentops/sdk/descriptors/classproperty.py +28 -0
  119. agentops/sdk/exporters.py +152 -33
  120. agentops/semconv/README.md +125 -0
  121. agentops/semconv/span_kinds.py +0 -2
  122. agentops/validation.py +102 -63
  123. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/METADATA +30 -40
  124. mseep_agentops-0.4.23.dist-info/RECORD +178 -0
  125. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/WHEEL +1 -2
  126. mseep_agentops-0.4.18.dist-info/RECORD +0 -94
  127. mseep_agentops-0.4.18.dist-info/top_level.txt +0 -2
  128. tests/conftest.py +0 -10
  129. tests/unit/client/__init__.py +0 -1
  130. tests/unit/client/test_http_adapter.py +0 -221
  131. tests/unit/client/test_http_client.py +0 -206
  132. tests/unit/conftest.py +0 -54
  133. tests/unit/sdk/__init__.py +0 -1
  134. tests/unit/sdk/instrumentation_tester.py +0 -207
  135. tests/unit/sdk/test_attributes.py +0 -392
  136. tests/unit/sdk/test_concurrent_instrumentation.py +0 -468
  137. tests/unit/sdk/test_decorators.py +0 -763
  138. tests/unit/sdk/test_exporters.py +0 -241
  139. tests/unit/sdk/test_factory.py +0 -1188
  140. tests/unit/sdk/test_internal_span_processor.py +0 -397
  141. tests/unit/sdk/test_resource_attributes.py +0 -35
  142. tests/unit/test_config.py +0 -82
  143. tests/unit/test_context_manager.py +0 -777
  144. tests/unit/test_events.py +0 -27
  145. tests/unit/test_host_env.py +0 -54
  146. tests/unit/test_init_py.py +0 -501
  147. tests/unit/test_serialization.py +0 -433
  148. tests/unit/test_session.py +0 -676
  149. tests/unit/test_user_agent.py +0 -34
  150. tests/unit/test_validation.py +0 -405
  151. {tests → agentops/instrumentation/agentic/openai_agents/attributes}/__init__.py +0 -0
  152. /tests/unit/__init__.py → /agentops/instrumentation/providers/openai/attributes/tools.py +0 -0
  153. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,277 @@
1
+ """Assistant API wrapper for OpenAI instrumentation.
2
+
3
+ This module provides attribute extraction for OpenAI Assistant API endpoints.
4
+ """
5
+
6
+ import json
7
+ import logging
8
+ from typing import Any, Dict, Optional, Tuple
9
+
10
+ from agentops.instrumentation.providers.openai.utils import is_openai_v1
11
+ from agentops.instrumentation.providers.openai.wrappers.shared import (
12
+ model_as_dict,
13
+ should_send_prompts,
14
+ )
15
+ from agentops.instrumentation.providers.openai.config import Config
16
+ from agentops.instrumentation.common.attributes import AttributeMap
17
+ from agentops.semconv import SpanAttributes
18
+
19
+ logger = logging.getLogger(__name__)
20
+
21
+
22
def handle_assistant_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Extract span attributes from an Assistant ``create`` call.

    Args:
        args: Positional arguments of the wrapped call (unused).
        kwargs: Keyword arguments of the wrapped call; request attributes
            (model, name, description, instructions, tools) are read here.
        return_value: The API response; response attributes (id, model,
            created_at, plus optional enrichment fields) are read here.

    Returns:
        A flat attribute map suitable for setting on a span.
    """
    attributes = {
        SpanAttributes.LLM_SYSTEM: "OpenAI",
        "gen_ai.operation.name": "assistant.create",
    }

    # Extract request attributes from kwargs
    if kwargs:
        if "model" in kwargs:
            attributes[SpanAttributes.LLM_REQUEST_MODEL] = kwargs["model"]
        if "name" in kwargs:
            attributes["gen_ai.assistant.name"] = kwargs["name"]
        if "description" in kwargs:
            attributes["gen_ai.assistant.description"] = kwargs["description"]
        if "instructions" in kwargs:
            attributes["gen_ai.assistant.instructions"] = kwargs["instructions"]

        # Tools — guard against an explicit ``tools=None`` (previously this
        # raised TypeError when iterating; matches the None-check style used
        # by the chat wrapper).
        tools = kwargs.get("tools") or []
        for i, tool in enumerate(tools):
            if isinstance(tool, dict):
                attributes[f"gen_ai.assistant.tools.{i}.type"] = tool.get("type")
            else:
                attributes[f"gen_ai.assistant.tools.{i}.type"] = str(tool)

    # Extract response attributes
    if return_value:
        response_dict = {}
        if hasattr(return_value, "__dict__"):
            response_dict = model_as_dict(return_value)
        elif isinstance(return_value, dict):
            response_dict = return_value

        if "id" in response_dict:
            attributes["gen_ai.assistant.id"] = response_dict["id"]
        if "model" in response_dict:
            attributes[SpanAttributes.LLM_RESPONSE_MODEL] = response_dict["model"]
        if "created_at" in response_dict:
            attributes["gen_ai.assistant.created_at"] = response_dict["created_at"]

        # Optional enrichment fields, gated by config
        if Config.enrich_assistant:
            if "object" in response_dict:
                attributes["gen_ai.assistant.object"] = response_dict["object"]
            if "file_ids" in response_dict:
                attributes["gen_ai.assistant.file_ids"] = json.dumps(response_dict["file_ids"])
            if "metadata" in response_dict:
                attributes["gen_ai.assistant.metadata"] = json.dumps(response_dict["metadata"])

    return attributes
76
+
77
+
78
def handle_run_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Extract span attributes from a run creation call.

    Reads request data (thread/assistant ids, model, instructions, any
    additional messages) from *kwargs* and response data (run id, status,
    model, token usage, optional enrichment timestamps) from *return_value*.
    """
    attributes = {
        SpanAttributes.LLM_SYSTEM: "OpenAI",
        "gen_ai.operation.name": "run.create",
    }

    if kwargs:
        # One-to-one request parameters.
        for source_key, attr_key in (
            ("thread_id", "gen_ai.thread.id"),
            ("assistant_id", "gen_ai.assistant.id"),
            ("model", SpanAttributes.LLM_REQUEST_MODEL),
            ("instructions", "gen_ai.run.instructions"),
        ):
            if source_key in kwargs:
                attributes[attr_key] = kwargs[source_key]

        # Additional messages, only when prompt capture is enabled.
        extra_messages = kwargs.get("additional_messages", [])
        if extra_messages and should_send_prompts():
            for idx, message in enumerate(extra_messages):
                base = f"gen_ai.run.additional_messages.{idx}"
                if "role" in message:
                    attributes[f"{base}.role"] = message["role"]
                if "content" in message:
                    attributes[f"{base}.content"] = message["content"]

    if return_value:
        # Normalize the response to a plain dict where possible.
        if hasattr(return_value, "__dict__"):
            response = model_as_dict(return_value)
        elif isinstance(return_value, dict):
            response = return_value
        else:
            response = {}

        for source_key, attr_key in (
            ("id", "gen_ai.run.id"),
            ("status", "gen_ai.run.status"),
            ("thread_id", "gen_ai.thread.id"),
            ("assistant_id", "gen_ai.assistant.id"),
            ("model", SpanAttributes.LLM_RESPONSE_MODEL),
        ):
            if source_key in response:
                attributes[attr_key] = response[source_key]

        # Token usage (may still be an SDK object under openai v1).
        usage = response.get("usage", {})
        if usage:
            if is_openai_v1() and hasattr(usage, "__dict__"):
                usage = usage.__dict__
            for source_key, attr_key in (
                ("prompt_tokens", SpanAttributes.LLM_USAGE_PROMPT_TOKENS),
                ("completion_tokens", SpanAttributes.LLM_USAGE_COMPLETION_TOKENS),
                ("total_tokens", SpanAttributes.LLM_USAGE_TOTAL_TOKENS),
            ):
                if source_key in usage:
                    attributes[attr_key] = usage[source_key]

        # Optional enrichment fields, gated by config.
        if Config.enrich_assistant:
            for timestamp_key in ("created_at", "started_at", "completed_at", "failed_at"):
                if timestamp_key in response:
                    attributes[f"gen_ai.run.{timestamp_key}"] = response[timestamp_key]
            if "metadata" in response:
                attributes["gen_ai.run.metadata"] = json.dumps(response["metadata"])

    return attributes
154
+
155
+
156
def handle_run_retrieve_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Extract span attributes from a run retrieval call.

    The run id is taken from the first positional argument when present,
    otherwise from ``kwargs["run_id"]``. Response attributes share the
    run-creation shape and are delegated to :func:`handle_run_attributes`.
    """
    attributes = {
        SpanAttributes.LLM_SYSTEM: "OpenAI",
        "gen_ai.operation.name": "run.retrieve",
    }

    # Prefer the positional run id, falling back to the keyword form.
    run_id = None
    if args:
        run_id = args[0]
    elif kwargs:
        run_id = kwargs.get("run_id")
    if run_id:
        attributes["gen_ai.run.id"] = run_id

    # Reuse the run-creation extractor for the response side, but keep
    # this function's own operation name.
    if return_value:
        shared = handle_run_attributes(None, None, return_value)
        shared.pop("gen_ai.operation.name", None)
        attributes.update(shared)

    return attributes
185
+
186
+
187
def handle_run_stream_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Extract span attributes from a run ``create_and_stream`` call.

    Request attributes are delegated to :func:`handle_run_attributes`.
    Because the call streams, no immediate response attributes are
    available here.
    """
    attributes = {
        SpanAttributes.LLM_SYSTEM: "OpenAI",
        "gen_ai.operation.name": "run.create_and_stream",
        SpanAttributes.LLM_REQUEST_STREAMING: True,
    }

    if kwargs:
        # Reuse the run-creation extractor for the request side, but keep
        # this function's own operation name.
        delegated = handle_run_attributes(None, kwargs, None)
        delegated.pop("gen_ai.operation.name", None)
        attributes.update(delegated)

    # Streaming: response attributes are captured later, not here.
    return attributes
209
+
210
+
211
def handle_messages_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Extract span attributes from a messages list call.

    Records the thread id and the number of messages returned; when both
    assistant enrichment and prompt capture are enabled, also records
    details (id, role, created_at, text content) of the first ten messages.
    """
    attributes = {
        SpanAttributes.LLM_SYSTEM: "OpenAI",
        "gen_ai.operation.name": "messages.list",
    }

    # Thread id: positional argument wins, keyword form is the fallback.
    thread_id = None
    if args:
        thread_id = args[0]
    elif kwargs:
        thread_id = kwargs.get("thread_id")
    if thread_id:
        attributes["gen_ai.thread.id"] = thread_id

    if return_value:
        # Normalize the response to a plain dict where possible.
        if hasattr(return_value, "__dict__"):
            response = model_as_dict(return_value)
        elif isinstance(return_value, dict):
            response = return_value
        else:
            response = {}

        # List responses: record how many messages came back.
        data = response.get("data", [])
        attributes["gen_ai.messages.count"] = len(data)

        if Config.enrich_assistant and should_send_prompts():
            # Capture details for at most the first 10 messages.
            for i, raw in enumerate(data[:10]):
                message = raw if isinstance(raw, dict) else model_as_dict(raw)

                prefix = f"gen_ai.messages.{i}"
                for field in ("id", "role", "created_at"):
                    if field in message:
                        attributes[f"{prefix}.{field}"] = message[field]

                # Message content may be a list of typed content items.
                content = message.get("content", [])
                if content and isinstance(content, list):
                    for j, item in enumerate(content):
                        try:
                            if isinstance(item, dict) and item.get("type") == "text":
                                text_obj = item.get("text")
                                if text_obj and isinstance(text_obj, dict):
                                    attributes[f"{prefix}.content.{j}"] = text_obj.get("value", "")
                            elif hasattr(item, "text") and hasattr(item.text, "value"):
                                # Object-style content (SDK model instances)
                                attributes[f"{prefix}.content.{j}"] = item.text.value
                        except Exception:
                            # Skip malformed content items and keep going
                            continue

    return attributes
@@ -0,0 +1,259 @@
1
+ """Chat completions wrapper for OpenAI instrumentation.
2
+
3
+ This module provides attribute extraction for OpenAI chat completions API,
4
+ compatible with the common wrapper pattern.
5
+ """
6
+
7
+ import json
8
+ import logging
9
+ from typing import Any, Dict, Optional, Tuple
10
+
11
+ from opentelemetry.trace import Span
12
+
13
+ from agentops.instrumentation.providers.openai.utils import is_openai_v1
14
+ from agentops.instrumentation.providers.openai.wrappers.shared import (
15
+ model_as_dict,
16
+ should_send_prompts,
17
+ )
18
+ from agentops.instrumentation.common.attributes import AttributeMap
19
+ from agentops.semconv import SpanAttributes, LLMRequestTypeValues
20
+ from agentops.semconv.tool import ToolAttributes
21
+ from agentops.semconv.span_kinds import AgentOpsSpanKindValues
22
+
23
+ from opentelemetry import context as context_api
24
+ from opentelemetry.trace import SpanKind, Status, StatusCode, get_tracer
25
+
26
+ logger = logging.getLogger(__name__)
27
+
28
+ LLM_REQUEST_TYPE = LLMRequestTypeValues.CHAT
29
+
30
+
31
def _create_tool_span(parent_span, tool_call_data):
    """
    Create a distinct child span for a single tool call.

    Args:
        parent_span: The parent LLM span the tool call belongs to
        tool_call_data: Dict with ``id``, ``type`` and a ``function`` dict
            holding ``name`` and ``arguments``
    """
    # Get the tracer for this module
    tracer = get_tracer(__name__)

    # Create a child span for the tool call
    with tracer.start_as_current_span(
        name=f"tool_call.{tool_call_data['function']['name']}",
        kind=SpanKind.INTERNAL,
        context=context_api.set_value("current_span", parent_span),
    ) as tool_span:
        # Set the span kind to TOOL. Use the enum's string value: OTel
        # attribute values must be primitives, and this module already sets
        # the analogous semconv enum via ``LLM_REQUEST_TYPE.value``.
        tool_span.set_attribute("agentops.span.kind", AgentOpsSpanKindValues.TOOL.value)

        # Set tool-specific attributes
        tool_span.set_attribute(ToolAttributes.TOOL_NAME, tool_call_data["function"]["name"])
        tool_span.set_attribute(ToolAttributes.TOOL_PARAMETERS, tool_call_data["function"]["arguments"])
        tool_span.set_attribute("tool.call.id", tool_call_data["id"])
        tool_span.set_attribute("tool.call.type", tool_call_data["type"])

        # Set status to OK for successful tool call creation
        tool_span.set_status(Status(StatusCode.OK))
59
+
60
+
61
def _extract_chat_request_attributes(kwargs: Dict, attributes: AttributeMap) -> None:
    """Populate *attributes* with request-side data read from *kwargs*."""
    # Model
    if "model" in kwargs:
        attributes[SpanAttributes.LLM_REQUEST_MODEL] = kwargs["model"]

    # Scalar request parameters
    if "max_tokens" in kwargs:
        attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] = kwargs["max_tokens"]
    if "temperature" in kwargs:
        attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] = kwargs["temperature"]
    if "top_p" in kwargs:
        attributes[SpanAttributes.LLM_REQUEST_TOP_P] = kwargs["top_p"]
    if "frequency_penalty" in kwargs:
        attributes[SpanAttributes.LLM_REQUEST_FREQUENCY_PENALTY] = kwargs["frequency_penalty"]
    if "presence_penalty" in kwargs:
        attributes[SpanAttributes.LLM_REQUEST_PRESENCE_PENALTY] = kwargs["presence_penalty"]
    if "user" in kwargs:
        attributes[SpanAttributes.LLM_USER] = kwargs["user"]

    # Streaming flag (defaults to False when absent)
    attributes[SpanAttributes.LLM_REQUEST_STREAMING] = kwargs.get("stream", False)

    # Headers
    headers = kwargs.get("extra_headers") or kwargs.get("headers")
    if headers:
        attributes[SpanAttributes.LLM_REQUEST_HEADERS] = str(headers)

    # Messages (only when prompt capture is enabled)
    if should_send_prompts() and "messages" in kwargs:
        for i, msg in enumerate(kwargs["messages"]):
            prefix = f"{SpanAttributes.LLM_PROMPTS}.{i}"
            if "role" in msg:
                attributes[f"{prefix}.role"] = msg["role"]
            if "content" in msg:
                content = msg["content"]
                if isinstance(content, list):
                    # Multi-modal content is serialized as JSON
                    content = json.dumps(content)
                attributes[f"{prefix}.content"] = content
            if "tool_call_id" in msg:
                attributes[f"{prefix}.tool_call_id"] = msg["tool_call_id"]

            # Tool calls attached to a request message
            if "tool_calls" in msg:
                tool_calls = msg["tool_calls"]
                if tool_calls:  # May be explicitly None
                    for j, tool_call in enumerate(tool_calls):
                        if is_openai_v1() and hasattr(tool_call, "__dict__"):
                            tool_call = model_as_dict(tool_call)
                        function = tool_call.get("function", {})
                        attributes[f"{prefix}.tool_calls.{j}.id"] = tool_call.get("id")
                        attributes[f"{prefix}.tool_calls.{j}.name"] = function.get("name")
                        attributes[f"{prefix}.tool_calls.{j}.arguments"] = function.get("arguments")

    # Legacy "functions" parameter
    if "functions" in kwargs:
        for i, function in enumerate(kwargs["functions"]):
            prefix = f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}"
            attributes[f"{prefix}.name"] = function.get("name")
            attributes[f"{prefix}.description"] = function.get("description")
            attributes[f"{prefix}.parameters"] = json.dumps(function.get("parameters"))

    # "tools" parameter
    if "tools" in kwargs:
        tools = kwargs["tools"]
        if tools:  # May be explicitly None
            for i, tool in enumerate(tools):
                function = tool.get("function", {})
                prefix = f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}"
                attributes[f"{prefix}.name"] = function.get("name")
                attributes[f"{prefix}.description"] = function.get("description")
                attributes[f"{prefix}.parameters"] = json.dumps(function.get("parameters"))


def _extract_chat_response_attributes(
    return_value: Any, attributes: AttributeMap, span: Optional[Span]
) -> None:
    """Populate *attributes* from the completion response.

    When the response contains tool calls and *span* is provided, a child
    tool span is created under *span* for each tool call.
    """
    # Note: For streaming responses, return_value might be a generator/stream
    # In that case, we won't have the full response data here

    # Convert to dict if needed
    response_dict = {}
    if hasattr(return_value, "__dict__") and not hasattr(return_value, "__iter__"):
        response_dict = model_as_dict(return_value)
    elif isinstance(return_value, dict):
        response_dict = return_value
    elif hasattr(return_value, "model_dump"):
        # Handle Pydantic models directly
        response_dict = return_value.model_dump()
    elif hasattr(return_value, "__dict__"):
        # Try to use model_as_dict even if it has __iter__ (fallback)
        response_dict = model_as_dict(return_value)

    logger.debug(f"[OPENAI DEBUG] response_dict keys: {list(response_dict.keys()) if response_dict else 'empty'}")

    # Basic response attributes
    if "id" in response_dict:
        attributes[SpanAttributes.LLM_RESPONSE_ID] = response_dict["id"]
    if "model" in response_dict:
        attributes[SpanAttributes.LLM_RESPONSE_MODEL] = response_dict["model"]
    if "system_fingerprint" in response_dict and response_dict["system_fingerprint"] is not None:
        attributes[SpanAttributes.LLM_OPENAI_RESPONSE_SYSTEM_FINGERPRINT] = response_dict["system_fingerprint"]

    # Usage (may still be an SDK object under openai v1)
    usage = response_dict.get("usage", {})
    if usage:
        if is_openai_v1() and hasattr(usage, "__dict__"):
            usage = usage.__dict__
        if "total_tokens" in usage:
            attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = usage["total_tokens"]
        if "prompt_tokens" in usage:
            attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = usage["prompt_tokens"]
        if "completion_tokens" in usage:
            attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = usage["completion_tokens"]

        # Reasoning tokens
        output_details = usage.get("output_tokens_details", {})
        if isinstance(output_details, dict) and "reasoning_tokens" in output_details:
            attributes[SpanAttributes.LLM_USAGE_REASONING_TOKENS] = output_details["reasoning_tokens"]

    # Choices (only when prompt capture is enabled)
    if should_send_prompts() and "choices" in response_dict:
        for choice in response_dict["choices"]:
            index = choice.get("index", 0)
            prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"

            if "finish_reason" in choice:
                attributes[f"{prefix}.finish_reason"] = choice["finish_reason"]

            # Content filter
            if "content_filter_results" in choice:
                attributes[f"{prefix}.content_filter_results"] = json.dumps(choice["content_filter_results"])

            # Message
            message = choice.get("message", {})
            if message:
                if "role" in message:
                    attributes[f"{prefix}.role"] = message["role"]
                if "content" in message and message["content"] is not None:
                    attributes[f"{prefix}.content"] = message["content"]
                if "refusal" in message and message["refusal"] is not None:
                    attributes[f"{prefix}.refusal"] = message["refusal"]

                # Legacy function_call field
                if "function_call" in message:
                    function_call = message["function_call"]
                    if function_call:  # Check if function_call is not None
                        attributes[f"{prefix}.tool_calls.0.name"] = function_call.get("name")
                        attributes[f"{prefix}.tool_calls.0.arguments"] = function_call.get("arguments")

                # Tool calls: emit a child span per call when a parent span exists
                if "tool_calls" in message:
                    tool_calls = message["tool_calls"]
                    if tool_calls and span is not None:
                        for tool_call in tool_calls:
                            function = tool_call.get("function", {})
                            tool_call_data = {
                                "id": tool_call.get("id", ""),
                                "type": tool_call.get("type", "function"),
                                "function": {
                                    "name": function.get("name", ""),
                                    "arguments": function.get("arguments", ""),
                                },
                            }
                            _create_tool_span(span, tool_call_data)

    # Prompt filter results
    if "prompt_filter_results" in response_dict:
        attributes[f"{SpanAttributes.LLM_PROMPTS}.prompt_filter_results"] = json.dumps(
            response_dict["prompt_filter_results"]
        )


def handle_chat_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    return_value: Optional[Any] = None,
    span: Optional[Span] = None,
) -> AttributeMap:
    """Extract attributes from chat completion calls.

    This function is designed to work with the common wrapper pattern,
    extracting attributes from the method arguments and return value.
    Request-side and response-side extraction are delegated to private
    helpers to keep each concern readable.

    Args:
        args: Method arguments (not used in this implementation)
        kwargs: Method keyword arguments
        return_value: Method return value
        span: The parent span for creating tool spans
    """
    attributes = {
        SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value,
        SpanAttributes.LLM_SYSTEM: "OpenAI",
    }

    # Extract request attributes from kwargs
    if kwargs:
        _extract_chat_request_attributes(kwargs, attributes)

    # Extract response attributes from return value
    if return_value:
        _extract_chat_response_attributes(return_value, attributes, span)

    return attributes
@@ -0,0 +1,109 @@
1
+ """Completion wrapper for OpenAI instrumentation.
2
+
3
+ This module provides attribute extraction for OpenAI text completions API.
4
+ """
5
+
6
+ import logging
7
+ from typing import Any, Dict, Optional, Tuple
8
+
9
+ from agentops.instrumentation.providers.openai.utils import is_openai_v1
10
+ from agentops.instrumentation.providers.openai.wrappers.shared import (
11
+ model_as_dict,
12
+ should_send_prompts,
13
+ )
14
+ from agentops.instrumentation.common.attributes import AttributeMap
15
+ from agentops.semconv import SpanAttributes, LLMRequestTypeValues
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+ LLM_REQUEST_TYPE = LLMRequestTypeValues.COMPLETION
20
+
21
+
22
def handle_completion_attributes(
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    return_value: Optional[Any] = None,
) -> AttributeMap:
    """Extract span attributes from a text-completion call.

    Reads request parameters (model, sampling settings, prompt) from
    *kwargs* and response data (id, model, token usage, choices) from
    *return_value*.
    """
    attributes = {
        SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value,
        SpanAttributes.LLM_SYSTEM: "OpenAI",
    }

    if kwargs:
        # One-to-one request parameters.
        for source_key, attr_key in (
            ("model", SpanAttributes.LLM_REQUEST_MODEL),
            ("max_tokens", SpanAttributes.LLM_REQUEST_MAX_TOKENS),
            ("temperature", SpanAttributes.LLM_REQUEST_TEMPERATURE),
            ("top_p", SpanAttributes.LLM_REQUEST_TOP_P),
            ("frequency_penalty", SpanAttributes.LLM_REQUEST_FREQUENCY_PENALTY),
            ("presence_penalty", SpanAttributes.LLM_REQUEST_PRESENCE_PENALTY),
            ("user", SpanAttributes.LLM_USER),
        ):
            if source_key in kwargs:
                attributes[attr_key] = kwargs[source_key]

        # Streaming flag (False when absent).
        attributes[SpanAttributes.LLM_REQUEST_STREAMING] = kwargs.get("stream", False)

        # Request headers, whichever form was supplied.
        headers = kwargs.get("extra_headers") or kwargs.get("headers")
        if headers:
            attributes[SpanAttributes.LLM_REQUEST_HEADERS] = str(headers)

        # Prompt capture: a single string or a list of strings.
        if should_send_prompts() and "prompt" in kwargs:
            prompt = kwargs["prompt"]
            if isinstance(prompt, list):
                for i, part in enumerate(prompt):
                    attributes[f"{SpanAttributes.LLM_PROMPTS}.{i}.content"] = part
            else:
                attributes[f"{SpanAttributes.LLM_PROMPTS}.0.content"] = prompt

    if return_value:
        # Normalize the response to a plain dict where possible.
        if hasattr(return_value, "__dict__") and not hasattr(return_value, "__iter__"):
            response = model_as_dict(return_value)
        elif isinstance(return_value, dict):
            response = return_value
        else:
            response = {}

        if "id" in response:
            attributes[SpanAttributes.LLM_RESPONSE_ID] = response["id"]
        if "model" in response:
            attributes[SpanAttributes.LLM_RESPONSE_MODEL] = response["model"]

        # Token usage (may still be an SDK object under openai v1).
        usage = response.get("usage", {})
        if usage:
            if is_openai_v1() and hasattr(usage, "__dict__"):
                usage = usage.__dict__
            for source_key, attr_key in (
                ("total_tokens", SpanAttributes.LLM_USAGE_TOTAL_TOKENS),
                ("prompt_tokens", SpanAttributes.LLM_USAGE_PROMPT_TOKENS),
                ("completion_tokens", SpanAttributes.LLM_USAGE_COMPLETION_TOKENS),
            ):
                if source_key in usage:
                    attributes[attr_key] = usage[source_key]

        # Choices, only when prompt capture is enabled.
        if should_send_prompts() and "choices" in response:
            for choice in response["choices"]:
                prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{choice.get('index', 0)}"
                if "finish_reason" in choice:
                    attributes[f"{prefix}.finish_reason"] = choice["finish_reason"]
                if "text" in choice:
                    attributes[f"{prefix}.content"] = choice["text"]

    return attributes
+ return attributes