mseep-agentops 0.4.18__py3-none-any.whl → 0.4.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (153) hide show
  1. agentops/__init__.py +0 -0
  2. agentops/client/api/base.py +28 -30
  3. agentops/client/api/versions/v3.py +29 -25
  4. agentops/client/api/versions/v4.py +87 -46
  5. agentops/client/client.py +98 -29
  6. agentops/client/http/README.md +87 -0
  7. agentops/client/http/http_client.py +126 -172
  8. agentops/config.py +8 -2
  9. agentops/instrumentation/OpenTelemetry.md +133 -0
  10. agentops/instrumentation/README.md +167 -0
  11. agentops/instrumentation/__init__.py +13 -1
  12. agentops/instrumentation/agentic/ag2/__init__.py +18 -0
  13. agentops/instrumentation/agentic/ag2/instrumentor.py +922 -0
  14. agentops/instrumentation/agentic/agno/__init__.py +19 -0
  15. agentops/instrumentation/agentic/agno/attributes/__init__.py +20 -0
  16. agentops/instrumentation/agentic/agno/attributes/agent.py +250 -0
  17. agentops/instrumentation/agentic/agno/attributes/metrics.py +214 -0
  18. agentops/instrumentation/agentic/agno/attributes/storage.py +158 -0
  19. agentops/instrumentation/agentic/agno/attributes/team.py +195 -0
  20. agentops/instrumentation/agentic/agno/attributes/tool.py +210 -0
  21. agentops/instrumentation/agentic/agno/attributes/workflow.py +254 -0
  22. agentops/instrumentation/agentic/agno/instrumentor.py +1313 -0
  23. agentops/instrumentation/agentic/crewai/LICENSE +201 -0
  24. agentops/instrumentation/agentic/crewai/NOTICE.md +10 -0
  25. agentops/instrumentation/agentic/crewai/__init__.py +6 -0
  26. agentops/instrumentation/agentic/crewai/crewai_span_attributes.py +335 -0
  27. agentops/instrumentation/agentic/crewai/instrumentation.py +535 -0
  28. agentops/instrumentation/agentic/crewai/version.py +1 -0
  29. agentops/instrumentation/agentic/google_adk/__init__.py +19 -0
  30. agentops/instrumentation/agentic/google_adk/instrumentor.py +68 -0
  31. agentops/instrumentation/agentic/google_adk/patch.py +767 -0
  32. agentops/instrumentation/agentic/haystack/__init__.py +1 -0
  33. agentops/instrumentation/agentic/haystack/instrumentor.py +186 -0
  34. agentops/instrumentation/agentic/langgraph/__init__.py +3 -0
  35. agentops/instrumentation/agentic/langgraph/attributes.py +54 -0
  36. agentops/instrumentation/agentic/langgraph/instrumentation.py +598 -0
  37. agentops/instrumentation/agentic/langgraph/version.py +1 -0
  38. agentops/instrumentation/agentic/openai_agents/README.md +156 -0
  39. agentops/instrumentation/agentic/openai_agents/SPANS.md +145 -0
  40. agentops/instrumentation/agentic/openai_agents/TRACING_API.md +144 -0
  41. agentops/instrumentation/agentic/openai_agents/__init__.py +30 -0
  42. agentops/instrumentation/agentic/openai_agents/attributes/common.py +549 -0
  43. agentops/instrumentation/agentic/openai_agents/attributes/completion.py +172 -0
  44. agentops/instrumentation/agentic/openai_agents/attributes/model.py +58 -0
  45. agentops/instrumentation/agentic/openai_agents/attributes/tokens.py +275 -0
  46. agentops/instrumentation/agentic/openai_agents/exporter.py +469 -0
  47. agentops/instrumentation/agentic/openai_agents/instrumentor.py +107 -0
  48. agentops/instrumentation/agentic/openai_agents/processor.py +58 -0
  49. agentops/instrumentation/agentic/smolagents/README.md +88 -0
  50. agentops/instrumentation/agentic/smolagents/__init__.py +12 -0
  51. agentops/instrumentation/agentic/smolagents/attributes/agent.py +354 -0
  52. agentops/instrumentation/agentic/smolagents/attributes/model.py +205 -0
  53. agentops/instrumentation/agentic/smolagents/instrumentor.py +286 -0
  54. agentops/instrumentation/agentic/smolagents/stream_wrapper.py +258 -0
  55. agentops/instrumentation/agentic/xpander/__init__.py +15 -0
  56. agentops/instrumentation/agentic/xpander/context.py +112 -0
  57. agentops/instrumentation/agentic/xpander/instrumentor.py +877 -0
  58. agentops/instrumentation/agentic/xpander/trace_probe.py +86 -0
  59. agentops/instrumentation/agentic/xpander/version.py +3 -0
  60. agentops/instrumentation/common/README.md +65 -0
  61. agentops/instrumentation/common/attributes.py +1 -2
  62. agentops/instrumentation/providers/anthropic/__init__.py +24 -0
  63. agentops/instrumentation/providers/anthropic/attributes/__init__.py +23 -0
  64. agentops/instrumentation/providers/anthropic/attributes/common.py +64 -0
  65. agentops/instrumentation/providers/anthropic/attributes/message.py +541 -0
  66. agentops/instrumentation/providers/anthropic/attributes/tools.py +231 -0
  67. agentops/instrumentation/providers/anthropic/event_handler_wrapper.py +90 -0
  68. agentops/instrumentation/providers/anthropic/instrumentor.py +146 -0
  69. agentops/instrumentation/providers/anthropic/stream_wrapper.py +436 -0
  70. agentops/instrumentation/providers/google_genai/README.md +33 -0
  71. agentops/instrumentation/providers/google_genai/__init__.py +24 -0
  72. agentops/instrumentation/providers/google_genai/attributes/__init__.py +25 -0
  73. agentops/instrumentation/providers/google_genai/attributes/chat.py +125 -0
  74. agentops/instrumentation/providers/google_genai/attributes/common.py +88 -0
  75. agentops/instrumentation/providers/google_genai/attributes/model.py +284 -0
  76. agentops/instrumentation/providers/google_genai/instrumentor.py +170 -0
  77. agentops/instrumentation/providers/google_genai/stream_wrapper.py +238 -0
  78. agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py +28 -0
  79. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/__init__.py +27 -0
  80. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/attributes.py +277 -0
  81. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/common.py +104 -0
  82. agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py +162 -0
  83. agentops/instrumentation/providers/ibm_watsonx_ai/stream_wrapper.py +302 -0
  84. agentops/instrumentation/providers/mem0/__init__.py +45 -0
  85. agentops/instrumentation/providers/mem0/common.py +377 -0
  86. agentops/instrumentation/providers/mem0/instrumentor.py +270 -0
  87. agentops/instrumentation/providers/mem0/memory.py +430 -0
  88. agentops/instrumentation/providers/openai/__init__.py +21 -0
  89. agentops/instrumentation/providers/openai/attributes/__init__.py +7 -0
  90. agentops/instrumentation/providers/openai/attributes/common.py +55 -0
  91. agentops/instrumentation/providers/openai/attributes/response.py +607 -0
  92. agentops/instrumentation/providers/openai/config.py +36 -0
  93. agentops/instrumentation/providers/openai/instrumentor.py +312 -0
  94. agentops/instrumentation/providers/openai/stream_wrapper.py +941 -0
  95. agentops/instrumentation/providers/openai/utils.py +44 -0
  96. agentops/instrumentation/providers/openai/v0.py +176 -0
  97. agentops/instrumentation/providers/openai/v0_wrappers.py +483 -0
  98. agentops/instrumentation/providers/openai/wrappers/__init__.py +30 -0
  99. agentops/instrumentation/providers/openai/wrappers/assistant.py +277 -0
  100. agentops/instrumentation/providers/openai/wrappers/chat.py +259 -0
  101. agentops/instrumentation/providers/openai/wrappers/completion.py +109 -0
  102. agentops/instrumentation/providers/openai/wrappers/embeddings.py +94 -0
  103. agentops/instrumentation/providers/openai/wrappers/image_gen.py +75 -0
  104. agentops/instrumentation/providers/openai/wrappers/responses.py +191 -0
  105. agentops/instrumentation/providers/openai/wrappers/shared.py +81 -0
  106. agentops/instrumentation/utilities/concurrent_futures/__init__.py +10 -0
  107. agentops/instrumentation/utilities/concurrent_futures/instrumentation.py +206 -0
  108. agentops/integration/callbacks/dspy/__init__.py +11 -0
  109. agentops/integration/callbacks/dspy/callback.py +471 -0
  110. agentops/integration/callbacks/langchain/README.md +59 -0
  111. agentops/integration/callbacks/langchain/__init__.py +15 -0
  112. agentops/integration/callbacks/langchain/callback.py +791 -0
  113. agentops/integration/callbacks/langchain/utils.py +54 -0
  114. agentops/legacy/crewai.md +121 -0
  115. agentops/logging/instrument_logging.py +4 -0
  116. agentops/sdk/README.md +220 -0
  117. agentops/sdk/core.py +75 -32
  118. agentops/sdk/descriptors/classproperty.py +28 -0
  119. agentops/sdk/exporters.py +152 -33
  120. agentops/semconv/README.md +125 -0
  121. agentops/semconv/span_kinds.py +0 -2
  122. agentops/validation.py +102 -63
  123. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/METADATA +30 -40
  124. mseep_agentops-0.4.23.dist-info/RECORD +178 -0
  125. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/WHEEL +1 -2
  126. mseep_agentops-0.4.18.dist-info/RECORD +0 -94
  127. mseep_agentops-0.4.18.dist-info/top_level.txt +0 -2
  128. tests/conftest.py +0 -10
  129. tests/unit/client/__init__.py +0 -1
  130. tests/unit/client/test_http_adapter.py +0 -221
  131. tests/unit/client/test_http_client.py +0 -206
  132. tests/unit/conftest.py +0 -54
  133. tests/unit/sdk/__init__.py +0 -1
  134. tests/unit/sdk/instrumentation_tester.py +0 -207
  135. tests/unit/sdk/test_attributes.py +0 -392
  136. tests/unit/sdk/test_concurrent_instrumentation.py +0 -468
  137. tests/unit/sdk/test_decorators.py +0 -763
  138. tests/unit/sdk/test_exporters.py +0 -241
  139. tests/unit/sdk/test_factory.py +0 -1188
  140. tests/unit/sdk/test_internal_span_processor.py +0 -397
  141. tests/unit/sdk/test_resource_attributes.py +0 -35
  142. tests/unit/test_config.py +0 -82
  143. tests/unit/test_context_manager.py +0 -777
  144. tests/unit/test_events.py +0 -27
  145. tests/unit/test_host_env.py +0 -54
  146. tests/unit/test_init_py.py +0 -501
  147. tests/unit/test_serialization.py +0 -433
  148. tests/unit/test_session.py +0 -676
  149. tests/unit/test_user_agent.py +0 -34
  150. tests/unit/test_validation.py +0 -405
  151. {tests → agentops/instrumentation/agentic/openai_agents/attributes}/__init__.py +0 -0
  152. /tests/unit/__init__.py → /agentops/instrumentation/providers/openai/attributes/tools.py +0 -0
  153. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,483 @@
1
+ """Wrapper functions for OpenAI v0 API instrumentation.
2
+
3
+ This module provides wrapper functions for instrumenting OpenAI v0 API calls
4
+ (before v1.0.0). These wrappers extract attributes, create spans, and handle
5
+ metrics for the legacy API format.
6
+ """
7
+
8
+ import json
9
+ import time
10
+ from typing import Any, Dict
11
+ from opentelemetry.trace import Tracer, Status, StatusCode
12
+ from opentelemetry import context as context_api
13
+ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
14
+
15
+ from agentops.instrumentation.providers.openai.utils import is_metrics_enabled
16
+ from agentops.instrumentation.providers.openai.wrappers.shared import should_send_prompts
17
+ from agentops.semconv import SpanAttributes
18
+
19
+
20
def _extract_chat_messages(kwargs: Dict[str, Any]) -> list:
    """Pull the ``messages`` list out of chat-completion kwargs.

    Returns the messages only when prompt capture is enabled; otherwise an
    empty list so no prompt content is recorded on the span.
    """
    if not should_send_prompts():
        return []
    return kwargs.get("messages", [])
26
+
27
+
28
def _extract_chat_attributes(kwargs: Dict[str, Any], response: Any = None) -> Dict[str, Any]:
    """Build span attributes for a chat-completion call.

    Called once with only ``kwargs`` before the request, and again with the
    ``response`` afterwards; response/usage attributes are only added on the
    second pass.
    """
    attributes: Dict[str, Any] = {
        SpanAttributes.LLM_SYSTEM: "OpenAI",
        SpanAttributes.LLM_REQUEST_TYPE: "chat",
    }

    # Request parameters — only those the caller actually supplied.
    request_mapping = (
        ("model", SpanAttributes.LLM_REQUEST_MODEL),
        ("temperature", SpanAttributes.LLM_REQUEST_TEMPERATURE),
        ("max_tokens", SpanAttributes.LLM_REQUEST_MAX_TOKENS),
        # NOTE(review): "n" is the requested number of choices; mapping it to
        # LLM_REQUEST_MAX_NEW_TOKENS looks wrong — confirm the intended
        # attribute before changing it (kept as-is to preserve behavior).
        ("n", SpanAttributes.LLM_REQUEST_MAX_NEW_TOKENS),
    )
    for kwarg_name, attr_name in request_mapping:
        if kwarg_name in kwargs:
            attributes[attr_name] = kwargs[kwarg_name]

    # Prompt messages (empty when prompt capture is disabled).
    messages = _extract_chat_messages(kwargs)
    if messages:
        attributes[SpanAttributes.LLM_PROMPTS] = json.dumps(messages)

    if response:
        if hasattr(response, "model"):
            attributes[SpanAttributes.LLM_RESPONSE_MODEL] = response.model

        # First choice's message becomes the recorded completion (v0 returns
        # dict-like messages, hence .get()).
        choices = getattr(response, "choices", None)
        if choices:
            message = getattr(choices[0], "message", None)
            if message and should_send_prompts():
                attributes[SpanAttributes.LLM_COMPLETIONS] = json.dumps(
                    [
                        {
                            "role": message.get("role", "assistant"),
                            "content": message.get("content", ""),
                        }
                    ]
                )

        usage = getattr(response, "usage", None)
        if usage:
            for usage_field, attr_name in (
                ("prompt_tokens", SpanAttributes.LLM_USAGE_PROMPT_TOKENS),
                ("completion_tokens", SpanAttributes.LLM_USAGE_COMPLETION_TOKENS),
                ("total_tokens", SpanAttributes.LLM_USAGE_TOTAL_TOKENS),
            ):
                if hasattr(usage, usage_field):
                    attributes[attr_name] = getattr(usage, usage_field)

    return attributes
78
+
79
+
80
def chat_wrapper(
    tracer: Tracer,
    tokens_histogram=None,
    chat_choice_counter=None,
    duration_histogram=None,
    chat_exception_counter=None,
    streaming_time_to_first_token=None,
    streaming_time_to_generate=None,
):
    """Create a wrapper for ``ChatCompletion.create``.

    Args:
        tracer: Tracer used to open a span around each call.
        tokens_histogram: Optional histogram recording input/output token counts.
        chat_choice_counter: Optional counter for the number of returned choices.
        duration_histogram: Optional histogram recording call duration (seconds).
        chat_exception_counter: Optional counter incremented on failure.
        streaming_time_to_first_token: Accepted for signature parity with the
            streaming wrappers; unused here.
        streaming_time_to_generate: Accepted for signature parity; unused here.

    Returns:
        A wrapt-style wrapper ``(wrapped, instance, args, kwargs) -> response``.
    """

    def wrapper(wrapped, instance, args, kwargs):
        # Respect the OTel suppression flag (e.g. nested instrumentation).
        if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
            return wrapped(*args, **kwargs)

        start_time = time.time()
        span_name = "openai.ChatCompletion.create"
        # FIX: pre-bind so the except path below cannot hit a NameError when
        # attribute extraction itself raises before `attributes` is assigned.
        attributes: Dict[str, Any] = {}

        with tracer.start_as_current_span(span_name) as span:
            try:
                # Request-side attributes (no response yet).
                attributes = _extract_chat_attributes(kwargs)
                for key, value in attributes.items():
                    span.set_attribute(key, value)

                response = wrapped(*args, **kwargs)

                # Re-extract with the response to add model/completions/usage.
                response_attributes = _extract_chat_attributes(kwargs, response)
                for key, value in response_attributes.items():
                    span.set_attribute(key, value)

                if is_metrics_enabled():
                    duration = time.time() - start_time
                    if duration_histogram:
                        duration_histogram.record(duration, attributes)

                    usage = getattr(response, "usage", None)
                    if usage and tokens_histogram:
                        if hasattr(usage, "prompt_tokens"):
                            tokens_histogram.record(
                                usage.prompt_tokens, attributes={**attributes, "token.type": "input"}
                            )
                        if hasattr(usage, "completion_tokens"):
                            tokens_histogram.record(
                                usage.completion_tokens, attributes={**attributes, "token.type": "output"}
                            )

                    if chat_choice_counter and hasattr(response, "choices"):
                        chat_choice_counter.add(len(response.choices), attributes)

                span.set_status(Status(StatusCode.OK))
                return response

            except Exception as e:
                if chat_exception_counter and is_metrics_enabled():
                    chat_exception_counter.add(1, attributes)
                span.record_exception(e)
                span.set_status(Status(StatusCode.ERROR, str(e)))
                raise

    return wrapper
144
+
145
+
146
def achat_wrapper(
    tracer: Tracer,
    tokens_histogram=None,
    chat_choice_counter=None,
    duration_histogram=None,
    chat_exception_counter=None,
    streaming_time_to_first_token=None,
    streaming_time_to_generate=None,
):
    """Create a wrapper for ``ChatCompletion.acreate`` (async twin of
    :func:`chat_wrapper`; same attributes and metrics, awaited call).

    Args:
        tracer: Tracer used to open a span around each call.
        tokens_histogram: Optional histogram recording input/output token counts.
        chat_choice_counter: Optional counter for the number of returned choices.
        duration_histogram: Optional histogram recording call duration (seconds).
        chat_exception_counter: Optional counter incremented on failure.
        streaming_time_to_first_token: Accepted for signature parity; unused here.
        streaming_time_to_generate: Accepted for signature parity; unused here.

    Returns:
        An async wrapt-style wrapper ``(wrapped, instance, args, kwargs)``.
    """

    async def wrapper(wrapped, instance, args, kwargs):
        # Respect the OTel suppression flag (e.g. nested instrumentation).
        if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
            return await wrapped(*args, **kwargs)

        start_time = time.time()
        span_name = "openai.ChatCompletion.acreate"
        # FIX: pre-bind so the except path below cannot hit a NameError when
        # attribute extraction itself raises before `attributes` is assigned.
        attributes: Dict[str, Any] = {}

        with tracer.start_as_current_span(span_name) as span:
            try:
                # Request-side attributes (no response yet).
                attributes = _extract_chat_attributes(kwargs)
                for key, value in attributes.items():
                    span.set_attribute(key, value)

                response = await wrapped(*args, **kwargs)

                # Re-extract with the response to add model/completions/usage.
                response_attributes = _extract_chat_attributes(kwargs, response)
                for key, value in response_attributes.items():
                    span.set_attribute(key, value)

                # Metrics handling mirrors the sync wrapper.
                if is_metrics_enabled():
                    duration = time.time() - start_time
                    if duration_histogram:
                        duration_histogram.record(duration, attributes)

                    usage = getattr(response, "usage", None)
                    if usage and tokens_histogram:
                        if hasattr(usage, "prompt_tokens"):
                            tokens_histogram.record(
                                usage.prompt_tokens, attributes={**attributes, "token.type": "input"}
                            )
                        if hasattr(usage, "completion_tokens"):
                            tokens_histogram.record(
                                usage.completion_tokens, attributes={**attributes, "token.type": "output"}
                            )

                    if chat_choice_counter and hasattr(response, "choices"):
                        chat_choice_counter.add(len(response.choices), attributes)

                span.set_status(Status(StatusCode.OK))
                return response

            except Exception as e:
                if chat_exception_counter and is_metrics_enabled():
                    chat_exception_counter.add(1, attributes)
                span.record_exception(e)
                span.set_status(Status(StatusCode.ERROR, str(e)))
                raise

    return wrapper
210
+
211
+
212
def _extract_completion_attributes(kwargs: Dict[str, Any], response: Any = None) -> Dict[str, Any]:
    """Build span attributes for a (legacy) text-completion call.

    Called before the request with only ``kwargs``, then again with the
    ``response`` to add model, completion text, and usage.
    """
    attributes: Dict[str, Any] = {
        SpanAttributes.LLM_SYSTEM: "OpenAI",
        SpanAttributes.LLM_REQUEST_TYPE: "completion",
    }

    # Request parameters — only those the caller actually supplied.
    for kwarg_name, attr_name in (
        ("model", SpanAttributes.LLM_REQUEST_MODEL),
        ("temperature", SpanAttributes.LLM_REQUEST_TEMPERATURE),
        ("max_tokens", SpanAttributes.LLM_REQUEST_MAX_TOKENS),
    ):
        if kwarg_name in kwargs:
            attributes[attr_name] = kwargs[kwarg_name]

    # Prompt (only when prompt capture is enabled).
    if "prompt" in kwargs and should_send_prompts():
        attributes[SpanAttributes.LLM_PROMPTS] = json.dumps([kwargs["prompt"]])

    if response:
        if hasattr(response, "model"):
            attributes[SpanAttributes.LLM_RESPONSE_MODEL] = response.model

        choices = getattr(response, "choices", None)
        if choices:
            first = choices[0]
            if hasattr(first, "text") and should_send_prompts():
                attributes[SpanAttributes.LLM_COMPLETIONS] = json.dumps([first.text])

        usage = getattr(response, "usage", None)
        if usage:
            for usage_field, attr_name in (
                ("prompt_tokens", SpanAttributes.LLM_USAGE_PROMPT_TOKENS),
                ("completion_tokens", SpanAttributes.LLM_USAGE_COMPLETION_TOKENS),
                ("total_tokens", SpanAttributes.LLM_USAGE_TOTAL_TOKENS),
            ):
                if hasattr(usage, usage_field):
                    attributes[attr_name] = getattr(usage, usage_field)

    return attributes
251
+
252
+
253
def completion_wrapper(tracer: Tracer):
    """Create a wrapper for ``Completion.create``.

    Opens a span around the call, records request attributes before and
    response attributes after, and marks the span's status accordingly.
    """

    def wrapper(wrapped, instance, args, kwargs):
        # Respect the OTel suppression flag.
        if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
            return wrapped(*args, **kwargs)

        with tracer.start_as_current_span("openai.Completion.create") as span:
            try:
                # Request-side attributes.
                for key, value in _extract_completion_attributes(kwargs).items():
                    span.set_attribute(key, value)

                result = wrapped(*args, **kwargs)

                # Response-side attributes (model, completion text, usage).
                for key, value in _extract_completion_attributes(kwargs, result).items():
                    span.set_attribute(key, value)

                span.set_status(Status(StatusCode.OK))
                return result

            except Exception as exc:
                span.record_exception(exc)
                span.set_status(Status(StatusCode.ERROR, str(exc)))
                raise

    return wrapper
286
+
287
+
288
def acompletion_wrapper(tracer: Tracer):
    """Create a wrapper for ``Completion.acreate`` (async twin of
    :func:`completion_wrapper`).
    """

    async def wrapper(wrapped, instance, args, kwargs):
        # Respect the OTel suppression flag.
        if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
            return await wrapped(*args, **kwargs)

        with tracer.start_as_current_span("openai.Completion.acreate") as span:
            try:
                # Request-side attributes.
                for key, value in _extract_completion_attributes(kwargs).items():
                    span.set_attribute(key, value)

                result = await wrapped(*args, **kwargs)

                # Response-side attributes (model, completion text, usage).
                for key, value in _extract_completion_attributes(kwargs, result).items():
                    span.set_attribute(key, value)

                span.set_status(Status(StatusCode.OK))
                return result

            except Exception as exc:
                span.record_exception(exc)
                span.set_status(Status(StatusCode.ERROR, str(exc)))
                raise

    return wrapper
321
+
322
+
323
def _extract_embeddings_attributes(kwargs: Dict[str, Any], response: Any = None) -> Dict[str, Any]:
    """Build span attributes for an embeddings call.

    Called before the request with only ``kwargs``, then again with the
    ``response`` to add model, embedding counts, and usage.
    """
    attributes: Dict[str, Any] = {
        SpanAttributes.LLM_SYSTEM: "OpenAI",
        SpanAttributes.LLM_REQUEST_TYPE: "embedding",
    }

    if "model" in kwargs:
        attributes[SpanAttributes.LLM_REQUEST_MODEL] = kwargs["model"]

    # Capture the input only when prompt capture is enabled; a single string
    # is normalized to a one-element list so the attribute is always a JSON list.
    if "input" in kwargs and should_send_prompts():
        input_data = kwargs["input"]
        payload = input_data if isinstance(input_data, list) else [input_data]
        attributes[SpanAttributes.LLM_PROMPTS] = json.dumps(payload)

    if response:
        if hasattr(response, "model"):
            attributes[SpanAttributes.LLM_RESPONSE_MODEL] = response.model

        data = getattr(response, "data", None)
        if data:
            attributes["llm.embeddings.count"] = len(data)
            if hasattr(data[0], "embedding"):
                attributes["llm.embeddings.vector_size"] = len(data[0].embedding)

        usage = getattr(response, "usage", None)
        if usage:
            if hasattr(usage, "prompt_tokens"):
                attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = usage.prompt_tokens
            if hasattr(usage, "total_tokens"):
                attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = usage.total_tokens

    return attributes
360
+
361
+
362
def embeddings_wrapper(
    tracer: Tracer,
    tokens_histogram=None,
    embeddings_vector_size_counter=None,
    duration_histogram=None,
    embeddings_exception_counter=None,
):
    """Create a wrapper for ``Embedding.create``.

    Args:
        tracer: Tracer used to open a span around each call.
        tokens_histogram: Optional histogram recording input token counts.
        embeddings_vector_size_counter: Optional counter accumulating the total
            number of embedding floats returned (vector size * vectors).
        duration_histogram: Optional histogram recording call duration (seconds).
        embeddings_exception_counter: Optional counter incremented on failure.

    Returns:
        A wrapt-style wrapper ``(wrapped, instance, args, kwargs) -> response``.
    """

    def wrapper(wrapped, instance, args, kwargs):
        # Respect the OTel suppression flag.
        if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
            return wrapped(*args, **kwargs)

        start_time = time.time()
        span_name = "openai.Embedding.create"
        # FIX: pre-bind so the except path below cannot hit a NameError when
        # attribute extraction itself raises before `attributes` is assigned.
        attributes: Dict[str, Any] = {}

        with tracer.start_as_current_span(span_name) as span:
            try:
                # Request-side attributes.
                attributes = _extract_embeddings_attributes(kwargs)
                for key, value in attributes.items():
                    span.set_attribute(key, value)

                response = wrapped(*args, **kwargs)

                # Response-side attributes (model, counts, usage).
                response_attributes = _extract_embeddings_attributes(kwargs, response)
                for key, value in response_attributes.items():
                    span.set_attribute(key, value)

                if is_metrics_enabled():
                    duration = time.time() - start_time
                    if duration_histogram:
                        duration_histogram.record(duration, attributes)

                    data = getattr(response, "data", None)
                    if embeddings_vector_size_counter and data and hasattr(data[0], "embedding"):
                        embeddings_vector_size_counter.add(
                            len(data[0].embedding) * len(data), attributes
                        )

                    usage = getattr(response, "usage", None)
                    if tokens_histogram and usage and hasattr(usage, "prompt_tokens"):
                        tokens_histogram.record(
                            usage.prompt_tokens, attributes={**attributes, "token.type": "input"}
                        )

                span.set_status(Status(StatusCode.OK))
                return response

            except Exception as e:
                if embeddings_exception_counter and is_metrics_enabled():
                    embeddings_exception_counter.add(1, attributes)
                span.record_exception(e)
                span.set_status(Status(StatusCode.ERROR, str(e)))
                raise

    return wrapper
422
+
423
+
424
def aembeddings_wrapper(
    tracer: Tracer,
    tokens_histogram=None,
    embeddings_vector_size_counter=None,
    duration_histogram=None,
    embeddings_exception_counter=None,
):
    """Create a wrapper for ``Embedding.acreate`` (async twin of
    :func:`embeddings_wrapper`; same attributes and metrics, awaited call).

    Args:
        tracer: Tracer used to open a span around each call.
        tokens_histogram: Optional histogram recording input token counts.
        embeddings_vector_size_counter: Optional counter accumulating the total
            number of embedding floats returned (vector size * vectors).
        duration_histogram: Optional histogram recording call duration (seconds).
        embeddings_exception_counter: Optional counter incremented on failure.

    Returns:
        An async wrapt-style wrapper ``(wrapped, instance, args, kwargs)``.
    """

    async def wrapper(wrapped, instance, args, kwargs):
        # Respect the OTel suppression flag.
        if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
            return await wrapped(*args, **kwargs)

        start_time = time.time()
        span_name = "openai.Embedding.acreate"
        # FIX: pre-bind so the except path below cannot hit a NameError when
        # attribute extraction itself raises before `attributes` is assigned.
        attributes: Dict[str, Any] = {}

        with tracer.start_as_current_span(span_name) as span:
            try:
                # Request-side attributes.
                attributes = _extract_embeddings_attributes(kwargs)
                for key, value in attributes.items():
                    span.set_attribute(key, value)

                response = await wrapped(*args, **kwargs)

                # Response-side attributes (model, counts, usage).
                response_attributes = _extract_embeddings_attributes(kwargs, response)
                for key, value in response_attributes.items():
                    span.set_attribute(key, value)

                # Metrics handling mirrors the sync wrapper.
                if is_metrics_enabled():
                    duration = time.time() - start_time
                    if duration_histogram:
                        duration_histogram.record(duration, attributes)

                    data = getattr(response, "data", None)
                    if embeddings_vector_size_counter and data and hasattr(data[0], "embedding"):
                        embeddings_vector_size_counter.add(
                            len(data[0].embedding) * len(data), attributes
                        )

                    usage = getattr(response, "usage", None)
                    if tokens_histogram and usage and hasattr(usage, "prompt_tokens"):
                        tokens_histogram.record(
                            usage.prompt_tokens, attributes={**attributes, "token.type": "input"}
                        )

                span.set_status(Status(StatusCode.OK))
                return response

            except Exception as e:
                if embeddings_exception_counter and is_metrics_enabled():
                    embeddings_exception_counter.add(1, attributes)
                span.record_exception(e)
                span.set_status(Status(StatusCode.ERROR, str(e)))
                raise

    return wrapper
@@ -0,0 +1,30 @@
1
+ """OpenAI instrumentation wrappers.
2
+
3
+ This package contains wrapper implementations for different OpenAI API endpoints.
4
+ """
5
+
6
+ from agentops.instrumentation.providers.openai.wrappers.chat import handle_chat_attributes
7
+ from agentops.instrumentation.providers.openai.wrappers.completion import handle_completion_attributes
8
+ from agentops.instrumentation.providers.openai.wrappers.embeddings import handle_embeddings_attributes
9
+ from agentops.instrumentation.providers.openai.wrappers.image_gen import handle_image_gen_attributes
10
+ from agentops.instrumentation.providers.openai.wrappers.assistant import (
11
+ handle_assistant_attributes,
12
+ handle_run_attributes,
13
+ handle_run_retrieve_attributes,
14
+ handle_run_stream_attributes,
15
+ handle_messages_attributes,
16
+ )
17
+ from agentops.instrumentation.providers.openai.wrappers.responses import handle_responses_attributes
18
+
19
+ __all__ = [
20
+ "handle_chat_attributes",
21
+ "handle_completion_attributes",
22
+ "handle_embeddings_attributes",
23
+ "handle_image_gen_attributes",
24
+ "handle_assistant_attributes",
25
+ "handle_run_attributes",
26
+ "handle_run_retrieve_attributes",
27
+ "handle_run_stream_attributes",
28
+ "handle_messages_attributes",
29
+ "handle_responses_attributes",
30
+ ]