mseep-agentops 0.4.18__py3-none-any.whl → 0.4.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (153)
  1. agentops/__init__.py +0 -0
  2. agentops/client/api/base.py +28 -30
  3. agentops/client/api/versions/v3.py +29 -25
  4. agentops/client/api/versions/v4.py +87 -46
  5. agentops/client/client.py +98 -29
  6. agentops/client/http/README.md +87 -0
  7. agentops/client/http/http_client.py +126 -172
  8. agentops/config.py +8 -2
  9. agentops/instrumentation/OpenTelemetry.md +133 -0
  10. agentops/instrumentation/README.md +167 -0
  11. agentops/instrumentation/__init__.py +13 -1
  12. agentops/instrumentation/agentic/ag2/__init__.py +18 -0
  13. agentops/instrumentation/agentic/ag2/instrumentor.py +922 -0
  14. agentops/instrumentation/agentic/agno/__init__.py +19 -0
  15. agentops/instrumentation/agentic/agno/attributes/__init__.py +20 -0
  16. agentops/instrumentation/agentic/agno/attributes/agent.py +250 -0
  17. agentops/instrumentation/agentic/agno/attributes/metrics.py +214 -0
  18. agentops/instrumentation/agentic/agno/attributes/storage.py +158 -0
  19. agentops/instrumentation/agentic/agno/attributes/team.py +195 -0
  20. agentops/instrumentation/agentic/agno/attributes/tool.py +210 -0
  21. agentops/instrumentation/agentic/agno/attributes/workflow.py +254 -0
  22. agentops/instrumentation/agentic/agno/instrumentor.py +1313 -0
  23. agentops/instrumentation/agentic/crewai/LICENSE +201 -0
  24. agentops/instrumentation/agentic/crewai/NOTICE.md +10 -0
  25. agentops/instrumentation/agentic/crewai/__init__.py +6 -0
  26. agentops/instrumentation/agentic/crewai/crewai_span_attributes.py +335 -0
  27. agentops/instrumentation/agentic/crewai/instrumentation.py +535 -0
  28. agentops/instrumentation/agentic/crewai/version.py +1 -0
  29. agentops/instrumentation/agentic/google_adk/__init__.py +19 -0
  30. agentops/instrumentation/agentic/google_adk/instrumentor.py +68 -0
  31. agentops/instrumentation/agentic/google_adk/patch.py +767 -0
  32. agentops/instrumentation/agentic/haystack/__init__.py +1 -0
  33. agentops/instrumentation/agentic/haystack/instrumentor.py +186 -0
  34. agentops/instrumentation/agentic/langgraph/__init__.py +3 -0
  35. agentops/instrumentation/agentic/langgraph/attributes.py +54 -0
  36. agentops/instrumentation/agentic/langgraph/instrumentation.py +598 -0
  37. agentops/instrumentation/agentic/langgraph/version.py +1 -0
  38. agentops/instrumentation/agentic/openai_agents/README.md +156 -0
  39. agentops/instrumentation/agentic/openai_agents/SPANS.md +145 -0
  40. agentops/instrumentation/agentic/openai_agents/TRACING_API.md +144 -0
  41. agentops/instrumentation/agentic/openai_agents/__init__.py +30 -0
  42. agentops/instrumentation/agentic/openai_agents/attributes/common.py +549 -0
  43. agentops/instrumentation/agentic/openai_agents/attributes/completion.py +172 -0
  44. agentops/instrumentation/agentic/openai_agents/attributes/model.py +58 -0
  45. agentops/instrumentation/agentic/openai_agents/attributes/tokens.py +275 -0
  46. agentops/instrumentation/agentic/openai_agents/exporter.py +469 -0
  47. agentops/instrumentation/agentic/openai_agents/instrumentor.py +107 -0
  48. agentops/instrumentation/agentic/openai_agents/processor.py +58 -0
  49. agentops/instrumentation/agentic/smolagents/README.md +88 -0
  50. agentops/instrumentation/agentic/smolagents/__init__.py +12 -0
  51. agentops/instrumentation/agentic/smolagents/attributes/agent.py +354 -0
  52. agentops/instrumentation/agentic/smolagents/attributes/model.py +205 -0
  53. agentops/instrumentation/agentic/smolagents/instrumentor.py +286 -0
  54. agentops/instrumentation/agentic/smolagents/stream_wrapper.py +258 -0
  55. agentops/instrumentation/agentic/xpander/__init__.py +15 -0
  56. agentops/instrumentation/agentic/xpander/context.py +112 -0
  57. agentops/instrumentation/agentic/xpander/instrumentor.py +877 -0
  58. agentops/instrumentation/agentic/xpander/trace_probe.py +86 -0
  59. agentops/instrumentation/agentic/xpander/version.py +3 -0
  60. agentops/instrumentation/common/README.md +65 -0
  61. agentops/instrumentation/common/attributes.py +1 -2
  62. agentops/instrumentation/providers/anthropic/__init__.py +24 -0
  63. agentops/instrumentation/providers/anthropic/attributes/__init__.py +23 -0
  64. agentops/instrumentation/providers/anthropic/attributes/common.py +64 -0
  65. agentops/instrumentation/providers/anthropic/attributes/message.py +541 -0
  66. agentops/instrumentation/providers/anthropic/attributes/tools.py +231 -0
  67. agentops/instrumentation/providers/anthropic/event_handler_wrapper.py +90 -0
  68. agentops/instrumentation/providers/anthropic/instrumentor.py +146 -0
  69. agentops/instrumentation/providers/anthropic/stream_wrapper.py +436 -0
  70. agentops/instrumentation/providers/google_genai/README.md +33 -0
  71. agentops/instrumentation/providers/google_genai/__init__.py +24 -0
  72. agentops/instrumentation/providers/google_genai/attributes/__init__.py +25 -0
  73. agentops/instrumentation/providers/google_genai/attributes/chat.py +125 -0
  74. agentops/instrumentation/providers/google_genai/attributes/common.py +88 -0
  75. agentops/instrumentation/providers/google_genai/attributes/model.py +284 -0
  76. agentops/instrumentation/providers/google_genai/instrumentor.py +170 -0
  77. agentops/instrumentation/providers/google_genai/stream_wrapper.py +238 -0
  78. agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py +28 -0
  79. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/__init__.py +27 -0
  80. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/attributes.py +277 -0
  81. agentops/instrumentation/providers/ibm_watsonx_ai/attributes/common.py +104 -0
  82. agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py +162 -0
  83. agentops/instrumentation/providers/ibm_watsonx_ai/stream_wrapper.py +302 -0
  84. agentops/instrumentation/providers/mem0/__init__.py +45 -0
  85. agentops/instrumentation/providers/mem0/common.py +377 -0
  86. agentops/instrumentation/providers/mem0/instrumentor.py +270 -0
  87. agentops/instrumentation/providers/mem0/memory.py +430 -0
  88. agentops/instrumentation/providers/openai/__init__.py +21 -0
  89. agentops/instrumentation/providers/openai/attributes/__init__.py +7 -0
  90. agentops/instrumentation/providers/openai/attributes/common.py +55 -0
  91. agentops/instrumentation/providers/openai/attributes/response.py +607 -0
  92. agentops/instrumentation/providers/openai/config.py +36 -0
  93. agentops/instrumentation/providers/openai/instrumentor.py +312 -0
  94. agentops/instrumentation/providers/openai/stream_wrapper.py +941 -0
  95. agentops/instrumentation/providers/openai/utils.py +44 -0
  96. agentops/instrumentation/providers/openai/v0.py +176 -0
  97. agentops/instrumentation/providers/openai/v0_wrappers.py +483 -0
  98. agentops/instrumentation/providers/openai/wrappers/__init__.py +30 -0
  99. agentops/instrumentation/providers/openai/wrappers/assistant.py +277 -0
  100. agentops/instrumentation/providers/openai/wrappers/chat.py +259 -0
  101. agentops/instrumentation/providers/openai/wrappers/completion.py +109 -0
  102. agentops/instrumentation/providers/openai/wrappers/embeddings.py +94 -0
  103. agentops/instrumentation/providers/openai/wrappers/image_gen.py +75 -0
  104. agentops/instrumentation/providers/openai/wrappers/responses.py +191 -0
  105. agentops/instrumentation/providers/openai/wrappers/shared.py +81 -0
  106. agentops/instrumentation/utilities/concurrent_futures/__init__.py +10 -0
  107. agentops/instrumentation/utilities/concurrent_futures/instrumentation.py +206 -0
  108. agentops/integration/callbacks/dspy/__init__.py +11 -0
  109. agentops/integration/callbacks/dspy/callback.py +471 -0
  110. agentops/integration/callbacks/langchain/README.md +59 -0
  111. agentops/integration/callbacks/langchain/__init__.py +15 -0
  112. agentops/integration/callbacks/langchain/callback.py +791 -0
  113. agentops/integration/callbacks/langchain/utils.py +54 -0
  114. agentops/legacy/crewai.md +121 -0
  115. agentops/logging/instrument_logging.py +4 -0
  116. agentops/sdk/README.md +220 -0
  117. agentops/sdk/core.py +75 -32
  118. agentops/sdk/descriptors/classproperty.py +28 -0
  119. agentops/sdk/exporters.py +152 -33
  120. agentops/semconv/README.md +125 -0
  121. agentops/semconv/span_kinds.py +0 -2
  122. agentops/validation.py +102 -63
  123. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/METADATA +30 -40
  124. mseep_agentops-0.4.23.dist-info/RECORD +178 -0
  125. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/WHEEL +1 -2
  126. mseep_agentops-0.4.18.dist-info/RECORD +0 -94
  127. mseep_agentops-0.4.18.dist-info/top_level.txt +0 -2
  128. tests/conftest.py +0 -10
  129. tests/unit/client/__init__.py +0 -1
  130. tests/unit/client/test_http_adapter.py +0 -221
  131. tests/unit/client/test_http_client.py +0 -206
  132. tests/unit/conftest.py +0 -54
  133. tests/unit/sdk/__init__.py +0 -1
  134. tests/unit/sdk/instrumentation_tester.py +0 -207
  135. tests/unit/sdk/test_attributes.py +0 -392
  136. tests/unit/sdk/test_concurrent_instrumentation.py +0 -468
  137. tests/unit/sdk/test_decorators.py +0 -763
  138. tests/unit/sdk/test_exporters.py +0 -241
  139. tests/unit/sdk/test_factory.py +0 -1188
  140. tests/unit/sdk/test_internal_span_processor.py +0 -397
  141. tests/unit/sdk/test_resource_attributes.py +0 -35
  142. tests/unit/test_config.py +0 -82
  143. tests/unit/test_context_manager.py +0 -777
  144. tests/unit/test_events.py +0 -27
  145. tests/unit/test_host_env.py +0 -54
  146. tests/unit/test_init_py.py +0 -501
  147. tests/unit/test_serialization.py +0 -433
  148. tests/unit/test_session.py +0 -676
  149. tests/unit/test_user_agent.py +0 -34
  150. tests/unit/test_validation.py +0 -405
  151. {tests → agentops/instrumentation/agentic/openai_agents/attributes}/__init__.py +0 -0
  152. /tests/unit/__init__.py → /agentops/instrumentation/providers/openai/attributes/tools.py +0 -0
  153. {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,767 @@
1
+ """Patch functions for Google ADK instrumentation.
2
+
3
+ This module patches key methods in Google ADK to:
4
+ 1. Prevent ADK from creating its own spans
5
+ 2. Create AgentOps spans that mirror ADK's telemetry
6
+ 3. Extract and set proper attributes on spans
7
+ """
8
+
9
+ import json
10
+ import wrapt
11
+ from typing import Any
12
+ from opentelemetry import trace as opentelemetry_api_trace
13
+ from opentelemetry.trace import SpanKind as SpanKind
14
+
15
+ from agentops.logging import logger
16
+ from agentops.semconv import SpanAttributes, ToolAttributes, MessageAttributes, AgentAttributes
17
+
18
+
19
+ _wrapped_methods = []
20
+
21
+
22
class NoOpSpan:
    """A span stand-in whose every operation silently does nothing.

    Handed out by NoOpTracer so that ADK's internal telemetry calls succeed
    without ever recording or exporting anything.
    """

    def __init__(self, *args, **kwargs):
        pass

    # Context-manager protocol: usable in `with` blocks, no cleanup needed.
    def __enter__(self):
        return self

    def __exit__(self, *args):
        pass

    def set_attribute(self, *args, **kwargs):
        pass

    def set_attributes(self, *args, **kwargs):
        pass

    def add_event(self, *args, **kwargs):
        pass

    def set_status(self, *args, **kwargs):
        pass

    def update_name(self, *args, **kwargs):
        pass

    def is_recording(self):
        # Always report "not recording" so callers skip expensive attribute work.
        return False

    def end(self, *args, **kwargs):
        pass

    def record_exception(self, *args, **kwargs):
        pass
57
+
58
+
59
class NoOpTracer:
    """Tracer substitute installed into ADK so it cannot emit real spans.

    Every factory method hands back a NoOpSpan, which also serves as its own
    (do-nothing) context manager.
    """

    def start_as_current_span(self, *args, **kwargs):
        """Return a do-nothing span usable as a context manager."""
        return NoOpSpan()

    def start_span(self, *args, **kwargs):
        """Return a do-nothing span."""
        return NoOpSpan()

    def use_span(self, *args, **kwargs):
        """Return a do-nothing span usable as a context manager."""
        return NoOpSpan()
73
+
74
+
75
def _build_llm_request_for_trace(llm_request) -> dict:
    """Serialize an ADK LlmRequest into a plain dict suitable for tracing.

    Keeps the model name, the generation config (minus the response schema),
    and the conversation contents with inline binary payloads stripped out so
    the result stays JSON-friendly.
    """
    from google.genai import types

    trace_dict = {
        "model": llm_request.model,
        "config": llm_request.config.model_dump(exclude_none=True, exclude="response_schema"),
        "contents": [],
    }

    for content in llm_request.contents:
        # Drop parts carrying inline binary data (e.g. images) before dumping.
        kept_parts = [part for part in content.parts if not hasattr(part, "inline_data") or not part.inline_data]
        trace_dict["contents"].append(types.Content(role=content.role, parts=kept_parts).model_dump(exclude_none=True))
    return trace_dict
89
+
90
+
91
def _extract_messages_from_contents(contents: list) -> dict:
    """Flatten LLM request contents into indexed prompt-message attributes.

    Each content entry becomes a `gen_ai.prompt.{i}.*` group: a normalized
    role, any text joined with newlines, plus function-call / function-response
    parts recorded under generic keys (MessageAttributes has no prompt-side
    tool-call slots).
    """
    # ADK reports assistant turns with role "model"; normalize for consistency.
    # Unrecognized roles pass through unchanged.
    role_map = {"model": "assistant", "user": "user", "system": "system"}

    attributes = {}
    for i, content in enumerate(contents):
        raw_role = content.get("role", "user")
        attributes[MessageAttributes.PROMPT_ROLE.format(i=i)] = role_map.get(raw_role, raw_role)

        text_chunks = []
        for part in content.get("parts", []):
            if "text" in part and part.get("text") is not None:
                text_chunks.append(str(part["text"]))
            elif "function_call" in part:
                # Function calls in prompts come from the model's prior turns.
                call = part["function_call"]
                attributes[f"gen_ai.prompt.{i}.function_call.name"] = call.get("name", "")
                attributes[f"gen_ai.prompt.{i}.function_call.args"] = json.dumps(call.get("args", {}))
                if "id" in call:
                    attributes[f"gen_ai.prompt.{i}.function_call.id"] = call["id"]
            elif "function_response" in part:
                # Function responses are tool results fed back to the model.
                resp = part["function_response"]
                attributes[f"gen_ai.prompt.{i}.function_response.name"] = resp.get("name", "")
                attributes[f"gen_ai.prompt.{i}.function_response.result"] = json.dumps(resp.get("response", {}))
                if "id" in resp:
                    attributes[f"gen_ai.prompt.{i}.function_response.id"] = resp["id"]

        if text_chunks:
            attributes[MessageAttributes.PROMPT_CONTENT.format(i=i)] = "\n".join(text_chunks)

    return attributes
140
+
141
+
142
def _extract_llm_attributes(llm_request_dict: dict, llm_response: Any) -> dict:
    """Extract span attributes from an LLM request dict and optional response.

    Args:
        llm_request_dict: Serialized request as produced by
            ``_build_llm_request_for_trace`` ("model", "config", "contents").
        llm_response: The LLM response — a JSON string or an already-parsed
            mapping — or None to extract request-side attributes only.

    Returns:
        dict: Flat attribute mapping ready for ``span.set_attribute`` calls.
    """
    attributes = {}

    if "model" in llm_request_dict:
        attributes[SpanAttributes.LLM_REQUEST_MODEL] = llm_request_dict["model"]

    if "config" in llm_request_dict:
        attributes.update(_extract_request_config_attributes(llm_request_dict["config"]))

    attributes.update(_extract_prompt_message_attributes(llm_request_dict))

    if llm_response:
        attributes.update(_extract_response_attributes(llm_response))

    return attributes


def _extract_request_config_attributes(config: dict) -> dict:
    """Map generation-config fields onto request span attributes."""
    attributes = {}

    # Scalar config fields copied through verbatim.
    scalar_fields = (
        ("temperature", SpanAttributes.LLM_REQUEST_TEMPERATURE),
        ("max_output_tokens", SpanAttributes.LLM_REQUEST_MAX_TOKENS),
        ("top_p", SpanAttributes.LLM_REQUEST_TOP_P),
        ("top_k", SpanAttributes.LLM_REQUEST_TOP_K),
        ("candidate_count", SpanAttributes.LLM_REQUEST_CANDIDATE_COUNT),
        ("response_mime_type", "gen_ai.request.response_mime_type"),
    )
    for field, attr_key in scalar_fields:
        if field in config:
            attributes[attr_key] = config[field]

    if "stop_sequences" in config:
        attributes[SpanAttributes.LLM_REQUEST_STOP_SEQUENCES] = json.dumps(config["stop_sequences"])

    # Tool/function declarations. BUGFIX: use one running index across all tool
    # entries — previously the index restarted for each tool, so a second
    # tool's declarations overwrote the first's gen_ai.request.tools.* keys.
    tool_index = 0
    for tool in config.get("tools", []):
        for func in tool.get("function_declarations", []):
            attributes[f"gen_ai.request.tools.{tool_index}.name"] = func.get("name", "")
            attributes[f"gen_ai.request.tools.{tool_index}.description"] = func.get("description", "")
            tool_index += 1

    return attributes


def _extract_prompt_message_attributes(llm_request_dict: dict) -> dict:
    """Build indexed gen_ai.prompt.* attributes from the request contents.

    The system instruction (if any) is emitted first as a "system" message.
    TODO: this is not Chat Completions format, but it keeps frontend rendering
    consistent.
    """
    attributes = {}
    message_index = 0
    config = llm_request_dict.get("config", {})

    if "system_instruction" in config:
        attributes[MessageAttributes.PROMPT_ROLE.format(i=message_index)] = "system"
        attributes[MessageAttributes.PROMPT_CONTENT.format(i=message_index)] = config["system_instruction"]
        message_index += 1

    # ADK reports assistant turns as role "model"; unrecognized roles pass through.
    role_map = {"model": "assistant", "user": "user", "system": "system"}

    for content in llm_request_dict.get("contents", []):
        raw_role = content.get("role", "user")
        attributes[MessageAttributes.PROMPT_ROLE.format(i=message_index)] = role_map.get(raw_role, raw_role)

        text_parts = []
        for part in content.get("parts", []):
            if "text" in part and part.get("text") is not None:
                text_parts.append(str(part["text"]))
            elif "function_call" in part:
                # Function calls in prompts come from the model's previous turns.
                # Generic keys: MessageAttributes has no prompt-side tool-call slots.
                func_call = part["function_call"]
                attributes[f"gen_ai.prompt.{message_index}.function_call.name"] = func_call.get("name", "")
                attributes[f"gen_ai.prompt.{message_index}.function_call.args"] = json.dumps(func_call.get("args", {}))
                if "id" in func_call:
                    attributes[f"gen_ai.prompt.{message_index}.function_call.id"] = func_call["id"]
            elif "function_response" in part:
                # Function responses are tool results fed back to the model.
                func_resp = part["function_response"]
                attributes[f"gen_ai.prompt.{message_index}.function_response.name"] = func_resp.get("name", "")
                attributes[f"gen_ai.prompt.{message_index}.function_response.result"] = json.dumps(
                    func_resp.get("response", {})
                )
                if "id" in func_resp:
                    attributes[f"gen_ai.prompt.{message_index}.function_response.id"] = func_resp["id"]

        if text_parts:
            attributes[MessageAttributes.PROMPT_CONTENT.format(i=message_index)] = "\n".join(text_parts)

        message_index += 1

    return attributes


def _extract_response_attributes(llm_response: Any) -> dict:
    """Extract model/usage/completion attributes from an LLM response.

    Accepts a JSON string or a parsed mapping. Best-effort: any parsing error
    is logged at debug level and whatever was extracted so far is returned.
    """
    attributes = {}
    try:
        response_dict = json.loads(llm_response) if isinstance(llm_response, str) else llm_response

        if "model" in response_dict:
            attributes[SpanAttributes.LLM_RESPONSE_MODEL] = response_dict["model"]

        usage = response_dict.get("usage_metadata")
        if usage:
            if "prompt_token_count" in usage:
                attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = usage["prompt_token_count"]
            if "candidates_token_count" in usage:
                attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = usage["candidates_token_count"]
            if "total_token_count" in usage:
                attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = usage["total_token_count"]

            # Per-modality token details (e.g. text vs image) if available.
            for detail in usage.get("prompt_tokens_details", []):
                if "modality" in detail and "token_count" in detail:
                    attributes[f"gen_ai.usage.prompt_tokens.{detail['modality'].lower()}"] = detail["token_count"]
            for detail in usage.get("candidates_tokens_details", []):
                if "modality" in detail and "token_count" in detail:
                    attributes[f"gen_ai.usage.completion_tokens.{detail['modality'].lower()}"] = detail["token_count"]

        if "content" in response_dict and "parts" in response_dict["content"]:
            # Single completion: role hardcoded to 'assistant' for consistency.
            attributes[MessageAttributes.COMPLETION_ROLE.format(i=0)] = "assistant"

            text_parts = []
            tool_call_index = 0
            for part in response_dict["content"]["parts"]:
                if "text" in part and part.get("text") is not None:
                    text_parts.append(str(part["text"]))
                elif "function_call" in part:
                    func_call = part["function_call"]
                    attributes[MessageAttributes.COMPLETION_TOOL_CALL_NAME.format(i=0, j=tool_call_index)] = (
                        func_call.get("name", "")
                    )
                    attributes[MessageAttributes.COMPLETION_TOOL_CALL_ARGUMENTS.format(i=0, j=tool_call_index)] = (
                        json.dumps(func_call.get("args", {}))
                    )
                    if "id" in func_call:
                        attributes[MessageAttributes.COMPLETION_TOOL_CALL_ID.format(i=0, j=tool_call_index)] = (
                            func_call["id"]
                        )
                    tool_call_index += 1

            if text_parts:
                attributes[MessageAttributes.COMPLETION_CONTENT.format(i=0)] = "\n".join(text_parts)

        if "finish_reason" in response_dict:
            attributes[SpanAttributes.LLM_RESPONSE_FINISH_REASON] = response_dict["finish_reason"]

        if "id" in response_dict:
            attributes[SpanAttributes.LLM_RESPONSE_ID] = response_dict["id"]

    except Exception as e:
        logger.debug(f"Failed to extract response attributes: {e}")

    return attributes
334
+
335
+
336
+ # Wrapper for Runner.run_async - REMOVED per user request
337
+ # We just pass through without creating a span
338
+ def _runner_run_async_wrapper(agentops_tracer):
339
+ def actual_decorator(wrapped, instance, args, kwargs):
340
+ async def new_function():
341
+ # Just pass through without creating a span
342
+ async_gen = wrapped(*args, **kwargs)
343
+ async for item in async_gen:
344
+ yield item
345
+
346
+ return new_function()
347
+
348
+ return actual_decorator
349
+
350
+
351
def extract_agent_attributes(instance):
    """Collect span attributes describing an ADK agent instance.

    Reads name/description/model/instruction/output_key plus tool metadata,
    and recursively flattens sub-agent attributes under indexed
    ``agent.sub_agents.{i}.`` prefixes.
    """
    attributes = {}
    # Use AgentAttributes from semconv
    attributes[AgentAttributes.AGENT_NAME] = instance.name
    if hasattr(instance, "description"):
        attributes["agent.description"] = instance.description
    if hasattr(instance, "model"):
        attributes["agent.model"] = instance.model
    if hasattr(instance, "instruction"):
        attributes["agent.instruction"] = instance.instruction
    if hasattr(instance, "tools"):
        # NOTE(review): every iteration writes the same TOOL_NAME /
        # TOOL_DESCRIPTION keys, so only the last tool survives when an agent
        # has several tools — confirm whether indexed keys were intended.
        for tool in instance.tools:
            if hasattr(tool, "name"):
                attributes[ToolAttributes.TOOL_NAME] = tool.name
            if hasattr(tool, "description"):
                attributes[ToolAttributes.TOOL_DESCRIPTION] = tool.description
    if hasattr(instance, "output_key"):
        attributes["agent.output_key"] = instance.output_key
    # Subagents: recurse and prefix each child's keys with its index. Nested
    # sub-agents end up with stacked prefixes (agent.sub_agents.0.agent.sub_agents.1....),
    # which keeps entries unique across arbitrary nesting depth.
    if hasattr(instance, "sub_agents"):
        for i, sub_agent in enumerate(instance.sub_agents):
            sub_agent_attributes = extract_agent_attributes(sub_agent)
            for key, value in sub_agent_attributes.items():
                attributes[f"agent.sub_agents.{i}.{key}"] = value
    return attributes
377
+
378
+
379
# Wrapper for BaseAgent.run_async
def _base_agent_run_async_wrapper(agentops_tracer):
    """Return a wrapt-style decorator tracing BaseAgent.run_async as an agent span."""

    def actual_decorator(wrapped, instance, args, kwargs):
        async def new_function():
            agent_name = getattr(instance, "name", "unknown")

            # The span stays open across every yield so child spans created
            # during the agent run nest underneath it.
            with agentops_tracer.start_as_current_span(f"adk.agent.{agent_name}", kind=SpanKind.CLIENT) as span:
                span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, "agent")
                span.set_attribute(SpanAttributes.LLM_SYSTEM, "gcp.vertex.agent")
                span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent")
                span.set_attributes(extract_agent_attributes(instance))

                # First positional arg is the invocation context when present.
                if args and hasattr(args[0], "invocation_id"):
                    span.set_attribute("adk.invocation_id", args[0].invocation_id)

                async for event in wrapped(*args, **kwargs):
                    yield event

        return new_function()

    return actual_decorator
403
+
404
+
405
# Wrapper for BaseLlmFlow._call_llm_async
def _base_llm_flow_call_llm_async_wrapper(agentops_tracer):
    """Return a wrapt-style decorator tracing each LLM call as a request span."""

    def actual_decorator(wrapped, instance, args, kwargs):
        async def new_function():
            # args[1] is the LlmRequest when present; take the model name off it.
            llm_request = args[1] if len(args) > 1 else None
            model_name = "unknown"
            if hasattr(llm_request, "model"):
                model_name = llm_request.model

            with agentops_tracer.start_as_current_span(f"adk.llm.{model_name}", kind=SpanKind.CLIENT) as span:
                span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, "request")
                span.set_attribute(SpanAttributes.LLM_SYSTEM, "gcp.vertex.agent")
                span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "request")

                # Request-side attributes only; response attributes are added
                # later by _finalize_model_response_event_wrapper.
                if llm_request:
                    request_dict = _build_llm_request_for_trace(llm_request)
                    for key, value in _extract_llm_attributes(request_dict, None).items():
                        span.set_attribute(key, value)

                async for event in wrapped(*args, **kwargs):
                    yield event

        return new_function()

    return actual_decorator
443
+
444
+
445
# Wrapper for ADK telemetry functions - these add attributes to current span
def _adk_trace_tool_call_wrapper(agentops_tracer):
    """Mirror ADK's trace_tool_call onto whatever span is currently active."""

    @wrapt.decorator
    def wrapper(wrapped, instance, args, kwargs):
        # Run the original first so ADK's own behavior is preserved.
        result = wrapped(*args, **kwargs)

        tool_args = args[0] if args else kwargs.get("args")
        span = opentelemetry_api_trace.get_current_span()
        if span.is_recording() and tool_args is not None:
            span.set_attribute(SpanAttributes.LLM_SYSTEM, "gcp.vertex.agent")
            span.set_attribute("gcp.vertex.agent.tool_call_args", json.dumps(tool_args))
        return result

    return wrapper
460
+
461
+
462
def _adk_trace_tool_response_wrapper(agentops_tracer):
    """Mirror ADK's trace_tool_response onto the currently active span."""

    @wrapt.decorator
    def wrapper(wrapped, instance, args, kwargs):
        # Run the original first so ADK's own behavior is preserved.
        result = wrapped(*args, **kwargs)

        def arg_at(index, name):
            # Positional argument if supplied, otherwise the keyword form.
            return args[index] if len(args) > index else kwargs.get(name)

        invocation_context = arg_at(0, "invocation_context")
        event_id = arg_at(1, "event_id")
        function_response_event = arg_at(2, "function_response_event")

        span = opentelemetry_api_trace.get_current_span()
        if span.is_recording():
            span.set_attribute(SpanAttributes.LLM_SYSTEM, "gcp.vertex.agent")
            if invocation_context:
                span.set_attribute("gcp.vertex.agent.invocation_id", invocation_context.invocation_id)
            if event_id:
                span.set_attribute("gcp.vertex.agent.event_id", event_id)
            if function_response_event:
                span.set_attribute(
                    "gcp.vertex.agent.tool_response", function_response_event.model_dump_json(exclude_none=True)
                )
            # Tool spans carry empty LLM request/response payloads.
            span.set_attribute("gcp.vertex.agent.llm_request", "{}")
            span.set_attribute("gcp.vertex.agent.llm_response", "{}")
        return result

    return wrapper
488
+
489
+
490
def _adk_trace_call_llm_wrapper(agentops_tracer):
    """Mirror ADK's trace_call_llm data onto the currently active span."""

    @wrapt.decorator
    def wrapper(wrapped, instance, args, kwargs):
        # Call the original first to ensure ADK's behavior is preserved.
        result = wrapped(*args, **kwargs)

        invocation_context = args[0] if len(args) > 0 else kwargs.get("invocation_context")
        event_id = args[1] if len(args) > 1 else kwargs.get("event_id")
        llm_request = args[2] if len(args) > 2 else kwargs.get("llm_request")
        llm_response = args[3] if len(args) > 3 else kwargs.get("llm_response")

        span = opentelemetry_api_trace.get_current_span()
        if span.is_recording():
            span.set_attribute(SpanAttributes.LLM_SYSTEM, "gcp.vertex.agent")
            if llm_request:
                span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, llm_request.model)
            if invocation_context:
                span.set_attribute("gcp.vertex.agent.invocation_id", invocation_context.invocation_id)
                span.set_attribute("gcp.vertex.agent.session_id", invocation_context.session.id)
            if event_id:
                span.set_attribute("gcp.vertex.agent.event_id", event_id)

            if llm_request:
                request_dict = _build_llm_request_for_trace(llm_request)
                span.set_attribute("gcp.vertex.agent.llm_request", json.dumps(request_dict))

                # Serialize the response (when present) and set every derived
                # attribute, including token-usage counts.
                response_json = None
                if llm_response:
                    response_json = llm_response.model_dump_json(exclude_none=True)
                    span.set_attribute("gcp.vertex.agent.llm_response", response_json)

                for key, value in _extract_llm_attributes(request_dict, response_json).items():
                    span.set_attribute(key, value)

        return result

    return wrapper
529
+
530
+
531
def _adk_trace_send_data_wrapper(agentops_tracer):
    """Mirror ADK's trace_send_data payload onto the currently active span."""

    @wrapt.decorator
    def wrapper(wrapped, instance, args, kwargs):
        # Run the original first so ADK's own behavior is preserved.
        result = wrapped(*args, **kwargs)

        invocation_context = args[0] if len(args) > 0 else kwargs.get("invocation_context")
        event_id = args[1] if len(args) > 1 else kwargs.get("event_id")
        data = args[2] if len(args) > 2 else kwargs.get("data")

        span = opentelemetry_api_trace.get_current_span()
        if span.is_recording():
            if invocation_context:
                span.set_attribute("gcp.vertex.agent.invocation_id", invocation_context.invocation_id)
            if event_id:
                span.set_attribute("gcp.vertex.agent.event_id", event_id)
            if data:
                from google.genai import types

                # Re-wrap each content so model_dump drops None fields before
                # serializing the whole batch as one JSON attribute.
                serialized = [
                    types.Content(role=content.role, parts=content.parts).model_dump(exclude_none=True)
                    for content in data
                ]
                span.set_attribute("gcp.vertex.agent.data", json.dumps(serialized))
        return result

    return wrapper
562
+
563
+
564
# Wrapper for _finalize_model_response_event to capture response attributes
def _finalize_model_response_event_wrapper(agentops_tracer):
    """Build a wrapper for ``BaseLlmFlow._finalize_model_response_event`` that
    copies response/usage attributes onto the active ``adk.llm`` span."""

    def actual_decorator(wrapped, instance, args, kwargs):
        # Preserve ADK behavior by invoking the original method first.
        outcome = wrapped(*args, **kwargs)

        llm_request = args[0] if args else kwargs.get("llm_request")
        llm_response = args[1] if len(args) > 1 else kwargs.get("llm_response")

        span = opentelemetry_api_trace.get_current_span()
        if span.is_recording() and llm_request and llm_response:
            # Only decorate spans that our LLM-call wrapper created.
            if "adk.llm" in getattr(span, "name", ""):
                request_dict = _build_llm_request_for_trace(llm_request)
                response_json = llm_response.model_dump_json(exclude_none=True)
                attributes = _extract_llm_attributes(request_dict, response_json)

                # Request attributes were already set when the span was opened;
                # restrict ourselves to response-side keys here.
                for attr_key, attr_value in attributes.items():
                    if any(marker in attr_key for marker in ("usage", "completion", "response")):
                        span.set_attribute(attr_key, attr_value)

        return outcome

    return actual_decorator
594
+
595
+
596
# Wrapper for tool execution that creates a single merged span
def _call_tool_async_wrapper(agentops_tracer):
    """Wrapper that creates a single span for tool call and response.

    Returns a decorator compatible with ``wrapt.wrap_function_wrapper``; it
    opens one CLIENT span that covers both the tool invocation and its
    result, so call and response are not split across two spans.

    Args:
        agentops_tracer: Tracer used to start the merged tool span.
    """

    def actual_decorator(wrapped, instance, args, kwargs):
        async def new_function():
            # Extract tool info from args (positional first, keyword fallback)
            tool = args[0] if args else kwargs.get("tool")
            tool_args = args[1] if len(args) > 1 else kwargs.get("args", {})
            tool_context = args[2] if len(args) > 2 else kwargs.get("tool_context")

            tool_name = getattr(tool, "name", "unknown_tool")
            span_name = f"adk.tool.{tool_name}"

            with agentops_tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
                span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, "tool")
                span.set_attribute(SpanAttributes.LLM_SYSTEM, "gcp.vertex.agent")
                span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "tool")

                # Set tool call attributes
                span.set_attribute(ToolAttributes.TOOL_NAME, tool_name)
                if hasattr(tool, "description"):
                    span.set_attribute(ToolAttributes.TOOL_DESCRIPTION, tool.description)
                if hasattr(tool, "is_long_running"):
                    span.set_attribute("tool.is_long_running", tool.is_long_running)
                # default=str keeps instrumentation from aborting the tool call
                # when an argument value is not JSON-serializable.
                span.set_attribute(ToolAttributes.TOOL_PARAMETERS, json.dumps(tool_args, default=str))

                if tool_context and hasattr(tool_context, "function_call_id"):
                    span.set_attribute("tool.call_id", tool_context.function_call_id)
                if tool_context and hasattr(tool_context, "invocation_context"):
                    span.set_attribute("adk.invocation_id", tool_context.invocation_context.invocation_id)

                # Execute the tool
                result = await wrapped(*args, **kwargs)

                # Record the tool response. Test against None (not truthiness)
                # so falsy-but-meaningful results like {}, 0, False, "" are kept.
                if result is not None:
                    if isinstance(result, dict):
                        span.set_attribute(ToolAttributes.TOOL_RESULT, json.dumps(result, default=str))
                    else:
                        span.set_attribute(ToolAttributes.TOOL_RESULT, str(result))

                return result

        return new_function()

    return actual_decorator
643
+
644
+
645
def _patch(module_name: str, object_name: str, method_name: str, wrapper_function, agentops_tracer):
    """Helper to apply a patch and keep track of it."""
    # Fully-qualified target name, used only for log messages.
    qualified = f"{module_name}.{object_name}.{method_name}"
    try:
        host_module = __import__(module_name, fromlist=[object_name])
        target = getattr(host_module, object_name)
        wrapt.wrap_function_wrapper(target, method_name, wrapper_function(agentops_tracer))
        # Remember what we wrapped so unpatch_adk can undo it later.
        _wrapped_methods.append((target, method_name))
        logger.debug(f"Successfully wrapped {qualified}")
    except Exception as e:
        logger.warning(f"Could not wrap {qualified}: {e}")
655
+
656
+
657
def _patch_module_function(module_name: str, function_name: str, wrapper_function, agentops_tracer):
    """Helper to patch module-level functions."""
    # Fully-qualified target name, used only for log messages.
    qualified = f"{module_name}.{function_name}"
    try:
        host_module = __import__(module_name, fromlist=[function_name])
        wrapt.wrap_function_wrapper(host_module, function_name, wrapper_function(agentops_tracer))
        # Remember what we wrapped so unpatch_adk can undo it later.
        _wrapped_methods.append((host_module, function_name))
        logger.debug(f"Successfully wrapped {qualified}")
    except Exception as e:
        logger.warning(f"Could not wrap {qualified}: {e}")
666
+
667
+
668
def patch_adk(agentops_tracer):
    """Apply all patches to Google ADK modules.

    Silences ADK's built-in OpenTelemetry tracer (replacing it with a
    NoOpTracer) and wraps ADK agent/LLM/tool entry points so their telemetry
    is emitted as AgentOps spans instead.

    Args:
        agentops_tracer: Tracer passed to each wrapper factory; used to
            create the AgentOps spans.
    """
    logger.debug("Applying Google ADK patches for AgentOps instrumentation")

    # First, disable ADK's own tracer by replacing it with our NoOpTracer
    noop_tracer = NoOpTracer()
    try:
        import google.adk.telemetry as adk_telemetry

        # Replace the tracer with our no-op version
        adk_telemetry.tracer = noop_tracer
        logger.debug("Replaced ADK's tracer with NoOpTracer")
    except Exception as e:
        logger.warning(f"Failed to replace ADK tracer: {e}")

    # Also replace the tracer in all modules that have already imported it;
    # a module that did `from ...telemetry import tracer` holds its own
    # reference, so patching the telemetry module alone is not enough.
    modules_to_patch = [
        "google.adk.runners",
        "google.adk.agents.base_agent",
        "google.adk.flows.llm_flows.base_llm_flow",
        "google.adk.flows.llm_flows.functions",
    ]

    import sys

    for module_name in modules_to_patch:
        if module_name in sys.modules:
            try:
                module = sys.modules[module_name]
                if hasattr(module, "tracer"):
                    module.tracer = noop_tracer
                    logger.debug(f"Replaced tracer in {module_name}")
            except Exception as e:
                logger.warning(f"Failed to replace tracer in {module_name}: {e}")

    # Patch methods that create top-level AgentOps spans
    # Skip runner patching - we don't want adk.runner spans
    _patch("google.adk.agents.base_agent", "BaseAgent", "run_async", _base_agent_run_async_wrapper, agentops_tracer)

    # Patch ADK's telemetry functions to add attributes to AgentOps spans
    _patch_module_function("google.adk.telemetry", "trace_tool_call", _adk_trace_tool_call_wrapper, agentops_tracer)
    _patch_module_function(
        "google.adk.telemetry", "trace_tool_response", _adk_trace_tool_response_wrapper, agentops_tracer
    )
    _patch_module_function("google.adk.telemetry", "trace_call_llm", _adk_trace_call_llm_wrapper, agentops_tracer)

    _patch_module_function("google.adk.telemetry", "trace_send_data", _adk_trace_send_data_wrapper, agentops_tracer)

    # Patch method that creates nested spans
    _patch(
        "google.adk.flows.llm_flows.base_llm_flow",
        "BaseLlmFlow",
        "_call_llm_async",
        _base_llm_flow_call_llm_async_wrapper,
        agentops_tracer,
    )

    # Also patch _finalize_model_response_event to capture response attributes
    _patch(
        "google.adk.flows.llm_flows.base_llm_flow",
        "BaseLlmFlow",
        "_finalize_model_response_event",
        _finalize_model_response_event_wrapper,
        agentops_tracer,
    )

    # Patch tool execution to create merged tool spans
    _patch_module_function(
        "google.adk.flows.llm_flows.functions", "__call_tool_async", _call_tool_async_wrapper, agentops_tracer
    )

    logger.info("Google ADK patching complete")
740
+
741
+
742
def unpatch_adk():
    """Remove all patches from Google ADK modules."""
    logger.debug("Removing Google ADK patches")

    # Put ADK's own tracer back in place of the NoOpTracer.
    try:
        import google.adk.telemetry as adk_telemetry
        from opentelemetry import trace

        adk_telemetry.tracer = trace.get_tracer("gcp.vertex.agent")
        logger.debug("Restored ADK's built-in tracer")
    except Exception as e:
        logger.warning(f"Failed to restore ADK tracer: {e}")

    # Peel our wrappers off every method recorded during patching.
    for target, attr in _wrapped_methods:
        try:
            wrapped_attr = getattr(target, attr)
            if hasattr(wrapped_attr, "__wrapped__"):
                setattr(target, attr, wrapped_attr.__wrapped__)
                logger.debug(f"Successfully unwrapped {target}.{attr}")
        except Exception as e:
            logger.warning(f"Failed to unwrap {target}.{attr}: {e}")

    _wrapped_methods.clear()
    logger.info("Google ADK unpatching complete")