mseep-agentops 0.4.18__py3-none-any.whl → 0.4.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentops/__init__.py +0 -0
- agentops/client/api/base.py +28 -30
- agentops/client/api/versions/v3.py +29 -25
- agentops/client/api/versions/v4.py +87 -46
- agentops/client/client.py +98 -29
- agentops/client/http/README.md +87 -0
- agentops/client/http/http_client.py +126 -172
- agentops/config.py +8 -2
- agentops/instrumentation/OpenTelemetry.md +133 -0
- agentops/instrumentation/README.md +167 -0
- agentops/instrumentation/__init__.py +13 -1
- agentops/instrumentation/agentic/ag2/__init__.py +18 -0
- agentops/instrumentation/agentic/ag2/instrumentor.py +922 -0
- agentops/instrumentation/agentic/agno/__init__.py +19 -0
- agentops/instrumentation/agentic/agno/attributes/__init__.py +20 -0
- agentops/instrumentation/agentic/agno/attributes/agent.py +250 -0
- agentops/instrumentation/agentic/agno/attributes/metrics.py +214 -0
- agentops/instrumentation/agentic/agno/attributes/storage.py +158 -0
- agentops/instrumentation/agentic/agno/attributes/team.py +195 -0
- agentops/instrumentation/agentic/agno/attributes/tool.py +210 -0
- agentops/instrumentation/agentic/agno/attributes/workflow.py +254 -0
- agentops/instrumentation/agentic/agno/instrumentor.py +1313 -0
- agentops/instrumentation/agentic/crewai/LICENSE +201 -0
- agentops/instrumentation/agentic/crewai/NOTICE.md +10 -0
- agentops/instrumentation/agentic/crewai/__init__.py +6 -0
- agentops/instrumentation/agentic/crewai/crewai_span_attributes.py +335 -0
- agentops/instrumentation/agentic/crewai/instrumentation.py +535 -0
- agentops/instrumentation/agentic/crewai/version.py +1 -0
- agentops/instrumentation/agentic/google_adk/__init__.py +19 -0
- agentops/instrumentation/agentic/google_adk/instrumentor.py +68 -0
- agentops/instrumentation/agentic/google_adk/patch.py +767 -0
- agentops/instrumentation/agentic/haystack/__init__.py +1 -0
- agentops/instrumentation/agentic/haystack/instrumentor.py +186 -0
- agentops/instrumentation/agentic/langgraph/__init__.py +3 -0
- agentops/instrumentation/agentic/langgraph/attributes.py +54 -0
- agentops/instrumentation/agentic/langgraph/instrumentation.py +598 -0
- agentops/instrumentation/agentic/langgraph/version.py +1 -0
- agentops/instrumentation/agentic/openai_agents/README.md +156 -0
- agentops/instrumentation/agentic/openai_agents/SPANS.md +145 -0
- agentops/instrumentation/agentic/openai_agents/TRACING_API.md +144 -0
- agentops/instrumentation/agentic/openai_agents/__init__.py +30 -0
- agentops/instrumentation/agentic/openai_agents/attributes/common.py +549 -0
- agentops/instrumentation/agentic/openai_agents/attributes/completion.py +172 -0
- agentops/instrumentation/agentic/openai_agents/attributes/model.py +58 -0
- agentops/instrumentation/agentic/openai_agents/attributes/tokens.py +275 -0
- agentops/instrumentation/agentic/openai_agents/exporter.py +469 -0
- agentops/instrumentation/agentic/openai_agents/instrumentor.py +107 -0
- agentops/instrumentation/agentic/openai_agents/processor.py +58 -0
- agentops/instrumentation/agentic/smolagents/README.md +88 -0
- agentops/instrumentation/agentic/smolagents/__init__.py +12 -0
- agentops/instrumentation/agentic/smolagents/attributes/agent.py +354 -0
- agentops/instrumentation/agentic/smolagents/attributes/model.py +205 -0
- agentops/instrumentation/agentic/smolagents/instrumentor.py +286 -0
- agentops/instrumentation/agentic/smolagents/stream_wrapper.py +258 -0
- agentops/instrumentation/agentic/xpander/__init__.py +15 -0
- agentops/instrumentation/agentic/xpander/context.py +112 -0
- agentops/instrumentation/agentic/xpander/instrumentor.py +877 -0
- agentops/instrumentation/agentic/xpander/trace_probe.py +86 -0
- agentops/instrumentation/agentic/xpander/version.py +3 -0
- agentops/instrumentation/common/README.md +65 -0
- agentops/instrumentation/common/attributes.py +1 -2
- agentops/instrumentation/providers/anthropic/__init__.py +24 -0
- agentops/instrumentation/providers/anthropic/attributes/__init__.py +23 -0
- agentops/instrumentation/providers/anthropic/attributes/common.py +64 -0
- agentops/instrumentation/providers/anthropic/attributes/message.py +541 -0
- agentops/instrumentation/providers/anthropic/attributes/tools.py +231 -0
- agentops/instrumentation/providers/anthropic/event_handler_wrapper.py +90 -0
- agentops/instrumentation/providers/anthropic/instrumentor.py +146 -0
- agentops/instrumentation/providers/anthropic/stream_wrapper.py +436 -0
- agentops/instrumentation/providers/google_genai/README.md +33 -0
- agentops/instrumentation/providers/google_genai/__init__.py +24 -0
- agentops/instrumentation/providers/google_genai/attributes/__init__.py +25 -0
- agentops/instrumentation/providers/google_genai/attributes/chat.py +125 -0
- agentops/instrumentation/providers/google_genai/attributes/common.py +88 -0
- agentops/instrumentation/providers/google_genai/attributes/model.py +284 -0
- agentops/instrumentation/providers/google_genai/instrumentor.py +170 -0
- agentops/instrumentation/providers/google_genai/stream_wrapper.py +238 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py +28 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/attributes/__init__.py +27 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/attributes/attributes.py +277 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/attributes/common.py +104 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py +162 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/stream_wrapper.py +302 -0
- agentops/instrumentation/providers/mem0/__init__.py +45 -0
- agentops/instrumentation/providers/mem0/common.py +377 -0
- agentops/instrumentation/providers/mem0/instrumentor.py +270 -0
- agentops/instrumentation/providers/mem0/memory.py +430 -0
- agentops/instrumentation/providers/openai/__init__.py +21 -0
- agentops/instrumentation/providers/openai/attributes/__init__.py +7 -0
- agentops/instrumentation/providers/openai/attributes/common.py +55 -0
- agentops/instrumentation/providers/openai/attributes/response.py +607 -0
- agentops/instrumentation/providers/openai/config.py +36 -0
- agentops/instrumentation/providers/openai/instrumentor.py +312 -0
- agentops/instrumentation/providers/openai/stream_wrapper.py +941 -0
- agentops/instrumentation/providers/openai/utils.py +44 -0
- agentops/instrumentation/providers/openai/v0.py +176 -0
- agentops/instrumentation/providers/openai/v0_wrappers.py +483 -0
- agentops/instrumentation/providers/openai/wrappers/__init__.py +30 -0
- agentops/instrumentation/providers/openai/wrappers/assistant.py +277 -0
- agentops/instrumentation/providers/openai/wrappers/chat.py +259 -0
- agentops/instrumentation/providers/openai/wrappers/completion.py +109 -0
- agentops/instrumentation/providers/openai/wrappers/embeddings.py +94 -0
- agentops/instrumentation/providers/openai/wrappers/image_gen.py +75 -0
- agentops/instrumentation/providers/openai/wrappers/responses.py +191 -0
- agentops/instrumentation/providers/openai/wrappers/shared.py +81 -0
- agentops/instrumentation/utilities/concurrent_futures/__init__.py +10 -0
- agentops/instrumentation/utilities/concurrent_futures/instrumentation.py +206 -0
- agentops/integration/callbacks/dspy/__init__.py +11 -0
- agentops/integration/callbacks/dspy/callback.py +471 -0
- agentops/integration/callbacks/langchain/README.md +59 -0
- agentops/integration/callbacks/langchain/__init__.py +15 -0
- agentops/integration/callbacks/langchain/callback.py +791 -0
- agentops/integration/callbacks/langchain/utils.py +54 -0
- agentops/legacy/crewai.md +121 -0
- agentops/logging/instrument_logging.py +4 -0
- agentops/sdk/README.md +220 -0
- agentops/sdk/core.py +75 -32
- agentops/sdk/descriptors/classproperty.py +28 -0
- agentops/sdk/exporters.py +152 -33
- agentops/semconv/README.md +125 -0
- agentops/semconv/span_kinds.py +0 -2
- agentops/validation.py +102 -63
- {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/METADATA +30 -40
- mseep_agentops-0.4.23.dist-info/RECORD +178 -0
- {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/WHEEL +1 -2
- mseep_agentops-0.4.18.dist-info/RECORD +0 -94
- mseep_agentops-0.4.18.dist-info/top_level.txt +0 -2
- tests/conftest.py +0 -10
- tests/unit/client/__init__.py +0 -1
- tests/unit/client/test_http_adapter.py +0 -221
- tests/unit/client/test_http_client.py +0 -206
- tests/unit/conftest.py +0 -54
- tests/unit/sdk/__init__.py +0 -1
- tests/unit/sdk/instrumentation_tester.py +0 -207
- tests/unit/sdk/test_attributes.py +0 -392
- tests/unit/sdk/test_concurrent_instrumentation.py +0 -468
- tests/unit/sdk/test_decorators.py +0 -763
- tests/unit/sdk/test_exporters.py +0 -241
- tests/unit/sdk/test_factory.py +0 -1188
- tests/unit/sdk/test_internal_span_processor.py +0 -397
- tests/unit/sdk/test_resource_attributes.py +0 -35
- tests/unit/test_config.py +0 -82
- tests/unit/test_context_manager.py +0 -777
- tests/unit/test_events.py +0 -27
- tests/unit/test_host_env.py +0 -54
- tests/unit/test_init_py.py +0 -501
- tests/unit/test_serialization.py +0 -433
- tests/unit/test_session.py +0 -676
- tests/unit/test_user_agent.py +0 -34
- tests/unit/test_validation.py +0 -405
- {tests → agentops/instrumentation/agentic/openai_agents/attributes}/__init__.py +0 -0
- /tests/unit/__init__.py → /agentops/instrumentation/providers/openai/attributes/tools.py +0 -0
- {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.23.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,922 @@
|
|
1
|
+
"""AG2 (AutoGen) Instrumentation Module
|
2
|
+
|
3
|
+
This module provides the main instrumentor class and wrapping functions for AG2 (AutoGen).
|
4
|
+
It focuses on collecting summary-level telemetry rather than individual message events.
|
5
|
+
"""
|
6
|
+
|
7
|
+
import json
|
8
|
+
from typing import Dict, Any
|
9
|
+
from wrapt import wrap_function_wrapper
|
10
|
+
|
11
|
+
from opentelemetry.trace import SpanKind
|
12
|
+
from opentelemetry.metrics import Meter
|
13
|
+
from opentelemetry.instrumentation.utils import unwrap as otel_unwrap
|
14
|
+
import contextvars
|
15
|
+
import threading
|
16
|
+
from opentelemetry import context as otel_context
|
17
|
+
from agentops.logging import logger
|
18
|
+
from agentops.instrumentation.common import (
|
19
|
+
CommonInstrumentor,
|
20
|
+
InstrumentorConfig,
|
21
|
+
StandardMetrics,
|
22
|
+
create_span,
|
23
|
+
SpanAttributeManager,
|
24
|
+
)
|
25
|
+
from agentops.instrumentation.agentic.ag2 import LIBRARY_NAME, LIBRARY_VERSION
|
26
|
+
from agentops.semconv.message import MessageAttributes
|
27
|
+
from agentops.semconv.span_attributes import SpanAttributes
|
28
|
+
from agentops.semconv.span_kinds import AgentOpsSpanKindValues
|
29
|
+
from agentops.semconv.agent import AgentAttributes
|
30
|
+
from agentops.semconv.workflow import WorkflowAttributes
|
31
|
+
from agentops.semconv.tool import ToolAttributes
|
32
|
+
|
33
|
+
|
34
|
+
class AG2Instrumentor(CommonInstrumentor):
|
35
|
+
"""Instrumentor for AG2 (AutoGen)
|
36
|
+
|
37
|
+
This instrumentor captures high-level events from AG2's agent interactions,
|
38
|
+
focusing on summaries rather than individual messages, and providing detailed
|
39
|
+
tool usage information.
|
40
|
+
"""
|
41
|
+
|
42
|
+
def __init__(self):
    """Configure the AG2 instrumentor.

    Registers the library name/version, enables metrics, and declares the
    ``ag2`` package dependency. ``wrapped_methods`` is intentionally empty:
    all wrapping is performed manually in ``_custom_wrap``.
    """
    config = InstrumentorConfig(
        library_name=LIBRARY_NAME,
        library_version=LIBRARY_VERSION,
        wrapped_methods=[],  # We'll use custom wrapping
        metrics_enabled=True,
        dependencies=["ag2 >= 0.3.2"],
    )
    super().__init__(config)
    # Created lazily in _initialize(); passed to every create_span() call.
    self._attribute_manager = None
|
52
|
+
|
53
|
+
def _create_metrics(self, meter: Meter) -> Dict[str, Any]:
    """Build the standard AgentOps metric set for AG2 telemetry."""
    standard_metrics = StandardMetrics.create_standard_metrics(meter)
    return standard_metrics
|
56
|
+
|
57
|
+
def _initialize(self, **kwargs):
    """Create the shared SpanAttributeManager used by every AG2 span.

    NOTE(review): service_name and deployment_environment are hard-coded
    here rather than taken from the AgentOps configuration — confirm this
    is intentional.
    """
    self._attribute_manager = SpanAttributeManager(service_name="agentops", deployment_environment="production")
|
60
|
+
|
61
|
+
def _custom_wrap(self, **kwargs):
    """Attach AG2-specific wrappers to conversable-agent and group-chat methods.

    Each entry pairs a module path and a dotted method name with a wrapper
    factory that receives the tracer. Missing modules or attributes are
    tolerated: AG2 installations vary, so failures are logged at debug level
    and the remaining targets are still wrapped.
    """

    wrap_targets = [
        ("autogen.agentchat.conversable_agent", "ConversableAgent.__init__", self._agent_init_wrapper),
        ("autogen.agentchat.conversable_agent", "ConversableAgent.run", self._agent_run_wrapper_with_context),
        ("autogen.agentchat.conversable_agent", "ConversableAgent.initiate_chat", self._initiate_chat_wrapper),
        (
            "autogen.agentchat.conversable_agent",
            "ConversableAgent.a_initiate_chat",
            self._async_initiate_chat_wrapper,
        ),
        (
            "autogen.agentchat.conversable_agent",
            "ConversableAgent._generate_oai_reply_from_client",
            self._generate_oai_reply_from_client_wrapper,
        ),
        ("autogen.agentchat.conversable_agent", "ConversableAgent.receive", self._receive_wrapper),
        ("autogen.agentchat.conversable_agent", "ConversableAgent.a_receive", self._async_receive_wrapper),
        ("autogen.agentchat.groupchat", "GroupChatManager.run_chat", self._group_chat_run_wrapper),
        ("autogen.agentchat.groupchat", "GroupChatManager.a_run_chat", self._async_group_chat_run_wrapper),
        # Tool-style entry points share one wrapper, parameterized by kind.
        (
            "autogen.agentchat.conversable_agent",
            "ConversableAgent.execute_function",
            lambda tracer: self._tool_execution_wrapper(tracer, "function"),
        ),
        (
            "autogen.agentchat.conversable_agent",
            "ConversableAgent.run_code",
            lambda tracer: self._tool_execution_wrapper(tracer, "code"),
        ),
        ("autogen.agentchat.groupchat", "GroupChat.select_speaker", self._group_chat_select_speaker_wrapper),
    ]

    for module, method, factory in wrap_targets:
        try:
            wrapper = factory(self._tracer)
            wrap_function_wrapper(module, method, wrapper)
        except (AttributeError, ModuleNotFoundError) as e:
            logger.debug(f"Failed to wrap {method}: {e}")
|
100
|
+
|
101
|
+
def _custom_unwrap(self, **kwargs):
    """Remove instrumentation from AG2.

    Each target is unwrapped independently: the original implementation
    wrapped the whole loop in one try/except, so the first failing
    ``otel_unwrap`` (e.g. a module that was never imported) aborted the
    loop and left every remaining method instrumented. Failures are
    logged at debug level per-method instead.
    """
    # Unwrap all instrumented methods (mirror of the list in _custom_wrap).
    methods_to_unwrap = [
        ("autogen.agentchat.conversable_agent", "ConversableAgent.__init__"),
        ("autogen.agentchat.conversable_agent", "ConversableAgent.run"),
        ("autogen.agentchat.conversable_agent", "ConversableAgent.initiate_chat"),
        ("autogen.agentchat.conversable_agent", "ConversableAgent.a_initiate_chat"),
        ("autogen.agentchat.conversable_agent", "ConversableAgent._generate_oai_reply_from_client"),
        ("autogen.agentchat.conversable_agent", "ConversableAgent.receive"),
        ("autogen.agentchat.conversable_agent", "ConversableAgent.a_receive"),
        ("autogen.agentchat.groupchat", "GroupChatManager.run_chat"),
        ("autogen.agentchat.groupchat", "GroupChatManager.a_run_chat"),
        ("autogen.agentchat.conversable_agent", "ConversableAgent.execute_function"),
        ("autogen.agentchat.conversable_agent", "ConversableAgent.run_code"),
        ("autogen.agentchat.groupchat", "GroupChat.select_speaker"),
    ]

    for module, method in methods_to_unwrap:
        try:
            otel_unwrap(module, method)
        except Exception as e:
            # Best-effort: keep unwrapping the rest even if one target fails.
            logger.debug(f"Failed to unwrap {method}: {e}")
    logger.debug("Successfully uninstrumented AG2")
|
125
|
+
|
126
|
+
def _set_llm_config_attributes(self, span, llm_config):
    """Copy model and sampling parameters from an AG2 llm_config dict onto *span*.

    Non-dict configs (e.g. False, None, or AG2 config objects) are ignored.
    Sampling parameters are only recorded when present and not None.
    """
    if not isinstance(llm_config, dict):
        return

    if "model" in llm_config:
        span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, llm_config["model"])

    sampling_params = (
        ("temperature", SpanAttributes.LLM_REQUEST_TEMPERATURE),
        ("top_p", SpanAttributes.LLM_REQUEST_TOP_P),
        ("frequency_penalty", SpanAttributes.LLM_REQUEST_FREQUENCY_PENALTY),
        ("presence_penalty", SpanAttributes.LLM_REQUEST_PRESENCE_PENALTY),
    )
    for key, attribute in sampling_params:
        value = llm_config.get(key)
        if value is not None:
            span.set_attribute(attribute, value)
|
141
|
+
|
142
|
+
def _agent_init_wrapper(self, tracer):
|
143
|
+
"""Wrapper for capturing agent initialization."""
|
144
|
+
|
145
|
+
def wrapper(wrapped, instance, args, kwargs):
|
146
|
+
try:
|
147
|
+
name = kwargs.get("name", "unnamed_agent")
|
148
|
+
llm_config = kwargs.get("llm_config", {})
|
149
|
+
|
150
|
+
result = wrapped(*args, **kwargs)
|
151
|
+
|
152
|
+
model = "unknown"
|
153
|
+
if isinstance(llm_config, dict) and llm_config:
|
154
|
+
model = llm_config.get("model", "unknown")
|
155
|
+
|
156
|
+
instance._agentops_metadata = {"name": name, "type": "ConversableAgent", "model": model}
|
157
|
+
|
158
|
+
return result
|
159
|
+
except Exception:
|
160
|
+
return wrapped(*args, **kwargs)
|
161
|
+
|
162
|
+
return wrapper
|
163
|
+
|
164
|
+
def _generate_oai_reply_from_client_wrapper(self, tracer):
    """Wrapper for capturing _generate_oai_reply_from_client calls with token metrics.

    Creates a CLIENT span named "<model>.llm" around the actual LLM client
    call, recording the full prompt list, the latest user message as the
    entity input, the completion, and (best-effort) token usage pulled from
    the OpenAIWrapper's ``actual_usage_summary``.
    """

    def wrapper(wrapped, instance, args, kwargs):
        agent_name = getattr(instance, "name", "unnamed_agent")

        # Get model name from llm_client for span naming; by AG2's call
        # convention the client is the first positional argument.
        llm_client = args[0] if args else kwargs.get("llm_client")

        # Extract model from the client's private _config_list (first entry wins).
        model_name = "unknown"
        if hasattr(llm_client, "_config_list") and llm_client._config_list:
            if isinstance(llm_client._config_list, list) and len(llm_client._config_list) > 0:
                config = llm_client._config_list[0]
                if isinstance(config, dict) and "model" in config:
                    model_name = config["model"]

        span_name = f"{model_name}.llm"

        with create_span(
            tracer, span_name, kind=SpanKind.CLIENT, attribute_manager=self._attribute_manager
        ) as span:
            # Set span kind for actual LLM client call
            span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.LLM.value)
            span.set_attribute(AgentAttributes.AGENT_NAME, agent_name)
            span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "llm")
            span.set_attribute("llm.client_call", "true")

            # Get messages from args (second positional, else keyword).
            messages = args[1] if len(args) > 1 else kwargs.get("messages", [])

            # Extract input from messages and set gen_ai.prompt
            if messages and isinstance(messages, list) and len(messages) > 0:
                # Set gen_ai.prompt array with full conversation history;
                # only user/assistant/system messages with non-empty content
                # are indexed, so prompt indices can be denser than the list.
                prompt_index = 0
                for msg in messages:
                    if isinstance(msg, dict) and msg.get("role") in ["user", "assistant", "system"]:
                        role = msg.get("role")
                        content = msg.get("content", "")
                        if content and role:
                            span.set_attribute(
                                f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content", self._safe_str(content)
                            )
                            span.set_attribute(f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", role)
                            prompt_index += 1

                # Set entity input to the latest user message (what triggered
                # this LLM call) — the loop keeps the last user entry seen.
                latest_user_message = None
                for msg in messages:
                    if isinstance(msg, dict) and msg.get("role") == "user":
                        content = msg.get("content", "")
                        if content:
                            latest_user_message = content

                if latest_user_message:
                    span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_INPUT, self._safe_str(latest_user_message))

            # Call the wrapped method - this is where the actual LLM call happens
            result = wrapped(*args, **kwargs)

            # Set the output and gen_ai.completion
            if result:
                if isinstance(result, dict):
                    content = result.get("content", "")
                    if content:
                        content_str = self._safe_str(content)
                        span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_OUTPUT, content_str)
                        span.set_attribute(f"{SpanAttributes.LLM_COMPLETIONS}.0.content", content_str)
                        span.set_attribute(f"{SpanAttributes.LLM_COMPLETIONS}.0.role", "assistant")

                    # If model information is in the result
                    if "model" in result:
                        span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, result["model"])
                elif isinstance(result, str):
                    # Handle string result (which is what AG2 returns)
                    result_str = self._safe_str(result)

                    # Set entity output
                    span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_OUTPUT, result_str)

                    # Set gen_ai.completion with full content
                    span.set_attribute(f"{SpanAttributes.LLM_COMPLETIONS}.0.content", result_str)
                    span.set_attribute(f"{SpanAttributes.LLM_COMPLETIONS}.0.role", "assistant")

            # Try to get token metrics from the client's usage tracking
            try:
                # The OpenAIWrapper tracks usage in actual_usage_summary and total_usage_summary
                if hasattr(llm_client, "actual_usage_summary") and llm_client.actual_usage_summary:
                    # Get the latest usage. NOTE(review): this reads the
                    # cumulative summary after the call, so counts may include
                    # earlier calls made with the same client — verify.
                    for model, usage in llm_client.actual_usage_summary.items():
                        if model != "total_cost" and isinstance(usage, dict):
                            prompt_tokens = usage.get("prompt_tokens", 0)
                            completion_tokens = usage.get("completion_tokens", 0)
                            total_tokens = usage.get("total_tokens", 0)
                            cost = usage.get("cost", 0.0)

                            # Set token usage metrics. NOTE(review): values are
                            # stringified here; confirm downstream consumers
                            # don't expect numeric attributes.
                            if prompt_tokens > 0:
                                span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, str(prompt_tokens))
                            if completion_tokens > 0:
                                span.set_attribute(
                                    SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, str(completion_tokens)
                                )
                            if total_tokens > 0:
                                span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, str(total_tokens))
                            if cost > 0:
                                span.set_attribute(SpanAttributes.LLM_USAGE_TOOL_COST, str(cost))

                            # Set request/response model
                            span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, model)
                            span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, model)
                            span.set_attribute(SpanAttributes.LLM_SYSTEM, "ag2")

                            break  # Use the first model's metrics
            except Exception as e:
                logger.debug(f"[AG2 DEBUG] Could not extract token metrics: {e}")

            return result

    return wrapper
|
284
|
+
|
285
|
+
def _initiate_chat_wrapper(self, tracer):
    """Wrapper for capturing individual chat initiation as a parent span.

    Wraps ConversableAgent.initiate_chat with an agent-kind workflow span
    named "ag2.chat.<initiator>_to_<recipient>.workflow", recording both
    agents, the initial message, system messages, and the recipient's LLM
    config. Chat history is extracted after the wrapped call completes.
    """

    def wrapper(wrapped, instance, args, kwargs):
        # Recipient agent is the first positional argument; without it
        # there is nothing meaningful to trace, so pass straight through.
        recipient_agent = args[0] if args else None
        if not recipient_agent:
            return wrapped(*args, **kwargs)

        # Get agent names for span identification
        initiator_name = getattr(instance, "name", "unnamed_initiator")
        recipient_name = getattr(recipient_agent, "name", "unnamed_agent")

        span_name = f"ag2.chat.{initiator_name}_to_{recipient_name}.workflow"

        with create_span(
            tracer, span_name, kind=SpanKind.INTERNAL, attribute_manager=self._attribute_manager
        ) as span:
            # Set span kind as agent for proper categorization
            span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.AGENT.value)
            span.set_attribute(AgentAttributes.FROM_AGENT, initiator_name)
            span.set_attribute(AgentAttributes.TO_AGENT, recipient_name)
            span.set_attribute("agent.type", "individual")
            span.set_attribute("agent.initiator", initiator_name)
            span.set_attribute("agent.recipient", recipient_name)

            # Set agentops entity attributes
            span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent")

            # Extract initial message (keyword-only in this code path).
            initial_message = kwargs.get("message", "")
            if initial_message:
                initial_message = self._safe_str(initial_message)
                span.set_attribute("agent.initial_message", initial_message)
                # Set entity input
                span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_INPUT, initial_message)

            # Extract system messages and put them in agent attributes
            initiator_system_msg = getattr(instance, "system_message", "")
            if initiator_system_msg:
                initiator_system_msg = self._safe_str(initiator_system_msg)
                span.set_attribute("agent.initiator_system_message", initiator_system_msg)

            recipient_system_msg = getattr(recipient_agent, "system_message", "")
            if recipient_system_msg:
                recipient_system_msg = self._safe_str(recipient_system_msg)
                span.set_attribute("agent.system_instruction", recipient_system_msg)
                # Also set in gen_ai for compatibility
                span.set_attribute(SpanAttributes.LLM_REQUEST_SYSTEM_INSTRUCTION, recipient_system_msg)

            # Extract LLM config and set gen_ai attributes
            recipient_llm_config = getattr(recipient_agent, "llm_config", {})

            if isinstance(recipient_llm_config, dict) and recipient_llm_config:
                model = recipient_llm_config.get("model", "unknown")
                span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, model)
                span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, model)
                span.set_attribute(SpanAttributes.LLM_SYSTEM, "ag2")

                # Also set LLM config attributes (temperature, top_p, ...)
                self._set_llm_config_attributes(span, recipient_llm_config)

            result = wrapped(*args, **kwargs)

            # Extract chat history after completion
            self._extract_chat_history(span, instance, recipient_agent)

            return result

    return wrapper
|
354
|
+
|
355
|
+
def _async_initiate_chat_wrapper(self, tracer):
    """Wrapper for capturing async individual chat initiation as a parent span.

    Async counterpart of _initiate_chat_wrapper for
    ConversableAgent.a_initiate_chat; the span stays open across the await
    so the whole chat runs under it. Note: this variant tags
    agent.type="individual_async" and skips the initiator system message
    and explicit model attributes recorded by the sync version.
    """

    async def wrapper(wrapped, instance, args, kwargs):
        # Recipient agent is the first positional argument; without it
        # there is nothing meaningful to trace, so pass straight through.
        recipient_agent = args[0] if args else None
        if not recipient_agent:
            return await wrapped(*args, **kwargs)

        # Get agent names for span identification
        initiator_name = getattr(instance, "name", "unnamed_initiator")
        recipient_name = getattr(recipient_agent, "name", "unnamed_agent")

        span_name = f"ag2.chat.{initiator_name}_to_{recipient_name}.workflow"

        with create_span(
            tracer, span_name, kind=SpanKind.INTERNAL, attribute_manager=self._attribute_manager
        ) as span:
            # Set span kind as agent for proper categorization
            span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.AGENT.value)
            span.set_attribute(AgentAttributes.FROM_AGENT, initiator_name)
            span.set_attribute(AgentAttributes.TO_AGENT, recipient_name)
            span.set_attribute("agent.type", "individual_async")
            span.set_attribute("agent.initiator", initiator_name)
            span.set_attribute("agent.recipient", recipient_name)
            span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent")

            # Extract initial message
            initial_message = kwargs.get("message", "")
            if initial_message:
                initial_message = self._safe_str(initial_message)
                span.set_attribute("agent.initial_message", initial_message)
                span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_INPUT, initial_message)

            # Extract system messages
            recipient_system_msg = getattr(recipient_agent, "system_message", "")
            if recipient_system_msg:
                recipient_system_msg = self._safe_str(recipient_system_msg)
                span.set_attribute("agent.system_instruction", recipient_system_msg)
                span.set_attribute(SpanAttributes.LLM_REQUEST_SYSTEM_INSTRUCTION, recipient_system_msg)

            # Extract LLM config
            recipient_llm_config = getattr(recipient_agent, "llm_config", {})
            if isinstance(recipient_llm_config, dict) and recipient_llm_config:
                self._set_llm_config_attributes(span, recipient_llm_config)

            result = await wrapped(*args, **kwargs)

            # Extract chat history after completion
            self._extract_chat_history(span, instance, recipient_agent)

            return result

    return wrapper
|
408
|
+
|
409
|
+
def _receive_wrapper(self, tracer):
    """Wrapper for capturing message receive events.

    Opens an agent-kind span around ConversableAgent.receive, recording
    the receiving agent, the sender (when available), and the textual
    content of the incoming message as the entity input.
    """

    def wrapper(wrapped, instance, args, kwargs):
        receiver = getattr(instance, "name", "unnamed_agent")

        with create_span(
            tracer,
            f"ag2.agent.{receiver}.receive",
            kind=SpanKind.INTERNAL,
            attribute_manager=self._attribute_manager,
        ) as span:
            span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.AGENT.value)
            span.set_attribute(AgentAttributes.AGENT_NAME, receiver)
            span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent")

            # Positional layout is (message, sender, ...); fall back to kwargs.
            incoming = args[0] if args else kwargs.get("message", "")
            origin = args[1] if len(args) > 1 else kwargs.get("sender")

            if origin:
                span.set_attribute("agent.sender", getattr(origin, "name", "unknown"))

            if incoming:
                # Dict messages carry their text under "content"; plain
                # strings are used as-is. Other shapes are not recorded.
                text = None
                if isinstance(incoming, dict):
                    body = incoming.get("content", "")
                    if body:
                        text = body
                elif isinstance(incoming, str):
                    text = incoming
                if text:
                    span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_INPUT, self._safe_str(text))

            return wrapped(*args, **kwargs)

    return wrapper
|
443
|
+
|
444
|
+
def _async_receive_wrapper(self, tracer):
    """Wrapper for async capturing message reception.

    Async counterpart of _receive_wrapper for ConversableAgent.a_receive.
    Unlike the sync version it also records message.role/message.content,
    and reads the sender name from the message dict ("agent.sender") as
    well as from the sender argument ("agent.sender_name").
    """

    async def wrapper(wrapped, instance, args, kwargs):
        agent_name = getattr(instance, "name", "unnamed_agent")
        span_name = f"ag2.agent.{agent_name}.async_receive"

        with create_span(
            tracer, span_name, kind=SpanKind.INTERNAL, attribute_manager=self._attribute_manager
        ) as span:
            span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.AGENT.value)
            span.set_attribute(AgentAttributes.AGENT_NAME, agent_name)
            span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "agent")

            # Get message from the first argument (no kwargs fallback here).
            message = args[0] if args else None

            # Enhanced message processing
            if message:
                if isinstance(message, dict):
                    # Dict message format: sender name is under "name".
                    sender_name = message.get("name", "unknown")
                    content = self._extract_message_content(message)
                    role = message.get("role", "user")

                    # Set sender and message attributes
                    span.set_attribute("agent.sender", sender_name)
                    span.set_attribute("message.role", role)

                    if content:
                        span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_INPUT, content)
                        span.set_attribute("message.content", content)

                elif isinstance(message, str):
                    # String message format
                    span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_INPUT, message)
                    span.set_attribute("message.content", message)

            # Get sender from the second argument if available
            sender = args[1] if len(args) > 1 else None
            if sender and hasattr(sender, "name"):
                span.set_attribute("agent.sender_name", sender.name)

            return await wrapped(*args, **kwargs)

    return wrapper
|
490
|
+
|
491
|
+
def _async_group_chat_run_wrapper(self, tracer):
|
492
|
+
"""Wrapper for capturing async group chat execution."""
|
493
|
+
|
494
|
+
async def wrapper(wrapped, instance, args, kwargs):
|
495
|
+
with create_span(
|
496
|
+
tracer,
|
497
|
+
"ag2.groupchat.run.task.async",
|
498
|
+
kind=SpanKind.INTERNAL,
|
499
|
+
attribute_manager=self._attribute_manager,
|
500
|
+
) as span:
|
501
|
+
group_chat = getattr(instance, "groupchat", None)
|
502
|
+
agents = getattr(group_chat, "agents", []) if group_chat else []
|
503
|
+
agent_names = [getattr(agent, "name", f"agent_{i}") for i, agent in enumerate(agents)]
|
504
|
+
|
505
|
+
span.set_attribute(AgentAttributes.AGENT_ROLE, "GroupChatManager")
|
506
|
+
span.set_attribute(AgentAttributes.AGENT_NAME, getattr(instance, "name", "unnamed_manager"))
|
507
|
+
span.set_attribute("groupchat.agents", ", ".join(agent_names))
|
508
|
+
span.set_attribute("groupchat.agent_count", len(agents))
|
509
|
+
|
510
|
+
# Capture input message if available
|
511
|
+
message = kwargs.get("message", "")
|
512
|
+
if message:
|
513
|
+
content_to_set = self._extract_message_content(message)
|
514
|
+
span.set_attribute("groupchat.input_message", content_to_set)
|
515
|
+
|
516
|
+
result = await wrapped(*args, **kwargs)
|
517
|
+
self._capture_group_chat_summary(span, instance, result)
|
518
|
+
return result
|
519
|
+
|
520
|
+
return wrapper
|
521
|
+
|
522
|
+
def _agent_run_wrapper_with_context(self, tracer):
    """Wrapper for capturing agent run with context propagation and proper span lifecycle.

    AG2's ``run()`` executes conversation work on background threads, which
    would otherwise lose both the ``contextvars`` state and the active
    OpenTelemetry span context.  To keep child spans parented correctly,
    ``threading.Thread.__init__``/``start`` are temporarily monkey-patched
    for the duration of the wrapped call so that any thread spawned inside
    it re-enters both captured contexts.

    NOTE(review): the patch is applied to ``threading.Thread`` globally, so
    concurrent ``run()`` calls from other threads would observe each
    other's patched constructor — presumably acceptable for AG2's usage;
    confirm.
    """

    def wrapper(wrapped, instance, args, kwargs):
        # Identify the agent; metadata set by this instrumentor is optional,
        # so defaults are used when the instance was not annotated.
        agent_name = getattr(instance, "name", "unnamed_agent")
        agent_type = getattr(instance, "_agentops_metadata", {}).get("type", "ConversableAgent")
        span_name = f"ag2.agent.{agent_name}.run.workflow"

        with create_span(
            tracer, span_name, kind=SpanKind.INTERNAL, attribute_manager=self._attribute_manager
        ) as span:
            model = getattr(instance, "_agentops_metadata", {}).get("model", "unknown")

            span.set_attribute(AgentAttributes.AGENT_NAME, agent_name)
            span.set_attribute(AgentAttributes.AGENT_ROLE, agent_type)
            span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, model)

            llm_config = getattr(instance, "llm_config", None)
            self._set_llm_config_attributes(span, llm_config)

            # Capture input message if available
            message = kwargs.get("message", "")
            if message:
                content_to_set = self._extract_message_content(message)
                span.set_attribute("agent.run.input_message", content_to_set)

            # Capture BOTH contextvars and OpenTelemetry context so worker
            # threads spawned by run() inherit the current span as parent.
            ctx = contextvars.copy_context()
            current_otel_context = otel_context.get_current()

            # Thread tracking for proper span lifecycle.
            # NOTE(review): threads are appended in context_aware_start but
            # never joined or inspected afterwards — confirm intent.
            active_threads = []

            # Store the original Thread.__init__ and start methods
            original_thread_init = threading.Thread.__init__
            original_thread_start = threading.Thread.start

            def context_aware_init(self, group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None):
                """Modified Thread.__init__ that wraps the target to run in both captured contexts."""
                if kwargs is None:
                    kwargs = {}
                if target and callable(target):
                    original_target = target

                    def wrapped_target(*target_args, **target_kwargs):
                        # Run in both contextvars AND OpenTelemetry context
                        def run_with_otel_context():
                            # Attach the OpenTelemetry context in the thread
                            token = otel_context.attach(current_otel_context)
                            try:
                                return original_target(*target_args, **target_kwargs)
                            finally:
                                otel_context.detach(token)

                        # Run with contextvars context
                        return ctx.run(run_with_otel_context)

                    target = wrapped_target

                # Keep original daemon setting but ensure conversations don't run indefinitely
                # If daemon was not explicitly set, default to False (AG2's normal behavior)
                if daemon is None:
                    daemon = False

                original_thread_init(
                    self, group=group, target=target, name=name, args=args, kwargs=kwargs, daemon=daemon
                )

            def context_aware_start(self):
                """Modified Thread.start that tracks the thread."""
                active_threads.append(self)
                return original_thread_start(self)

            # Temporarily patch Thread.__init__ and start just for this run() call
            threading.Thread.__init__ = context_aware_init
            threading.Thread.start = context_aware_start
            try:
                response = wrapped(*args, **kwargs)
            except Exception as e:
                logger.error(f"[AG2 DEBUG] Error in agent.run execution: {e}")
                raise
            finally:
                # Always restore the original Thread methods
                try:
                    threading.Thread.__init__ = original_thread_init
                    threading.Thread.start = original_thread_start
                except Exception as e:
                    logger.error(f"[AG2 DEBUG] Error restoring Thread methods: {e}")
                    # Force restore — fall back to any __wrapped__ attribute
                    # before the saved originals.
                    threading.Thread.__init__ = (
                        threading.Thread.__init__.__wrapped__
                        if hasattr(threading.Thread.__init__, "__wrapped__")
                        else original_thread_init
                    )
                    threading.Thread.start = (
                        threading.Thread.start.__wrapped__
                        if hasattr(threading.Thread.start, "__wrapped__")
                        else original_thread_start
                    )

            # Try to get final results from response if available
            try:
                if hasattr(response, "get_chat_results"):
                    chat_results = response.get_chat_results()
                    if chat_results:
                        self._capture_conversation_summary(span, instance, chat_results)
                elif hasattr(response, "chat_history"):
                    self._capture_conversation_summary(span, instance, response)
                elif hasattr(response, "get") and callable(response.get):
                    # Dict-like response: best-effort model extraction only.
                    model_info = response.get("model", "")
                    if model_info:
                        span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, str(model_info))
            except Exception as e:
                logger.debug(f"[AG2 DEBUG] Could not extract final results: {e}")

            span.set_attribute(WorkflowAttributes.WORKFLOW_STEP_STATUS, "completed")
            return response

    return wrapper
|
641
|
+
|
642
|
+
def _group_chat_run_wrapper(self, tracer):
|
643
|
+
"""Wrapper for capturing group chat execution."""
|
644
|
+
|
645
|
+
def wrapper(wrapped, instance, args, kwargs):
|
646
|
+
with create_span(
|
647
|
+
tracer, "ag2.groupchat.run.task", kind=SpanKind.INTERNAL, attribute_manager=self._attribute_manager
|
648
|
+
) as span:
|
649
|
+
group_chat = getattr(instance, "groupchat", None)
|
650
|
+
agents = getattr(group_chat, "agents", []) if group_chat else []
|
651
|
+
agent_names = [getattr(agent, "name", f"agent_{i}") for i, agent in enumerate(agents)]
|
652
|
+
|
653
|
+
span.set_attribute(AgentAttributes.AGENT_ROLE, "GroupChatManager")
|
654
|
+
span.set_attribute(AgentAttributes.AGENT_NAME, getattr(instance, "name", "unnamed_manager"))
|
655
|
+
span.set_attribute("groupchat.agents", ", ".join(agent_names))
|
656
|
+
span.set_attribute("groupchat.agent_count", len(agents))
|
657
|
+
|
658
|
+
# Capture input message if available
|
659
|
+
message = kwargs.get("message", "")
|
660
|
+
if message:
|
661
|
+
content_to_set = self._extract_message_content(message)
|
662
|
+
span.set_attribute("groupchat.input_message", content_to_set)
|
663
|
+
|
664
|
+
result = wrapped(*args, **kwargs)
|
665
|
+
self._capture_group_chat_summary(span, instance, result)
|
666
|
+
return result
|
667
|
+
|
668
|
+
return wrapper
|
669
|
+
|
670
|
+
def _tool_execution_wrapper(self, tracer, tool_type):
|
671
|
+
"""Wrapper for capturing tool execution."""
|
672
|
+
|
673
|
+
def wrapper(wrapped, instance, args, kwargs):
|
674
|
+
span_name = f"ag2.tool.{tool_type}.tool_usage"
|
675
|
+
|
676
|
+
with create_span(
|
677
|
+
tracer, span_name, kind=SpanKind.CLIENT, attribute_manager=self._attribute_manager
|
678
|
+
) as span:
|
679
|
+
# Set span kind and type as tool for proper categorization
|
680
|
+
span.set_attribute(SpanAttributes.AGENTOPS_SPAN_KIND, AgentOpsSpanKindValues.TOOL.value)
|
681
|
+
agent_name = getattr(instance, "name", "unnamed_agent")
|
682
|
+
span.set_attribute(AgentAttributes.AGENT_NAME, agent_name)
|
683
|
+
span.set_attribute(ToolAttributes.TOOL_NAME, tool_type)
|
684
|
+
span.set_attribute(SpanAttributes.AGENTOPS_ENTITY_NAME, "tool")
|
685
|
+
|
686
|
+
if tool_type == "function" and args:
|
687
|
+
func_call = args[0]
|
688
|
+
if isinstance(func_call, dict):
|
689
|
+
span.set_attribute(
|
690
|
+
MessageAttributes.TOOL_CALL_NAME.format(i=0), func_call.get("name", "unknown")
|
691
|
+
)
|
692
|
+
if "arguments" in func_call:
|
693
|
+
try:
|
694
|
+
span.set_attribute(
|
695
|
+
MessageAttributes.TOOL_CALL_ARGUMENTS.format(i=0),
|
696
|
+
json.dumps(func_call["arguments"]),
|
697
|
+
)
|
698
|
+
except:
|
699
|
+
pass
|
700
|
+
|
701
|
+
elif tool_type == "code" and args:
|
702
|
+
code = args[0]
|
703
|
+
if isinstance(code, str):
|
704
|
+
span.set_attribute("tool.code.size", len(code))
|
705
|
+
span.set_attribute("tool.code.language", kwargs.get("lang", "unknown"))
|
706
|
+
|
707
|
+
result = wrapped(*args, **kwargs)
|
708
|
+
|
709
|
+
self._process_tool_result(span, result, tool_type)
|
710
|
+
|
711
|
+
return result
|
712
|
+
|
713
|
+
return wrapper
|
714
|
+
|
715
|
+
def _group_chat_select_speaker_wrapper(self, tracer):
|
716
|
+
"""Wrapper for capturing which agent is selected to speak in a group chat."""
|
717
|
+
|
718
|
+
def wrapper(wrapped, instance, args, kwargs):
|
719
|
+
previous_speaker_name = "unknown"
|
720
|
+
messages = getattr(instance, "messages", [])
|
721
|
+
if messages and len(messages) > 0:
|
722
|
+
previous_speaker_name = messages[-1].get("name", "unknown")
|
723
|
+
|
724
|
+
selected_speaker = wrapped(*args, **kwargs)
|
725
|
+
|
726
|
+
if not selected_speaker:
|
727
|
+
return selected_speaker
|
728
|
+
|
729
|
+
current_speaker_name = getattr(selected_speaker, "name", "unnamed")
|
730
|
+
|
731
|
+
with create_span(
|
732
|
+
tracer, "ag2.handoff", kind=SpanKind.INTERNAL, attribute_manager=self._attribute_manager
|
733
|
+
) as span:
|
734
|
+
span.set_attribute(AgentAttributes.FROM_AGENT, previous_speaker_name)
|
735
|
+
span.set_attribute(AgentAttributes.TO_AGENT, current_speaker_name)
|
736
|
+
span.set_attribute(AgentAttributes.AGENT_NAME, current_speaker_name)
|
737
|
+
span.set_attribute(AgentAttributes.AGENT_ROLE, selected_speaker.__class__.__name__)
|
738
|
+
|
739
|
+
system_message = getattr(selected_speaker, "system_message", "")
|
740
|
+
if system_message:
|
741
|
+
system_message = self._safe_str(system_message)
|
742
|
+
span.set_attribute(SpanAttributes.LLM_REQUEST_SYSTEM_INSTRUCTION, system_message)
|
743
|
+
|
744
|
+
self._set_llm_config_attributes(span, getattr(selected_speaker, "llm_config", None))
|
745
|
+
|
746
|
+
if messages:
|
747
|
+
for msg in reversed(messages):
|
748
|
+
if msg.get("name") == current_speaker_name:
|
749
|
+
if "metadata" in msg and isinstance(msg["metadata"], dict):
|
750
|
+
meta = msg["metadata"]
|
751
|
+
if "model" in meta:
|
752
|
+
span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, meta["model"])
|
753
|
+
break
|
754
|
+
|
755
|
+
span.set_attribute("groupchat.role", "participant")
|
756
|
+
|
757
|
+
return selected_speaker
|
758
|
+
|
759
|
+
return wrapper
|
760
|
+
|
761
|
+
# Helper methods
|
762
|
+
def _safe_str(self, value):
|
763
|
+
"""Safely convert value to string."""
|
764
|
+
if value is None:
|
765
|
+
return ""
|
766
|
+
return str(value) if not isinstance(value, str) else value
|
767
|
+
|
768
|
+
def _extract_message_content(self, message):
|
769
|
+
"""Extract content from various message formats."""
|
770
|
+
if isinstance(message, dict):
|
771
|
+
content = message.get("content", "")
|
772
|
+
return self._safe_str(content)
|
773
|
+
elif isinstance(message, str):
|
774
|
+
return message
|
775
|
+
else:
|
776
|
+
return str(message)
|
777
|
+
|
778
|
+
def _extract_chat_history(self, span, initiator, recipient):
|
779
|
+
"""Extract chat history information."""
|
780
|
+
try:
|
781
|
+
# Get recipient chat history
|
782
|
+
recipient_chat_history = getattr(recipient, "chat_history", [])
|
783
|
+
|
784
|
+
if recipient_chat_history:
|
785
|
+
message_count = len(recipient_chat_history)
|
786
|
+
span.set_attribute("conversation.message_count", message_count)
|
787
|
+
|
788
|
+
# Record sample of conversation messages
|
789
|
+
if message_count > 0:
|
790
|
+
first_msg = recipient_chat_history[0]
|
791
|
+
last_msg = recipient_chat_history[-1]
|
792
|
+
|
793
|
+
self._set_message_attributes(span, first_msg, 0, "prompt")
|
794
|
+
self._set_message_attributes(span, last_msg, 0, "completion")
|
795
|
+
|
796
|
+
# Check for tool usage
|
797
|
+
span.set_attribute("chat.used_tools", "tool_calls" in last_msg)
|
798
|
+
|
799
|
+
# Capture metadata
|
800
|
+
if "metadata" in last_msg and isinstance(last_msg["metadata"], dict):
|
801
|
+
meta = last_msg["metadata"]
|
802
|
+
if "model" in meta:
|
803
|
+
span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, meta["model"])
|
804
|
+
|
805
|
+
except Exception as e:
|
806
|
+
logger.debug(f"Could not extract chat history: {e}")
|
807
|
+
|
808
|
+
def _set_message_attributes(self, span, message, index, prefix):
|
809
|
+
"""Set message attributes on span."""
|
810
|
+
if isinstance(message, dict):
|
811
|
+
role = message.get("role", "unknown")
|
812
|
+
content = message.get("content", "")
|
813
|
+
name = message.get("name", "unknown")
|
814
|
+
|
815
|
+
span.set_attribute(f"messaging.{prefix}.role.{index}", role)
|
816
|
+
content = self._safe_str(content)
|
817
|
+
span.set_attribute(f"messaging.{prefix}.content.{index}", content)
|
818
|
+
span.set_attribute(f"messaging.{prefix}.speaker.{index}", name)
|
819
|
+
|
820
|
+
def _process_tool_result(self, span, result, tool_type):
|
821
|
+
"""Process and set tool execution result attributes."""
|
822
|
+
if tool_type == "function" and isinstance(result, tuple) and len(result) > 0:
|
823
|
+
success = result[0] if isinstance(result[0], bool) else False
|
824
|
+
span.set_attribute(ToolAttributes.TOOL_STATUS, "success" if success else "error")
|
825
|
+
|
826
|
+
if len(result) > 1 and isinstance(result[1], dict):
|
827
|
+
try:
|
828
|
+
span.set_attribute(ToolAttributes.TOOL_RESULT, json.dumps(result[1]))
|
829
|
+
except:
|
830
|
+
pass
|
831
|
+
|
832
|
+
if tool_type == "code" and isinstance(result, tuple) and len(result) >= 3:
|
833
|
+
exit_code = result[0]
|
834
|
+
span.set_attribute("exit_code", exit_code)
|
835
|
+
span.set_attribute(ToolAttributes.TOOL_STATUS, "success" if exit_code == 0 else "error")
|
836
|
+
|
837
|
+
if len(result) > 1 and result[1]:
|
838
|
+
stdout = self._safe_str(result[1])
|
839
|
+
span.set_attribute("tool.code.stdout", stdout)
|
840
|
+
|
841
|
+
if len(result) > 2 and result[2]:
|
842
|
+
stderr = self._safe_str(result[2])
|
843
|
+
span.set_attribute("tool.code.stderr", stderr)
|
844
|
+
|
845
|
+
def _capture_conversation_summary(self, span, agent, response):
|
846
|
+
"""Extract and record conversation summary data."""
|
847
|
+
if not hasattr(response, "chat_history"):
|
848
|
+
return
|
849
|
+
|
850
|
+
try:
|
851
|
+
chat_history = getattr(response, "chat_history", [])
|
852
|
+
message_count = len(chat_history)
|
853
|
+
user_messages = sum(1 for msg in chat_history if msg.get("role") == "user")
|
854
|
+
assistant_messages = sum(1 for msg in chat_history if msg.get("role") == "assistant")
|
855
|
+
|
856
|
+
span.set_attribute("conversation.message_count", message_count)
|
857
|
+
span.set_attribute("conversation.user_messages", user_messages)
|
858
|
+
span.set_attribute("conversation.assistant_messages", assistant_messages)
|
859
|
+
|
860
|
+
# Set prompts and completions
|
861
|
+
span.set_attribute(SpanAttributes.LLM_PROMPTS, user_messages)
|
862
|
+
span.set_attribute(SpanAttributes.LLM_COMPLETIONS, assistant_messages)
|
863
|
+
if message_count > 0:
|
864
|
+
for i, msg in enumerate(chat_history[: min(2, message_count)]):
|
865
|
+
self._set_message_attributes(span, msg, i, "prompt")
|
866
|
+
|
867
|
+
if message_count > 2:
|
868
|
+
self._set_message_attributes(span, chat_history[-1], 0, "completion")
|
869
|
+
except Exception as e:
|
870
|
+
logger.error(f"[AG2 DEBUG] Error capturing conversation summary: {e}")
|
871
|
+
|
872
|
+
def _capture_group_chat_summary(self, span, manager, result):
|
873
|
+
"""Extract and record group chat summary data."""
|
874
|
+
try:
|
875
|
+
messages = getattr(manager.groupchat, "messages", [])
|
876
|
+
message_count = len(messages)
|
877
|
+
|
878
|
+
agent_message_counts = {}
|
879
|
+
for message in messages:
|
880
|
+
agent_name = message.get("name", "unknown")
|
881
|
+
if agent_name not in agent_message_counts:
|
882
|
+
agent_message_counts[agent_name] = 0
|
883
|
+
agent_message_counts[agent_name] += 1
|
884
|
+
|
885
|
+
span.set_attribute("conversation.message_count", message_count)
|
886
|
+
|
887
|
+
for agent_name, count in agent_message_counts.items():
|
888
|
+
span.set_attribute(f"conversation.agent_messages.{agent_name}", count)
|
889
|
+
|
890
|
+
if hasattr(manager.groupchat, "speaker_selection_method"):
|
891
|
+
span.set_attribute(
|
892
|
+
"groupchat.speaker_selection_method", str(manager.groupchat.speaker_selection_method)
|
893
|
+
)
|
894
|
+
|
895
|
+
if message_count > 0:
|
896
|
+
for i, msg in enumerate(messages[: min(2, message_count)]):
|
897
|
+
role = msg.get("role", "unknown")
|
898
|
+
content = msg.get("content", "")
|
899
|
+
name = msg.get("name", "unknown")
|
900
|
+
|
901
|
+
span.set_attribute(MessageAttributes.PROMPT_ROLE.format(i=i), role)
|
902
|
+
content = self._safe_str(content)
|
903
|
+
span.set_attribute(MessageAttributes.PROMPT_CONTENT.format(i=i), content)
|
904
|
+
span.set_attribute(MessageAttributes.PROMPT_SPEAKER.format(i=i), name)
|
905
|
+
|
906
|
+
if message_count > 2:
|
907
|
+
last_msg = messages[-1]
|
908
|
+
role = last_msg.get("role", "unknown")
|
909
|
+
content = last_msg.get("content", "")
|
910
|
+
name = last_msg.get("name", "unknown")
|
911
|
+
|
912
|
+
span.set_attribute(MessageAttributes.COMPLETION_ROLE.format(i=0), role)
|
913
|
+
content = self._safe_str(content)
|
914
|
+
span.set_attribute(MessageAttributes.COMPLETION_CONTENT.format(i=0), content)
|
915
|
+
span.set_attribute(MessageAttributes.COMPLETION_SPEAKER.format(i=0), name)
|
916
|
+
|
917
|
+
if "metadata" in last_msg and isinstance(last_msg["metadata"], dict):
|
918
|
+
meta = last_msg["metadata"]
|
919
|
+
if "model" in meta:
|
920
|
+
span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, meta["model"])
|
921
|
+
except Exception as e:
|
922
|
+
logger.error(f"Error capturing group chat summary: {e}")
|