mseep-agentops 0.4.18__py3-none-any.whl → 0.4.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentops/__init__.py +0 -0
- agentops/client/api/base.py +28 -30
- agentops/client/api/versions/v3.py +29 -25
- agentops/client/api/versions/v4.py +87 -46
- agentops/client/client.py +98 -29
- agentops/client/http/README.md +87 -0
- agentops/client/http/http_client.py +126 -172
- agentops/config.py +8 -2
- agentops/instrumentation/OpenTelemetry.md +133 -0
- agentops/instrumentation/README.md +167 -0
- agentops/instrumentation/__init__.py +13 -1
- agentops/instrumentation/agentic/ag2/__init__.py +18 -0
- agentops/instrumentation/agentic/ag2/instrumentor.py +922 -0
- agentops/instrumentation/agentic/agno/__init__.py +19 -0
- agentops/instrumentation/agentic/agno/attributes/__init__.py +20 -0
- agentops/instrumentation/agentic/agno/attributes/agent.py +250 -0
- agentops/instrumentation/agentic/agno/attributes/metrics.py +214 -0
- agentops/instrumentation/agentic/agno/attributes/storage.py +158 -0
- agentops/instrumentation/agentic/agno/attributes/team.py +195 -0
- agentops/instrumentation/agentic/agno/attributes/tool.py +210 -0
- agentops/instrumentation/agentic/agno/attributes/workflow.py +254 -0
- agentops/instrumentation/agentic/agno/instrumentor.py +1313 -0
- agentops/instrumentation/agentic/crewai/LICENSE +201 -0
- agentops/instrumentation/agentic/crewai/NOTICE.md +10 -0
- agentops/instrumentation/agentic/crewai/__init__.py +6 -0
- agentops/instrumentation/agentic/crewai/crewai_span_attributes.py +335 -0
- agentops/instrumentation/agentic/crewai/instrumentation.py +535 -0
- agentops/instrumentation/agentic/crewai/version.py +1 -0
- agentops/instrumentation/agentic/google_adk/__init__.py +19 -0
- agentops/instrumentation/agentic/google_adk/instrumentor.py +68 -0
- agentops/instrumentation/agentic/google_adk/patch.py +767 -0
- agentops/instrumentation/agentic/haystack/__init__.py +1 -0
- agentops/instrumentation/agentic/haystack/instrumentor.py +186 -0
- agentops/instrumentation/agentic/langgraph/__init__.py +3 -0
- agentops/instrumentation/agentic/langgraph/attributes.py +54 -0
- agentops/instrumentation/agentic/langgraph/instrumentation.py +598 -0
- agentops/instrumentation/agentic/langgraph/version.py +1 -0
- agentops/instrumentation/agentic/openai_agents/README.md +156 -0
- agentops/instrumentation/agentic/openai_agents/SPANS.md +145 -0
- agentops/instrumentation/agentic/openai_agents/TRACING_API.md +144 -0
- agentops/instrumentation/agentic/openai_agents/__init__.py +30 -0
- agentops/instrumentation/agentic/openai_agents/attributes/common.py +549 -0
- agentops/instrumentation/agentic/openai_agents/attributes/completion.py +172 -0
- agentops/instrumentation/agentic/openai_agents/attributes/model.py +58 -0
- agentops/instrumentation/agentic/openai_agents/attributes/tokens.py +275 -0
- agentops/instrumentation/agentic/openai_agents/exporter.py +469 -0
- agentops/instrumentation/agentic/openai_agents/instrumentor.py +107 -0
- agentops/instrumentation/agentic/openai_agents/processor.py +58 -0
- agentops/instrumentation/agentic/smolagents/README.md +88 -0
- agentops/instrumentation/agentic/smolagents/__init__.py +12 -0
- agentops/instrumentation/agentic/smolagents/attributes/agent.py +354 -0
- agentops/instrumentation/agentic/smolagents/attributes/model.py +205 -0
- agentops/instrumentation/agentic/smolagents/instrumentor.py +286 -0
- agentops/instrumentation/agentic/smolagents/stream_wrapper.py +258 -0
- agentops/instrumentation/agentic/xpander/__init__.py +15 -0
- agentops/instrumentation/agentic/xpander/context.py +112 -0
- agentops/instrumentation/agentic/xpander/instrumentor.py +877 -0
- agentops/instrumentation/agentic/xpander/trace_probe.py +86 -0
- agentops/instrumentation/agentic/xpander/version.py +3 -0
- agentops/instrumentation/common/README.md +65 -0
- agentops/instrumentation/common/attributes.py +1 -2
- agentops/instrumentation/providers/anthropic/__init__.py +24 -0
- agentops/instrumentation/providers/anthropic/attributes/__init__.py +23 -0
- agentops/instrumentation/providers/anthropic/attributes/common.py +64 -0
- agentops/instrumentation/providers/anthropic/attributes/message.py +541 -0
- agentops/instrumentation/providers/anthropic/attributes/tools.py +231 -0
- agentops/instrumentation/providers/anthropic/event_handler_wrapper.py +90 -0
- agentops/instrumentation/providers/anthropic/instrumentor.py +146 -0
- agentops/instrumentation/providers/anthropic/stream_wrapper.py +436 -0
- agentops/instrumentation/providers/google_genai/README.md +33 -0
- agentops/instrumentation/providers/google_genai/__init__.py +24 -0
- agentops/instrumentation/providers/google_genai/attributes/__init__.py +25 -0
- agentops/instrumentation/providers/google_genai/attributes/chat.py +125 -0
- agentops/instrumentation/providers/google_genai/attributes/common.py +88 -0
- agentops/instrumentation/providers/google_genai/attributes/model.py +284 -0
- agentops/instrumentation/providers/google_genai/instrumentor.py +170 -0
- agentops/instrumentation/providers/google_genai/stream_wrapper.py +238 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/__init__.py +28 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/attributes/__init__.py +27 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/attributes/attributes.py +277 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/attributes/common.py +104 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/instrumentor.py +162 -0
- agentops/instrumentation/providers/ibm_watsonx_ai/stream_wrapper.py +302 -0
- agentops/instrumentation/providers/mem0/__init__.py +45 -0
- agentops/instrumentation/providers/mem0/common.py +377 -0
- agentops/instrumentation/providers/mem0/instrumentor.py +270 -0
- agentops/instrumentation/providers/mem0/memory.py +430 -0
- agentops/instrumentation/providers/openai/__init__.py +21 -0
- agentops/instrumentation/providers/openai/attributes/__init__.py +7 -0
- agentops/instrumentation/providers/openai/attributes/common.py +55 -0
- agentops/instrumentation/providers/openai/attributes/response.py +607 -0
- agentops/instrumentation/providers/openai/config.py +36 -0
- agentops/instrumentation/providers/openai/instrumentor.py +312 -0
- agentops/instrumentation/providers/openai/stream_wrapper.py +941 -0
- agentops/instrumentation/providers/openai/utils.py +44 -0
- agentops/instrumentation/providers/openai/v0.py +176 -0
- agentops/instrumentation/providers/openai/v0_wrappers.py +483 -0
- agentops/instrumentation/providers/openai/wrappers/__init__.py +30 -0
- agentops/instrumentation/providers/openai/wrappers/assistant.py +277 -0
- agentops/instrumentation/providers/openai/wrappers/chat.py +259 -0
- agentops/instrumentation/providers/openai/wrappers/completion.py +109 -0
- agentops/instrumentation/providers/openai/wrappers/embeddings.py +94 -0
- agentops/instrumentation/providers/openai/wrappers/image_gen.py +75 -0
- agentops/instrumentation/providers/openai/wrappers/responses.py +191 -0
- agentops/instrumentation/providers/openai/wrappers/shared.py +81 -0
- agentops/instrumentation/utilities/concurrent_futures/__init__.py +10 -0
- agentops/instrumentation/utilities/concurrent_futures/instrumentation.py +206 -0
- agentops/integration/callbacks/dspy/__init__.py +11 -0
- agentops/integration/callbacks/dspy/callback.py +471 -0
- agentops/integration/callbacks/langchain/README.md +59 -0
- agentops/integration/callbacks/langchain/__init__.py +15 -0
- agentops/integration/callbacks/langchain/callback.py +791 -0
- agentops/integration/callbacks/langchain/utils.py +54 -0
- agentops/legacy/crewai.md +121 -0
- agentops/logging/instrument_logging.py +4 -0
- agentops/sdk/README.md +220 -0
- agentops/sdk/core.py +75 -32
- agentops/sdk/descriptors/classproperty.py +28 -0
- agentops/sdk/exporters.py +152 -33
- agentops/semconv/README.md +125 -0
- agentops/semconv/span_kinds.py +0 -2
- agentops/validation.py +102 -63
- {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.22.dist-info}/METADATA +30 -40
- mseep_agentops-0.4.22.dist-info/RECORD +178 -0
- {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.22.dist-info}/WHEEL +1 -2
- mseep_agentops-0.4.18.dist-info/RECORD +0 -94
- mseep_agentops-0.4.18.dist-info/top_level.txt +0 -2
- tests/conftest.py +0 -10
- tests/unit/client/__init__.py +0 -1
- tests/unit/client/test_http_adapter.py +0 -221
- tests/unit/client/test_http_client.py +0 -206
- tests/unit/conftest.py +0 -54
- tests/unit/sdk/__init__.py +0 -1
- tests/unit/sdk/instrumentation_tester.py +0 -207
- tests/unit/sdk/test_attributes.py +0 -392
- tests/unit/sdk/test_concurrent_instrumentation.py +0 -468
- tests/unit/sdk/test_decorators.py +0 -763
- tests/unit/sdk/test_exporters.py +0 -241
- tests/unit/sdk/test_factory.py +0 -1188
- tests/unit/sdk/test_internal_span_processor.py +0 -397
- tests/unit/sdk/test_resource_attributes.py +0 -35
- tests/unit/test_config.py +0 -82
- tests/unit/test_context_manager.py +0 -777
- tests/unit/test_events.py +0 -27
- tests/unit/test_host_env.py +0 -54
- tests/unit/test_init_py.py +0 -501
- tests/unit/test_serialization.py +0 -433
- tests/unit/test_session.py +0 -676
- tests/unit/test_user_agent.py +0 -34
- tests/unit/test_validation.py +0 -405
- {tests → agentops/instrumentation/agentic/openai_agents/attributes}/__init__.py +0 -0
- /tests/unit/__init__.py → /agentops/instrumentation/providers/openai/attributes/tools.py +0 -0
- {mseep_agentops-0.4.18.dist-info → mseep_agentops-0.4.22.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,598 @@
|
|
1
|
+
from functools import wraps
|
2
|
+
from typing import Any, Callable, Collection, Dict, Optional, Tuple
|
3
|
+
import json
|
4
|
+
import inspect
|
5
|
+
|
6
|
+
from opentelemetry import trace
|
7
|
+
from opentelemetry.trace import SpanKind, Status, StatusCode, get_tracer
|
8
|
+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
9
|
+
from opentelemetry.instrumentation.utils import unwrap
|
10
|
+
from wrapt import wrap_function_wrapper
|
11
|
+
|
12
|
+
from agentops.semconv import (
|
13
|
+
SpanAttributes,
|
14
|
+
WorkflowAttributes,
|
15
|
+
MessageAttributes,
|
16
|
+
)
|
17
|
+
from .attributes import (
|
18
|
+
ensure_no_none_values,
|
19
|
+
set_graph_attributes,
|
20
|
+
extract_messages_from_input,
|
21
|
+
get_message_content,
|
22
|
+
get_message_role,
|
23
|
+
)
|
24
|
+
|
25
|
+
import sys
|
26
|
+
|
27
|
+
# HACK: if typing_extensions is absent, inject a MagicMock so later
# "import typing_extensions" statements elsewhere do not fail at import time.
# A MagicMock returns a new mock for *every* attribute access, so any library
# that genuinely relies on typing_extensions features (TypedDict, Protocol,
# overload, ...) will silently misbehave instead of failing loudly.
# NOTE(review): consider declaring typing_extensions as a real dependency
# instead of mocking it — TODO confirm why this workaround was added.
if "typing_extensions" not in sys.modules:
    from unittest import mock

    sys.modules["typing_extensions"] = mock.MagicMock()
|
31
|
+
|
32
|
+
|
33
|
+
class LanggraphInstrumentor(BaseInstrumentor):
|
34
|
+
def __init__(self, config: Optional[Dict[str, Any]] = None):
    """Create the instrumentor.

    Args:
        config: optional configuration mapping; a fresh empty dict is
            substituted when the argument is omitted or falsy.
    """
    super().__init__()
    # Tracer is populated lazily by _instrument(); until then every wrapper
    # falls through to the original callable.
    self._tracer = None
    self.config = config or {}
|
38
|
+
|
39
|
+
def instrumentation_dependencies(self) -> Collection[str]:
    """Return the pip requirement strings this instrumentor needs installed."""
    required = ["langgraph >= 0.0.1"]
    return required
|
41
|
+
|
42
|
+
def _instrument(self, **kwargs):
    """Install wrappers around LangGraph's public entry points.

    Reads an optional ``tracer_provider`` from kwargs, creates the module
    tracer, initializes the per-execution context variable, and patches
    StateGraph construction/compilation/node registration plus Pregel's
    invoke/stream with the corresponding ``_wrap_*`` methods.
    """
    tracer_provider = kwargs.get("tracer_provider")
    self._tracer = get_tracer("agentops.instrumentation.agentic.langgraph", "0.1.0", tracer_provider)

    # Initialize context variable for tracking graph executions.
    # It holds a mutable dict shared between the workflow wrappers and the
    # per-node tracking in _track_node_execution.
    import contextvars

    self._current_graph_execution = contextvars.ContextVar("current_graph_execution", default=None)

    # Graph construction and compilation (StateGraph lives in langgraph.graph.state).
    wrap_function_wrapper("langgraph.graph.state", "StateGraph.__init__", self._wrap_state_graph_init)

    wrap_function_wrapper("langgraph.graph.state", "StateGraph.compile", self._wrap_state_graph_compile)

    # Workflow execution entry points (compiled graphs run through Pregel).
    wrap_function_wrapper("langgraph.pregel", "Pregel.invoke", self._wrap_invoke)

    wrap_function_wrapper("langgraph.pregel", "Pregel.stream", self._wrap_stream)

    # Node registration, so individual node callables can be wrapped too.
    wrap_function_wrapper("langgraph.graph.state", "StateGraph.add_node", self._wrap_add_node)
|
60
|
+
|
61
|
+
def _uninstrument(self, **kwargs):
    """Remove every wrapper installed by ``_instrument``, restoring the originals."""
    # Same targets, same order as installation.
    targets = (
        ("langgraph.graph.state", "StateGraph.__init__"),
        ("langgraph.graph.state", "StateGraph.compile"),
        ("langgraph.pregel", "Pregel.invoke"),
        ("langgraph.pregel", "Pregel.stream"),
        ("langgraph.graph.state", "StateGraph.add_node"),
    )
    for module, attribute in targets:
        unwrap(module, attribute)
|
67
|
+
|
68
|
+
def _wrap_state_graph_init(self, wrapped: Callable, instance: Any, args: Tuple, kwargs: Dict) -> Any:
    """Wrapper for ``StateGraph.__init__``: records graph construction as a workflow span.

    Also tags the new StateGraph instance with bookkeeping attributes
    (``_langgraph_instrumented``, ``_langgraph_nodes``, ``_langgraph_edges``).
    """
    # Not instrumented yet (or already uninstrumented): pass straight through.
    if not self._tracer:
        return wrapped(*args, **kwargs)

    with self._tracer.start_as_current_span("langgraph.graph.initialize", kind=SpanKind.INTERNAL) as span:
        span.set_attributes(
            ensure_no_none_values(
                {
                    SpanAttributes.AGENTOPS_SPAN_KIND: "workflow",
                    WorkflowAttributes.WORKFLOW_TYPE: "graph_initialization",
                    SpanAttributes.AGENTOPS_ENTITY_NAME: "Graph Initialization",
                }
            )
        )

        try:
            result = wrapped(*args, **kwargs)

            # Mark the instance so later wrappers can recognize it; the node/edge
            # lists are initialized here but populated elsewhere (if at all).
            instance._langgraph_instrumented = True
            instance._langgraph_nodes = []
            instance._langgraph_edges = []

            return result
        except Exception as e:
            span.record_exception(e)
            span.set_status(Status(StatusCode.ERROR, str(e)))
            raise
|
95
|
+
|
96
|
+
def _wrap_state_graph_compile(self, wrapped: Callable, instance: Any, args: Tuple, kwargs: Dict) -> Any:
    """Wrapper for ``StateGraph.compile``: records compilation and the graph topology.

    After the original compile succeeds, best-effort extracts node names and
    ``source->target`` edge strings from the StateGraph instance and attaches
    them to the span via ``set_graph_attributes``.
    """
    if not self._tracer:
        return wrapped(*args, **kwargs)

    with self._tracer.start_as_current_span("langgraph.graph.compile", kind=SpanKind.INTERNAL) as span:
        span.set_attributes(
            ensure_no_none_values(
                {
                    SpanAttributes.AGENTOPS_SPAN_KIND: "workflow",
                    WorkflowAttributes.WORKFLOW_TYPE: "graph_compilation",
                    SpanAttributes.AGENTOPS_ENTITY_NAME: "Graph Compilation",
                    SpanAttributes.LLM_SYSTEM: "langgraph",
                }
            )
        )

        try:
            result = wrapped(*args, **kwargs)

            nodes = []
            edges = []

            # hasattr guards keep this resilient across LangGraph versions,
            # where the nodes/edges containers may differ in shape.
            if hasattr(instance, "nodes"):
                nodes = list(instance.nodes.keys()) if hasattr(instance.nodes, "keys") else []

            if hasattr(instance, "edges") and hasattr(instance.edges, "items"):
                for source, targets in instance.edges.items():
                    # Targets may be a mapping (conditional edges) or a plain list.
                    if isinstance(targets, dict):
                        for target in targets.values():
                            edges.append(f"{source}->{target}")
                    elif isinstance(targets, list):
                        for target in targets:
                            edges.append(f"{source}->{target}")

            set_graph_attributes(span, nodes, edges)

            return result
        except Exception as e:
            span.record_exception(e)
            span.set_status(Status(StatusCode.ERROR, str(e)))
            raise
|
137
|
+
|
138
|
+
def _wrap_invoke(self, wrapped: Callable, instance: Any, args: Tuple, kwargs: Dict) -> Any:
    """Wrapper for ``Pregel.invoke``: traces one synchronous workflow execution.

    Creates a ``langgraph.workflow.execute`` span, publishes a mutable
    execution-state dict through ``self._current_graph_execution`` (so node
    wrappers can record which nodes ran), captures up to three input prompts
    and the final non-tool response, and writes execution summary attributes
    before returning the original result.
    """
    if not self._tracer:
        return wrapped(*args, **kwargs)

    # Re-entrancy guard: if we are already inside a workflow span (e.g. stream
    # internally calling invoke), do not open a nested one.
    # NOTE(review): relies on the current span exposing ``.name`` — SDK spans
    # do, but non-recording API spans may not; confirm.
    current_span = trace.get_current_span()
    if current_span and current_span.name == "langgraph.workflow.execute":
        return wrapped(*args, **kwargs)

    with self._tracer.start_as_current_span("langgraph.workflow.execute", kind=SpanKind.INTERNAL) as span:
        span.set_attributes(
            ensure_no_none_values(
                {
                    SpanAttributes.AGENTOPS_SPAN_KIND: "workflow",
                    WorkflowAttributes.WORKFLOW_TYPE: "langgraph_invoke",
                    SpanAttributes.AGENTOPS_ENTITY_NAME: "Workflow Execution",
                    SpanAttributes.LLM_REQUEST_STREAMING: False,
                    "langgraph.execution.mode": "invoke",
                }
            )
        )

        execution_state = {"executed_nodes": [], "message_count": 0, "final_response": None}

        # Set the current execution state in context so _track_node_execution
        # (called from wrapped node functions) can append to executed_nodes.
        token = self._current_graph_execution.set(execution_state)

        try:
            # Positional first argument takes precedence over the keyword form.
            input_data = args[0] if args else kwargs.get("input", {})
            messages = extract_messages_from_input(input_data)
            if messages:
                execution_state["message_count"] = len(messages)
                # Record at most the first three prompts, truncated to 500 chars.
                for i, msg in enumerate(messages[:3]):
                    content = get_message_content(msg)
                    role = get_message_role(msg)
                    if content:
                        span.set_attribute(f"gen_ai.prompt.{i}.content", content[:500])
                        span.set_attribute(f"gen_ai.prompt.{i}.role", role)

            result = wrapped(*args, **kwargs)

            # Extract execution information from result
            if isinstance(result, dict):
                # Check for messages in result
                if "messages" in result:
                    output_messages = result["messages"]
                    if isinstance(output_messages, list):
                        # Count all messages in the result (overwrites the input
                        # count computed above).
                        total_messages = len([msg for msg in output_messages if hasattr(msg, "content")])
                        execution_state["message_count"] = total_messages

                        if output_messages:
                            # Find the last non-tool message: tool results carry a
                            # tool_call_id attribute and are skipped.
                            for msg in reversed(output_messages):
                                if hasattr(msg, "content") and not hasattr(msg, "tool_call_id"):
                                    content = get_message_content(msg)
                                    if content:
                                        execution_state["final_response"] = content
                                        span.set_attribute("gen_ai.response.0.content", content[:500])
                                        break

            # Capture final execution state before returning
            final_executed_nodes = list(execution_state["executed_nodes"])  # Copy the list
            final_node_count = len(final_executed_nodes)
            final_message_count = execution_state["message_count"]
            final_response = execution_state["final_response"]

            span.set_status(Status(StatusCode.OK))

            span.set_attributes(
                ensure_no_none_values(
                    {
                        "langgraph.graph.executed_nodes": json.dumps(final_executed_nodes),
                        "langgraph.graph.node_execution_count": final_node_count,
                        "langgraph.graph.message_count": final_message_count,
                        "langgraph.graph.final_response": final_response,
                        "langgraph.graph.status": "success",
                    }
                )
            )

            return result
        except Exception as e:
            span.record_exception(e)
            span.set_status(Status(StatusCode.ERROR, str(e)))
            raise
        finally:
            # Reset the context so a later execution starts with fresh state.
            self._current_graph_execution.reset(token)
|
226
|
+
|
227
|
+
def _wrap_stream(self, wrapped: Callable, instance: Any, args: Tuple, kwargs: Dict) -> Any:
    """Wrapper for ``Pregel.stream``: traces a streamed workflow execution.

    Because the work happens lazily as the caller consumes the stream, the
    span is started manually (not as a context manager) and handed to a
    generator closure that ends it only after the stream is exhausted or
    fails. Executed nodes, message counts, chunk counts and the last message
    content are accumulated in ``execution_state`` as chunks flow through.
    """
    if not self._tracer:
        return wrapped(*args, **kwargs)

    # Re-entrancy guard — same as _wrap_invoke.
    # NOTE(review): relies on the current span exposing ``.name``; confirm
    # this holds for non-recording spans.
    current_span = trace.get_current_span()
    if current_span and current_span.name == "langgraph.workflow.execute":
        return wrapped(*args, **kwargs)

    # Manual span lifecycle: ended inside stream_wrapper's finally (or in the
    # outer except if setup fails before the generator is returned).
    span = self._tracer.start_span("langgraph.workflow.execute", kind=SpanKind.INTERNAL)
    span.set_attributes(
        ensure_no_none_values(
            {
                SpanAttributes.AGENTOPS_SPAN_KIND: "workflow",
                WorkflowAttributes.WORKFLOW_TYPE: "langgraph_stream",
                SpanAttributes.AGENTOPS_ENTITY_NAME: "Workflow Stream",
                SpanAttributes.LLM_REQUEST_STREAMING: True,
                "langgraph.execution.mode": "stream",
            }
        )
    )

    execution_state = {"executed_nodes": [], "message_count": 0, "chunk_count": 0, "final_response": None}

    # Set the current execution state in context
    token = self._current_graph_execution.set(execution_state)

    try:
        # Extract input messages (positional arg wins over the keyword form).
        input_data = args[0] if args else kwargs.get("input", {})
        messages = extract_messages_from_input(input_data)
        if messages:
            execution_state["message_count"] = len(messages)
            for i, msg in enumerate(messages[:3]):
                content = get_message_content(msg)
                role = get_message_role(msg)
                if content:
                    span.set_attribute(f"gen_ai.prompt.{i}.content", content[:500])
                    span.set_attribute(f"gen_ai.prompt.{i}.role", role)

        stream_gen = wrapped(*args, **kwargs)

        def stream_wrapper():
            # Generator closure: forwards each chunk to the caller while
            # accumulating stats into execution_state, then finalizes the span.
            try:
                for chunk in stream_gen:
                    execution_state["chunk_count"] += 1

                    if isinstance(chunk, dict):
                        # Debug: print chunk structure
                        # print(f"DEBUG: Chunk keys: {list(chunk.keys())}")

                        for key in chunk:
                            # Track node executions (excluding special keys)
                            if (
                                key not in ["__start__", "__end__", "__interrupt__"]
                                and key not in execution_state["executed_nodes"]
                            ):
                                execution_state["executed_nodes"].append(key)

                            # Track messages in the chunk value
                            chunk_value = chunk[key]
                            if isinstance(chunk_value, dict):
                                # Check for messages in the chunk value
                                if "messages" in chunk_value:
                                    msg_list = chunk_value["messages"]
                                    if isinstance(msg_list, list):
                                        execution_state["message_count"] += len(msg_list)
                                        # Last message with content wins as final_response.
                                        for msg in msg_list:
                                            content = get_message_content(msg)
                                            if content:
                                                execution_state["final_response"] = content
                            elif key == "messages" and isinstance(chunk_value, list):
                                # Sometimes messages might be directly in the chunk
                                execution_state["message_count"] += len(chunk_value)
                                for msg in chunk_value:
                                    content = get_message_content(msg)
                                    if content:
                                        execution_state["final_response"] = content

                    yield chunk

                # Capture final execution state before ending
                final_executed_nodes = list(execution_state["executed_nodes"])
                final_node_count = len(final_executed_nodes)
                final_message_count = execution_state["message_count"]
                final_chunk_count = execution_state["chunk_count"]
                final_response = execution_state["final_response"]

                span.set_status(Status(StatusCode.OK))

                span.set_attributes(
                    ensure_no_none_values(
                        {
                            "langgraph.graph.executed_nodes": json.dumps(final_executed_nodes),
                            "langgraph.graph.node_execution_count": final_node_count,
                            "langgraph.graph.message_count": final_message_count,
                            "langgraph.graph.total_chunks": final_chunk_count,
                            "langgraph.graph.final_response": final_response,
                            "langgraph.graph.status": "success",
                        }
                    )
                )

            except Exception as e:
                span.record_exception(e)
                span.set_status(Status(StatusCode.ERROR, str(e)))
                raise
            finally:
                # Always end the span, even on GeneratorExit / early close.
                span.end()

        return stream_wrapper()

    except Exception as e:
        # Setup failed before the generator was handed out: close the span here.
        span.record_exception(e)
        span.set_status(Status(StatusCode.ERROR, str(e)))
        span.end()
        raise
    finally:
        # Reset the context.
        # NOTE(review): this runs as soon as stream_wrapper() is *returned*,
        # before the caller consumes any chunks — so the contextvar no longer
        # points at execution_state during actual streaming. The closure still
        # updates execution_state directly, but _track_node_execution will see
        # None; confirm this is intentional.
        self._current_graph_execution.reset(token)
|
346
|
+
|
347
|
+
def _wrap_add_node(self, wrapped: Callable, instance: Any, args: Tuple, kwargs: Dict) -> Any:
    """Wrapper for ``StateGraph.add_node``: substitutes an instrumented node callable.

    Extracts the node name (``key``) and callable (``action``) from the
    positional or keyword form of the call, wraps the callable (sync or
    async) so that execution is tracked and LLM-looking nodes get their own
    CLIENT span, then forwards to the original ``add_node``.
    """
    if not self._tracer:
        return wrapped(*args, **kwargs)

    # Get node name and function; add_node may be called positionally
    # (key, action) or with keywords.
    if args:
        key = args[0]
        action = args[1] if len(args) > 1 else kwargs.get("action")
    else:
        key = kwargs.get("key")
        action = kwargs.get("action")

    # No callable to wrap (e.g. unexpected call shape): pass through untouched.
    if not action:
        return wrapped(*args, **kwargs)

    # Create wrapped node function that instruments LLM calls.
    # Sync and async variants are intentionally parallel: track execution,
    # then either open an LLM span (if the heuristic matches) or just call
    # the original function.
    def create_wrapped_node(original_func):
        if inspect.iscoroutinefunction(original_func):

            @wraps(original_func)
            async def wrapped_node_async(state):
                # Track node execution in parent graph span
                self._track_node_execution(key)

                # Check if this node contains an LLM call
                is_llm_node = self._detect_llm_node(original_func)

                if is_llm_node:
                    with self._tracer.start_as_current_span("langgraph.node.execute", kind=SpanKind.CLIENT) as span:
                        span.set_attributes(
                            ensure_no_none_values(
                                {
                                    SpanAttributes.AGENTOPS_SPAN_KIND: "llm",
                                    SpanAttributes.AGENTOPS_ENTITY_NAME: f"Node: {key}",
                                    SpanAttributes.LLM_SYSTEM: "langgraph",
                                    "langgraph.node.name": key,
                                }
                            )
                        )

                        try:
                            # Call the original function
                            result = await original_func(state)

                            # Extract LLM information from the result
                            self._extract_llm_info_from_result(span, state, result)

                            span.set_status(Status(StatusCode.OK))
                            return result
                        except Exception as e:
                            span.record_exception(e)
                            span.set_status(Status(StatusCode.ERROR, str(e)))
                            raise
                else:
                    # Non-LLM node, just execute normally
                    return await original_func(state)
        else:

            @wraps(original_func)
            def wrapped_node_sync(state):
                # Track node execution in parent graph span
                self._track_node_execution(key)

                # Check if this node contains an LLM call
                is_llm_node = self._detect_llm_node(original_func)

                if is_llm_node:
                    with self._tracer.start_as_current_span("langgraph.node.execute", kind=SpanKind.CLIENT) as span:
                        # NOTE: unlike the async variant, this attribute set
                        # omits "langgraph.node.name".
                        span.set_attributes(
                            ensure_no_none_values(
                                {
                                    SpanAttributes.AGENTOPS_SPAN_KIND: "llm",
                                    SpanAttributes.AGENTOPS_ENTITY_NAME: f"Node: {key}",
                                    SpanAttributes.LLM_SYSTEM: "langgraph",
                                }
                            )
                        )

                        try:
                            # Call the original function
                            result = original_func(state)

                            # Extract LLM information from the result
                            self._extract_llm_info_from_result(span, state, result)

                            span.set_status(Status(StatusCode.OK))
                            return result
                        except Exception as e:
                            span.record_exception(e)
                            span.set_status(Status(StatusCode.ERROR, str(e)))
                            raise
                else:
                    # Non-LLM node, just execute normally
                    return original_func(state)

            # Sync path returns here; the line below is only reached on the
            # async path (where the conditional always picks wrapped_node_async).
            return wrapped_node_sync

        return wrapped_node_async if inspect.iscoroutinefunction(original_func) else wrapped_node_sync

    # Wrap the action function
    wrapped_action = create_wrapped_node(action)

    # Call the original add_node with the wrapped action, preserving the
    # positional/keyword shape of the original call.
    if args and len(args) > 1:
        new_args = (args[0], wrapped_action) + args[2:]
        return wrapped(*new_args, **kwargs)
    else:
        kwargs["action"] = wrapped_action
        return wrapped(*args, **kwargs)
|
456
|
+
|
457
|
+
def _track_node_execution(self, node_name: str) -> None:
|
458
|
+
"""Track node execution in the active graph span."""
|
459
|
+
# Use context variable to track the current execution
|
460
|
+
if hasattr(self, "_current_graph_execution"):
|
461
|
+
execution_state = self._current_graph_execution.get()
|
462
|
+
if execution_state and node_name not in execution_state["executed_nodes"]:
|
463
|
+
execution_state["executed_nodes"].append(node_name)
|
464
|
+
|
465
|
+
def _detect_llm_node(self, func: Callable) -> bool:
|
466
|
+
"""Detect if a node function contains LLM calls."""
|
467
|
+
try:
|
468
|
+
# Get the source code of the function
|
469
|
+
source = inspect.getsource(func)
|
470
|
+
|
471
|
+
# Check for common LLM patterns
|
472
|
+
llm_patterns = [
|
473
|
+
"ChatOpenAI",
|
474
|
+
"ChatAnthropic",
|
475
|
+
"ChatGoogleGenerativeAI",
|
476
|
+
".invoke(",
|
477
|
+
".ainvoke(",
|
478
|
+
".stream(",
|
479
|
+
".astream(",
|
480
|
+
"llm.",
|
481
|
+
"model.",
|
482
|
+
"chat.",
|
483
|
+
]
|
484
|
+
|
485
|
+
for pattern in llm_patterns:
|
486
|
+
if pattern in source:
|
487
|
+
return True
|
488
|
+
|
489
|
+
# Check if function has 'llm' or 'model' in its local variables
|
490
|
+
if hasattr(func, "__code__"):
|
491
|
+
local_vars = func.__code__.co_varnames
|
492
|
+
if any(var in ["llm", "model", "chat"] for var in local_vars):
|
493
|
+
return True
|
494
|
+
|
495
|
+
except Exception:
|
496
|
+
# If we can't inspect the source, assume it might be an LLM node
|
497
|
+
pass
|
498
|
+
|
499
|
+
return False
|
500
|
+
|
501
|
+
def _extract_llm_info_from_result(self, span: Any, state: Dict, result: Any) -> None:
    """Best-effort extraction of prompt/completion/model/token attributes onto *span*.

    Reads up to five trailing messages from ``state["messages"]`` as prompt
    context, then mines the last message of ``result["messages"]`` for model
    name, token usage, finish reason, content, tool calls and response id.
    Swallows every exception — attribute extraction must never fail the span.
    """
    try:
        # Extract messages from state
        if isinstance(state, dict) and "messages" in state:
            messages = state["messages"]
            # Set input messages (last 5 as context, truncated to 1000 chars).
            for i, msg in enumerate(messages[-5:]):
                if hasattr(msg, "content"):
                    span.set_attribute(MessageAttributes.PROMPT_CONTENT.format(i=i), str(msg.content)[:1000])
                # LangChain messages may expose either .role or .type.
                if hasattr(msg, "role"):
                    span.set_attribute(MessageAttributes.PROMPT_ROLE.format(i=i), msg.role)
                elif hasattr(msg, "type"):
                    span.set_attribute(MessageAttributes.PROMPT_ROLE.format(i=i), msg.type)

        # Extract messages from result
        if isinstance(result, dict) and "messages" in result:
            output_messages = result["messages"]
            if output_messages:
                last_msg = output_messages[-1] if isinstance(output_messages, list) else output_messages

                # Extract model information from message if available
                # (LangChain puts provider metadata in response_metadata).
                if hasattr(last_msg, "response_metadata"):
                    metadata = last_msg.response_metadata
                    if isinstance(metadata, dict):
                        if "model_name" in metadata:
                            span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, metadata["model_name"])
                            span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, metadata["model_name"])
                        elif "model" in metadata:
                            span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, metadata["model"])
                            span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, metadata["model"])

                        # Token usage
                        if "token_usage" in metadata:
                            usage = metadata["token_usage"]
                            if isinstance(usage, dict):
                                if "prompt_tokens" in usage:
                                    span.set_attribute(
                                        SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage["prompt_tokens"]
                                    )
                                if "completion_tokens" in usage:
                                    span.set_attribute(
                                        SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, usage["completion_tokens"]
                                    )
                                if "total_tokens" in usage:
                                    span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage["total_tokens"])

                        # Response ID
                        if "id" in metadata and metadata["id"] is not None:
                            span.set_attribute(SpanAttributes.LLM_RESPONSE_ID, metadata["id"])

                        # Finish reason
                        if "finish_reason" in metadata:
                            span.set_attribute(
                                MessageAttributes.COMPLETION_FINISH_REASON.format(i=0), metadata["finish_reason"]
                            )

                # Content
                if hasattr(last_msg, "content"):
                    span.set_attribute(
                        MessageAttributes.COMPLETION_CONTENT.format(i=0), str(last_msg.content)[:1000]
                    )
                if hasattr(last_msg, "role"):
                    span.set_attribute(MessageAttributes.COMPLETION_ROLE.format(i=0), last_msg.role)

                # Check for tool calls (at most 5 recorded).
                if hasattr(last_msg, "tool_calls") and last_msg.tool_calls:
                    for j, tool_call in enumerate(last_msg.tool_calls[:5]):
                        if hasattr(tool_call, "name"):
                            span.set_attribute(
                                MessageAttributes.COMPLETION_TOOL_CALL_NAME.format(i=0, j=j), tool_call.name
                            )
                        if hasattr(tool_call, "args"):
                            span.set_attribute(
                                MessageAttributes.COMPLETION_TOOL_CALL_ARGUMENTS.format(i=0, j=j),
                                json.dumps(tool_call.args)[:500],
                            )
                        if hasattr(tool_call, "id"):
                            span.set_attribute(
                                MessageAttributes.COMPLETION_TOOL_CALL_ID.format(i=0, j=j), tool_call.id
                            )

                # Additional attributes from message (may overwrite the
                # response id taken from response_metadata above).
                if hasattr(last_msg, "id") and last_msg.id is not None:
                    span.set_attribute(SpanAttributes.LLM_RESPONSE_ID, last_msg.id)

                # Usage information might be on the message itself
                # (usage_metadata, attribute-style access).
                if hasattr(last_msg, "usage_metadata"):
                    usage = last_msg.usage_metadata
                    if hasattr(usage, "input_tokens"):
                        span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage.input_tokens)
                    if hasattr(usage, "output_tokens"):
                        span.set_attribute(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, usage.output_tokens)
                    if hasattr(usage, "total_tokens"):
                        span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.total_tokens)
    except Exception:
        # Don't fail the span if we can't extract info
        pass
|
# Version of the langgraph instrumentation sub-package.
__version__ = "0.1.0"
|