genai-otel-instrument 0.1.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. genai_otel/__init__.py +132 -0
  2. genai_otel/__version__.py +34 -0
  3. genai_otel/auto_instrument.py +602 -0
  4. genai_otel/cli.py +92 -0
  5. genai_otel/config.py +333 -0
  6. genai_otel/cost_calculator.py +467 -0
  7. genai_otel/cost_enriching_exporter.py +207 -0
  8. genai_otel/cost_enrichment_processor.py +174 -0
  9. genai_otel/evaluation/__init__.py +76 -0
  10. genai_otel/evaluation/bias_detector.py +364 -0
  11. genai_otel/evaluation/config.py +261 -0
  12. genai_otel/evaluation/hallucination_detector.py +525 -0
  13. genai_otel/evaluation/pii_detector.py +356 -0
  14. genai_otel/evaluation/prompt_injection_detector.py +262 -0
  15. genai_otel/evaluation/restricted_topics_detector.py +316 -0
  16. genai_otel/evaluation/span_processor.py +962 -0
  17. genai_otel/evaluation/toxicity_detector.py +406 -0
  18. genai_otel/exceptions.py +17 -0
  19. genai_otel/gpu_metrics.py +516 -0
  20. genai_otel/instrumentors/__init__.py +71 -0
  21. genai_otel/instrumentors/anthropic_instrumentor.py +134 -0
  22. genai_otel/instrumentors/anyscale_instrumentor.py +27 -0
  23. genai_otel/instrumentors/autogen_instrumentor.py +394 -0
  24. genai_otel/instrumentors/aws_bedrock_instrumentor.py +94 -0
  25. genai_otel/instrumentors/azure_openai_instrumentor.py +69 -0
  26. genai_otel/instrumentors/base.py +919 -0
  27. genai_otel/instrumentors/bedrock_agents_instrumentor.py +398 -0
  28. genai_otel/instrumentors/cohere_instrumentor.py +140 -0
  29. genai_otel/instrumentors/crewai_instrumentor.py +311 -0
  30. genai_otel/instrumentors/dspy_instrumentor.py +661 -0
  31. genai_otel/instrumentors/google_ai_instrumentor.py +310 -0
  32. genai_otel/instrumentors/groq_instrumentor.py +106 -0
  33. genai_otel/instrumentors/guardrails_ai_instrumentor.py +510 -0
  34. genai_otel/instrumentors/haystack_instrumentor.py +503 -0
  35. genai_otel/instrumentors/huggingface_instrumentor.py +399 -0
  36. genai_otel/instrumentors/hyperbolic_instrumentor.py +236 -0
  37. genai_otel/instrumentors/instructor_instrumentor.py +425 -0
  38. genai_otel/instrumentors/langchain_instrumentor.py +340 -0
  39. genai_otel/instrumentors/langgraph_instrumentor.py +328 -0
  40. genai_otel/instrumentors/llamaindex_instrumentor.py +36 -0
  41. genai_otel/instrumentors/mistralai_instrumentor.py +315 -0
  42. genai_otel/instrumentors/ollama_instrumentor.py +197 -0
  43. genai_otel/instrumentors/ollama_server_metrics_poller.py +336 -0
  44. genai_otel/instrumentors/openai_agents_instrumentor.py +291 -0
  45. genai_otel/instrumentors/openai_instrumentor.py +260 -0
  46. genai_otel/instrumentors/pydantic_ai_instrumentor.py +362 -0
  47. genai_otel/instrumentors/replicate_instrumentor.py +87 -0
  48. genai_otel/instrumentors/sambanova_instrumentor.py +196 -0
  49. genai_otel/instrumentors/togetherai_instrumentor.py +146 -0
  50. genai_otel/instrumentors/vertexai_instrumentor.py +106 -0
  51. genai_otel/llm_pricing.json +1676 -0
  52. genai_otel/logging_config.py +45 -0
  53. genai_otel/mcp_instrumentors/__init__.py +14 -0
  54. genai_otel/mcp_instrumentors/api_instrumentor.py +144 -0
  55. genai_otel/mcp_instrumentors/base.py +105 -0
  56. genai_otel/mcp_instrumentors/database_instrumentor.py +336 -0
  57. genai_otel/mcp_instrumentors/kafka_instrumentor.py +31 -0
  58. genai_otel/mcp_instrumentors/manager.py +139 -0
  59. genai_otel/mcp_instrumentors/redis_instrumentor.py +31 -0
  60. genai_otel/mcp_instrumentors/vector_db_instrumentor.py +265 -0
  61. genai_otel/metrics.py +148 -0
  62. genai_otel/py.typed +2 -0
  63. genai_otel/server_metrics.py +197 -0
  64. genai_otel_instrument-0.1.24.dist-info/METADATA +1404 -0
  65. genai_otel_instrument-0.1.24.dist-info/RECORD +69 -0
  66. genai_otel_instrument-0.1.24.dist-info/WHEEL +5 -0
  67. genai_otel_instrument-0.1.24.dist-info/entry_points.txt +2 -0
  68. genai_otel_instrument-0.1.24.dist-info/licenses/LICENSE +680 -0
  69. genai_otel_instrument-0.1.24.dist-info/top_level.txt +1 -0
@@ -0,0 +1,340 @@
+ """OpenTelemetry instrumentor for the LangChain framework.
+
+ This instrumentor automatically traces various components within LangChain,
+ including chains, agents, and chat models, capturing relevant attributes for observability.
+ """
+
+ import functools
+ import logging
+ import time
+ from typing import Any, Dict, Optional
+
+ from ..config import OTelConfig
+ from .base import BaseInstrumentor
+
+ logger = logging.getLogger(__name__)
+
+
+ class LangChainInstrumentor(BaseInstrumentor):
+     """Instrumentor for LangChain"""
+
+     def __init__(self):
+         """Initialize the instrumentor."""
+         super().__init__()
+         self._langchain_available = False
+         self._langchain_core_available = False
+         self._check_availability()
+
+     def _check_availability(self):
+         """Check if langchain library is available."""
+         try:
+             import langchain
+
+             self._langchain_available = True
+             logger.debug("langchain library detected and available for instrumentation")
+         except ImportError:
+             logger.debug("langchain library not installed, instrumentation will be skipped")
+             self._langchain_available = False
+
+         # Check for langchain_core (required for chat model instrumentation)
+         try:
+             import langchain_core
+
+             self._langchain_core_available = True
+             logger.debug("langchain_core library detected and available for instrumentation")
+         except ImportError:
+             logger.debug(
+                 "langchain_core library not installed, chat model instrumentation will be skipped"
+             )
+             self._langchain_core_available = False
+
+     def instrument(self, config: OTelConfig):
+         """Instrument langchain components if available."""
+         if not self._langchain_available:
+             logger.debug("Skipping instrumentation - library not available")
+             return
+
+         self.config = config
+
+         # Instrument chains and agents
+         self._instrument_chains_and_agents()
+
+         # Instrument chat models if langchain_core is available
+         if self._langchain_core_available:
+             self._instrument_chat_models()
+
+     def _instrument_chains_and_agents(self):
+         """Instrument LangChain chains and agents."""
+         try:
+             from langchain.agents.agent import AgentExecutor
+             from langchain.chains.base import Chain
+
+             # Instrument Chains
+             original_call = Chain.__call__
+
+             @functools.wraps(original_call)
+             def wrapped_call(instance, *args, **kwargs):
+                 chain_type = instance.__class__.__name__
+                 with self.tracer.start_as_current_span(f"langchain.chain.{chain_type}") as span:
+                     span.set_attribute("langchain.chain.type", chain_type)
+                     result = original_call(instance, *args, **kwargs)
+                     return result
+
+             Chain.__call__ = wrapped_call
+
+             # Instrument Agents
+             original_agent_call = AgentExecutor.__call__
+
+             @functools.wraps(original_agent_call)
+             def wrapped_agent_call(instance, *args, **kwargs):
+                 with self.tracer.start_as_current_span("langchain.agent.execute") as span:
+                     # The agent attribute is an object, not a dict, so use getattr
+                     # rather than .get() (which would raise AttributeError here).
+                     agent = getattr(instance, "agent", None)
+                     agent_name = getattr(agent, "name", None) or "unknown"
+                     span.set_attribute("langchain.agent.name", agent_name)
+                     result = original_agent_call(instance, *args, **kwargs)
+                     return result
+
+             AgentExecutor.__call__ = wrapped_agent_call
+             logger.debug("Chains and agents instrumentation completed")
+
+         except ImportError:
+             logger.debug("Could not import chains or agents, skipping instrumentation")
+
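Both wrappers follow the same monkey-patching pattern: capture the original callable, substitute a closure that opens a span and delegates, then reassign it on the class. A minimal self-contained sketch of that pattern (the Greeter class and tracer name are illustrative, not part of this package):

import functools

from opentelemetry import trace

tracer = trace.get_tracer("patching-demo")


class Greeter:
    def greet(self, name: str) -> str:
        return f"hello {name}"


original_greet = Greeter.greet


@functools.wraps(original_greet)
def traced_greet(self, name: str) -> str:
    # Open a span around the original call, then delegate unchanged.
    with tracer.start_as_current_span("Greeter.greet") as span:
        span.set_attribute("greeter.name", name)
        return original_greet(self, name)


Greeter.greet = traced_greet  # every subsequent call is traced
print(Greeter().greet("otel"))  # hello otel

functools.wraps keeps the patched method's name and docstring intact, which matters when other tooling inspects the class later.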
+     def _instrument_chat_models(self):
+         """Instrument LangChain chat models."""
+         try:
+             from langchain_core.language_models.chat_models import BaseChatModel
+
+             # Instrument invoke method
+             original_invoke = BaseChatModel.invoke
+
+             @functools.wraps(original_invoke)
+             def wrapped_invoke(instance, *args, **kwargs):
+                 model_name = self._get_model_name(instance)
+                 with self.tracer.start_as_current_span("langchain.chat_model.invoke") as span:
+                     start_time = time.time()
+                     self._set_chat_attributes(span, instance, args, kwargs, model_name)
+
+                     result = original_invoke(instance, *args, **kwargs)
+
+                     # Use standard metrics recording from BaseInstrumentor
+                     # This will extract usage, calculate costs, and record metrics
+                     self._record_result_metrics(span, result, start_time, kwargs)
+
+                     return result
+
+             BaseChatModel.invoke = wrapped_invoke
+
+             # Instrument ainvoke (async invoke) method
+             original_ainvoke = BaseChatModel.ainvoke
+
+             @functools.wraps(original_ainvoke)
+             async def wrapped_ainvoke(instance, *args, **kwargs):
+                 model_name = self._get_model_name(instance)
+                 with self.tracer.start_as_current_span("langchain.chat_model.ainvoke") as span:
+                     start_time = time.time()
+                     self._set_chat_attributes(span, instance, args, kwargs, model_name)
+
+                     result = await original_ainvoke(instance, *args, **kwargs)
+
+                     # Use standard metrics recording from BaseInstrumentor
+                     # This will extract usage, calculate costs, and record metrics
+                     self._record_result_metrics(span, result, start_time, kwargs)
+
+                     return result
+
+             BaseChatModel.ainvoke = wrapped_ainvoke
+
+             # Instrument batch method
+             original_batch = BaseChatModel.batch
+
+             @functools.wraps(original_batch)
+             def wrapped_batch(instance, *args, **kwargs):
+                 model_name = self._get_model_name(instance)
+                 with self.tracer.start_as_current_span("langchain.chat_model.batch") as span:
+                     start_time = time.time()
+
+                     # Set standard GenAI attributes
+                     provider = self._extract_provider(instance)
+                     if provider:
+                         span.set_attribute("gen_ai.system", provider)
+                     else:
+                         span.set_attribute("gen_ai.system", "langchain")
+
+                     span.set_attribute("gen_ai.request.model", model_name)
+                     span.set_attribute("gen_ai.operation.name", "batch")
+
+                     # Also set LangChain-specific attributes
+                     span.set_attribute("langchain.chat_model.name", model_name)
+                     span.set_attribute("langchain.chat_model.operation", "batch")
+
+                     # Get batch size
+                     if args and len(args) > 0:
+                         batch_size = len(args[0]) if hasattr(args[0], "__len__") else 1
+                         span.set_attribute("langchain.chat_model.batch_size", batch_size)
+
+                     result = original_batch(instance, *args, **kwargs)
+
+                     # Record metrics (though batch results may not have usage info)
+                     self._record_result_metrics(span, result, start_time, kwargs)
+
+                     return result
+
+             BaseChatModel.batch = wrapped_batch
+
+             # Instrument abatch (async batch) method
+             original_abatch = BaseChatModel.abatch
+
+             @functools.wraps(original_abatch)
+             async def wrapped_abatch(instance, *args, **kwargs):
+                 model_name = self._get_model_name(instance)
+                 with self.tracer.start_as_current_span("langchain.chat_model.abatch") as span:
+                     start_time = time.time()
+
+                     # Set standard GenAI attributes
+                     provider = self._extract_provider(instance)
+                     if provider:
+                         span.set_attribute("gen_ai.system", provider)
+                     else:
+                         span.set_attribute("gen_ai.system", "langchain")
+
+                     span.set_attribute("gen_ai.request.model", model_name)
+                     span.set_attribute("gen_ai.operation.name", "batch")
+
+                     # Also set LangChain-specific attributes
+                     span.set_attribute("langchain.chat_model.name", model_name)
+                     span.set_attribute("langchain.chat_model.operation", "abatch")
+
+                     # Get batch size
+                     if args and len(args) > 0:
+                         batch_size = len(args[0]) if hasattr(args[0], "__len__") else 1
+                         span.set_attribute("langchain.chat_model.batch_size", batch_size)
+
+                     result = await original_abatch(instance, *args, **kwargs)
+
+                     # Record metrics (though batch results may not have usage info)
+                     self._record_result_metrics(span, result, start_time, kwargs)
+
+                     return result
+
+             BaseChatModel.abatch = wrapped_abatch
+
+             logger.info("LangChain chat models instrumentation completed")
+
+         except ImportError as e:
+             logger.debug("Could not import langchain_core chat models: %s", e)
+         except Exception as e:
+             logger.error("Error instrumenting chat models: %s", e, exc_info=True)
+
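Once patched, every BaseChatModel subclass emits a span per call, regardless of provider. A hedged usage sketch — it assumes OTelConfig is constructible with defaults, which may not match its real signature, and it needs the langchain-openai package plus an OPENAI_API_KEY:

from genai_otel.config import OTelConfig
from genai_otel.instrumentors.langchain_instrumentor import LangChainInstrumentor

LangChainInstrumentor().instrument(OTelConfig())  # assumed default construction

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini")
# Emits a "langchain.chat_model.invoke" span with gen_ai.system="openai"
# and gen_ai.request.model="gpt-4o-mini".
reply = llm.invoke("Say hi")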
+     def _get_model_name(self, instance: Any) -> str:
+         """Extract model name from chat model instance."""
+         # Try common attribute names for model name
+         for attr in ["model_name", "model", "model_id"]:
+             if hasattr(instance, attr):
+                 value = getattr(instance, attr)
+                 if value:
+                     return str(value)
+
+         # Fallback to class name
+         return instance.__class__.__name__
+
+     def _set_chat_attributes(self, span, instance: Any, args: tuple, kwargs: dict, model_name: str):
+         """Set span attributes for chat model invocations (shared by invoke and ainvoke)."""
+         # Set standard GenAI semantic convention attributes
+         provider = self._extract_provider(instance)
+         if provider:
+             span.set_attribute("gen_ai.system", provider)
+         else:
+             span.set_attribute("gen_ai.system", "langchain")
+
+         span.set_attribute("gen_ai.request.model", model_name)
+         span.set_attribute("gen_ai.operation.name", "chat")
+
+         # Also set LangChain-specific attributes for backward compatibility
+         span.set_attribute("langchain.chat_model.name", model_name)
+         span.set_attribute("langchain.chat_model.operation", "invoke")
+         if provider:
+             span.set_attribute("langchain.chat_model.provider", provider)
+
+         # Count messages if available
+         if args and len(args) > 0:
+             messages = args[0]
+             if hasattr(messages, "__len__"):
+                 message_count = len(messages)
+                 span.set_attribute("gen_ai.request.message_count", message_count)
+                 span.set_attribute("langchain.chat_model.message_count", message_count)
+
+     def _extract_provider(self, instance: Any) -> Optional[str]:
+         """Extract provider name from chat model instance."""
+         class_name = instance.__class__.__name__.lower()
+         module_name = instance.__class__.__module__.lower()
+
+         # Map class names to providers
+         provider_mapping = {
+             "openai": "openai",
+             "anthropic": "anthropic",
+             "google": "google",
+             "ollama": "ollama",
+             "bedrock": "bedrock",
+             "cohere": "cohere",
+             "groq": "groq",
+             "mistral": "mistral",
+         }
+
+         # Check class name
+         for key, value in provider_mapping.items():
+             if key in class_name:
+                 return value
+
+         # Check module name
+         for key, value in provider_mapping.items():
+             if key in module_name:
+                 return value
+
+         return None
+
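For example, a ChatAnthropic instance lowercases to "chatanthropic", which contains the key "anthropic", so its spans carry gen_ai.system="anthropic"; a model whose class and module match no key falls back to gen_ai.system="langchain".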
+     def _extract_usage(self, result) -> Optional[Dict[str, int]]:
+         """Extract usage information for BaseInstrumentor compatibility."""
+         try:
+             usage_data = None
+
+             if hasattr(result, "usage_metadata") and result.usage_metadata:
+                 usage_data = result.usage_metadata
+             elif hasattr(result, "response_metadata") and result.response_metadata:
+                 metadata = result.response_metadata
+                 if "token_usage" in metadata:
+                     usage_data = metadata["token_usage"]
+                 elif "usage" in metadata:
+                     usage_data = metadata["usage"]
+
+             if usage_data:
+                 if isinstance(usage_data, dict):
+                     prompt_tokens = usage_data.get("input_tokens") or usage_data.get(
+                         "prompt_tokens"
+                     )
+                     completion_tokens = usage_data.get("output_tokens") or usage_data.get(
+                         "completion_tokens"
+                     )
+                 else:
+                     prompt_tokens = getattr(usage_data, "input_tokens", None) or getattr(
+                         usage_data, "prompt_tokens", None
+                     )
+                     completion_tokens = getattr(usage_data, "output_tokens", None) or getattr(
+                         usage_data, "completion_tokens", None
+                     )
+
+                 if prompt_tokens or completion_tokens:
+                     return {
+                         "prompt_tokens": int(prompt_tokens) if prompt_tokens else 0,
+                         "completion_tokens": int(completion_tokens) if completion_tokens else 0,
+                         "total_tokens": int(prompt_tokens or 0) + int(completion_tokens or 0),
+                     }
+         except Exception as e:
+             logger.debug("Failed to extract usage information: %s", e)
+
+         return None
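LangChain surfaces token counts in two places depending on version and provider, which is why the extractor checks both usage_metadata and response_metadata. A minimal illustration with made-up values (usage_metadata is a documented AIMessage field in recent langchain_core releases):

from langchain_core.messages import AIMessage

msg = AIMessage(
    content="hi",
    usage_metadata={"input_tokens": 12, "output_tokens": 34, "total_tokens": 46},
)
# _extract_usage(msg) normalizes either shape to:
# {"prompt_tokens": 12, "completion_tokens": 34, "total_tokens": 46}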
@@ -0,0 +1,328 @@
+ """OpenTelemetry instrumentor for the LangGraph framework.
+
+ This instrumentor automatically traces graph execution, nodes, edges, state updates,
+ and checkpoints in the LangGraph stateful workflow framework.
+ """
+
+ import logging
+ from typing import Any, Dict, Optional
+
+ from ..config import OTelConfig
+ from .base import BaseInstrumentor
+
+ logger = logging.getLogger(__name__)
+
+
+ class LangGraphInstrumentor(BaseInstrumentor):
+     """Instrumentor for LangGraph stateful workflow framework"""
+
+     def __init__(self):
+         """Initialize the instrumentor."""
+         super().__init__()
+         self._langgraph_available = False
+         self._check_availability()
+
+     def _check_availability(self):
+         """Check if LangGraph library is available."""
+         try:
+             import langgraph
+
+             self._langgraph_available = True
+             logger.debug("LangGraph library detected and available for instrumentation")
+         except ImportError:
+             logger.debug("LangGraph library not installed, instrumentation will be skipped")
+             self._langgraph_available = False
+
+     def instrument(self, config: OTelConfig):
+         """Instrument LangGraph framework if available.
+
+         Args:
+             config (OTelConfig): The OpenTelemetry configuration object.
+         """
+         if not self._langgraph_available:
+             logger.debug("Skipping LangGraph instrumentation - library not available")
+             return
+
+         self.config = config
+
+         try:
+             import wrapt
+             from langgraph.graph import StateGraph
+
+             # Instrument StateGraph.compile() to wrap the resulting CompiledGraph
+             if hasattr(StateGraph, "compile"):
+                 original_compile = StateGraph.compile
+
+                 def wrapped_compile(wrapped, instance, args, kwargs):
+                     # Get the compiled graph
+                     compiled_graph = wrapped(*args, **kwargs)
+
+                     # Instrument the compiled graph's execution methods
+                     self._instrument_compiled_graph(compiled_graph, instance)
+
+                     return compiled_graph
+
+                 StateGraph.compile = wrapt.FunctionWrapper(original_compile, wrapped_compile)
+
+             self._instrumented = True
+             logger.info("LangGraph instrumentation enabled")
+
+         except Exception as e:
+             logger.error("Failed to instrument LangGraph: %s", e, exc_info=True)
+             if config.fail_on_error:
+                 raise
+
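wrapt.FunctionWrapper pairs the original callable with a wrapper taking (wrapped, instance, args, kwargs) and preserves the descriptor protocol, so the patched compile still binds like a normal method. A standalone sketch of the same pattern (the Builder class is illustrative, not part of this package):

import wrapt


class Builder:
    def compile(self):
        return {"nodes": ["a", "b"]}


def on_compile(wrapped, instance, args, kwargs):
    # wrapt hands us the bound original plus the call's args/kwargs.
    result = wrapped(*args, **kwargs)
    print(f"compiled {len(result['nodes'])} nodes for {type(instance).__name__}")
    return result


Builder.compile = wrapt.FunctionWrapper(Builder.compile, on_compile)
Builder().compile()  # prints: compiled 2 nodes for Builder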
+     def _instrument_compiled_graph(self, compiled_graph, state_graph):
+         """Instrument a compiled graph's execution methods.
+
+         Args:
+             compiled_graph: The compiled graph object.
+             state_graph: The original StateGraph instance.
+         """
+         import wrapt
+
+         # Instrument invoke method (synchronous execution)
+         if hasattr(compiled_graph, "invoke"):
+             original_invoke = compiled_graph.invoke
+             compiled_graph.invoke = wrapt.FunctionWrapper(
+                 original_invoke,
+                 lambda w, i, a, kw: self._wrap_graph_invoke(
+                     w, i, a, kw, state_graph, is_async=False
+                 ),
+             )
+
+         # Instrument stream method (synchronous streaming)
+         if hasattr(compiled_graph, "stream"):
+             original_stream = compiled_graph.stream
+             compiled_graph.stream = wrapt.FunctionWrapper(
+                 original_stream,
+                 lambda w, i, a, kw: self._wrap_graph_stream(
+                     w, i, a, kw, state_graph, is_async=False
+                 ),
+             )
+
+         # Instrument ainvoke method (asynchronous execution)
+         if hasattr(compiled_graph, "ainvoke"):
+             original_ainvoke = compiled_graph.ainvoke
+             compiled_graph.ainvoke = wrapt.FunctionWrapper(
+                 original_ainvoke,
+                 lambda w, i, a, kw: self._wrap_graph_invoke(
+                     w, i, a, kw, state_graph, is_async=True
+                 ),
+             )
+
+         # Instrument astream method (asynchronous streaming)
+         if hasattr(compiled_graph, "astream"):
+             original_astream = compiled_graph.astream
+             compiled_graph.astream = wrapt.FunctionWrapper(
+                 original_astream,
+                 lambda w, i, a, kw: self._wrap_graph_stream(
+                     w, i, a, kw, state_graph, is_async=True
+                 ),
+             )
+
+     def _wrap_graph_invoke(self, wrapped, instance, args, kwargs, state_graph, is_async):
+         """Wrap graph invoke/ainvoke method with span.
+
+         Args:
+             wrapped: The original method.
+             instance: The compiled graph instance.
+             args: Positional arguments.
+             kwargs: Keyword arguments.
+             state_graph: The original StateGraph instance.
+             is_async: Whether this is an async invocation.
+         """
+         operation_name = "graph.ainvoke" if is_async else "graph.invoke"
+         return self.create_span_wrapper(
+             span_name=f"langgraph.{operation_name}",
+             extract_attributes=lambda i, a, kw: self._extract_graph_attributes(
+                 i, a, kw, state_graph
+             ),
+         )(wrapped)(instance, *args, **kwargs)
+
+     def _wrap_graph_stream(self, wrapped, instance, args, kwargs, state_graph, is_async):
+         """Wrap graph stream/astream method with span.
+
+         Args:
+             wrapped: The original method.
+             instance: The compiled graph instance.
+             args: Positional arguments.
+             kwargs: Keyword arguments.
+             state_graph: The original StateGraph instance.
+             is_async: Whether this is async streaming.
+         """
+         operation_name = "graph.astream" if is_async else "graph.stream"
+         return self.create_span_wrapper(
+             span_name=f"langgraph.{operation_name}",
+             extract_attributes=lambda i, a, kw: self._extract_graph_attributes(
+                 i, a, kw, state_graph
+             ),
+         )(wrapped)(instance, *args, **kwargs)
+
+     def _extract_graph_attributes(
+         self, instance: Any, args: Any, kwargs: Any, state_graph: Any
+     ) -> Dict[str, Any]:
+         """Extract attributes from graph execution.
+
+         Args:
+             instance: The compiled graph instance.
+             args: Positional arguments (input state).
+             kwargs: Keyword arguments.
+             state_graph: The original StateGraph instance.
+
+         Returns:
+             Dict[str, Any]: Dictionary of attributes to set on the span.
+         """
+         attrs = {}
+
+         # Core attributes
+         attrs["gen_ai.system"] = "langgraph"
+         attrs["gen_ai.operation.name"] = "graph.execution"
+
+         # Extract graph structure information from StateGraph
+         try:
+             # Get nodes from the graph
+             if hasattr(state_graph, "nodes"):
+                 nodes = state_graph.nodes
+                 if nodes:
+                     node_names = list(nodes.keys())
+                     attrs["langgraph.node_count"] = len(node_names)
+                     attrs["langgraph.nodes"] = node_names[:10]  # Limit to 10 nodes
+
+             # Get edges from the graph
+             if hasattr(state_graph, "edges"):
+                 edges = state_graph.edges
+                 if edges:
+                     attrs["langgraph.edge_count"] = len(edges)
+
+             # Get channels (state schema) if available
+             if hasattr(state_graph, "channels"):
+                 channels = state_graph.channels
+                 if channels:
+                     channel_names = list(channels.keys())
+                     attrs["langgraph.channels"] = channel_names[:10]
+                     attrs["langgraph.channel_count"] = len(channel_names)
+
+         except Exception as e:
+             logger.debug("Failed to extract graph structure: %s", e)
+
+         # Extract input state (first positional argument)
+         input_state = None
+         if len(args) > 0:
+             input_state = args[0]
+         elif "input" in kwargs:
+             input_state = kwargs["input"]
+
+         if input_state:
+             try:
+                 if isinstance(input_state, dict):
+                     # Store input state keys
+                     state_keys = list(input_state.keys())
+                     attrs["langgraph.input.keys"] = state_keys[:10]
+
+                     # Store truncated values for important keys
+                     for key in ("messages", "query", "question"):
+                         if key in input_state:
+                             value = str(input_state[key])[:200]
+                             attrs[f"langgraph.input.{key}"] = value
+                 else:
+                     # Non-dict input
+                     attrs["langgraph.input"] = str(input_state)[:200]
+             except Exception as e:
+                 logger.debug("Failed to extract input state: %s", e)
+
+         # Extract config if provided
+         config = kwargs.get("config")
+         if config:
+             try:
+                 # Extract configurable values
+                 if isinstance(config, dict):
+                     if "configurable" in config:
+                         configurable = config["configurable"]
+                         if "thread_id" in configurable:
+                             attrs["langgraph.thread_id"] = configurable["thread_id"]
+                         if "checkpoint_id" in configurable:
+                             attrs["langgraph.checkpoint_id"] = configurable["checkpoint_id"]
+
+                     # Extract recursion limit
+                     if "recursion_limit" in config:
+                         attrs["langgraph.recursion_limit"] = config["recursion_limit"]
+
+             except Exception as e:
+                 logger.debug("Failed to extract config: %s", e)
+
+         return attrs
+
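For a two-node graph invoked with a thread id in its config, the extractor would produce a span attribute dict along these lines (illustrative values only):

{
    "gen_ai.system": "langgraph",
    "gen_ai.operation.name": "graph.execution",
    "langgraph.node_count": 2,
    "langgraph.nodes": ["agent", "tools"],
    "langgraph.edge_count": 2,
    "langgraph.input.keys": ["messages"],
    "langgraph.thread_id": "thread-1",
}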
+     def _extract_usage(self, result) -> Optional[Dict[str, int]]:
+         """Extract token usage from graph execution result.
+
+         Note: LangGraph doesn't directly expose token usage in the result.
+         Token usage is captured by underlying LLM provider instrumentors.
+
+         Args:
+             result: The graph execution result.
+
+         Returns:
+             Optional[Dict[str, int]]: Dictionary with token counts or None.
+         """
+         # LangGraph doesn't directly expose usage
+         # Token usage is captured by LLM provider instrumentors (OpenAI, Anthropic, etc.)
+         return None
+
+     def _extract_response_attributes(self, result) -> Dict[str, Any]:
+         """Extract response attributes from graph execution result.
+
+         Args:
+             result: The graph execution result.
+
+         Returns:
+             Dict[str, Any]: Dictionary of response attributes.
+         """
+         attrs = {}
+
+         try:
+             # LangGraph result is typically a dict (the final state)
+             if isinstance(result, dict):
+                 # Store output state keys
+                 state_keys = list(result.keys())
+                 attrs["langgraph.output.keys"] = state_keys[:10]
+
+                 # Store truncated values for important keys
+                 for key in ("messages", "answer", "output"):
+                     if key in result:
+                         value_str = str(result[key])[:500]
+                         attrs[f"langgraph.output.{key}"] = value_str
+
+                 # Count the number of state updates/steps
+                 if "messages" in result and isinstance(result["messages"], list):
+                     attrs["langgraph.message_count"] = len(result["messages"])
+
+             # Try to extract metadata if available
+             if hasattr(result, "__metadata__"):
+                 try:
+                     metadata = result.__metadata__
+                     if "step" in metadata:
+                         attrs["langgraph.steps"] = metadata["step"]
+                 except Exception as e:
+                     logger.debug("Failed to extract metadata: %s", e)
+
+         except Exception as e:
+             logger.debug("Failed to extract response attributes: %s", e)
+
+         return attrs
+
+     def _extract_finish_reason(self, result) -> Optional[str]:
+         """Extract finish reason from graph execution result.
+
+         Args:
+             result: The graph execution result.
+
+         Returns:
+             Optional[str]: The finish reason string or None if not available.
+         """
+         # LangGraph doesn't typically provide a finish_reason,
+         # so we infer completion status from the presence of a result
+         if result:
+             return "completed"
+         return None
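A hedged end-to-end sketch of what the patched compile() buys (assumes OTelConfig is constructible with defaults; the graph itself uses only documented LangGraph APIs):

from typing import TypedDict

from langgraph.graph import END, StateGraph

from genai_otel.config import OTelConfig
from genai_otel.instrumentors.langgraph_instrumentor import LangGraphInstrumentor

LangGraphInstrumentor().instrument(OTelConfig())  # assumed default construction


class State(TypedDict):
    count: int


def bump(state: State) -> State:
    return {"count": state["count"] + 1}


graph = StateGraph(State)
graph.add_node("bump", bump)
graph.set_entry_point("bump")
graph.add_edge("bump", END)
app = graph.compile()  # wrapped_compile instruments invoke/stream/ainvoke/astream

# Emits a "langgraph.graph.invoke" span carrying langgraph.node_count,
# langgraph.input.keys, and related attributes.
print(app.invoke({"count": 0}))  # {'count': 1}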