prela 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prela/__init__.py +394 -0
- prela/_version.py +3 -0
- prela/contrib/CLI.md +431 -0
- prela/contrib/README.md +118 -0
- prela/contrib/__init__.py +5 -0
- prela/contrib/cli.py +1063 -0
- prela/contrib/explorer.py +571 -0
- prela/core/__init__.py +64 -0
- prela/core/clock.py +98 -0
- prela/core/context.py +228 -0
- prela/core/replay.py +403 -0
- prela/core/sampler.py +178 -0
- prela/core/span.py +295 -0
- prela/core/tracer.py +498 -0
- prela/evals/__init__.py +94 -0
- prela/evals/assertions/README.md +484 -0
- prela/evals/assertions/__init__.py +78 -0
- prela/evals/assertions/base.py +90 -0
- prela/evals/assertions/multi_agent.py +625 -0
- prela/evals/assertions/semantic.py +223 -0
- prela/evals/assertions/structural.py +443 -0
- prela/evals/assertions/tool.py +380 -0
- prela/evals/case.py +370 -0
- prela/evals/n8n/__init__.py +69 -0
- prela/evals/n8n/assertions.py +450 -0
- prela/evals/n8n/runner.py +497 -0
- prela/evals/reporters/README.md +184 -0
- prela/evals/reporters/__init__.py +32 -0
- prela/evals/reporters/console.py +251 -0
- prela/evals/reporters/json.py +176 -0
- prela/evals/reporters/junit.py +278 -0
- prela/evals/runner.py +525 -0
- prela/evals/suite.py +316 -0
- prela/exporters/__init__.py +27 -0
- prela/exporters/base.py +189 -0
- prela/exporters/console.py +443 -0
- prela/exporters/file.py +322 -0
- prela/exporters/http.py +394 -0
- prela/exporters/multi.py +154 -0
- prela/exporters/otlp.py +388 -0
- prela/instrumentation/ANTHROPIC.md +297 -0
- prela/instrumentation/LANGCHAIN.md +480 -0
- prela/instrumentation/OPENAI.md +59 -0
- prela/instrumentation/__init__.py +49 -0
- prela/instrumentation/anthropic.py +1436 -0
- prela/instrumentation/auto.py +129 -0
- prela/instrumentation/base.py +436 -0
- prela/instrumentation/langchain.py +959 -0
- prela/instrumentation/llamaindex.py +719 -0
- prela/instrumentation/multi_agent/__init__.py +48 -0
- prela/instrumentation/multi_agent/autogen.py +357 -0
- prela/instrumentation/multi_agent/crewai.py +404 -0
- prela/instrumentation/multi_agent/langgraph.py +299 -0
- prela/instrumentation/multi_agent/models.py +203 -0
- prela/instrumentation/multi_agent/swarm.py +231 -0
- prela/instrumentation/n8n/__init__.py +68 -0
- prela/instrumentation/n8n/code_node.py +534 -0
- prela/instrumentation/n8n/models.py +336 -0
- prela/instrumentation/n8n/webhook.py +489 -0
- prela/instrumentation/openai.py +1198 -0
- prela/license.py +245 -0
- prela/replay/__init__.py +31 -0
- prela/replay/comparison.py +390 -0
- prela/replay/engine.py +1227 -0
- prela/replay/loader.py +231 -0
- prela/replay/result.py +196 -0
- prela-0.1.0.dist-info/METADATA +399 -0
- prela-0.1.0.dist-info/RECORD +71 -0
- prela-0.1.0.dist-info/WHEEL +4 -0
- prela-0.1.0.dist-info/entry_points.txt +2 -0
- prela-0.1.0.dist-info/licenses/LICENSE +190 -0
|
@@ -0,0 +1,959 @@
|
|
|
1
|
+
"""Instrumentation for LangChain (langchain>=0.1.0).
|
|
2
|
+
|
|
3
|
+
This module provides automatic tracing for LangChain operations via callbacks:
|
|
4
|
+
- LLM calls (OpenAI, Anthropic, etc. through LangChain)
|
|
5
|
+
- Chain executions (sequential, map-reduce, etc.)
|
|
6
|
+
- Tool invocations
|
|
7
|
+
- Retriever queries
|
|
8
|
+
- Agent actions and decisions
|
|
9
|
+
|
|
10
|
+
The instrumentation works by injecting a PrelaCallbackHandler into LangChain's
|
|
11
|
+
global callback system, which automatically captures all executions.
|
|
12
|
+
|
|
13
|
+
Example:
|
|
14
|
+
```python
|
|
15
|
+
from prela.instrumentation.langchain import LangChainInstrumentor
|
|
16
|
+
from prela.core.tracer import Tracer
|
|
17
|
+
from langchain.llms import OpenAI
|
|
18
|
+
from langchain.chains import LLMChain
|
|
19
|
+
from langchain.prompts import PromptTemplate
|
|
20
|
+
|
|
21
|
+
# Setup instrumentation
|
|
22
|
+
tracer = Tracer()
|
|
23
|
+
instrumentor = LangChainInstrumentor()
|
|
24
|
+
instrumentor.instrument(tracer)
|
|
25
|
+
|
|
26
|
+
# Now all LangChain operations are automatically traced
|
|
27
|
+
llm = OpenAI(temperature=0.9)
|
|
28
|
+
prompt = PromptTemplate(
|
|
29
|
+
input_variables=["product"],
|
|
30
|
+
template="What is a good name for a company that makes {product}?"
|
|
31
|
+
)
|
|
32
|
+
chain = LLMChain(llm=llm, prompt=prompt)
|
|
33
|
+
result = chain.run("colorful socks")
|
|
34
|
+
```
|
|
35
|
+
"""
|
|
36
|
+
|
|
37
|
+
from __future__ import annotations
|
|
38
|
+
|
|
39
|
+
import logging
|
|
40
|
+
from typing import TYPE_CHECKING, Any
|
|
41
|
+
from uuid import UUID
|
|
42
|
+
|
|
43
|
+
from prela.core.clock import now
|
|
44
|
+
from prela.core.span import Span, SpanStatus, SpanType
|
|
45
|
+
from prela.instrumentation.base import Instrumentor
|
|
46
|
+
|
|
47
|
+
if TYPE_CHECKING:
|
|
48
|
+
from prela.core.tracer import Tracer
|
|
49
|
+
|
|
50
|
+
logger = logging.getLogger(__name__)
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
class PrelaCallbackHandler:
|
|
54
|
+
"""LangChain callback handler that creates Prela spans.
|
|
55
|
+
|
|
56
|
+
This handler implements LangChain's BaseCallbackHandler interface and
|
|
57
|
+
creates spans for all major LangChain operations. It maintains a mapping
|
|
58
|
+
from run_id to span to properly handle concurrent executions and nested
|
|
59
|
+
operations.
|
|
60
|
+
|
|
61
|
+
The handler tracks:
|
|
62
|
+
- LLM calls: Model invocations with prompts and responses
|
|
63
|
+
- Chains: Sequential operations and workflows
|
|
64
|
+
- Tools: External tool invocations
|
|
65
|
+
- Retrievers: Document retrieval operations
|
|
66
|
+
- Agents: Agent reasoning and actions
|
|
67
|
+
"""
|
|
68
|
+
|
|
69
|
+
def __init__(self, tracer: Tracer) -> None:
    """Initialize the callback handler.

    Args:
        tracer: The tracer to use for creating spans
    """
    self._tracer = tracer
    # Map run_id -> span for tracking concurrent operations; LangChain
    # identifies every run by UUID, which lets nested/parallel runs coexist.
    self._spans: dict[str, Span] = {}
    # Map run_id -> context manager for proper cleanup; the context is
    # entered in on_*_start and exited manually in on_*_end / on_*_error.
    self._contexts: dict[str, Any] = {}
    # Map run_id -> ReplayCapture for replay data (only populated when
    # the tracer has capture_for_replay enabled).
    self._replay_captures: dict[str, Any] = {}
|
|
82
|
+
|
|
83
|
+
def on_llm_start(
    self,
    serialized: dict[str, Any],
    prompts: list[str],
    *,
    run_id: UUID,
    parent_run_id: UUID | None = None,
    tags: list[str] | None = None,
    metadata: dict[str, Any] | None = None,
    **kwargs: Any,
) -> None:
    """Called when an LLM starts running.

    Opens a span for the LLM call, records prompts/tags/metadata as span
    attributes, and — when replay capture is enabled on the tracer —
    snapshots the request for later replay.

    Args:
        serialized: Serialized LLM configuration
        prompts: Input prompts to the LLM
        run_id: Unique identifier for this LLM run
        parent_run_id: ID of parent operation (if nested; currently unused here)
        tags: Optional tags for categorization
        metadata: Optional metadata
        **kwargs: Additional LLM parameters
    """
    try:
        # Extract LLM info from serialized config.
        llm_type = serialized.get("name", "unknown")
        model = serialized.get("kwargs", {}).get("model_name", "unknown")

        # Start span
        ctx = self._tracer.span(
            name=f"langchain.llm.{llm_type}",
            span_type=SpanType.LLM,
            attributes={
                "llm.vendor": "langchain",
                "llm.type": llm_type,
                "llm.model": model,
                "llm.prompt_count": len(prompts),
            },
        )

        # Enter context manually and stash both span and context manager,
        # keyed by run_id; on_llm_end/on_llm_error will close them.
        span = ctx.__enter__()
        self._spans[str(run_id)] = span
        self._contexts[str(run_id)] = ctx

        # Add prompts as attributes (truncate if too long)
        for i, prompt in enumerate(prompts[:5]):  # Limit to first 5 prompts
            truncated = prompt[:500] if len(prompt) > 500 else prompt
            span.set_attribute(f"llm.prompt.{i}", truncated)

        # Add tags and metadata
        if tags:
            span.set_attribute("langchain.tags", tags)
        if metadata:
            for key, value in metadata.items():
                span.set_attribute(f"langchain.metadata.{key}", str(value))

        # Add additional parameters (skip LangChain plumbing objects).
        for key, value in kwargs.items():
            if key not in ["callbacks", "run_manager"]:
                span.set_attribute(f"llm.{key}", str(value))

        # Initialize replay capture if enabled. Import is local so the
        # replay module is only loaded when the feature is on.
        if self._tracer.capture_for_replay:
            try:
                from prela.core.replay import ReplayCapture

                replay_capture = ReplayCapture()

                # Extract parameters from LangChain's invocation_params.
                invocation_params = kwargs.get("invocation_params", {})
                temperature = invocation_params.get("temperature")
                max_tokens = invocation_params.get("max_tokens")

                # Capture request; remaining invocation params are passed
                # through as extra keyword arguments.
                replay_capture.set_llm_request(
                    model=model,
                    prompt=prompts[0] if prompts else None,  # First prompt
                    temperature=temperature,
                    max_tokens=max_tokens,
                    **{k: v for k, v in invocation_params.items()
                       if k not in ["temperature", "max_tokens"]}
                )

                self._replay_captures[str(run_id)] = replay_capture
            except Exception as e:
                # Replay capture is best-effort: never fail the traced call.
                logger.debug(f"Failed to initialize replay capture: {e}")

    except Exception as e:
        # Callbacks must never raise into user code; log and continue.
        logger.error(f"Error in on_llm_start: {e}", exc_info=True)
|
|
172
|
+
|
|
173
|
+
def on_llm_end(
    self,
    response: Any,
    *,
    run_id: UUID,
    parent_run_id: UUID | None = None,
    **kwargs: Any,
) -> None:
    """Called when an LLM finishes running.

    Records response text and token usage on the span opened by
    on_llm_start, finalizes any replay capture, then closes and
    forgets the span.

    Args:
        response: LLM response object (LangChain LLMResult-like)
        run_id: Unique identifier for this LLM run
        parent_run_id: ID of parent operation
        **kwargs: Additional parameters
    """
    try:
        run_id_str = str(run_id)
        span = self._spans.get(run_id_str)
        ctx = self._contexts.get(run_id_str)

        # If on_llm_start never fired (or cleanup already ran) there is
        # nothing to close.
        if span and ctx:
            # Extract response information (first generation of each of
            # the first 5 prompt results, truncated to 500 chars).
            if hasattr(response, "generations"):
                generations = response.generations
                for i, gen_list in enumerate(generations[:5]):
                    if gen_list:
                        text = getattr(gen_list[0], "text", "")
                        truncated = text[:500] if len(text) > 500 else text
                        span.set_attribute(f"llm.response.{i}", truncated)

            # Extract token usage if available
            if hasattr(response, "llm_output") and response.llm_output:
                token_usage = response.llm_output.get("token_usage", {})
                if "prompt_tokens" in token_usage:
                    span.set_attribute("llm.usage.prompt_tokens", token_usage["prompt_tokens"])
                if "completion_tokens" in token_usage:
                    span.set_attribute("llm.usage.completion_tokens", token_usage["completion_tokens"])
                if "total_tokens" in token_usage:
                    span.set_attribute("llm.usage.total_tokens", token_usage["total_tokens"])

            # Mark as successful
            span.set_status(SpanStatus.SUCCESS)

            # Complete replay capture if enabled
            if self._tracer.capture_for_replay and run_id_str in self._replay_captures:
                try:
                    replay_capture = self._replay_captures[run_id_str]

                    # Extract response text (first generation only).
                    text = ""
                    if hasattr(response, "generations") and response.generations:
                        gen_list = response.generations[0]
                        if gen_list:
                            text = getattr(gen_list[0], "text", "")

                    # Extract token usage
                    prompt_tokens = None
                    completion_tokens = None
                    if hasattr(response, "llm_output") and response.llm_output:
                        token_usage = response.llm_output.get("token_usage", {})
                        prompt_tokens = token_usage.get("prompt_tokens")
                        completion_tokens = token_usage.get("completion_tokens")

                    # Capture response
                    replay_capture.set_llm_response(
                        text=text,
                        prompt_tokens=prompt_tokens,
                        completion_tokens=completion_tokens,
                    )

                    # Attach to span. NOTE(review): object.__setattr__ is
                    # used, presumably because Span is a frozen dataclass —
                    # confirm against prela.core.span.
                    object.__setattr__(span, "replay_snapshot", replay_capture.build())

                    # Clean up replay capture
                    del self._replay_captures[run_id_str]
                except Exception as e:
                    # Replay capture is best-effort; never break the trace.
                    logger.debug(f"Failed to capture replay data: {e}")

            # Exit context (closes the span in the tracer).
            ctx.__exit__(None, None, None)

            # Clean up bookkeeping for this run.
            del self._spans[run_id_str]
            del self._contexts[run_id_str]

    except Exception as e:
        # Callbacks must never raise into user code; log and continue.
        logger.error(f"Error in on_llm_end: {e}", exc_info=True)
|
|
261
|
+
|
|
262
|
+
def on_llm_error(
    self,
    error: BaseException,
    *,
    run_id: UUID,
    parent_run_id: UUID | None = None,
    **kwargs: Any,
) -> None:
    """Called when an LLM errors.

    Marks the span as failed, closes it, and discards any in-flight
    replay capture for this run.

    Args:
        error: The error that occurred
        run_id: Unique identifier for this LLM run
        parent_run_id: ID of parent operation
        **kwargs: Additional parameters
    """
    try:
        run_id_str = str(run_id)
        span = self._spans.get(run_id_str)
        ctx = self._contexts.get(run_id_str)

        if span and ctx:
            # Mark as error
            span.set_status(SpanStatus.ERROR, str(error))
            span.set_attribute("error.type", type(error).__name__)
            span.set_attribute("error.message", str(error))

            # Exit context with the error so the tracer records the failure.
            ctx.__exit__(type(error), error, None)

            # Clean up
            del self._spans[run_id_str]
            del self._contexts[run_id_str]

        # Always discard any in-flight replay capture — even when the span
        # bookkeeping is already gone — so a failed run cannot leak capture
        # state. pop() with a default avoids a membership test + del pair.
        self._replay_captures.pop(run_id_str, None)

    except Exception as e:
        # Callbacks must never raise into user code; log and continue.
        logger.error(f"Error in on_llm_error: {e}", exc_info=True)
|
|
302
|
+
|
|
303
|
+
def on_chain_start(
    self,
    serialized: dict[str, Any],
    inputs: dict[str, Any],
    *,
    run_id: UUID,
    parent_run_id: UUID | None = None,
    tags: list[str] | None = None,
    metadata: dict[str, Any] | None = None,
    **kwargs: Any,
) -> None:
    """Open a span for a chain execution.

    Records chain inputs (truncated), tags, and metadata on the new span
    and stores the span/context under this run's id for later cleanup.

    Args:
        serialized: Serialized chain configuration
        inputs: Input values to the chain
        run_id: Unique identifier for this chain run
        parent_run_id: ID of parent operation
        tags: Optional tags
        metadata: Optional metadata
        **kwargs: Additional parameters
    """
    try:
        chain_name = serialized.get("name", "unknown")

        span_cm = self._tracer.span(
            name=f"langchain.chain.{chain_name}",
            span_type=SpanType.AGENT,  # chains are agent-level operations
            attributes={
                "langchain.type": "chain",
                "langchain.chain_type": chain_name,
            },
        )

        # Enter the context manually; on_chain_end/on_chain_error close it.
        active_span = span_cm.__enter__()
        run_key = str(run_id)
        self._spans[run_key] = active_span
        self._contexts[run_key] = span_cm

        # Record each chain input, capped at 500 characters.
        for in_key, in_value in inputs.items():
            active_span.set_attribute(f"chain.input.{in_key}", str(in_value)[:500])

        if tags:
            active_span.set_attribute("langchain.tags", tags)
        if metadata:
            for meta_key, meta_value in metadata.items():
                active_span.set_attribute(f"langchain.metadata.{meta_key}", str(meta_value))

    except Exception as exc:
        # Never propagate callback failures into user code.
        logger.error(f"Error in on_chain_start: {exc}", exc_info=True)
|
|
359
|
+
|
|
360
|
+
def on_chain_end(
    self,
    outputs: dict[str, Any],
    *,
    run_id: UUID,
    parent_run_id: UUID | None = None,
    **kwargs: Any,
) -> None:
    """Close the span for a finished chain run.

    Records chain outputs (truncated), marks the span successful, exits
    its context, and drops the run's bookkeeping entries.

    Args:
        outputs: Output values from the chain
        run_id: Unique identifier for this chain run
        parent_run_id: ID of parent operation
        **kwargs: Additional parameters
    """
    try:
        run_key = str(run_id)
        active_span = self._spans.get(run_key)
        span_cm = self._contexts.get(run_key)

        # Nothing to do if the start callback never registered this run.
        if not (active_span and span_cm):
            return

        # Record each chain output, capped at 500 characters.
        for out_key, out_value in outputs.items():
            active_span.set_attribute(f"chain.output.{out_key}", str(out_value)[:500])

        active_span.set_status(SpanStatus.SUCCESS)

        # Close the span's context and forget this run.
        span_cm.__exit__(None, None, None)
        del self._spans[run_key]
        del self._contexts[run_key]

    except Exception as exc:
        # Never propagate callback failures into user code.
        logger.error(f"Error in on_chain_end: {exc}", exc_info=True)
|
|
400
|
+
|
|
401
|
+
def on_chain_error(
    self,
    error: BaseException,
    *,
    run_id: UUID,
    parent_run_id: UUID | None = None,
    **kwargs: Any,
) -> None:
    """Close the span for a chain run that raised.

    Marks the span as errored with the exception details, exits its
    context with the error, and drops the run's bookkeeping entries.

    Args:
        error: The error that occurred
        run_id: Unique identifier for this chain run
        parent_run_id: ID of parent operation
        **kwargs: Additional parameters
    """
    try:
        run_key = str(run_id)
        active_span = self._spans.get(run_key)
        span_cm = self._contexts.get(run_key)

        # Nothing to do if the start callback never registered this run.
        if not (active_span and span_cm):
            return

        # Flag the failure on the span itself.
        active_span.set_status(SpanStatus.ERROR, str(error))
        active_span.set_attribute("error.type", type(error).__name__)
        active_span.set_attribute("error.message", str(error))

        # Close the context with the exception so the tracer sees it.
        span_cm.__exit__(type(error), error, None)

        del self._spans[run_key]
        del self._contexts[run_key]

    except Exception as exc:
        # Never propagate callback failures into user code.
        logger.error(f"Error in on_chain_error: {exc}", exc_info=True)
|
|
437
|
+
|
|
438
|
+
def on_tool_start(
    self,
    serialized: dict[str, Any],
    input_str: str,
    *,
    run_id: UUID,
    parent_run_id: UUID | None = None,
    tags: list[str] | None = None,
    metadata: dict[str, Any] | None = None,
    **kwargs: Any,
) -> None:
    """Called when a tool starts running.

    Opens a TOOL span, records the tool input/tags/metadata, and — when
    replay capture is enabled — records the tool call for replay.

    Args:
        serialized: Serialized tool configuration
        input_str: Input string to the tool
        run_id: Unique identifier for this tool run
        parent_run_id: ID of parent operation
        tags: Optional tags
        metadata: Optional metadata
        **kwargs: Additional parameters
    """
    try:
        # Extract tool info once; reused below for the replay capture
        # (previously recomputed from `serialized` a second time).
        tool_name = serialized.get("name", "unknown")

        # Start span
        ctx = self._tracer.span(
            name=f"langchain.tool.{tool_name}",
            span_type=SpanType.TOOL,
            attributes={
                "tool.name": tool_name,
                "tool.description": serialized.get("description", ""),
            },
        )

        # Enter context manually; on_tool_end/on_tool_error close it.
        span = ctx.__enter__()
        run_id_str = str(run_id)
        self._spans[run_id_str] = span
        self._contexts[run_id_str] = ctx

        # Add input (capped at 500 characters).
        span.set_attribute("tool.input", input_str[:500])

        # Add tags and metadata
        if tags:
            span.set_attribute("langchain.tags", tags)
        if metadata:
            for key, value in metadata.items():
                span.set_attribute(f"langchain.metadata.{key}", str(value))

        # Initialize replay capture for tools if enabled (best-effort).
        if self._tracer.capture_for_replay:
            try:
                from prela.core.replay import ReplayCapture

                replay_capture = ReplayCapture()
                replay_capture.set_tool_call(
                    name=tool_name,
                    description=serialized.get("description"),
                    input_args=input_str,
                )
                self._replay_captures[run_id_str] = replay_capture
            except Exception as e:
                logger.debug(f"Failed to initialize tool replay capture: {e}")

    except Exception as e:
        # Callbacks must never raise into user code; log and continue.
        logger.error(f"Error in on_tool_start: {e}", exc_info=True)
|
|
510
|
+
|
|
511
|
+
def on_tool_end(
    self,
    output: str,
    *,
    run_id: UUID,
    parent_run_id: UUID | None = None,
    **kwargs: Any,
) -> None:
    """Called when a tool finishes running.

    Records the tool output, finalizes any replay capture, then closes
    and forgets the span opened by on_tool_start.

    Args:
        output: Output from the tool
        run_id: Unique identifier for this tool run
        parent_run_id: ID of parent operation
        **kwargs: Additional parameters
    """
    try:
        run_id_str = str(run_id)
        span = self._spans.get(run_id_str)
        ctx = self._contexts.get(run_id_str)

        # Nothing to do if on_tool_start never registered this run.
        if span and ctx:
            # Add output (capped at 500 characters).
            truncated = output[:500] if len(output) > 500 else output
            span.set_attribute("tool.output", truncated)

            # Mark as successful
            span.set_status(SpanStatus.SUCCESS)

            # Complete tool replay capture if enabled (best-effort).
            if self._tracer.capture_for_replay and run_id_str in self._replay_captures:
                try:
                    replay_capture = self._replay_captures[run_id_str]
                    # NOTE(review): writes the private _snapshot directly;
                    # the LLM path uses a public setter (set_llm_response).
                    # Confirm whether ReplayCapture exposes a tool-output
                    # setter that should be used here instead.
                    replay_capture._snapshot.tool_output = output

                    # Attach to span. NOTE(review): object.__setattr__ is
                    # used, presumably because Span is a frozen dataclass —
                    # confirm against prela.core.span.
                    object.__setattr__(span, "replay_snapshot", replay_capture.build())

                    # Clean up replay capture
                    del self._replay_captures[run_id_str]
                except Exception as e:
                    logger.debug(f"Failed to capture tool replay data: {e}")

            # Exit context (closes the span in the tracer).
            ctx.__exit__(None, None, None)

            # Clean up bookkeeping for this run.
            del self._spans[run_id_str]
            del self._contexts[run_id_str]

    except Exception as e:
        # Callbacks must never raise into user code; log and continue.
        logger.error(f"Error in on_tool_end: {e}", exc_info=True)
|
|
563
|
+
|
|
564
|
+
def on_tool_error(
    self,
    error: BaseException,
    *,
    run_id: UUID,
    parent_run_id: UUID | None = None,
    **kwargs: Any,
) -> None:
    """Called when a tool errors.

    Marks the span as failed, closes it, and discards any in-flight
    replay capture for this run.

    Args:
        error: The error that occurred
        run_id: Unique identifier for this tool run
        parent_run_id: ID of parent operation
        **kwargs: Additional parameters
    """
    try:
        run_id_str = str(run_id)
        span = self._spans.get(run_id_str)
        ctx = self._contexts.get(run_id_str)

        if span and ctx:
            # Mark as error
            span.set_status(SpanStatus.ERROR, str(error))
            span.set_attribute("error.type", type(error).__name__)
            span.set_attribute("error.message", str(error))

            # Exit context with the error so the tracer records the failure.
            ctx.__exit__(type(error), error, None)

            # Clean up
            del self._spans[run_id_str]
            del self._contexts[run_id_str]

        # Always discard any in-flight replay capture — even when the span
        # bookkeeping is already gone — so a failed run cannot leak capture
        # state. pop() with a default avoids a membership test + del pair.
        self._replay_captures.pop(run_id_str, None)

    except Exception as e:
        # Callbacks must never raise into user code; log and continue.
        logger.error(f"Error in on_tool_error: {e}", exc_info=True)
|
|
604
|
+
|
|
605
|
+
def on_retriever_start(
    self,
    serialized: dict[str, Any],
    query: str,
    *,
    run_id: UUID,
    parent_run_id: UUID | None = None,
    tags: list[str] | None = None,
    metadata: dict[str, Any] | None = None,
    **kwargs: Any,
) -> None:
    """Called when a retriever starts.

    Opens a RETRIEVAL span, records the query/tags/metadata, and — when
    replay capture is enabled — stashes the query and metadata so
    on_retriever_end can build the full retrieval snapshot.

    Args:
        serialized: Serialized retriever configuration
        query: Query string for retrieval
        run_id: Unique identifier for this retriever run
        parent_run_id: ID of parent operation
        tags: Optional tags
        metadata: Optional metadata
        **kwargs: Additional parameters
    """
    try:
        # Extract retriever info once; reused below for the replay capture
        # (previously recomputed from `serialized` a second time).
        retriever_type = serialized.get("name", "unknown")

        # Start span
        ctx = self._tracer.span(
            name=f"langchain.retriever.{retriever_type}",
            span_type=SpanType.RETRIEVAL,
            attributes={
                "retriever.type": retriever_type,
            },
        )

        # Enter context manually; on_retriever_end closes it.
        span = ctx.__enter__()
        run_id_str = str(run_id)
        self._spans[run_id_str] = span
        self._contexts[run_id_str] = ctx

        # Add query (capped at 500 characters).
        span.set_attribute("retriever.query", query[:500])

        # Add tags and metadata
        if tags:
            span.set_attribute("langchain.tags", tags)
        if metadata:
            for key, value in metadata.items():
                span.set_attribute(f"langchain.metadata.{key}", str(value))

        # Initialize replay capture for retrieval if enabled (best-effort).
        if self._tracer.capture_for_replay:
            try:
                from prela.core.replay import ReplayCapture

                # Documents are not known yet, so store the capture plus
                # the request context; on_retriever_end completes it.
                self._replay_captures[run_id_str] = {
                    "capture": ReplayCapture(),
                    "query": query,
                    "retriever_type": retriever_type,
                    "metadata": metadata or {},
                }
            except Exception as e:
                logger.debug(f"Failed to initialize retrieval replay capture: {e}")

    except Exception as e:
        # Callbacks must never raise into user code; log and continue.
        logger.error(f"Error in on_retriever_start: {e}", exc_info=True)
|
|
677
|
+
|
|
678
|
+
def on_retriever_end(
    self,
    documents: list[Any],
    *,
    run_id: UUID,
    parent_run_id: UUID | None = None,
    **kwargs: Any,
) -> None:
    """Called when a retriever finishes.

    Records retrieved-document attributes, finalizes any replay capture
    started in on_retriever_start, then closes and forgets the span.

    Args:
        documents: Retrieved documents
        run_id: Unique identifier for this retriever run
        parent_run_id: ID of parent operation
        **kwargs: Additional parameters
    """
    try:
        run_id_str = str(run_id)
        span = self._spans.get(run_id_str)
        ctx = self._contexts.get(run_id_str)

        # Nothing to do if on_retriever_start never registered this run.
        if span and ctx:
            # Add document count
            span.set_attribute("retriever.document_count", len(documents))

            # Add document metadata
            for i, doc in enumerate(documents[:5]):  # Limit to first 5 docs
                if hasattr(doc, "page_content"):
                    content = doc.page_content[:200]  # Truncate content
                    span.set_attribute(f"retriever.doc.{i}.content", content)
                if hasattr(doc, "metadata"):
                    for key, value in doc.metadata.items():
                        span.set_attribute(f"retriever.doc.{i}.metadata.{key}", str(value))

            # Mark as successful
            span.set_status(SpanStatus.SUCCESS)

            # Complete retrieval replay capture if enabled (best-effort).
            if self._tracer.capture_for_replay and run_id_str in self._replay_captures:
                try:
                    replay_data = self._replay_captures[run_id_str]
                    replay_capture = replay_data["capture"]

                    # Extract document data
                    docs = []
                    scores = []
                    for doc in documents[:5]:  # Limit to first 5
                        doc_dict = {}
                        if hasattr(doc, "page_content"):
                            doc_dict["content"] = doc.page_content[:200]  # Truncate
                        if hasattr(doc, "metadata"):
                            doc_dict["metadata"] = doc.metadata
                        docs.append(doc_dict)

                        # Extract score if available. NOTE(review): assumed
                        # to run once per document inside this loop so
                        # `scores` lines up with `docs` — confirm intent;
                        # scores may be shorter than docs when some docs
                        # lack a "score" key.
                        if hasattr(doc, "metadata") and "score" in doc.metadata:
                            scores.append(doc.metadata["score"])

                    # Capture retrieval
                    replay_capture.set_retrieval(
                        query=replay_data["query"],
                        documents=docs,
                        scores=scores if scores else None,
                        metadata=replay_data["metadata"],
                    )

                    # Attach to span. NOTE(review): object.__setattr__ is
                    # used, presumably because Span is a frozen dataclass —
                    # confirm against prela.core.span.
                    object.__setattr__(span, "replay_snapshot", replay_capture.build())

                    # Clean up replay capture
                    del self._replay_captures[run_id_str]
                except Exception as e:
                    logger.debug(f"Failed to capture retrieval replay data: {e}")

            # Exit context (closes the span in the tracer).
            ctx.__exit__(None, None, None)

            # Clean up bookkeeping for this run.
            del self._spans[run_id_str]
            del self._contexts[run_id_str]

    except Exception as e:
        # Callbacks must never raise into user code; log and continue.
        logger.error(f"Error in on_retriever_end: {e}", exc_info=True)
|
|
761
|
+
|
|
762
|
+
def on_agent_action(
    self,
    action: Any,
    *,
    run_id: UUID,
    parent_run_id: UUID | None = None,
    **kwargs: Any,
) -> None:
    """Record an agent action as an event on the active span.

    Args:
        action: The agent action
        run_id: Unique identifier for this run
        parent_run_id: ID of parent operation
        **kwargs: Additional parameters
    """
    try:
        active_span = self._spans.get(str(run_id))
        if active_span is None:
            return

        # Stringify and truncate the action fields so oversized tool
        # inputs or scratchpad logs cannot bloat the span.
        event_attrs = {
            "action.tool": str(getattr(action, "tool", "unknown")),
            "action.tool_input": str(getattr(action, "tool_input", ""))[:500],
            "action.log": str(getattr(action, "log", ""))[:500],
        }
        active_span.add_event("agent.action", attributes=event_attrs)
    except Exception as e:
        logger.error(f"Error in on_agent_action: {e}", exc_info=True)
|
|
795
|
+
|
|
796
|
+
def on_agent_finish(
    self,
    finish: Any,
    *,
    run_id: UUID,
    parent_run_id: UUID | None = None,
    **kwargs: Any,
) -> None:
    """Record an agent's final result as an event on the active span.

    Args:
        finish: The agent finish data
        run_id: Unique identifier for this run
        parent_run_id: ID of parent operation
        **kwargs: Additional parameters
    """
    try:
        active_span = self._spans.get(str(run_id))
        if active_span is None:
            return

        # Truncate output and log so very large agent results stay bounded.
        outputs = getattr(finish, "return_values", {})
        active_span.add_event(
            "agent.finish",
            attributes={
                "finish.output": str(outputs)[:500],
                "finish.log": str(getattr(finish, "log", ""))[:500],
            },
        )
    except Exception as e:
        logger.error(f"Error in on_agent_finish: {e}", exc_info=True)
|
|
829
|
+
|
|
830
|
+
|
|
831
|
+
class LangChainInstrumentor(Instrumentor):
|
|
832
|
+
"""Instrumentor for LangChain framework.
|
|
833
|
+
|
|
834
|
+
This instrumentor injects a PrelaCallbackHandler into LangChain's global
|
|
835
|
+
callback system, which automatically traces all LangChain operations
|
|
836
|
+
including LLM calls, chains, tools, retrievers, and agent actions.
|
|
837
|
+
|
|
838
|
+
Unlike other instrumentors that use function wrapping, this uses LangChain's
|
|
839
|
+
built-in callback mechanism for more robust and comprehensive tracing.
|
|
840
|
+
"""
|
|
841
|
+
|
|
842
|
+
def __init__(self) -> None:
    """Initialize the LangChain instrumentor with no active handler."""
    # Reference to langchain_core.callbacks, kept so uninstrument() can
    # undo the registration later.
    self._langchain_core_module: Any = None
    # No handler exists until instrument() is called.
    self._callback_handler: PrelaCallbackHandler | None = None
|
|
846
|
+
|
|
847
|
+
def instrument(self, tracer: Tracer) -> None:
    """Enable instrumentation for LangChain.

    Registers a PrelaCallbackHandler with LangChain's global callback
    machinery so every LangChain operation is traced.

    Args:
        tracer: The tracer to use for creating spans

    Raises:
        ImportError: If langchain-core package is not installed
        RuntimeError: If instrumentation fails
    """
    if self.is_instrumented:
        logger.debug("LangChain is already instrumented, skipping")
        return

    try:
        from langchain_core import callbacks as lc_callbacks
    except ImportError as e:
        raise ImportError(
            "langchain-core package is not installed. "
            "Install it with: pip install langchain-core>=0.1.0"
        ) from e

    self._langchain_core_module = lc_callbacks

    # Build the handler first; it is discarded again if registration fails.
    handler = PrelaCallbackHandler(tracer)
    self._callback_handler = handler

    try:
        if hasattr(lc_callbacks, "get_callback_manager"):
            # Older API: attach to the global callback manager.
            manager = lc_callbacks.get_callback_manager()
            if hasattr(manager, "add_handler"):
                manager.add_handler(handler)
        else:
            # Newer API: keep our handlers on a module-level list.
            if not hasattr(lc_callbacks, "_prela_handlers"):
                lc_callbacks._prela_handlers = []
            lc_callbacks._prela_handlers.append(handler)

        logger.info("Successfully instrumented LangChain")

    except Exception as e:
        self._callback_handler = None
        raise RuntimeError(f"Failed to instrument LangChain: {e}") from e
|
|
895
|
+
|
|
896
|
+
def uninstrument(self) -> None:
    """Disable instrumentation and remove the callback handler.

    Raises:
        RuntimeError: If uninstrumentation fails
    """
    if not self.is_instrumented:
        logger.debug("LangChain is not instrumented, skipping")
        return

    try:
        lc_callbacks = self._langchain_core_module
        handler = self._callback_handler

        if lc_callbacks and handler:
            if hasattr(lc_callbacks, "get_callback_manager"):
                # Older API: detach from the global callback manager.
                manager = lc_callbacks.get_callback_manager()
                if hasattr(manager, "remove_handler"):
                    manager.remove_handler(handler)
            elif hasattr(lc_callbacks, "_prela_handlers"):
                # Newer API: drop ourselves from the module-level list and
                # remove the list entirely once it is empty.
                lc_callbacks._prela_handlers.remove(handler)
                if not lc_callbacks._prela_handlers:
                    delattr(lc_callbacks, "_prela_handlers")

        self._callback_handler = None
        self._langchain_core_module = None

        logger.info("Successfully uninstrumented LangChain")

    except Exception as e:
        raise RuntimeError(f"Failed to uninstrument LangChain: {e}") from e
|
|
930
|
+
|
|
931
|
+
@property
def is_instrumented(self) -> bool:
    """Whether LangChain is currently instrumented.

    Returns:
        True while a callback handler is registered, False otherwise
    """
    handler = self._callback_handler
    return handler is not None
|
|
939
|
+
|
|
940
|
+
def get_callback(self) -> PrelaCallbackHandler | None:
    """Return the active callback handler, if any.

    Useful for passing the handler explicitly to individual LangChain
    calls, although auto-instrumentation normally makes this unnecessary.

    Returns:
        PrelaCallbackHandler | None: The callback handler if instrumented

    Example:
        ```python
        instrumentor = LangChainInstrumentor()
        instrumentor.instrument(tracer)

        # Optional: Get handler for manual use
        handler = instrumentor.get_callback()
        chain.run(input_text, callbacks=[handler])
        ```
    """
    return self._callback_handler
|