deepeval 3.8.0__py3-none-any.whl → 3.8.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepeval/_version.py +1 -1
- deepeval/annotation/annotation.py +2 -2
- deepeval/confident/api.py +31 -3
- deepeval/config/settings.py +3 -0
- deepeval/dataset/dataset.py +6 -4
- deepeval/integrations/langchain/callback.py +307 -15
- deepeval/integrations/langchain/utils.py +75 -24
- deepeval/integrations/pydantic_ai/instrumentator.py +43 -11
- deepeval/integrations/pydantic_ai/otel.py +9 -0
- deepeval/metrics/contextual_recall/contextual_recall.py +25 -6
- deepeval/metrics/contextual_recall/schema.py +6 -0
- deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +10 -1
- deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +10 -1
- deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +10 -1
- deepeval/metrics/utils.py +12 -1
- deepeval/models/llms/amazon_bedrock_model.py +51 -6
- deepeval/models/llms/azure_model.py +33 -7
- deepeval/models/llms/gemini_model.py +6 -1
- deepeval/prompt/prompt.py +7 -5
- deepeval/simulator/conversation_simulator.py +4 -2
- deepeval/telemetry.py +12 -91
- deepeval/test_case/llm_test_case.py +1 -0
- deepeval/tracing/tracing.py +6 -5
- {deepeval-3.8.0.dist-info → deepeval-3.8.2.dist-info}/METADATA +1 -1
- {deepeval-3.8.0.dist-info → deepeval-3.8.2.dist-info}/RECORD +28 -28
- {deepeval-3.8.0.dist-info → deepeval-3.8.2.dist-info}/LICENSE.md +0 -0
- {deepeval-3.8.0.dist-info → deepeval-3.8.2.dist-info}/WHEEL +0 -0
- {deepeval-3.8.0.dist-info → deepeval-3.8.2.dist-info}/entry_points.txt +0 -0
deepeval/_version.py
CHANGED
@@ -1 +1 @@
-__version__: str = "3.8.0"
+__version__: str = "3.8.2"
deepeval/annotation/annotation.py
CHANGED
@@ -14,7 +14,7 @@ def send_annotation(
     explanation: Optional[str] = None,
     user_id: Optional[str] = None,
     type: Optional[AnnotationType] = AnnotationType.THUMBS_RATING,
-) ->
+) -> None:
     api_annotation = APIAnnotation(
         rating=rating,
         traceUuid=trace_uuid,
@@ -50,7 +50,7 @@ async def a_send_annotation(
     explanation: Optional[str] = None,
     type: Optional[AnnotationType] = AnnotationType.THUMBS_RATING,
     user_id: Optional[str] = None,
-) ->
+) -> None:
     api_annotation = APIAnnotation(
         rating=rating,
         traceUuid=trace_uuid,
deepeval/confident/api.py
CHANGED
@@ -26,16 +26,44 @@ API_BASE_URL_EU = "https://eu.api.confident-ai.com"
 retryable_exceptions = requests.exceptions.SSLError


+def _infer_region_from_api_key(api_key: Optional[str]) -> Optional[str]:
+    """
+    Infer region from Confident API key prefix.
+
+    Supported:
+    - confident_eu_... => "EU"
+    - confident_us_... => "US"
+
+    Returns None if prefix is not recognized or api_key is falsy.
+    """
+    if not api_key:
+        return None
+    key = api_key.strip().lower()
+    if key.startswith("confident_eu_"):
+        return "EU"
+    if key.startswith("confident_us_"):
+        return "US"
+    return None
+
+
 def get_base_api_url():
     s = get_settings()
     if s.CONFIDENT_BASE_URL:
         base_url = s.CONFIDENT_BASE_URL.rstrip("/")
         return base_url
+    # If the user has explicitly set a region, respect it.
     region = KEY_FILE_HANDLER.fetch_data(KeyValues.CONFIDENT_REGION)
-    if region
+    if region:
+        return API_BASE_URL_EU if region == "EU" else API_BASE_URL
+
+    # Otherwise, infer region from the API key prefix.
+    api_key = get_confident_api_key()
+    inferred = _infer_region_from_api_key(api_key)
+    if inferred == "EU":
         return API_BASE_URL_EU
-
-
+
+    # Default to US (backwards compatible)
+    return API_BASE_URL


 def get_confident_api_key() -> Optional[str]:
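Net effect of this hunk: get_base_api_url() now resolves in a fixed order — explicit CONFIDENT_BASE_URL, then an explicitly configured region, then the API-key prefix, then the US default. A standalone sketch of the prefix rule it adds (the keys below are made-up placeholders, not real credentials):

from typing import Optional

def infer_region(api_key: Optional[str]) -> Optional[str]:
    # Mirrors _infer_region_from_api_key in the hunk above.
    if not api_key:
        return None
    key = api_key.strip().lower()
    if key.startswith("confident_eu_"):
        return "EU"
    if key.startswith("confident_us_"):
        return "US"
    return None

assert infer_region("confident_eu_abc123") == "EU"  # placeholder key
assert infer_region("confident_us_abc123") == "US"  # placeholder key
assert infer_region("legacy-key") is None           # caller falls back to the US base URL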
deepeval/config/settings.py
CHANGED
@@ -447,6 +447,9 @@ class Settings(BaseSettings):
     AZURE_OPENAI_API_KEY: Optional[SecretStr] = Field(
         None, description="Azure OpenAI API key."
     )
+    AZURE_OPENAI_AD_TOKEN: Optional[SecretStr] = Field(
+        None, description="Azure OpenAI Ad Token."
+    )
     AZURE_OPENAI_ENDPOINT: Optional[AnyUrl] = Field(
         None, description="Azure OpenAI endpoint URL."
     )
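Settings is a pydantic BaseSettings class, so the new field should be settable from the environment like the neighboring Azure fields; a minimal sketch under that assumption (the token value is a placeholder):

import os

# Assumption: pydantic-settings maps the AZURE_OPENAI_AD_TOKEN field to an
# environment variable of the same name, as it does for AZURE_OPENAI_API_KEY.
os.environ["AZURE_OPENAI_AD_TOKEN"] = "<placeholder-ad-token>"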
deepeval/dataset/dataset.py
CHANGED
@@ -84,9 +84,11 @@ class EvaluationDataset:
     def __init__(
         self,
         goldens: Union[List[Golden], List[ConversationalGolden]] = [],
+        confident_api_key: Optional[str] = None,
     ):
         self._alias = None
         self._id = None
+        self.confident_api_key = confident_api_key
         if len(goldens) > 0:
             self._multi_turn = (
                 True if isinstance(goldens[0], ConversationalGolden) else False
@@ -722,7 +724,7 @@ class EvaluationDataset:
                 "Unable to push empty dataset to Confident AI, there must be at least one golden in dataset."
             )

-        api = Api()
+        api = Api(api_key=self.confident_api_key)
         api_dataset = APIDataset(
             goldens=self.goldens if not self._multi_turn else None,
             conversationalGoldens=(self.goldens if self._multi_turn else None),
@@ -755,7 +757,7 @@ class EvaluationDataset:
         auto_convert_goldens_to_test_cases: bool = False,
         public: bool = False,
     ):
-        api = Api()
+        api = Api(api_key=self.confident_api_key)
         with capture_pull_dataset():
             with Progress(
                 SpinnerColumn(style="rgb(106,0,255)"),
@@ -839,7 +841,7 @@ class EvaluationDataset:
             raise ValueError(
                 f"Can't queue empty list of goldens to dataset with alias: {alias} on Confident AI."
             )
-        api = Api()
+        api = Api(api_key=self.confident_api_key)

         multi_turn = isinstance(goldens[0], ConversationalGolden)

@@ -871,7 +873,7 @@ class EvaluationDataset:
         self,
         alias: str,
     ):
-        api = Api()
+        api = Api(api_key=self.confident_api_key)
         api.send_request(
             method=HttpMethods.DELETE,
             endpoint=Endpoints.DATASET_ALIAS_ENDPOINT,
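A short usage sketch of the new constructor argument: the per-instance key is now passed to every Api(...) call in push, pull, queue, and delete, instead of relying only on the globally configured key. The alias and key below are placeholders, and pull(alias=...) is assumed to keep its existing signature:

from deepeval.dataset import EvaluationDataset

# "confident_us_placeholder" is a made-up key, not a real credential.
dataset = EvaluationDataset(confident_api_key="confident_us_placeholder")
dataset.pull(alias="my-dataset")  # Api(api_key=self.confident_api_key) under the hood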
deepeval/integrations/langchain/callback.py
CHANGED
@@ -1,3 +1,7 @@
+import logging
+import os
+import threading
+
 from typing import Any, Optional, List, Dict
 from uuid import UUID
 from time import perf_counter
@@ -20,6 +24,19 @@ from deepeval.tracing.types import (
 )
 from deepeval.telemetry import capture_tracing_integration

+# Debug logging for LangChain callbacks (enable with DEEPEVAL_DEBUG_LANGCHAIN_CALLBACKS=1)
+_DEBUG_CALLBACKS = os.environ.get(
+    "DEEPEVAL_DEBUG_LANGCHAIN_CALLBACKS", ""
+).lower() in ("1", "true", "yes")
+
+_logger = logging.getLogger(__name__)
+
+
+def _debug_log(msg: str):
+    if _DEBUG_CALLBACKS:
+        _logger.debug(f"[LangChain Callback] {msg}")
+
+
 try:
     from langchain_core.callbacks.base import BaseCallbackHandler
     from langchain_core.outputs import LLMResult
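A sketch of turning the new debug logging on. The flag is read once at module import (_DEBUG_CALLBACKS is computed at top level), so set the variable before importing the integration:

import logging
import os

os.environ["DEEPEVAL_DEBUG_LANGCHAIN_CALLBACKS"] = "1"  # "true" and "yes" also work
logging.basicConfig(level=logging.DEBUG)  # _debug_log() emits via logger.debug()

from deepeval.integrations.langchain.callback import CallbackHandler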
@@ -29,6 +46,7 @@ try:
     # contains langchain imports
     from deepeval.integrations.langchain.utils import (
         parse_prompts_to_messages,
+        convert_chat_messages_to_input,
         extract_name,
         safe_extract_model_name,
         safe_extract_token_usage,
@@ -50,6 +68,12 @@ def is_langchain_installed():


 class CallbackHandler(BaseCallbackHandler):
+    # When users create multiple CallbackHandler instances for the same logical
+    # conversation (same thread_id), we want spans to land on the same trace.
+    # Otherwise, each handler lazily creates its own trace, and multi-turn flows
+    # become multiple single-turn traces.
+    _thread_id_to_trace_uuid: Dict[str, str] = {}
+    _thread_id_lock = threading.Lock()

     def __init__(
         self,
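What the new class-level registry buys you, sketched under the assumption that your chain is invoked with LangChain's standard config dict: two handlers constructed with the same thread_id now resolve to one trace instead of two.

from deepeval.integrations.langchain.callback import CallbackHandler

turn_1 = CallbackHandler(thread_id="conversation-123")  # placeholder thread id
turn_2 = CallbackHandler(thread_id="conversation-123")

# chain.invoke(first_input, config={"callbacks": [turn_1]})   # creates the trace
# chain.invoke(second_input, config={"callbacks": [turn_2]})  # reuses it via _thread_id_to_trace_uuid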
@@ -74,13 +98,20 @@ class CallbackHandler(BaseCallbackHandler):
         self._parent_span = None

         # Stash trace metadata to apply once we know which trace we are using.
-        self._trace_init_fields: Dict[str, Any] = {
+        # _trace_init_fields is cleared after first apply to prevent re-applying
+        # on every callback within the same trace. _original_init_fields is kept
+        # permanently so we can re-apply when a new trace is created (e.g., in
+        # multi-turn scenarios where the previous trace was ended).
+        self._original_init_fields: Dict[str, Any] = {
             "name": name,
             "tags": tags,
             "metadata": metadata,
             "thread_id": thread_id,
             "user_id": user_id,
         }
+        self._trace_init_fields: Dict[str, Any] = dict(
+            self._original_init_fields
+        )

         # Map LangChain run_id -> our span uuid for parent span restoration
         self._run_id_to_span_uuid: Dict[str, str] = {}
@@ -96,6 +127,34 @@ class CallbackHandler(BaseCallbackHandler):
         This is done lazily during actual callback execution to avoid context
         corruption when the handler is constructed outside the async task/context.
         """
+        # If the user provided a thread_id, attempt to reuse an existing trace for it.
+        # This makes multi-turn tests that use multiple CallbackHandler instances behave
+        # as expected: one trace containing multiple turns/spans.
+        thread_id = None
+        fields = self._trace_init_fields or {}
+        if fields.get("thread_id"):
+            thread_id = fields["thread_id"]
+        # In case _trace_init_fields has already been cleared, fall back to trace metadata.
+        if thread_id is None and self._trace is not None:
+            thread_id = self._trace.thread_id
+
+        if thread_id:
+            with self._thread_id_lock:
+                existing_uuid = self._thread_id_to_trace_uuid.get(thread_id)
+                if existing_uuid:
+                    existing_trace = trace_manager.get_trace_by_uuid(existing_uuid)
+                    if (
+                        existing_trace
+                        and existing_trace.uuid in trace_manager.active_traces
+                    ):
+                        current_trace_context.set(existing_trace)
+                        self._trace = existing_trace
+                        self.trace_uuid = existing_trace.uuid
+                        # Lazily capture the observe parent span if present.
+                        if self._parent_span is None:
+                            self._parent_span = current_span_context.get()
+                        return existing_trace
+
         # Prefer current context trace if it is active.
         ctx_trace = current_trace_context.get()
         if ctx_trace and ctx_trace.uuid in trace_manager.active_traces:
@@ -107,6 +166,10 @@ class CallbackHandler(BaseCallbackHandler):
             current_trace_context.set(trace)
         else:
             # Otherwise, create a fresh trace now (in the right context).
+            # Restore _trace_init_fields from the original init fields so that
+            # the new trace gets the same name/tags/metadata as intended.
+            if not self._trace_init_fields and self._original_init_fields:
+                self._trace_init_fields = dict(self._original_init_fields)
             trace = trace_manager.start_new_trace()
             current_trace_context.set(trace)
             self._trace = trace
@@ -114,8 +177,18 @@ class CallbackHandler(BaseCallbackHandler):
         # Keep a copy for quick access.
         self.trace_uuid = trace.uuid

+        # Register this trace as the canonical trace for this thread_id (if provided).
+        # This allows other CallbackHandler instances created for the same thread_id
+        # to reuse the same trace instead of creating parallel traces.
+        fields = self._trace_init_fields or {}
+        tid = fields.get("thread_id") or trace.thread_id
+        if tid:
+            with self._thread_id_lock:
+                # Only set if absent to preserve the "first trace wins" behavior.
+                self._thread_id_to_trace_uuid.setdefault(tid, trace.uuid)
+
         # Apply stashed metadata once.
-        fields =
+        fields = self._trace_init_fields or {}
         if fields:
             if fields.get("name") is not None:
                 trace.name = fields["name"]
@@ -202,6 +275,9 @@ class CallbackHandler(BaseCallbackHandler):
         metadata: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> Any:
+        _debug_log(
+            f"on_chain_start: run_id={run_id}, parent_run_id={parent_run_id}, name={extract_name(serialized, **kwargs)}"
+        )
         # Create spans for all chains to establish proper parent-child hierarchy
         # This is important for LangGraph where there are nested chains
         with self._ctx(run_id=run_id, parent_run_id=parent_run_id):
@@ -232,6 +308,9 @@ class CallbackHandler(BaseCallbackHandler):
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,
     ) -> Any:
+        _debug_log(
+            f"on_chain_end: run_id={run_id}, parent_run_id={parent_run_id}"
+        )
         uuid_str = str(run_id)
         base_span = trace_manager.get_span_by_uuid(uuid_str)
         if base_span:
@@ -246,6 +325,59 @@ class CallbackHandler(BaseCallbackHandler):
                 trace.output = output
         exit_current_context(uuid_str=uuid_str)

+    def on_chat_model_start(
+        self,
+        serialized: dict[str, Any],
+        messages: list[list[Any]],  # list[list[BaseMessage]]
+        *,
+        run_id: UUID,
+        parent_run_id: Optional[UUID] = None,
+        tags: Optional[list[str]] = None,
+        metadata: Optional[dict[str, Any]] = None,
+        **kwargs: Any,
+    ) -> Any:
+        """
+        Handle chat model start callback. In LangChain v1, chat models emit
+        on_chat_model_start instead of on_llm_start. The on_llm_end callback
+        is still used for both.
+        """
+        _debug_log(
+            f"on_chat_model_start: run_id={run_id}, parent_run_id={parent_run_id}, messages_len={len(messages)}"
+        )
+
+        # Guard against double-counting if both on_llm_start and on_chat_model_start fire
+        uuid_str = str(run_id)
+        existing_span = trace_manager.get_span_by_uuid(uuid_str)
+        if existing_span is not None:
+            _debug_log(
+                f"on_chat_model_start: span already exists for run_id={run_id}, skipping"
+            )
+            return
+
+        with self._ctx(run_id=run_id, parent_run_id=parent_run_id):
+            # Convert messages to our internal format using the shared helper
+            input_messages = convert_chat_messages_to_input(messages, **kwargs)
+
+            # Safe extraction of model name (handle None metadata)
+            md = metadata or {}
+            model = safe_extract_model_name(md, **kwargs)
+
+            llm_span: LlmSpan = enter_current_context(
+                uuid_str=uuid_str,
+                span_type="llm",
+                func_name=extract_name(serialized, **kwargs),
+            )
+            # Register this run_id -> span mapping for child callbacks
+            self._run_id_to_span_uuid[str(run_id)] = uuid_str
+
+            llm_span.input = input_messages
+            llm_span.model = model
+
+            # Extract metrics and prompt from metadata if provided, but don't mutate original
+            llm_span.metrics = md.get("metrics")
+            llm_span.metric_collection = md.get("metric_collection")
+            llm_span.prompt = md.get("prompt")
+
     def on_llm_start(
         self,
         serialized: dict[str, Any],
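Both start callbacks now read metrics, metric_collection, and prompt from run metadata. A hedged sketch of how that metadata would typically arrive — LangChain forwards the config's metadata dict to callback handlers, and the collection name here is a placeholder:

from deepeval.integrations.langchain.callback import CallbackHandler

config = {
    "callbacks": [CallbackHandler()],
    # Keys picked up by on_chat_model_start/on_llm_start via md.get(...):
    "metadata": {"metric_collection": "my-collection"},
}
# llm.invoke("Hello", config=config)  # llm is your chat model instance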
@@ -257,10 +389,25 @@ class CallbackHandler(BaseCallbackHandler):
         metadata: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> Any:
+        _debug_log(
+            f"on_llm_start: run_id={run_id}, parent_run_id={parent_run_id}, prompts_len={len(prompts)}"
+        )
+
+        # Guard against double-counting if both on_llm_start and on_chat_model_start fire
+        uuid_str = str(run_id)
+        existing_span = trace_manager.get_span_by_uuid(uuid_str)
+        if existing_span is not None:
+            _debug_log(
+                f"on_llm_start: span already exists for run_id={run_id}, skipping"
+            )
+            return
+
         with self._ctx(run_id=run_id, parent_run_id=parent_run_id):
-            uuid_str = str(run_id)
             input_messages = parse_prompts_to_messages(prompts, **kwargs)
-
+
+            # Safe extraction of model name (handle None metadata)
+            md = metadata or {}
             model = safe_extract_model_name(md, **kwargs)

             llm_span: LlmSpan = enter_current_context(
                 uuid_str=uuid_str,
@@ -272,12 +419,11 @@ class CallbackHandler(BaseCallbackHandler):

             llm_span.input = input_messages
             llm_span.model = model
-
-
-
-            llm_span.
-            llm_span.
-            llm_span.prompt = prompt
+
+            # Extract metrics and prompt from metadata if provided, but don't mutate original
+            llm_span.metrics = md.get("metrics")
+            llm_span.metric_collection = md.get("metric_collection")
+            llm_span.prompt = md.get("prompt")

     def on_llm_end(
         self,
@@ -287,9 +433,20 @@ class CallbackHandler(BaseCallbackHandler):
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,  # un-logged kwargs
     ) -> Any:
+        _debug_log(
+            f"on_llm_end: run_id={run_id}, parent_run_id={parent_run_id}, response_type={type(response).__name__}"
+        )
         uuid_str = str(run_id)
         llm_span: LlmSpan = trace_manager.get_span_by_uuid(uuid_str)
         if llm_span is None:
+            _debug_log(f"on_llm_end: NO SPAN FOUND for run_id={run_id}")
+            return
+
+        # Guard against double-finalization (if both on_llm_end and on_chat_model_end fire)
+        if llm_span.end_time is not None:
+            _debug_log(
+                f"on_llm_end: span already finalized for run_id={run_id}, skipping"
+            )
             return

         with self._ctx(run_id=run_id, parent_run_id=parent_run_id):
@@ -336,7 +493,6 @@ class CallbackHandler(BaseCallbackHandler):
             )

             llm_span.model = model if model else llm_span.model
-            llm_span.input = llm_span.input
             llm_span.output = output
             llm_span.input_token_count = (
                 total_input_tokens if total_input_tokens > 0 else None
@@ -347,6 +503,121 @@ class CallbackHandler(BaseCallbackHandler):

             exit_current_context(uuid_str=uuid_str)

+    def on_chat_model_end(
+        self,
+        response: Any,
+        *,
+        run_id: UUID,
+        parent_run_id: Optional[UUID] = None,
+        **kwargs: Any,
+    ) -> Any:
+        """
+        Handle chat model end callback. This may be called instead of or
+        in addition to on_llm_end depending on the LangChain version.
+        """
+        _debug_log(
+            f"on_chat_model_end: run_id={run_id}, parent_run_id={parent_run_id}, response_type={type(response).__name__}"
+        )
+        uuid_str = str(run_id)
+        llm_span: LlmSpan = trace_manager.get_span_by_uuid(uuid_str)
+        if llm_span is None:
+            _debug_log(f"on_chat_model_end: NO SPAN FOUND for run_id={run_id}")
+            return
+
+        # Guard against double-finalization, which could happen if both on_llm_end and on_chat_model_end fire
+        if llm_span.end_time is not None:
+            _debug_log(
+                f"on_chat_model_end: span already finalized for run_id={run_id}, skipping"
+            )
+            return
+
+        with self._ctx(run_id=run_id, parent_run_id=parent_run_id):
+            output = ""
+            total_input_tokens = 0
+            total_output_tokens = 0
+            model = None
+
+            # Handle LLMResult (same as on_llm_end)
+            if isinstance(response, LLMResult):
+                for generation in response.generations:
+                    for gen in generation:
+                        if isinstance(gen, ChatGeneration):
+                            if gen.message.response_metadata and isinstance(
+                                gen.message.response_metadata, dict
+                            ):
+                                model = gen.message.response_metadata.get(
+                                    "model_name"
+                                )
+                                input_tokens, output_tokens = (
+                                    safe_extract_token_usage(
+                                        gen.message.response_metadata
+                                    )
+                                )
+                                total_input_tokens += input_tokens
+                                total_output_tokens += output_tokens
+
+                            if isinstance(gen.message, AIMessage):
+                                ai_message = gen.message
+                                tool_calls = []
+                                for tool_call in ai_message.tool_calls:
+                                    tool_calls.append(
+                                        LlmToolCall(
+                                            name=tool_call["name"],
+                                            args=tool_call["args"],
+                                            id=tool_call["id"],
+                                        )
+                                    )
+                                output = LlmOutput(
+                                    role="AI",
+                                    content=ai_message.content,
+                                    tool_calls=tool_calls,
+                                )
+
+            llm_span.model = model if model else llm_span.model
+            llm_span.output = output
+            llm_span.input_token_count = (
+                total_input_tokens if total_input_tokens > 0 else None
+            )
+            llm_span.output_token_count = (
+                total_output_tokens if total_output_tokens > 0 else None
+            )
+
+            exit_current_context(uuid_str=uuid_str)
+
+    def on_chat_model_error(
+        self,
+        error: BaseException,
+        *,
+        run_id: UUID,
+        parent_run_id: Optional[UUID] = None,
+        **kwargs: Any,
+    ) -> Any:
+        """
+        Handle chat model error callback.
+        """
+        _debug_log(
+            f"on_chat_model_error: run_id={run_id}, parent_run_id={parent_run_id}, error={error}"
+        )
+        uuid_str = str(run_id)
+        llm_span: LlmSpan = trace_manager.get_span_by_uuid(uuid_str)
+        if llm_span is None:
+            _debug_log(
+                f"on_chat_model_error: NO SPAN FOUND for run_id={run_id}"
+            )
+            return
+
+        # Guard against double-finalization
+        if llm_span.end_time is not None:
+            _debug_log(
+                f"on_chat_model_error: span already finalized for run_id={run_id}, skipping"
+            )
+            return
+
+        with self._ctx(run_id=run_id, parent_run_id=parent_run_id):
+            llm_span.status = TraceSpanStatus.ERRORED
+            llm_span.error = str(error)
+            exit_current_context(uuid_str=uuid_str)
+
     def on_llm_error(
         self,
         error: BaseException,
@@ -355,10 +626,22 @@ class CallbackHandler(BaseCallbackHandler):
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,
     ) -> Any:
+        _debug_log(
+            f"on_llm_error: run_id={run_id}, parent_run_id={parent_run_id}, error={error}"
+        )
         uuid_str = str(run_id)
         llm_span: LlmSpan = trace_manager.get_span_by_uuid(uuid_str)
         if llm_span is None:
+            _debug_log(f"on_llm_error: NO SPAN FOUND for run_id={run_id}")
             return
+
+        # Guard against double-finalization
+        if llm_span.end_time is not None:
+            _debug_log(
+                f"on_llm_error: span already finalized for run_id={run_id}, skipping"
+            )
+            return
+
         with self._ctx(run_id=run_id, parent_run_id=parent_run_id):
             llm_span.status = TraceSpanStatus.ERRORED
             llm_span.error = str(error)
@@ -396,6 +679,9 @@ class CallbackHandler(BaseCallbackHandler):
         inputs: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> Any:
+        _debug_log(
+            f"on_tool_start: run_id={run_id}, parent_run_id={parent_run_id}, name={extract_name(serialized, **kwargs)}"
+        )
         with self._ctx(run_id=run_id, parent_run_id=parent_run_id):
             uuid_str = str(run_id)

@@ -418,6 +704,9 @@ class CallbackHandler(BaseCallbackHandler):
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,  # un-logged kwargs
     ) -> Any:
+        _debug_log(
+            f"on_tool_end: run_id={run_id}, parent_run_id={parent_run_id}"
+        )
         uuid_str = str(run_id)
         tool_span: ToolSpan = trace_manager.get_span_by_uuid(uuid_str)
         if tool_span is None:
@@ -485,20 +774,23 @@ class CallbackHandler(BaseCallbackHandler):
     ) -> Any:
         with self._ctx(run_id=run_id, parent_run_id=parent_run_id):
             uuid_str = str(run_id)
+            # Safe access to metadata (handle None)
+            md = metadata or {}
             retriever_span = enter_current_context(
                 uuid_str=uuid_str,
                 span_type="retriever",
                 func_name=extract_name(serialized, **kwargs),
                 observe_kwargs={
-                    "embedder":
-                        "ls_embedding_provider", "unknown"
-                    ),
+                    "embedder": md.get("ls_embedding_provider", "unknown"),
                 },
             )
             # Register this run_id -> span mapping for child callbacks
             self._run_id_to_span_uuid[str(run_id)] = uuid_str
             retriever_span.input = query
+
+            # Extract metric_collection from metadata if provided
+            retriever_span.metric_collection = md.get("metric_collection")

     def on_retriever_end(
         self,
         output: Any,
@@ -539,4 +831,4 @@ class CallbackHandler(BaseCallbackHandler):
         with self._ctx(run_id=run_id, parent_run_id=parent_run_id):
             retriever_span.status = TraceSpanStatus.ERRORED
             retriever_span.error = str(error)
-            exit_current_context(uuid_str=uuid_str)
+            exit_current_context(uuid_str=uuid_str)