ragaai-catalyst 2.1.3__py3-none-any.whl → 2.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- ragaai_catalyst/tracers/agentic_tracing/data/data_structure.py +37 -11
- ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py +240 -81
- ragaai_catalyst/tracers/agentic_tracing/tracers/base.py +632 -114
- ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py +316 -0
- ragaai_catalyst/tracers/agentic_tracing/tracers/langgraph_tracer.py +0 -0
- ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py +229 -82
- ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py +214 -59
- ragaai_catalyst/tracers/agentic_tracing/tracers/network_tracer.py +16 -14
- ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py +147 -28
- ragaai_catalyst/tracers/agentic_tracing/tracers/user_interaction_tracer.py +88 -2
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_agentic_traces.py +9 -51
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_trace_metric.py +83 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/create_dataset_schema.py +26 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/get_user_trace_metrics.py +28 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py +45 -15
- ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json +2520 -2152
- ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py +59 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py +23 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/zip_list_of_unique_files.py +284 -15
- ragaai_catalyst/tracers/llamaindex_callback.py +5 -5
- ragaai_catalyst/tracers/tracer.py +83 -10
- ragaai_catalyst/tracers/upload_traces.py +1 -1
- ragaai_catalyst-2.1.4.dist-info/METADATA +431 -0
- {ragaai_catalyst-2.1.3.dist-info → ragaai_catalyst-2.1.4.dist-info}/RECORD +26 -20
- ragaai_catalyst-2.1.3.dist-info/METADATA +0 -43
- {ragaai_catalyst-2.1.3.dist-info → ragaai_catalyst-2.1.4.dist-info}/WHEEL +0 -0
- {ragaai_catalyst-2.1.3.dist-info → ragaai_catalyst-2.1.4.dist-info}/top_level.txt +0 -0
Diff of `ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py`:

```diff
--- a/ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py
+++ b/ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py
@@ -22,8 +22,17 @@ from ..utils.llm_utils import (
     extract_llm_output,
 )
 from ..utils.trace_utils import load_model_costs
-from ..utils.unique_decorator import generate_unique_hash_simple
+from ..utils.unique_decorator import generate_unique_hash_simple
 from ..utils.file_name_tracker import TrackName
+from ..utils.span_attributes import SpanAttributes
+import logging
+
+logger = logging.getLogger(__name__)
+logging_level = (
+    logger.setLevel(logging.DEBUG)
+    if os.getenv("DEBUG")
+    else logger.setLevel(logging.INFO)
+)
 
 
 class LLMTracerMixin:
```
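The new module-level logger is configured through a conditional expression evaluated for its `setLevel` side effect (whichever branch runs returns `None`, so `logging_level` itself is always `None`). A minimal sketch of the equivalent, more conventional form, assuming the same `DEBUG` environment toggle:

```python
import logging
import os

logger = logging.getLogger(__name__)
# Any non-empty DEBUG environment variable switches the tracer logs to DEBUG;
# otherwise they stay at INFO. Same effect as the conditional in the diff above.
logger.setLevel(logging.DEBUG if os.getenv("DEBUG") else logging.INFO)
```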
```diff
@@ -36,29 +45,36 @@ class LLMTracerMixin:
         except Exception as e:
             self.model_costs = {
                 # TODO: Default cost handling needs to be improved
-                "default": {
-                    "input_cost_per_token": 0.0,
-                    "output_cost_per_token": 0.0
-                }
+                "default": {"input_cost_per_token": 0.0, "output_cost_per_token": 0.0}
             }
         self.MAX_PARAMETERS_TO_DISPLAY = 10
-        self.current_llm_call_name = contextvars.ContextVar(
-
+        self.current_llm_call_name = contextvars.ContextVar(
+            "llm_call_name", default=None
+        )
+        self.component_network_calls = {}
         self.component_user_interaction = {}
-        self.current_component_id = None
+        self.current_component_id = None
         self.total_tokens = 0
         self.total_cost = 0.0
         self.llm_data = {}
 
+        # Add auto_instrument options
+        self.auto_instrument_llm = False
+        self.auto_instrument_user_interaction = False
+        self.auto_instrument_network = False
+
     def instrument_llm_calls(self):
+        """Enable LLM instrumentation"""
+        self.auto_instrument_llm = True
+
         # Handle modules that are already imported
         import sys
-
+
         if "vertexai" in sys.modules:
             self.patch_vertex_ai_methods(sys.modules["vertexai"])
         if "vertexai.generative_models" in sys.modules:
             self.patch_vertex_ai_methods(sys.modules["vertexai.generative_models"])
-
+
         if "openai" in sys.modules:
             self.patch_openai_methods(sys.modules["openai"])
         if "litellm" in sys.modules:
```
```diff
@@ -68,21 +84,39 @@ class LLMTracerMixin:
         if "google.generativeai" in sys.modules:
             self.patch_google_genai_methods(sys.modules["google.generativeai"])
         if "langchain_google_vertexai" in sys.modules:
-            self.patch_langchain_google_methods(
+            self.patch_langchain_google_methods(
+                sys.modules["langchain_google_vertexai"]
+            )
         if "langchain_google_genai" in sys.modules:
             self.patch_langchain_google_methods(sys.modules["langchain_google_genai"])
 
         # Register hooks for future imports
         wrapt.register_post_import_hook(self.patch_vertex_ai_methods, "vertexai")
-        wrapt.register_post_import_hook(
+        wrapt.register_post_import_hook(
+            self.patch_vertex_ai_methods, "vertexai.generative_models"
+        )
         wrapt.register_post_import_hook(self.patch_openai_methods, "openai")
         wrapt.register_post_import_hook(self.patch_litellm_methods, "litellm")
         wrapt.register_post_import_hook(self.patch_anthropic_methods, "anthropic")
-        wrapt.register_post_import_hook(
-
+        wrapt.register_post_import_hook(
+            self.patch_google_genai_methods, "google.generativeai"
+        )
+
         # Add hooks for LangChain integrations
-        wrapt.register_post_import_hook(
-
+        wrapt.register_post_import_hook(
+            self.patch_langchain_google_methods, "langchain_google_vertexai"
+        )
+        wrapt.register_post_import_hook(
+            self.patch_langchain_google_methods, "langchain_google_genai"
+        )
+
+    def instrument_user_interaction_calls(self):
+        """Enable user interaction instrumentation for LLM calls"""
+        self.auto_instrument_user_interaction = True
+
+    def instrument_network_calls(self):
+        """Enable network instrumentation for LLM calls"""
+        self.auto_instrument_network = True
 
     def patch_openai_methods(self, module):
         try:
```
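The tracer relies on `wrapt.register_post_import_hook` so SDKs imported after instrumentation still get patched. A minimal standalone sketch of that mechanism (the hook name and the `print` are illustrative):

```python
import wrapt

def on_openai_import(module):
    # Runs immediately if "openai" is already imported; otherwise fires
    # once, right after the first successful `import openai`.
    print(f"patching {module.__name__}")

wrapt.register_post_import_hook(on_openai_import, "openai")

import openai  # triggers the hook exactly once
```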
```diff
@@ -106,20 +140,20 @@ class LLMTracerMixin:
         if hasattr(module, "GenerativeModel"):
             model_class = getattr(module, "GenerativeModel")
             self.wrap_genai_model_methods(model_class)
-
+
         # Patch LangChain integration
         if hasattr(module, "ChatGoogleGenerativeAI"):
             chat_class = getattr(module, "ChatGoogleGenerativeAI")
             # Wrap invoke method to capture messages
             original_invoke = chat_class.invoke
-
+
             def patched_invoke(self, messages, *args, **kwargs):
                 # Store messages in the instance for later use
                 self._last_messages = messages
                 return original_invoke(self, messages, *args, **kwargs)
-
+
             chat_class.invoke = patched_invoke
-
+
             # LangChain v0.2+ uses invoke/ainvoke
             self.wrap_method(chat_class, "_generate")
             if hasattr(chat_class, "_agenerate"):
```
```diff
@@ -137,7 +171,7 @@ class LLMTracerMixin:
         if hasattr(gen_models, "GenerativeModel"):
             model_class = getattr(gen_models, "GenerativeModel")
             self.wrap_vertex_model_methods(model_class)
-
+
         # Also patch the class directly if available
         if hasattr(module, "GenerativeModel"):
             model_class = getattr(module, "GenerativeModel")
```
```diff
@@ -186,23 +220,31 @@ class LLMTracerMixin:
         def patched_init(client_self, *args, **kwargs):
             original_init(client_self, *args, **kwargs)
             # Check if this is AsyncOpenAI or OpenAI
-            is_async =
-
+            is_async = "AsyncOpenAI" in client_class.__name__
+
             if is_async:
                 # Patch async methods for AsyncOpenAI
                 if hasattr(client_self.chat.completions, "create"):
                     original_create = client_self.chat.completions.create
+
                     @functools.wraps(original_create)
                     async def wrapped_create(*args, **kwargs):
-                        return await self.trace_llm_call(
+                        return await self.trace_llm_call(
+                            original_create, *args, **kwargs
+                        )
+
                     client_self.chat.completions.create = wrapped_create
             else:
                 # Patch sync methods for OpenAI
                 if hasattr(client_self.chat.completions, "create"):
                     original_create = client_self.chat.completions.create
+
                     @functools.wraps(original_create)
                     def wrapped_create(*args, **kwargs):
-                        return self.trace_llm_call_sync(
+                        return self.trace_llm_call_sync(
+                            original_create, *args, **kwargs
+                        )
+
                     client_self.chat.completions.create = wrapped_create
 
         setattr(client_class, "__init__", patched_init)
```
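The OpenAI patch wraps the client's `__init__` rather than the class methods, so `chat.completions.create` is rebound per instance after the real constructor runs. A self-contained sketch of the same pattern with stand-in classes (`FakeClient`, `FakeCompletions`, and the `tracer` callable are hypothetical):

```python
import functools

class FakeCompletions:
    def create(self, **kwargs):
        return {"echo": kwargs}

class FakeClient:
    def __init__(self):
        self.completions = FakeCompletions()

def instrument(client_cls, tracer):
    original_init = client_cls.__init__

    def patched_init(client_self, *args, **kwargs):
        original_init(client_self, *args, **kwargs)
        original_create = client_self.completions.create

        @functools.wraps(original_create)
        def wrapped_create(*args, **kwargs):
            # Route every call through the tracer with the bound original.
            return tracer(original_create, *args, **kwargs)

        # Rebinding on the instance leaves other clients untouched.
        client_self.completions.create = wrapped_create

    client_cls.__init__ = patched_init

instrument(FakeClient, lambda f, *a, **kw: f(*a, **kw))
assert FakeClient().completions.create(model="x") == {"echo": {"model": "x"}}
```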
```diff
@@ -240,41 +282,64 @@ class LLMTracerMixin:
         if isinstance(obj, type):
             # Store the original class method
             original_method = getattr(obj, method_name)
-
+
             @wrapt.decorator
             def wrapper(wrapped, instance, args, kwargs):
                 if asyncio.iscoroutinefunction(wrapped):
                     return self.trace_llm_call(wrapped, *args, **kwargs)
                 return self.trace_llm_call_sync(wrapped, *args, **kwargs)
-
+
             # Wrap the class method
             wrapped_method = wrapper(original_method)
             setattr(obj, method_name, wrapped_method)
             self.patches.append((obj, method_name, original_method))
-
+
         else:
             # For instance methods
             original_method = getattr(obj, method_name)
-
+
             @wrapt.decorator
             def wrapper(wrapped, instance, args, kwargs):
                 if asyncio.iscoroutinefunction(wrapped):
                     return self.trace_llm_call(wrapped, *args, **kwargs)
                 return self.trace_llm_call_sync(wrapped, *args, **kwargs)
-
+
             wrapped_method = wrapper(original_method)
             setattr(obj, method_name, wrapped_method)
             self.patches.append((obj, method_name, original_method))
 
-    def create_llm_component(
+    def create_llm_component(
+        self,
+        component_id,
+        hash_id,
+        name,
+        llm_type,
+        version,
+        memory_used,
+        start_time,
+        input_data,
+        output_data,
+        cost={},
+        usage={},
+        error=None,
+        parameters={},
+    ):
         # Update total metrics
         self.total_tokens += usage.get("total_tokens", 0)
         self.total_cost += cost.get("total_cost", 0)
 
+        network_calls = []
+        if self.auto_instrument_network:
+            network_calls = self.component_network_calls.get(component_id, [])
+
+        interactions = []
+        if self.auto_instrument_user_interaction:
+            interactions = self.component_user_interaction.get(component_id, [])
+
         parameters_to_display = {}
-        if
-        parameters_obj = parameters[
-        if hasattr(parameters_obj,
+        if "run_manager" in parameters:
+            parameters_obj = parameters["run_manager"]
+            if hasattr(parameters_obj, "metadata"):
                 metadata = parameters_obj.metadata
                 # parameters = {'metadata': metadata}
                 parameters_to_display.update(metadata)
```
```diff
@@ -283,9 +348,29 @@ class LLMTracerMixin:
         for key, value in parameters.items():
             if isinstance(value, (str, int, float, bool)):
                 parameters_to_display[key] = value
-
+
         # Limit the number of parameters to display
-        parameters_to_display = dict(
+        parameters_to_display = dict(
+            list(parameters_to_display.items())[: self.MAX_PARAMETERS_TO_DISPLAY]
+        )
+
+        # Get tags, metrics
+        # tags
+        tags = []
+        if name in self.span_attributes_dict:
+            tags = self.span_attributes_dict[name].tags or []
+
+        # metrics
+        metrics = []
+        if name in self.span_attributes_dict:
+            raw_metrics = self.span_attributes_dict[name].metrics or []
+            for metric in raw_metrics:
+                base_metric_name = metric["name"]
+                counter = sum(1 for x in self.visited_metrics if x.startswith(base_metric_name))
+                metric_name = f'{base_metric_name}_{counter}' if counter > 0 else base_metric_name
+                self.visited_metrics.append(metric_name)
+                metric["name"] = metric_name
+                metrics.append(metric)
 
         component = {
             "id": component_id,
```
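Duplicate metric names are disambiguated with a numeric suffix before being attached to the component. A minimal sketch of that renaming logic in isolation (note that `startswith` also counts metrics that merely share a prefix, so a metric named `accuracy_strict` would bump the counter for `accuracy`):

```python
visited_metrics = []

def unique_metric_name(base_name):
    # Count earlier metrics whose recorded name starts with this base name,
    # then suffix the new one so names stay unique within the trace.
    counter = sum(1 for x in visited_metrics if x.startswith(base_name))
    name = f"{base_name}_{counter}" if counter > 0 else base_name
    visited_metrics.append(name)
    return name

assert unique_metric_name("accuracy") == "accuracy"
assert unique_metric_name("accuracy") == "accuracy_1"
assert unique_metric_name("accuracy") == "accuracy_2"
```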
```diff
@@ -303,23 +388,30 @@ class LLMTracerMixin:
                 "memory_used": memory_used,
                 "cost": cost,
                 "tokens": usage,
-
+                "tags": tags,
+                **parameters_to_display,
             },
             "extra_info": parameters,
             "data": {
-                "input":
+                "input": (
+                    input_data["args"] if hasattr(input_data, "args") else input_data
+                ),
                 "output": output_data.output_response if output_data else None,
-                "memory_used": memory_used
+                "memory_used": memory_used,
             },
-            "
-            "
+            "metrics": metrics,
+            "network_calls": network_calls,
+            "interactions": interactions,
         }
 
-        if self.gt:
+        if self.gt:
             component["data"]["gt"] = self.gt
 
+        # Reset the SpanAttributes context variable
+        self.span_attributes_dict[name] = SpanAttributes(name)
+
         return component
-
+
     def start_component(self, component_id):
         """Start tracking network calls for a component"""
         self.component_network_calls[component_id] = []
```
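Tags and metrics come from `self.span_attributes_dict`, which is populated by the decorator (see `trace_llm` below) and reset after every component so attributes never leak across calls. The container itself lives in the new `utils/span_attributes.py` (+59 lines in this release); a rough sketch of the shape this code relies on (field and method names inferred from usage here, not taken from the actual file):

```python
# Hypothetical outline of SpanAttributes, inferred from how llm_tracer.py uses it.
class SpanAttributes:
    def __init__(self, name):
        self.name = name
        self.tags = []      # read via span_attributes_dict[name].tags
        self.metrics = []   # read via span_attributes_dict[name].metrics

    def add_tags(self, tags):
        self.tags.extend(tags)

    def add_metrics(self, name, score, reasoning="", cost=None, latency=None,
                    metadata=None, config=None):
        self.metrics.append({"name": name, "score": score, "reasoning": reasoning,
                             "cost": cost, "latency": latency,
                             "metadata": metadata or {}, "config": config or {}})
```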
```diff
@@ -329,16 +421,18 @@ class LLMTracerMixin:
         """Stop tracking network calls for a component"""
         self.current_component_id = None
 
-
     async def trace_llm_call(self, original_func, *args, **kwargs):
         """Trace an LLM API call"""
         if not self.is_active:
             return await original_func(*args, **kwargs)
 
+        if not self.auto_instrument_llm:
+            return await original_func(*args, **kwargs)
+
         start_time = datetime.now().astimezone()
         start_memory = psutil.Process().memory_info().rss
         component_id = str(uuid.uuid4())
-        hash_id = generate_unique_hash_simple(original_func)
+        hash_id = generate_unique_hash_simple(original_func)
 
         # Start tracking network calls for this component
         self.start_component(component_id)
```
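With the new flags, tracing is opt-in per concern: `trace_llm_call` returns early unless `instrument_llm_calls()` has been called, and network calls and user interactions are only attached when their toggles are on. A sketch of selective setup, assuming `tracer` is an object that mixes in `LLMTracerMixin`:

```python
tracer.instrument_llm_calls()               # patch LLM SDKs and record model calls
tracer.instrument_network_calls()           # also attach per-component network calls
tracer.instrument_user_interaction_calls()  # also attach captured user interactions
```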
```diff
@@ -364,25 +458,26 @@ class LLMTracerMixin:
             name = self.current_llm_call_name.get()
             if name is None:
                 name = original_func.__name__
-
+
             # Create LLM component
             llm_component = self.create_llm_component(
                 component_id=component_id,
                 hash_id=hash_id,
                 name=name,
                 llm_type=model_name,
-                version=
+                version=None,
                 memory_used=memory_used,
                 start_time=start_time,
                 input_data=input_data,
                 output_data=extract_llm_output(result),
                 cost=cost,
                 usage=token_usage,
-                parameters=parameters
+                parameters=parameters,
             )
-
+
             # self.add_component(llm_component)
             self.llm_data = llm_component
+
             return result
 
         except Exception as e:
```
```diff
@@ -390,30 +485,31 @@ class LLMTracerMixin:
                 "code": 500,
                 "type": type(e).__name__,
                 "message": str(e),
-                "details": {}
+                "details": {},
             }
-
+
             # End tracking network calls for this component
             self.end_component(component_id)
 
             name = self.current_llm_call_name.get()
             if name is None:
                 name = original_func.__name__
-
+
             llm_component = self.create_llm_component(
                 component_id=component_id,
                 hash_id=hash_id,
                 name=name,
                 llm_type="unknown",
-                version=
+                version=None,
                 memory_used=0,
                 start_time=start_time,
                 input_data=extract_input_data(args, kwargs, None),
                 output_data=None,
-                error=error_component
+                error=error_component,
             )
-
+
             self.add_component(llm_component)
+
             raise
 
     def trace_llm_call_sync(self, original_func, *args, **kwargs):
```
```diff
@@ -423,6 +519,9 @@ class LLMTracerMixin:
                 return asyncio.run(original_func(*args, **kwargs))
             return original_func(*args, **kwargs)
 
+        if not self.auto_instrument_llm:
+            return original_func(*args, **kwargs)
+
         start_time = datetime.now().astimezone()
         component_id = str(uuid.uuid4())
         hash_id = generate_unique_hash_simple(original_func)
```
```diff
@@ -456,24 +555,25 @@ class LLMTracerMixin:
             name = self.current_llm_call_name.get()
             if name is None:
                 name = original_func.__name__
-
+
             # Create LLM component
             llm_component = self.create_llm_component(
                 component_id=component_id,
                 hash_id=hash_id,
                 name=name,
                 llm_type=model_name,
-                version=
+                version=None,
                 memory_used=memory_used,
                 start_time=start_time,
                 input_data=input_data,
                 output_data=extract_llm_output(result),
                 cost=cost,
                 usage=token_usage,
-                parameters=parameters
+                parameters=parameters,
             )
             self.llm_data = llm_component
             self.add_component(llm_component)
+
             return result
 
         except Exception as e:
```
```diff
@@ -481,9 +581,9 @@ class LLMTracerMixin:
                 "code": 500,
                 "type": type(e).__name__,
                 "message": str(e),
-                "details": {}
+                "details": {},
             }
-
+
             # End tracking network calls for this component
             self.end_component(component_id)
 
```
```diff
@@ -493,41 +593,79 @@ class LLMTracerMixin:
 
             end_memory = psutil.Process().memory_info().rss
             memory_used = max(0, end_memory - start_memory)
-
+
             llm_component = self.create_llm_component(
                 component_id=component_id,
                 hash_id=hash_id,
                 name=name,
                 llm_type="unknown",
-                version=
+                version=None,
                 memory_used=memory_used,
                 start_time=start_time,
                 input_data=extract_input_data(args, kwargs, None),
                 output_data=None,
-                error=error_component
+                error=error_component,
             )
             self.llm_data = llm_component
             self.add_component(llm_component, is_error=True)
+
             raise
 
-    def trace_llm(
+    def trace_llm(
+        self,
+        name: str = None,
+        tags: List[str] = [],
+        metadata: Dict[str, Any] = {},
+        metrics: List[Dict[str, Any]] = [],
+        feedback: Optional[Any] = None,
+    ):
+        if name not in self.span_attributes_dict:
+            self.span_attributes_dict[name] = SpanAttributes(name)
+        if tags:
+            self.span(name).add_tags(tags)
+        if metadata:
+            self.span(name).add_metadata(metadata)
+        if metrics:
+            if isinstance(metrics, dict):
+                metrics = [metrics]
+            try:
+                for metric in metrics:
+                    self.span(name).add_metrics(
+                        name=metric["name"],
+                        score=metric["score"],
+                        reasoning=metric.get("reasoning", ""),
+                        cost=metric.get("cost", None),
+                        latency=metric.get("latency", None),
+                        metadata=metric.get("metadata", {}),
+                        config=metric.get("config", {}),
+                    )
+            except ValueError as e:
+                logger.error(f"Validation Error: {e}")
+            except Exception as e:
+                logger.error(f"Error adding metric: {e}")
+
+        if feedback:
+            self.span(name).add_feedback(feedback)
+
+        self.current_llm_call_name.set(name)
+
         def decorator(func):
             @self.file_tracker.trace_decorator
             @functools.wraps(func)
             async def async_wrapper(*args, **kwargs):
-                self.gt = kwargs.get(
+                self.gt = kwargs.get("gt", None) if kwargs else None
                 self.current_llm_call_name.set(name)
                 if not self.is_active:
                     return await func(*args, **kwargs)
-
-                hash_id = generate_unique_hash_simple(func)
+
+                hash_id = generate_unique_hash_simple(func)
                 component_id = str(uuid.uuid4())
                 parent_agent_id = self.current_agent_id.get()
                 self.start_component(component_id)
-
+
                 error_info = None
                 result = None
-
+
                 try:
                     result = await func(*args, **kwargs)
                     return result
```
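The decorator now accepts span attributes up front instead of just a name. A hypothetical usage sketch, assuming `tracer` is a tracer instance from this package (the function and metric values are illustrative; the metric dict fields mirror the `add_metrics` call above):

```python
tracer = ...  # an object mixing in LLMTracerMixin

@tracer.trace_llm(
    name="summarize",
    tags=["prod", "gpt-4o"],
    metadata={"team": "search"},
    metrics=[{"name": "relevance", "score": 0.92, "reasoning": "matches query"}],
)
async def summarize(prompt, gt=None):
    # `gt` (ground truth) is read out of kwargs by the wrapper when present.
    ...
```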
```diff
@@ -537,7 +675,7 @@ class LLMTracerMixin:
                             "type": type(e).__name__,
                             "message": str(e),
                             "traceback": traceback.format_exc(),
-                            "timestamp": datetime.now().isoformat()
+                            "timestamp": datetime.now().astimezone().isoformat(),
                         }
                     }
                     raise
```
```diff
@@ -545,41 +683,45 @@ class LLMTracerMixin:
 
                 llm_component = self.llm_data
                 if (name is not None) or (name != ""):
-                    llm_component[
+                    llm_component["name"] = name
 
                 if self.gt:
                     llm_component["data"]["gt"] = self.gt
 
                 if error_info:
                     llm_component["error"] = error_info["error"]
-
+
                 if parent_agent_id:
                     children = self.agent_children.get()
                     children.append(llm_component)
                     self.agent_children.set(children)
                 else:
                     self.add_component(llm_component)
-
+
                 self.end_component(component_id)
+                llm_component["interactions"] = self.component_user_interaction.get(
+                    component_id, []
+                )
+                self.add_component(llm_component)
 
             @self.file_tracker.trace_decorator
             @functools.wraps(func)
             def sync_wrapper(*args, **kwargs):
-                self.gt = kwargs.get(
+                self.gt = kwargs.get("gt", None) if kwargs else None
                 self.current_llm_call_name.set(name)
                 if not self.is_active:
                     return func(*args, **kwargs)
-
+
                 hash_id = generate_unique_hash_simple(func)
 
                 component_id = str(uuid.uuid4())
                 parent_agent_id = self.current_agent_id.get()
                 self.start_component(component_id)
-
-                start_time = datetime.now()
+
+                start_time = datetime.now().astimezone().isoformat()
                 error_info = None
                 result = None
-
+
                 try:
                     result = func(*args, **kwargs)
                     return result
```
```diff
@@ -589,7 +731,7 @@ class LLMTracerMixin:
                             "type": type(e).__name__,
                             "message": str(e),
                             "traceback": traceback.format_exc(),
-                            "timestamp": datetime.now().isoformat()
+                            "timestamp": datetime.now().astimezone().isoformat(),
                         }
                     }
                     raise
```
```diff
@@ -597,21 +739,26 @@ class LLMTracerMixin:
 
                 llm_component = self.llm_data
                 if (name is not None) or (name != ""):
-                    llm_component[
+                    llm_component["name"] = name
 
                 if error_info:
                     llm_component["error"] = error_info["error"]
-
+
                 if parent_agent_id:
                     children = self.agent_children.get()
                     children.append(llm_component)
                     self.agent_children.set(children)
                 else:
                     self.add_component(llm_component)
-
+
                 self.end_component(component_id)
+                llm_component["interactions"] = self.component_user_interaction.get(
+                    component_id, []
+                )
+                self.add_component(llm_component)
 
             return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
+
         return decorator
 
     def unpatch_llm_calls(self):
```
|