ragaai-catalyst 2.1.4b0__py3-none-any.whl → 2.1.4b2__py3-none-any.whl
This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- ragaai_catalyst/tracers/agentic_tracing/data/data_structure.py +36 -10
- ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py +226 -76
- ragaai_catalyst/tracers/agentic_tracing/tracers/base.py +568 -107
- ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py +325 -0
- ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py +218 -81
- ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py +210 -58
- ragaai_catalyst/tracers/agentic_tracing/tracers/network_tracer.py +2 -0
- ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py +137 -28
- ragaai_catalyst/tracers/agentic_tracing/tracers/user_interaction_tracer.py +86 -0
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_agentic_traces.py +9 -51
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_trace_metric.py +83 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/create_dataset_schema.py +26 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/get_user_trace_metrics.py +28 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py +45 -15
- ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json +2476 -2122
- ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py +59 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py +23 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/zip_list_of_unique_files.py +284 -15
- ragaai_catalyst/tracers/tracer.py +77 -8
- {ragaai_catalyst-2.1.4b0.dist-info → ragaai_catalyst-2.1.4b2.dist-info}/METADATA +2 -1
- {ragaai_catalyst-2.1.4b0.dist-info → ragaai_catalyst-2.1.4b2.dist-info}/RECORD +23 -18
- {ragaai_catalyst-2.1.4b0.dist-info → ragaai_catalyst-2.1.4b2.dist-info}/WHEEL +0 -0
- {ragaai_catalyst-2.1.4b0.dist-info → ragaai_catalyst-2.1.4b2.dist-info}/top_level.txt +0 -0
ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py
@@ -22,8 +22,17 @@ from ..utils.llm_utils import (
     extract_llm_output,
 )
 from ..utils.trace_utils import load_model_costs
-from ..utils.unique_decorator import generate_unique_hash_simple
+from ..utils.unique_decorator import generate_unique_hash_simple
 from ..utils.file_name_tracker import TrackName
+from ..utils.span_attributes import SpanAttributes
+import logging
+
+logger = logging.getLogger(__name__)
+logging_level = (
+    logger.setLevel(logging.DEBUG)
+    if os.getenv("DEBUG")
+    else logger.setLevel(logging.INFO)
+)
 
 
 class LLMTracerMixin:
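
Note on the new logging block: `Logger.setLevel()` returns None, so the `logging_level` name bound above is always None; the block works purely through the side effect of setting the level. A minimal sketch of an equivalent, more conventional formulation (same behavior, no throwaway binding):

import logging
import os

logger = logging.getLogger(__name__)
# setLevel() mutates the logger in place and returns None,
# so call it for its side effect rather than binding the result.
logger.setLevel(logging.DEBUG if os.getenv("DEBUG") else logging.INFO)
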
@@ -36,29 +45,36 @@ class LLMTracerMixin:
         except Exception as e:
             self.model_costs = {
                 # TODO: Default cost handling needs to be improved
-                "default": {
-                    "input_cost_per_token": 0.0,
-                    "output_cost_per_token": 0.0
-                }
+                "default": {"input_cost_per_token": 0.0, "output_cost_per_token": 0.0}
             }
         self.MAX_PARAMETERS_TO_DISPLAY = 10
-        self.current_llm_call_name = contextvars.ContextVar(
-
+        self.current_llm_call_name = contextvars.ContextVar(
+            "llm_call_name", default=None
+        )
+        self.component_network_calls = {}
         self.component_user_interaction = {}
-        self.current_component_id = None
+        self.current_component_id = None
         self.total_tokens = 0
         self.total_cost = 0.0
         self.llm_data = {}
 
+        # Add auto_instrument options
+        self.auto_instrument_llm = False
+        self.auto_instrument_user_interaction = False
+        self.auto_instrument_network = False
+
     def instrument_llm_calls(self):
+        """Enable LLM instrumentation"""
+        self.auto_instrument_llm = True
+
         # Handle modules that are already imported
         import sys
-
+
         if "vertexai" in sys.modules:
             self.patch_vertex_ai_methods(sys.modules["vertexai"])
         if "vertexai.generative_models" in sys.modules:
             self.patch_vertex_ai_methods(sys.modules["vertexai.generative_models"])
-
+
         if "openai" in sys.modules:
             self.patch_openai_methods(sys.modules["openai"])
         if "litellm" in sys.modules:
@@ -68,21 +84,39 @@ class LLMTracerMixin:
         if "google.generativeai" in sys.modules:
             self.patch_google_genai_methods(sys.modules["google.generativeai"])
         if "langchain_google_vertexai" in sys.modules:
-            self.patch_langchain_google_methods(
+            self.patch_langchain_google_methods(
+                sys.modules["langchain_google_vertexai"]
+            )
         if "langchain_google_genai" in sys.modules:
             self.patch_langchain_google_methods(sys.modules["langchain_google_genai"])
 
         # Register hooks for future imports
         wrapt.register_post_import_hook(self.patch_vertex_ai_methods, "vertexai")
-        wrapt.register_post_import_hook(
+        wrapt.register_post_import_hook(
+            self.patch_vertex_ai_methods, "vertexai.generative_models"
+        )
         wrapt.register_post_import_hook(self.patch_openai_methods, "openai")
         wrapt.register_post_import_hook(self.patch_litellm_methods, "litellm")
         wrapt.register_post_import_hook(self.patch_anthropic_methods, "anthropic")
-        wrapt.register_post_import_hook(
-
+        wrapt.register_post_import_hook(
+            self.patch_google_genai_methods, "google.generativeai"
+        )
+
         # Add hooks for LangChain integrations
-        wrapt.register_post_import_hook(
-
+        wrapt.register_post_import_hook(
+            self.patch_langchain_google_methods, "langchain_google_vertexai"
+        )
+        wrapt.register_post_import_hook(
+            self.patch_langchain_google_methods, "langchain_google_genai"
+        )
+
+    def instrument_user_interaction_calls(self):
+        """Enable user interaction instrumentation for LLM calls"""
+        self.auto_instrument_user_interaction = True
+
+    def instrument_network_calls(self):
+        """Enable network instrumentation for LLM calls"""
+        self.auto_instrument_network = True
 
     def patch_openai_methods(self, module):
         try:
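
With the flags added in `__init__` and the two new `instrument_*` methods above, each instrumentation category is now opt-in. A hedged usage sketch, assuming this mixin is composed into the package's main tracer object (that wiring is outside this hunk):

# Flip the opt-in flags; LLM SDK patching itself happens via the
# import hooks registered in instrument_llm_calls().
tracer.instrument_llm_calls()               # OpenAI, LiteLLM, Anthropic, VertexAI, Gemini, LangChain
tracer.instrument_user_interaction_calls()  # attach captured user interactions to LLM spans
tracer.instrument_network_calls()           # attach captured network calls to LLM spans
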
@@ -106,20 +140,20 @@ class LLMTracerMixin:
         if hasattr(module, "GenerativeModel"):
             model_class = getattr(module, "GenerativeModel")
             self.wrap_genai_model_methods(model_class)
-
+
         # Patch LangChain integration
         if hasattr(module, "ChatGoogleGenerativeAI"):
             chat_class = getattr(module, "ChatGoogleGenerativeAI")
             # Wrap invoke method to capture messages
             original_invoke = chat_class.invoke
-
+
             def patched_invoke(self, messages, *args, **kwargs):
                 # Store messages in the instance for later use
                 self._last_messages = messages
                 return original_invoke(self, messages, *args, **kwargs)
-
+
             chat_class.invoke = patched_invoke
-
+
             # LangChain v0.2+ uses invoke/ainvoke
             self.wrap_method(chat_class, "_generate")
             if hasattr(chat_class, "_agenerate"):
|
|
137
171
|
if hasattr(gen_models, "GenerativeModel"):
|
138
172
|
model_class = getattr(gen_models, "GenerativeModel")
|
139
173
|
self.wrap_vertex_model_methods(model_class)
|
140
|
-
|
174
|
+
|
141
175
|
# Also patch the class directly if available
|
142
176
|
if hasattr(module, "GenerativeModel"):
|
143
177
|
model_class = getattr(module, "GenerativeModel")
|
@@ -186,23 +220,31 @@ class LLMTracerMixin:
|
|
186
220
|
def patched_init(client_self, *args, **kwargs):
|
187
221
|
original_init(client_self, *args, **kwargs)
|
188
222
|
# Check if this is AsyncOpenAI or OpenAI
|
189
|
-
is_async =
|
190
|
-
|
223
|
+
is_async = "AsyncOpenAI" in client_class.__name__
|
224
|
+
|
191
225
|
if is_async:
|
192
226
|
# Patch async methods for AsyncOpenAI
|
193
227
|
if hasattr(client_self.chat.completions, "create"):
|
194
228
|
original_create = client_self.chat.completions.create
|
229
|
+
|
195
230
|
@functools.wraps(original_create)
|
196
231
|
async def wrapped_create(*args, **kwargs):
|
197
|
-
return await self.trace_llm_call(
|
232
|
+
return await self.trace_llm_call(
|
233
|
+
original_create, *args, **kwargs
|
234
|
+
)
|
235
|
+
|
198
236
|
client_self.chat.completions.create = wrapped_create
|
199
237
|
else:
|
200
238
|
# Patch sync methods for OpenAI
|
201
239
|
if hasattr(client_self.chat.completions, "create"):
|
202
240
|
original_create = client_self.chat.completions.create
|
241
|
+
|
203
242
|
@functools.wraps(original_create)
|
204
243
|
def wrapped_create(*args, **kwargs):
|
205
|
-
return self.trace_llm_call_sync(
|
244
|
+
return self.trace_llm_call_sync(
|
245
|
+
original_create, *args, **kwargs
|
246
|
+
)
|
247
|
+
|
206
248
|
client_self.chat.completions.create = wrapped_create
|
207
249
|
|
208
250
|
setattr(client_class, "__init__", patched_init)
|
@@ -240,41 +282,64 @@ class LLMTracerMixin:
         if isinstance(obj, type):
             # Store the original class method
             original_method = getattr(obj, method_name)
-
+
             @wrapt.decorator
             def wrapper(wrapped, instance, args, kwargs):
                 if asyncio.iscoroutinefunction(wrapped):
                     return self.trace_llm_call(wrapped, *args, **kwargs)
                 return self.trace_llm_call_sync(wrapped, *args, **kwargs)
-
+
             # Wrap the class method
             wrapped_method = wrapper(original_method)
             setattr(obj, method_name, wrapped_method)
             self.patches.append((obj, method_name, original_method))
-
+
         else:
             # For instance methods
             original_method = getattr(obj, method_name)
-
+
             @wrapt.decorator
             def wrapper(wrapped, instance, args, kwargs):
                 if asyncio.iscoroutinefunction(wrapped):
                     return self.trace_llm_call(wrapped, *args, **kwargs)
                 return self.trace_llm_call_sync(wrapped, *args, **kwargs)
-
+
             wrapped_method = wrapper(original_method)
             setattr(obj, method_name, wrapped_method)
             self.patches.append((obj, method_name, original_method))
 
-    def create_llm_component(
+    def create_llm_component(
+        self,
+        component_id,
+        hash_id,
+        name,
+        llm_type,
+        version,
+        memory_used,
+        start_time,
+        input_data,
+        output_data,
+        cost={},
+        usage={},
+        error=None,
+        parameters={},
+    ):
         # Update total metrics
         self.total_tokens += usage.get("total_tokens", 0)
         self.total_cost += cost.get("total_cost", 0)
 
+        network_calls = []
+        if self.auto_instrument_network:
+            network_calls = self.component_network_calls.get(component_id, [])
+
+        interactions = []
+        if self.auto_instrument_user_interaction:
+            interactions = self.component_user_interaction.get(component_id, [])
+
         parameters_to_display = {}
-        if
-            parameters_obj = parameters[
-            if hasattr(parameters_obj,
+        if "run_manager" in parameters:
+            parameters_obj = parameters["run_manager"]
+            if hasattr(parameters_obj, "metadata"):
                 metadata = parameters_obj.metadata
                 # parameters = {'metadata': metadata}
                 parameters_to_display.update(metadata)
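
One detail of the reworked create_llm_component signature: the defaults cost={}, usage={}, and parameters={} are mutable and shared across calls, since Python evaluates defaults once at function definition. The method only reads from them via .get(...), so the sharing is harmless here, but any future in-place mutation would leak state between calls. A minimal illustration of the pitfall with a hypothetical helper:

def append_item(item, bucket=[]):  # one list object shared by every call
    bucket.append(item)
    return bucket

append_item(1)  # [1]
append_item(2)  # [1, 2] -- the default list remembers the previous call
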
@@ -283,9 +348,22 @@ class LLMTracerMixin:
         for key, value in parameters.items():
             if isinstance(value, (str, int, float, bool)):
                 parameters_to_display[key] = value
-
+
         # Limit the number of parameters to display
-        parameters_to_display = dict(
+        parameters_to_display = dict(
+            list(parameters_to_display.items())[: self.MAX_PARAMETERS_TO_DISPLAY]
+        )
+
+        # Get tags, metrics
+        # tags
+        tags = []
+        if name in self.span_attributes_dict:
+            tags = self.span_attributes_dict[name].tags or []
+
+        # metrics
+        metrics = []
+        if name in self.span_attributes_dict:
+            metrics = self.span_attributes_dict[name].metrics or []
 
         component = {
             "id": component_id,
@@ -303,23 +381,30 @@ class LLMTracerMixin:
                 "memory_used": memory_used,
                 "cost": cost,
                 "tokens": usage,
-
+                "tags": tags,
+                **parameters_to_display,
             },
             "extra_info": parameters,
             "data": {
-                "input":
+                "input": (
+                    input_data["args"] if hasattr(input_data, "args") else input_data
+                ),
                 "output": output_data.output_response if output_data else None,
-                "memory_used": memory_used
+                "memory_used": memory_used,
             },
-            "
-            "
+            "metrics": metrics,
+            "network_calls": network_calls,
+            "interactions": interactions,
         }
 
-        if self.gt:
+        if self.gt:
             component["data"]["gt"] = self.gt
 
+        # Reset the SpanAttributes context variable
+        self.span_attributes_dict[name] = SpanAttributes(name)
+
         return component
-
+
     def start_component(self, component_id):
         """Start tracking network calls for a component"""
         self.component_network_calls[component_id] = []
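
Pulling the last three hunks together, each LLM component now carries tags, metrics, and the conditionally collected network calls and interactions. A rough sketch of the resulting shape, assembled from this diff rather than from any documented schema (elided fields marked with ...):

component = {
    "id": component_id,
    ...: ...,
    "info": {..., "tags": tags, **parameters_to_display},
    "extra_info": parameters,
    "data": {"input": ..., "output": ..., "memory_used": memory_used},
    "metrics": metrics,              # from SpanAttributes for this span name
    "network_calls": network_calls,  # empty unless network instrumentation is on
    "interactions": interactions,    # empty unless user-interaction instrumentation is on
}
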
@@ -329,16 +414,18 @@ class LLMTracerMixin:
         """Stop tracking network calls for a component"""
         self.current_component_id = None
 
-
     async def trace_llm_call(self, original_func, *args, **kwargs):
         """Trace an LLM API call"""
         if not self.is_active:
             return await original_func(*args, **kwargs)
 
+        if not self.auto_instrument_llm:
+            return await original_func(*args, **kwargs)
+
         start_time = datetime.now().astimezone()
         start_memory = psutil.Process().memory_info().rss
         component_id = str(uuid.uuid4())
-        hash_id = generate_unique_hash_simple(original_func)
+        hash_id = generate_unique_hash_simple(original_func)
 
         # Start tracking network calls for this component
         self.start_component(component_id)
@@ -364,25 +451,26 @@ class LLMTracerMixin:
             name = self.current_llm_call_name.get()
             if name is None:
                 name = original_func.__name__
-
+
             # Create LLM component
             llm_component = self.create_llm_component(
                 component_id=component_id,
                 hash_id=hash_id,
                 name=name,
                 llm_type=model_name,
-                version=
+                version=None,
                 memory_used=memory_used,
                 start_time=start_time,
                 input_data=input_data,
                 output_data=extract_llm_output(result),
                 cost=cost,
                 usage=token_usage,
-                parameters=parameters
+                parameters=parameters,
             )
-
+
             # self.add_component(llm_component)
             self.llm_data = llm_component
+
             return result
 
         except Exception as e:
@@ -390,30 +478,31 @@ class LLMTracerMixin:
                 "code": 500,
                 "type": type(e).__name__,
                 "message": str(e),
-                "details": {}
+                "details": {},
             }
-
+
             # End tracking network calls for this component
             self.end_component(component_id)
 
             name = self.current_llm_call_name.get()
             if name is None:
                 name = original_func.__name__
-
+
             llm_component = self.create_llm_component(
                 component_id=component_id,
                 hash_id=hash_id,
                 name=name,
                 llm_type="unknown",
-                version=
+                version=None,
                 memory_used=0,
                 start_time=start_time,
                 input_data=extract_input_data(args, kwargs, None),
                 output_data=None,
-                error=error_component
+                error=error_component,
             )
-
+
             self.add_component(llm_component)
+
             raise
 
     def trace_llm_call_sync(self, original_func, *args, **kwargs):
@@ -423,6 +512,9 @@ class LLMTracerMixin:
                 return asyncio.run(original_func(*args, **kwargs))
             return original_func(*args, **kwargs)
 
+        if not self.auto_instrument_llm:
+            return original_func(*args, **kwargs)
+
         start_time = datetime.now().astimezone()
         component_id = str(uuid.uuid4())
         hash_id = generate_unique_hash_simple(original_func)
@@ -456,24 +548,25 @@ class LLMTracerMixin:
             name = self.current_llm_call_name.get()
             if name is None:
                 name = original_func.__name__
-
+
             # Create LLM component
             llm_component = self.create_llm_component(
                 component_id=component_id,
                 hash_id=hash_id,
                 name=name,
                 llm_type=model_name,
-                version=
+                version=None,
                 memory_used=memory_used,
                 start_time=start_time,
                 input_data=input_data,
                 output_data=extract_llm_output(result),
                 cost=cost,
                 usage=token_usage,
-                parameters=parameters
+                parameters=parameters,
             )
             self.llm_data = llm_component
             self.add_component(llm_component)
+
             return result
 
         except Exception as e:
@@ -481,9 +574,9 @@ class LLMTracerMixin:
                 "code": 500,
                 "type": type(e).__name__,
                 "message": str(e),
-                "details": {}
+                "details": {},
             }
-
+
             # End tracking network calls for this component
             self.end_component(component_id)
 
@@ -493,41 +586,76 @@ class LLMTracerMixin:
 
             end_memory = psutil.Process().memory_info().rss
             memory_used = max(0, end_memory - start_memory)
-
+
             llm_component = self.create_llm_component(
                 component_id=component_id,
                 hash_id=hash_id,
                 name=name,
                 llm_type="unknown",
-                version=
+                version=None,
                 memory_used=memory_used,
                 start_time=start_time,
                 input_data=extract_input_data(args, kwargs, None),
                 output_data=None,
-                error=error_component
+                error=error_component,
             )
             self.llm_data = llm_component
             self.add_component(llm_component, is_error=True)
+
             raise
 
-    def trace_llm(
+    def trace_llm(
+        self,
+        name: str = None,
+        tags: List[str] = [],
+        metadata: Dict[str, Any] = {},
+        metrics: List[Dict[str, Any]] = [],
+        feedback: Optional[Any] = None,
+    ):
+        if name not in self.span_attributes_dict:
+            self.span_attributes_dict[name] = SpanAttributes(name)
+        if tags:
+            self.span(name).add_tags(tags)
+        if metadata:
+            self.span(name).add_metadata(metadata)
+        if metrics:
+            if isinstance(metrics, dict):
+                metrics = [metrics]
+            for metric in metrics:
+                try:
+                    self.span(name).add_metrics(
+                        name=metric["name"],
+                        score=metric["score"],
+                        reasoning=metric.get("reasoning", ""),
+                        cost=metric.get("cost", None),
+                        latency=metric.get("latency", None),
+                        metadata=metric.get("metadata", {}),
+                        config=metric.get("config", {}),
+                    )
+                except KeyError as e:
+                    logger.error(f"Error adding metric: {e}")
+        if feedback:
+            self.span(name).add_feedback(feedback)
+
+        self.current_llm_call_name.set(name)
+
         def decorator(func):
             @self.file_tracker.trace_decorator
             @functools.wraps(func)
             async def async_wrapper(*args, **kwargs):
-                self.gt = kwargs.get(
+                self.gt = kwargs.get("gt", None) if kwargs else None
                 self.current_llm_call_name.set(name)
                 if not self.is_active:
                     return await func(*args, **kwargs)
-
-                hash_id = generate_unique_hash_simple(func)
+
+                hash_id = generate_unique_hash_simple(func)
                 component_id = str(uuid.uuid4())
                 parent_agent_id = self.current_agent_id.get()
                 self.start_component(component_id)
-
+
                 error_info = None
                 result = None
-
+
                 try:
                     result = await func(*args, **kwargs)
                     return result
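
The new front half of trace_llm registers span attributes before the function is wrapped. A hedged usage sketch, assuming `tracer` exposes this mixin's API: per the KeyError handling above, each metric dict must provide "name" and "score", while "reasoning", "cost", "latency", "metadata", and "config" are optional.

@tracer.trace_llm(
    name="summarize",
    tags=["prod"],
    metadata={"team": "search"},
    metrics=[{"name": "faithfulness", "score": 0.9}],
)
async def summarize(prompt, gt=None):
    # the wrapper reads the "gt" kwarg as ground truth (self.gt)
    ...
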
@@ -537,7 +665,7 @@ class LLMTracerMixin:
                             "type": type(e).__name__,
                             "message": str(e),
                             "traceback": traceback.format_exc(),
-                            "timestamp": datetime.now().isoformat()
+                            "timestamp": datetime.now().isoformat(),
                         }
                     }
                     raise
@@ -545,41 +673,45 @@ class LLMTracerMixin:
 
                 llm_component = self.llm_data
                 if (name is not None) or (name != ""):
-                    llm_component[
+                    llm_component["name"] = name
 
                 if self.gt:
                     llm_component["data"]["gt"] = self.gt
 
                 if error_info:
                     llm_component["error"] = error_info["error"]
-
+
                 if parent_agent_id:
                     children = self.agent_children.get()
                     children.append(llm_component)
                     self.agent_children.set(children)
                 else:
                     self.add_component(llm_component)
-
+
                 self.end_component(component_id)
+                llm_component["interactions"] = self.component_user_interaction.get(
+                    component_id, []
+                )
+                self.add_component(llm_component)
 
             @self.file_tracker.trace_decorator
             @functools.wraps(func)
             def sync_wrapper(*args, **kwargs):
-                self.gt = kwargs.get(
+                self.gt = kwargs.get("gt", None) if kwargs else None
                 self.current_llm_call_name.set(name)
                 if not self.is_active:
                     return func(*args, **kwargs)
-
+
                 hash_id = generate_unique_hash_simple(func)
 
                 component_id = str(uuid.uuid4())
                 parent_agent_id = self.current_agent_id.get()
                 self.start_component(component_id)
-
+
                 start_time = datetime.now()
                 error_info = None
                 result = None
-
+
                 try:
                     result = func(*args, **kwargs)
                     return result
@@ -589,7 +721,7 @@ class LLMTracerMixin:
                             "type": type(e).__name__,
                             "message": str(e),
                             "traceback": traceback.format_exc(),
-                            "timestamp": datetime.now().isoformat()
+                            "timestamp": datetime.now().isoformat(),
                         }
                     }
                     raise
@@ -597,21 +729,26 @@ class LLMTracerMixin:
 
                 llm_component = self.llm_data
                 if (name is not None) or (name != ""):
-                    llm_component[
+                    llm_component["name"] = name
 
                 if error_info:
                     llm_component["error"] = error_info["error"]
-
+
                 if parent_agent_id:
                     children = self.agent_children.get()
                     children.append(llm_component)
                     self.agent_children.set(children)
                 else:
                     self.add_component(llm_component)
-
+
                 self.end_component(component_id)
+                llm_component["interactions"] = self.component_user_interaction.get(
+                    component_id, []
+                )
+                self.add_component(llm_component)
 
             return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
+
         return decorator
 
     def unpatch_llm_calls(self):
|