ragaai-catalyst 2.1.3b0__py3-none-any.whl → 2.1.4b1__py3-none-any.whl
This diff compares the contents of two publicly available versions of the package as released to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
- ragaai_catalyst/tracers/agentic_tracing/data/data_structure.py +36 -10
- ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py +213 -76
- ragaai_catalyst/tracers/agentic_tracing/tracers/base.py +568 -107
- ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py +325 -0
- ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py +207 -81
- ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py +208 -58
- ragaai_catalyst/tracers/agentic_tracing/tracers/network_tracer.py +2 -0
- ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py +125 -28
- ragaai_catalyst/tracers/agentic_tracing/tracers/user_interaction_tracer.py +86 -0
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_agentic_traces.py +9 -51
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_trace_metric.py +83 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/create_dataset_schema.py +26 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/get_user_trace_metrics.py +28 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py +45 -15
- ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json +2476 -2122
- ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py +59 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py +23 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/zip_list_of_unique_files.py +284 -15
- ragaai_catalyst/tracers/tracer.py +80 -8
- ragaai_catalyst-2.1.4b1.dist-info/METADATA +431 -0
- {ragaai_catalyst-2.1.3b0.dist-info → ragaai_catalyst-2.1.4b1.dist-info}/RECORD +23 -18
- ragaai_catalyst-2.1.3b0.dist-info/METADATA +0 -43
- {ragaai_catalyst-2.1.3b0.dist-info → ragaai_catalyst-2.1.4b1.dist-info}/WHEEL +0 -0
- {ragaai_catalyst-2.1.3b0.dist-info → ragaai_catalyst-2.1.4b1.dist-info}/top_level.txt +0 -0
ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py

```diff
@@ -22,8 +22,17 @@ from ..utils.llm_utils import (
     extract_llm_output,
 )
 from ..utils.trace_utils import load_model_costs
-from ..utils.unique_decorator import generate_unique_hash_simple
+from ..utils.unique_decorator import generate_unique_hash_simple
 from ..utils.file_name_tracker import TrackName
+from ..utils.span_attributes import SpanAttributes
+import logging
+
+logger = logging.getLogger(__name__)
+logging_level = (
+    logger.setLevel(logging.DEBUG)
+    if os.getenv("DEBUG")
+    else logger.setLevel(logging.INFO)
+)
 
 
 class LLMTracerMixin:
```
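The new module-level logging block keys the log level off the `DEBUG` environment variable. Note that `Logger.setLevel()` returns `None`, so the conditional expression runs purely for its side effect and `logging_level` is always bound to `None`. A more direct way to express the same behavior (a sketch, not the package's code):

```python
import logging
import os

logger = logging.getLogger(__name__)
# Pick the level from the environment, then apply it once.
logger.setLevel(logging.DEBUG if os.getenv("DEBUG") else logging.INFO)
```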
```diff
@@ -36,29 +45,36 @@ class LLMTracerMixin:
         except Exception as e:
             self.model_costs = {
                 # TODO: Default cost handling needs to be improved
-                "default": {
-                    "input_cost_per_token": 0.0,
-                    "output_cost_per_token": 0.0
-                }
+                "default": {"input_cost_per_token": 0.0, "output_cost_per_token": 0.0}
             }
         self.MAX_PARAMETERS_TO_DISPLAY = 10
-        self.current_llm_call_name = contextvars.ContextVar(
-
+        self.current_llm_call_name = contextvars.ContextVar(
+            "llm_call_name", default=None
+        )
+        self.component_network_calls = {}
         self.component_user_interaction = {}
-        self.current_component_id = None
+        self.current_component_id = None
         self.total_tokens = 0
         self.total_cost = 0.0
         self.llm_data = {}
 
+        # Add auto_instrument options
+        self.auto_instrument_llm = False
+        self.auto_instrument_user_interaction = False
+        self.auto_instrument_network = False
+
     def instrument_llm_calls(self):
+        """Enable LLM instrumentation"""
+        self.auto_instrument_llm = True
+
         # Handle modules that are already imported
         import sys
-
+
         if "vertexai" in sys.modules:
             self.patch_vertex_ai_methods(sys.modules["vertexai"])
         if "vertexai.generative_models" in sys.modules:
             self.patch_vertex_ai_methods(sys.modules["vertexai.generative_models"])
-
+
         if "openai" in sys.modules:
             self.patch_openai_methods(sys.modules["openai"])
         if "litellm" in sys.modules:
```
```diff
@@ -68,21 +84,39 @@ class LLMTracerMixin:
         if "google.generativeai" in sys.modules:
             self.patch_google_genai_methods(sys.modules["google.generativeai"])
         if "langchain_google_vertexai" in sys.modules:
-            self.patch_langchain_google_methods(
+            self.patch_langchain_google_methods(
+                sys.modules["langchain_google_vertexai"]
+            )
         if "langchain_google_genai" in sys.modules:
             self.patch_langchain_google_methods(sys.modules["langchain_google_genai"])
 
         # Register hooks for future imports
         wrapt.register_post_import_hook(self.patch_vertex_ai_methods, "vertexai")
-        wrapt.register_post_import_hook(
+        wrapt.register_post_import_hook(
+            self.patch_vertex_ai_methods, "vertexai.generative_models"
+        )
         wrapt.register_post_import_hook(self.patch_openai_methods, "openai")
         wrapt.register_post_import_hook(self.patch_litellm_methods, "litellm")
         wrapt.register_post_import_hook(self.patch_anthropic_methods, "anthropic")
-        wrapt.register_post_import_hook(
-
+        wrapt.register_post_import_hook(
+            self.patch_google_genai_methods, "google.generativeai"
+        )
+
         # Add hooks for LangChain integrations
-        wrapt.register_post_import_hook(
-
+        wrapt.register_post_import_hook(
+            self.patch_langchain_google_methods, "langchain_google_vertexai"
+        )
+        wrapt.register_post_import_hook(
+            self.patch_langchain_google_methods, "langchain_google_genai"
+        )
+
+    def instrument_user_interaction_calls(self):
+        """Enable user interaction instrumentation for LLM calls"""
+        self.auto_instrument_user_interaction = True
+
+    def instrument_network_calls(self):
+        """Enable network instrumentation for LLM calls"""
+        self.auto_instrument_network = True
 
     def patch_openai_methods(self, module):
         try:
```
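Together with `instrument_llm_calls` above, these two new methods complete a trio of independent switches: SDK patching, user-interaction capture, and network-call capture are now opted into separately. A usage sketch (the host class here is illustrative; the package itself composes `LLMTracerMixin` with its other tracer mixins into one tracer class):

```python
from ragaai_catalyst.tracers.agentic_tracing.tracers.llm_tracer import LLMTracerMixin

class DemoTracer(LLMTracerMixin):  # illustrative host, not the package's Tracer
    pass

tracer = DemoTracer()
tracer.instrument_llm_calls()               # patch LLM SDKs; auto_instrument_llm = True
tracer.instrument_user_interaction_calls()  # record user interactions per span
tracer.instrument_network_calls()           # record network calls per span
```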
```diff
@@ -106,20 +140,20 @@ class LLMTracerMixin:
             if hasattr(module, "GenerativeModel"):
                 model_class = getattr(module, "GenerativeModel")
                 self.wrap_genai_model_methods(model_class)
-
+
             # Patch LangChain integration
             if hasattr(module, "ChatGoogleGenerativeAI"):
                 chat_class = getattr(module, "ChatGoogleGenerativeAI")
                 # Wrap invoke method to capture messages
                 original_invoke = chat_class.invoke
-
+
                 def patched_invoke(self, messages, *args, **kwargs):
                     # Store messages in the instance for later use
                     self._last_messages = messages
                     return original_invoke(self, messages, *args, **kwargs)
-
+
                 chat_class.invoke = patched_invoke
-
+
                 # LangChain v0.2+ uses invoke/ainvoke
                 self.wrap_method(chat_class, "_generate")
                 if hasattr(chat_class, "_agenerate"):
```
```diff
@@ -137,7 +171,7 @@ class LLMTracerMixin:
             if hasattr(gen_models, "GenerativeModel"):
                 model_class = getattr(gen_models, "GenerativeModel")
                 self.wrap_vertex_model_methods(model_class)
-
+
             # Also patch the class directly if available
             if hasattr(module, "GenerativeModel"):
                 model_class = getattr(module, "GenerativeModel")
```
```diff
@@ -186,23 +220,31 @@ class LLMTracerMixin:
             def patched_init(client_self, *args, **kwargs):
                 original_init(client_self, *args, **kwargs)
                 # Check if this is AsyncOpenAI or OpenAI
-                is_async =
-
+                is_async = "AsyncOpenAI" in client_class.__name__
+
                 if is_async:
                     # Patch async methods for AsyncOpenAI
                     if hasattr(client_self.chat.completions, "create"):
                         original_create = client_self.chat.completions.create
+
                         @functools.wraps(original_create)
                         async def wrapped_create(*args, **kwargs):
-                            return await self.trace_llm_call(
+                            return await self.trace_llm_call(
+                                original_create, *args, **kwargs
+                            )
+
                         client_self.chat.completions.create = wrapped_create
                 else:
                     # Patch sync methods for OpenAI
                     if hasattr(client_self.chat.completions, "create"):
                         original_create = client_self.chat.completions.create
+
                         @functools.wraps(original_create)
                         def wrapped_create(*args, **kwargs):
-                            return self.trace_llm_call_sync(
+                            return self.trace_llm_call_sync(
+                                original_create, *args, **kwargs
+                            )
+
                         client_self.chat.completions.create = wrapped_create
 
             setattr(client_class, "__init__", patched_init)
```
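The OpenAI patch replaces the client class's `__init__` so that every newly constructed client gets its `chat.completions.create` bound method swapped for a tracing wrapper, choosing the sync or async tracer from the class name. The same pattern in isolation, with a stand-in `trace_call` (all names here are illustrative, not the package's API):

```python
import functools

def patch_client_class(client_class, trace_call):
    """Wrap client_class.__init__ so each new instance traces chat.completions.create."""
    original_init = client_class.__init__

    def patched_init(client_self, *args, **kwargs):
        original_init(client_self, *args, **kwargs)
        original_create = client_self.chat.completions.create

        @functools.wraps(original_create)
        def wrapped_create(*args, **kwargs):
            return trace_call(original_create, *args, **kwargs)

        # Rebind on the instance; the class itself stays unmodified.
        client_self.chat.completions.create = wrapped_create

    client_class.__init__ = patched_init
```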
```diff
@@ -240,41 +282,64 @@ class LLMTracerMixin:
         if isinstance(obj, type):
             # Store the original class method
             original_method = getattr(obj, method_name)
-
+
             @wrapt.decorator
             def wrapper(wrapped, instance, args, kwargs):
                 if asyncio.iscoroutinefunction(wrapped):
                     return self.trace_llm_call(wrapped, *args, **kwargs)
                 return self.trace_llm_call_sync(wrapped, *args, **kwargs)
-
+
             # Wrap the class method
             wrapped_method = wrapper(original_method)
             setattr(obj, method_name, wrapped_method)
             self.patches.append((obj, method_name, original_method))
-
+
         else:
             # For instance methods
             original_method = getattr(obj, method_name)
-
+
             @wrapt.decorator
             def wrapper(wrapped, instance, args, kwargs):
                 if asyncio.iscoroutinefunction(wrapped):
                     return self.trace_llm_call(wrapped, *args, **kwargs)
                 return self.trace_llm_call_sync(wrapped, *args, **kwargs)
-
+
             wrapped_method = wrapper(original_method)
             setattr(obj, method_name, wrapped_method)
             self.patches.append((obj, method_name, original_method))
 
-    def create_llm_component(
+    def create_llm_component(
+        self,
+        component_id,
+        hash_id,
+        name,
+        llm_type,
+        version,
+        memory_used,
+        start_time,
+        input_data,
+        output_data,
+        cost={},
+        usage={},
+        error=None,
+        parameters={},
+    ):
         # Update total metrics
         self.total_tokens += usage.get("total_tokens", 0)
         self.total_cost += cost.get("total_cost", 0)
 
+        network_calls = []
+        if self.auto_instrument_network:
+            network_calls = self.component_network_calls.get(component_id, [])
+
+        interactions = []
+        if self.auto_instrument_user_interaction:
+            interactions = self.component_user_interaction.get(component_id, [])
+
         parameters_to_display = {}
-        if
-            parameters_obj = parameters[
-            if hasattr(parameters_obj,
+        if "run_manager" in parameters:
+            parameters_obj = parameters["run_manager"]
+            if hasattr(parameters_obj, "metadata"):
                 metadata = parameters_obj.metadata
                 # parameters = {'metadata': metadata}
                 parameters_to_display.update(metadata)
```
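Both branches of `wrap_method` use the same `wrapt.decorator` dispatch: one wrapper that routes coroutine functions to the async tracer and everything else to the sync tracer, while recording `(obj, method_name, original_method)` for later unpatching. The dispatch pattern in isolation, with stand-in tracers:

```python
import asyncio
import wrapt

async def trace_async(func, *args, **kwargs):
    # Stand-in for trace_llm_call: record timing/usage around the awaited call.
    return await func(*args, **kwargs)

def trace_sync(func, *args, **kwargs):
    # Stand-in for trace_llm_call_sync.
    return func(*args, **kwargs)

@wrapt.decorator
def traced(wrapped, instance, args, kwargs):
    # Coroutine functions take the async path; the coroutine returned by
    # trace_async is awaited by the original caller.
    if asyncio.iscoroutinefunction(wrapped):
        return trace_async(wrapped, *args, **kwargs)
    return trace_sync(wrapped, *args, **kwargs)
```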
```diff
@@ -283,9 +348,11 @@ class LLMTracerMixin:
         for key, value in parameters.items():
             if isinstance(value, (str, int, float, bool)):
                 parameters_to_display[key] = value
-
+
         # Limit the number of parameters to display
-        parameters_to_display = dict(
+        parameters_to_display = dict(
+            list(parameters_to_display.items())[: self.MAX_PARAMETERS_TO_DISPLAY]
+        )
 
         component = {
             "id": component_id,
```
```diff
@@ -303,23 +370,30 @@ class LLMTracerMixin:
                 "memory_used": memory_used,
                 "cost": cost,
                 "tokens": usage,
-
+                "tags": self.span_attributes_dict[name].tags or [],
+                **parameters_to_display,
             },
             "extra_info": parameters,
             "data": {
-                "input":
+                "input": (
+                    input_data["args"] if hasattr(input_data, "args") else input_data
+                ),
                 "output": output_data.output_response if output_data else None,
-                "memory_used": memory_used
+                "memory_used": memory_used,
             },
-            "
-            "
+            "metrics": self.span_attributes_dict[name].metrics or [],
+            "network_calls": network_calls,
+            "interactions": interactions,
         }
 
-        if self.gt:
+        if self.gt:
             component["data"]["gt"] = self.gt
 
+        # Reset the SpanAttributes context variable
+        self.span_attributes_dict[name] = SpanAttributes(name)
+
         return component
-
+
     def start_component(self, component_id):
         """Start tracking network calls for a component"""
         self.component_network_calls[component_id] = []
```
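Assembled, the component payload now carries span attributes alongside the gated capture lists: `tags` and flattened parameters under `info`, plus top-level `metrics`, `network_calls`, and `interactions`, the latter two populated only when their auto-instrument flag is on. An illustrative payload with invented values, showing only keys visible in the diff:

```python
import uuid

# Illustrative values only; keys mirror the diff above.
component_id = str(uuid.uuid4())
parameters = {"temperature": 0.2, "model": "gpt-4o-mini"}

component = {
    "id": component_id,
    "info": {
        "memory_used": 2048,
        "cost": {"total_cost": 0.0004},
        "tokens": {"total_tokens": 180},
        "tags": ["checkout-flow"],  # from SpanAttributes, [] if none set
        **parameters,               # capped at MAX_PARAMETERS_TO_DISPLAY entries
    },
    "extra_info": parameters,
    "data": {"input": None, "output": None, "memory_used": 2048},
    "metrics": [],        # from SpanAttributes
    "network_calls": [],  # filled only when auto_instrument_network is True
    "interactions": [],   # filled only when auto_instrument_user_interaction is True
}
```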
```diff
@@ -329,16 +403,18 @@ class LLMTracerMixin:
         """Stop tracking network calls for a component"""
         self.current_component_id = None
 
-
     async def trace_llm_call(self, original_func, *args, **kwargs):
         """Trace an LLM API call"""
         if not self.is_active:
             return await original_func(*args, **kwargs)
 
+        if not self.auto_instrument_llm:
+            return await original_func(*args, **kwargs)
+
         start_time = datetime.now().astimezone()
         start_memory = psutil.Process().memory_info().rss
         component_id = str(uuid.uuid4())
-        hash_id = generate_unique_hash_simple(original_func)
+        hash_id = generate_unique_hash_simple(original_func)
 
         # Start tracking network calls for this component
         self.start_component(component_id)
```
```diff
@@ -364,25 +440,26 @@ class LLMTracerMixin:
             name = self.current_llm_call_name.get()
             if name is None:
                 name = original_func.__name__
-
+
             # Create LLM component
             llm_component = self.create_llm_component(
                 component_id=component_id,
                 hash_id=hash_id,
                 name=name,
                 llm_type=model_name,
-                version=
+                version=None,
                 memory_used=memory_used,
                 start_time=start_time,
                 input_data=input_data,
                 output_data=extract_llm_output(result),
                 cost=cost,
                 usage=token_usage,
-                parameters=parameters
+                parameters=parameters,
             )
-
+
             # self.add_component(llm_component)
             self.llm_data = llm_component
+
             return result
 
         except Exception as e:
```
```diff
@@ -390,30 +467,31 @@ class LLMTracerMixin:
                 "code": 500,
                 "type": type(e).__name__,
                 "message": str(e),
-                "details": {}
+                "details": {},
             }
-
+
             # End tracking network calls for this component
             self.end_component(component_id)
 
             name = self.current_llm_call_name.get()
             if name is None:
                 name = original_func.__name__
-
+
             llm_component = self.create_llm_component(
                 component_id=component_id,
                 hash_id=hash_id,
                 name=name,
                 llm_type="unknown",
-                version=
+                version=None,
                 memory_used=0,
                 start_time=start_time,
                 input_data=extract_input_data(args, kwargs, None),
                 output_data=None,
-                error=error_component
+                error=error_component,
             )
-
+
             self.add_component(llm_component)
+
             raise
 
     def trace_llm_call_sync(self, original_func, *args, **kwargs):
```
```diff
@@ -423,6 +501,9 @@ class LLMTracerMixin:
                 return asyncio.run(original_func(*args, **kwargs))
             return original_func(*args, **kwargs)
 
+        if not self.auto_instrument_llm:
+            return original_func(*args, **kwargs)
+
         start_time = datetime.now().astimezone()
         component_id = str(uuid.uuid4())
         hash_id = generate_unique_hash_simple(original_func)
```
```diff
@@ -456,24 +537,25 @@ class LLMTracerMixin:
             name = self.current_llm_call_name.get()
             if name is None:
                 name = original_func.__name__
-
+
             # Create LLM component
             llm_component = self.create_llm_component(
                 component_id=component_id,
                 hash_id=hash_id,
                 name=name,
                 llm_type=model_name,
-                version=
+                version=None,
                 memory_used=memory_used,
                 start_time=start_time,
                 input_data=input_data,
                 output_data=extract_llm_output(result),
                 cost=cost,
                 usage=token_usage,
-                parameters=parameters
+                parameters=parameters,
             )
             self.llm_data = llm_component
             self.add_component(llm_component)
+
             return result
 
         except Exception as e:
```
```diff
@@ -481,9 +563,9 @@ class LLMTracerMixin:
                 "code": 500,
                 "type": type(e).__name__,
                 "message": str(e),
-                "details": {}
+                "details": {},
             }
-
+
             # End tracking network calls for this component
             self.end_component(component_id)
 
```
```diff
@@ -493,41 +575,76 @@ class LLMTracerMixin:
 
             end_memory = psutil.Process().memory_info().rss
             memory_used = max(0, end_memory - start_memory)
-
+
             llm_component = self.create_llm_component(
                 component_id=component_id,
                 hash_id=hash_id,
                 name=name,
                 llm_type="unknown",
-                version=
+                version=None,
                 memory_used=memory_used,
                 start_time=start_time,
                 input_data=extract_input_data(args, kwargs, None),
                 output_data=None,
-                error=error_component
+                error=error_component,
             )
             self.llm_data = llm_component
             self.add_component(llm_component, is_error=True)
+
             raise
 
-    def trace_llm(
+    def trace_llm(
+        self,
+        name: str = None,
+        tags: List[str] = [],
+        metadata: Dict[str, Any] = {},
+        metrics: List[Dict[str, Any]] = [],
+        feedback: Optional[Any] = None,
+    ):
+        if name not in self.span_attributes_dict:
+            self.span_attributes_dict[name] = SpanAttributes(name)
+        if tags:
+            self.span(name).add_tags(tags)
+        if metadata:
+            self.span(name).add_metadata(metadata)
+        if metrics:
+            if isinstance(metrics, dict):
+                metrics = [metrics]
+            for metric in metrics:
+                try:
+                    self.span(name).add_metrics(
+                        name=metric["name"],
+                        score=metric["score"],
+                        reasoning=metric.get("reasoning", ""),
+                        cost=metric.get("cost", None),
+                        latency=metric.get("latency", None),
+                        metadata=metric.get("metadata", {}),
+                        config=metric.get("config", {}),
+                    )
+                except KeyError as e:
+                    logger.error(f"Error adding metric: {e}")
+        if feedback:
+            self.span(name).add_feedback(feedback)
+
+        self.current_llm_call_name.set(name)
+
         def decorator(func):
             @self.file_tracker.trace_decorator
             @functools.wraps(func)
             async def async_wrapper(*args, **kwargs):
-                self.gt = kwargs.get(
+                self.gt = kwargs.get("gt", None) if kwargs else None
                 self.current_llm_call_name.set(name)
                 if not self.is_active:
                     return await func(*args, **kwargs)
-
-                hash_id = generate_unique_hash_simple(func)
+
+                hash_id = generate_unique_hash_simple(func)
                 component_id = str(uuid.uuid4())
                 parent_agent_id = self.current_agent_id.get()
                 self.start_component(component_id)
-
+
                 error_info = None
                 result = None
-
+
                 try:
                     result = await func(*args, **kwargs)
                     return result
```
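`trace_llm` now accepts span-level `tags`, `metadata`, `metrics`, and `feedback` alongside the name, and feeds them into the span's `SpanAttributes` before installing the wrapper. A hedged usage sketch; the decorator's parameters match the diff, while the `Tracer` import and constructor arguments are assumptions:

```python
from ragaai_catalyst import Tracer  # assumed import path

tracer = Tracer(project_name="demo", dataset_name="runs")  # assumed signature

@tracer.trace_llm(
    name="summarize",
    tags=["prod"],
    metadata={"model": "gpt-4o-mini"},
    # Each metric dict needs "name" and "score"; a missing key is logged, not raised.
    metrics=[{"name": "faithfulness", "score": 0.92, "reasoning": "spot check"}],
)
async def summarize(text):
    ...  # call the LLM here; pass gt=... as a keyword to record ground truth
```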
```diff
@@ -537,7 +654,7 @@ class LLMTracerMixin:
                             "type": type(e).__name__,
                             "message": str(e),
                             "traceback": traceback.format_exc(),
-                            "timestamp": datetime.now().isoformat()
+                            "timestamp": datetime.now().isoformat(),
                         }
                     }
                 raise
```
```diff
@@ -545,41 +662,45 @@ class LLMTracerMixin:
 
                     llm_component = self.llm_data
                     if (name is not None) or (name != ""):
-                        llm_component[
+                        llm_component["name"] = name
 
                     if self.gt:
                         llm_component["data"]["gt"] = self.gt
 
                     if error_info:
                         llm_component["error"] = error_info["error"]
-
+
                     if parent_agent_id:
                         children = self.agent_children.get()
                         children.append(llm_component)
                         self.agent_children.set(children)
                     else:
                         self.add_component(llm_component)
-
+
                     self.end_component(component_id)
+                    llm_component["interactions"] = self.component_user_interaction.get(
+                        component_id, []
+                    )
+                    self.add_component(llm_component)
 
             @self.file_tracker.trace_decorator
             @functools.wraps(func)
             def sync_wrapper(*args, **kwargs):
-                self.gt = kwargs.get(
+                self.gt = kwargs.get("gt", None) if kwargs else None
                 self.current_llm_call_name.set(name)
                 if not self.is_active:
                     return func(*args, **kwargs)
-
+
                 hash_id = generate_unique_hash_simple(func)
 
                 component_id = str(uuid.uuid4())
                 parent_agent_id = self.current_agent_id.get()
                 self.start_component(component_id)
-
+
                 start_time = datetime.now()
                 error_info = None
                 result = None
-
+
                 try:
                     result = func(*args, **kwargs)
                     return result
```
```diff
@@ -589,7 +710,7 @@ class LLMTracerMixin:
                             "type": type(e).__name__,
                             "message": str(e),
                             "traceback": traceback.format_exc(),
-                            "timestamp": datetime.now().isoformat()
+                            "timestamp": datetime.now().isoformat(),
                         }
                     }
                 raise
```
```diff
@@ -597,21 +718,26 @@ class LLMTracerMixin:
 
                     llm_component = self.llm_data
                     if (name is not None) or (name != ""):
-                        llm_component[
+                        llm_component["name"] = name
 
                     if error_info:
                         llm_component["error"] = error_info["error"]
-
+
                     if parent_agent_id:
                         children = self.agent_children.get()
                         children.append(llm_component)
                         self.agent_children.set(children)
                     else:
                         self.add_component(llm_component)
-
+
                     self.end_component(component_id)
+                    llm_component["interactions"] = self.component_user_interaction.get(
+                        component_id, []
+                    )
+                    self.add_component(llm_component)
 
             return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
+
         return decorator
 
     def unpatch_llm_calls(self):
```
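Every patch site above records `(obj, method_name, original_method)` in `self.patches`, which is what makes unpatching possible. The body of `unpatch_llm_calls` falls outside this excerpt; a plausible cleanup under the assumption that it consumes that list (a sketch, not the package's code):

```python
def unpatch_all(patches):
    """Restore originals recorded as (obj, method_name, original_method) tuples."""
    for obj, method_name, original_method in patches:
        # Put the unwrapped method back where wrap_method installed the wrapper.
        setattr(obj, method_name, original_method)
    patches.clear()
```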
|