ragaai-catalyst 2.1.5b21__py3-none-any.whl → 2.1.5b23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ragaai_catalyst/__init__.py +3 -1
- ragaai_catalyst/dataset.py +49 -1
- ragaai_catalyst/redteaming.py +171 -0
- ragaai_catalyst/synthetic_data_generation.py +40 -7
- ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py +57 -46
- ragaai_catalyst/tracers/agentic_tracing/tracers/base.py +218 -47
- ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py +17 -7
- ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py +327 -62
- ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py +0 -3
- ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py +17 -6
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_local_metric.py +72 -0
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_trace_metric.py +32 -15
- ragaai_catalyst/tracers/agentic_tracing/utils/file_name_tracker.py +21 -2
- ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py +33 -11
- ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json +1204 -484
- ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py +79 -10
- ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py +0 -32
- ragaai_catalyst/tracers/agentic_tracing/utils/unique_decorator.py +3 -1
- ragaai_catalyst/tracers/agentic_tracing/utils/zip_list_of_unique_files.py +40 -21
- ragaai_catalyst/tracers/distributed.py +7 -3
- ragaai_catalyst/tracers/tracer.py +9 -9
- ragaai_catalyst/tracers/utils/langchain_tracer_extraction_logic.py +0 -1
- {ragaai_catalyst-2.1.5b21.dist-info → ragaai_catalyst-2.1.5b23.dist-info}/METADATA +37 -2
- {ragaai_catalyst-2.1.5b21.dist-info → ragaai_catalyst-2.1.5b23.dist-info}/RECORD +27 -25
- {ragaai_catalyst-2.1.5b21.dist-info → ragaai_catalyst-2.1.5b23.dist-info}/LICENSE +0 -0
- {ragaai_catalyst-2.1.5b21.dist-info → ragaai_catalyst-2.1.5b23.dist-info}/WHEEL +0 -0
- {ragaai_catalyst-2.1.5b21.dist-info → ragaai_catalyst-2.1.5b23.dist-info}/top_level.txt +0 -0
ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py

@@ -12,7 +12,10 @@ import contextvars
 import traceback
 import importlib
 import sys
+from litellm import model_cost
+from llama_index.core.base.llms.types import ChatResponse

+from .base import BaseTracer
 from ..utils.llm_utils import (
     extract_model_name,
     extract_parameters,
@@ -24,8 +27,7 @@ from ..utils.llm_utils import (
     extract_llm_output,
     num_tokens_from_messages
 )
-from ..utils.
-from ..utils.unique_decorator import generate_unique_hash_simple
+from ..utils.unique_decorator import generate_unique_hash
 from ..utils.file_name_tracker import TrackName
 from ..utils.span_attributes import SpanAttributes
 import logging
@@ -44,7 +46,7 @@ class LLMTracerMixin:
         self.file_tracker = TrackName()
         self.patches = []
         try:
-            self.model_costs =
+            self.model_costs = model_cost
         except Exception as e:
             self.model_costs = {
                 "default": {"input_cost_per_token": 0.0, "output_cost_per_token": 0.0}
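For context on the hunk above: `litellm.model_cost` is a plain dict keyed by model name, each entry carrying per-token prices, so the tracer no longer needs its own bundled cost table. A minimal sketch of the lookup this enables (model name and token counts are illustrative):

    from litellm import model_cost

    # Unknown models fall back to zero-cost pricing, mirroring the
    # except-branch default in the hunk above.
    entry = model_cost.get(
        "gpt-4o-mini",
        {"input_cost_per_token": 0.0, "output_cost_per_token": 0.0},
    )
    prompt_tokens, completion_tokens = 1200, 350  # illustrative counts
    cost = (prompt_tokens * entry.get("input_cost_per_token", 0.0)
            + completion_tokens * entry.get("output_cost_per_token", 0.0))
    print(f"approximate cost: ${cost:.6f}")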
@@ -80,7 +82,6 @@ class LLMTracerMixin:
     def instrument_llm_calls(self):
         """Enable LLM instrumentation"""
         self.auto_instrument_llm = True
-
         # Check currently loaded modules
         if "vertexai" in sys.modules:
             self.patch_vertex_ai_methods(sys.modules["vertexai"])
@@ -98,23 +99,31 @@ class LLMTracerMixin:
         if "langchain_google_genai" in sys.modules:
             self.patch_langchain_google_methods(sys.modules["langchain_google_genai"])

+        if "langchain_openai" in sys.modules:
+            self.patch_langchain_openai_methods(sys.modules["langchain_openai"])
+        if "langchain_anthropic" in sys.modules:
+            self.patch_langchain_anthropic_methods(sys.modules["langchain_anthropic"])
+
+        if "llama_index" in sys.modules:
+            self.patch_llama_index_methods(sys.modules["llama_index"])
+
         # Register hooks for future imports with availability checks
         if self.check_package_available("vertexai"):
             wrapt.register_post_import_hook(self.patch_vertex_ai_methods, "vertexai")
             wrapt.register_post_import_hook(
                 self.patch_vertex_ai_methods, "vertexai.generative_models"
             )
-
+
         if self.check_package_available("openai") and self.validate_openai_key():
             wrapt.register_post_import_hook(self.patch_openai_methods, "openai")
             wrapt.register_post_import_hook(self.patch_openai_beta_methods, "openai")
-
+
         if self.check_package_available("litellm"):
             wrapt.register_post_import_hook(self.patch_litellm_methods, "litellm")
-
+
         if self.check_package_available("anthropic"):
             wrapt.register_post_import_hook(self.patch_anthropic_methods, "anthropic")
-
+
         if self.check_package_available("google.generativeai"):
             wrapt.register_post_import_hook(
                 self.patch_google_genai_methods, "google.generativeai"
@@ -125,12 +134,25 @@ class LLMTracerMixin:
         wrapt.register_post_import_hook(
             self.patch_langchain_google_methods, "langchain_google_vertexai"
         )
+
+
+        # Add hooks for llama-index
+        wrapt.register_post_import_hook(self.patch_llama_index_methods, "llama_index")

         if self.check_package_available("langchain_google_genai"):
             wrapt.register_post_import_hook(
                 self.patch_langchain_google_methods, "langchain_google_genai"
             )

+        if self.check_package_available("langchain_openai"):
+            wrapt.register_post_import_hook(
+                self.patch_langchain_openai_methods, "langchain_openai"
+            )
+        if self.check_package_available("langchain_anthropic"):
+            wrapt.register_post_import_hook(
+                self.patch_langchain_anthropic_methods, "langchain_anthropic"
+            )
+
     def instrument_user_interaction_calls(self):
         """Enable user interaction instrumentation for LLM calls"""
         self.auto_instrument_user_interaction = True
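The registration calls above rely on wrapt's post-import hooks: the callback fires when the named module is first imported, or immediately if it is already loaded, which keeps the patching lazy. A standalone sketch of the mechanism (the hook body and target module are illustrative):

    import wrapt

    def on_json_import(module):
        # Receives the module object once its import completes.
        print(f"would patch {module.__name__} here")

    wrapt.register_post_import_hook(on_json_import, "json")
    import json  # fires the hook, immediately if json was already loaded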
@@ -138,11 +160,88 @@ class LLMTracerMixin:
     def instrument_network_calls(self):
         """Enable network instrumentation for LLM calls"""
         self.auto_instrument_network = True
-
+
     def instrument_file_io_calls(self):
         """Enable file IO instrumentation for LLM calls"""
         self.auto_instrument_file_io = True

+    def patch_llama_index_methods(self, module):
+        """Patch llama-index LLM methods"""
+        try:
+            # Handle OpenAI LLM from llama-index
+            if hasattr(module, "llms"):
+                # OpenAI
+                if hasattr(module.llms, "openai"):
+                    openai_module = module.llms.openai
+                    if hasattr(openai_module, "OpenAI"):
+                        llm_class = getattr(openai_module, "OpenAI")
+                        self.wrap_method(llm_class, "complete")
+                        self.wrap_method(llm_class, "acomplete")
+                        self.wrap_method(llm_class, "chat")
+                        self.wrap_method(llm_class, "achat")
+                        self.wrap_method(llm_class, "stream_chat")
+                        # self.wrap_method(llm_class, "stream_achat")
+                        self.wrap_method(llm_class, "stream_complete")
+                        # self.wrap_method(llm_class, "stream_acomplete")
+
+                # Anthropic
+                if hasattr(module.llms, "anthropic"):
+                    anthropic_module = module.llms.anthropic
+                    if hasattr(anthropic_module, "Anthropic"):
+                        llm_class = getattr(anthropic_module, "Anthropic")
+                        self.wrap_method(llm_class, "complete")
+                        self.wrap_method(llm_class, "acomplete")
+                        self.wrap_method(llm_class, "chat")
+                        self.wrap_method(llm_class, "achat")
+                        self.wrap_method(llm_class, "stream_chat")
+                        # self.wrap_method(llm_class, "stream_achat")
+
+                # Azure OpenAI
+                if hasattr(module.llms, "azure_openai"):
+                    azure_module = module.llms.azure_openai
+                    if hasattr(azure_module, "AzureOpenAI"):
+                        llm_class = getattr(azure_module, "AzureOpenAI")
+                        self.wrap_method(llm_class, "complete")
+                        self.wrap_method(llm_class, "acomplete")
+                        self.wrap_method(llm_class, "chat")
+                        self.wrap_method(llm_class, "achat")
+                        self.wrap_method(llm_class, "stream_chat")
+                        # self.wrap_method(llm_class, "stream_achat")
+
+                # LiteLLM
+                if hasattr(module.llms, "litellm"):
+                    litellm_module = module.llms.litellm
+                    if hasattr(litellm_module, "LiteLLM"):
+                        llm_class = getattr(litellm_module, "LiteLLM")
+                        self.wrap_method(llm_class, "complete")
+                        self.wrap_method(llm_class, "acomplete")
+                        self.wrap_method(llm_class, "chat")
+                        self.wrap_method(llm_class, "achat")
+
+                # Vertex AI
+                if hasattr(module.llms, "vertex"):
+                    vertex_module = module.llms.vertex
+                    if hasattr(vertex_module, "Vertex"):
+                        llm_class = getattr(vertex_module, "Vertex")
+                        self.wrap_method(llm_class, "complete")
+                        self.wrap_method(llm_class, "acomplete")
+                        self.wrap_method(llm_class, "chat")
+                        self.wrap_method(llm_class, "achat")
+
+                # Gemini
+                if hasattr(module.llms, "gemini"):
+                    gemini_module = module.llms.gemini
+                    if hasattr(gemini_module, "Gemini"):
+                        llm_class = getattr(gemini_module, "Gemini")
+                        self.wrap_method(llm_class, "complete")
+                        self.wrap_method(llm_class, "acomplete")
+                        self.wrap_method(llm_class, "chat")
+                        self.wrap_method(llm_class, "achat")
+
+        except Exception as e:
+            # Log the error but continue execution
+            print(f"Warning: Failed to patch llama-index methods: {str(e)}")
+
     def patch_openai_methods(self, module):
         try:
             if hasattr(module, "OpenAI"):
@@ -155,6 +254,42 @@ class LLMTracerMixin:
             # Log the error but continue execution
             print(f"Warning: Failed to patch OpenAI methods: {str(e)}")

+    def patch_langchain_openai_methods(self, module):
+        try:
+            if hasattr(module, 'ChatOpenAI'):
+                client_class = getattr(module, "ChatOpenAI")
+
+                if hasattr(client_class, "invoke"):
+                    self.wrap_langchain_openai_method(client_class, f"{client_class.__name__}.invoke")
+                elif hasattr(client_class, "run"):
+                    self.wrap_langchain_openai_method(client_class, f"{client_class.__name__}.run")
+            if hasattr(module, 'AsyncChatOpenAI'):
+                if hasattr(client_class, "ainvoke"):
+                    self.wrap_langchain_openai_method(client_class, f"{client_class.__name__}.ainvoke")
+                elif hasattr(client_class, "arun"):
+                    self.wrap_langchain_openai_method(client_class, f"{client_class.__name__}.arun")
+        except Exception as e:
+            # Log the error but continue execution
+            print(f"Warning: Failed to patch OpenAI methods: {str(e)}")
+
+    def patch_langchain_anthropic_methods(self, module):
+        try:
+            if hasattr(module, 'ChatAnthropic'):
+                client_class = getattr(module, "ChatAnthropic")
+                if hasattr(client_class, "invoke"):
+                    self.wrap_langchain_anthropic_method(client_class, f"{client_class.__name__}.invoke")
+                if hasattr(client_class, "ainvoke"):
+                    self.wrap_langchain_anthropic_method(client_class, f"{client_class.__name__}.ainvoke")
+            if hasattr(module, 'AsyncChatAnthropic'):
+                async_client_class = getattr(module, "AsyncChatAnthropic")
+                if hasattr(async_client_class, "ainvoke"):
+                    self.wrap_langchain_anthropic_method(async_client_class, f"{async_client_class.__name__}.ainvoke")
+                if hasattr(async_client_class, "arun"):
+                    self.wrap_langchain_anthropic_method(async_client_class, f"{async_client_class.__name__}.arun")
+        except Exception as e:
+            # Log the error but continue execution
+            print(f"Warning: Failed to patch Anthropic methods: {str(e)}")
+
     def patch_openai_beta_methods(self, openai_module):
         """
         Patch the new openai.beta endpoints (threads, runs, messages, etc.)
@@ -162,6 +297,7 @@ class LLMTracerMixin:
         openai.beta.threads.runs.create(...) are automatically traced.
         """
         # Make sure openai_module has a 'beta' attribute
+        openai_module.api_type = "openai"
         if not hasattr(openai_module, "beta"):
             return

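Once these beta endpoints are patched, Assistants API calls are traced with no change to caller code. A hedged sketch of the caller's side (requires a valid OPENAI_API_KEY; the assistant id is a placeholder):

    from openai import OpenAI

    client = OpenAI()  # the patched module wraps the beta endpoints
    thread = client.beta.threads.create()
    run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id="asst_...",  # placeholder assistant id
    )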
@@ -190,7 +326,6 @@ class LLMTracerMixin:
                 if hasattr(runs_obj, method_name):
                     self.wrap_method(runs_obj, method_name)

-
     def patch_anthropic_methods(self, module):
         if hasattr(module, "Anthropic"):
             client_class = getattr(module, "Anthropic")
@@ -310,6 +445,36 @@ class LLMTracerMixin:

         setattr(client_class, "__init__", patched_init)

+    def wrap_langchain_openai_method(self, client_class, method_name):
+        method = method_name.split(".")[-1]
+        original_init = getattr(client_class, method)
+
+        @functools.wraps(original_init)
+        def patched_init(*args, **kwargs):
+            # Check if this is AsyncOpenAI or OpenAI
+            is_async = "AsyncChatOpenAI" in client_class.__name__
+
+            if is_async:
+                return self.trace_llm_call(original_init, *args, **kwargs)
+            else:
+                return self.trace_llm_call_sync(original_init, *args, **kwargs)
+
+        setattr(client_class, method, patched_init)
+
+    def wrap_langchain_anthropic_method(self, client_class, method_name):
+        original_init = getattr(client_class, method_name)
+
+        @functools.wraps(original_init)
+        def patched_init(*args, **kwargs):
+            is_async = "AsyncChatAnthropic" in client_class.__name__
+
+            if is_async:
+                return self.trace_llm_call(original_init, *args, **kwargs)
+            else:
+                return self.trace_llm_call_sync(original_init, *args, **kwargs)
+
+        setattr(client_class, method_name, patched_init)
+
     def wrap_anthropic_client_methods(self, client_class):
         original_init = client_class.__init__

@@ -370,20 +535,20 @@ class LLMTracerMixin:
         self.patches.append((obj, method_name, original_method))

     def create_llm_component(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        self,
+        component_id,
+        hash_id,
+        name,
+        llm_type,
+        version,
+        memory_used,
+        start_time,
+        input_data,
+        output_data,
+        cost={},
+        usage={},
+        error=None,
+        parameters={},
     ):
         # Update total metrics
         self.total_tokens += usage.get("total_tokens", 0)
@@ -399,7 +564,7 @@ class LLMTracerMixin:
             for interaction in self.component_user_interaction.get(component_id, []):
                 if interaction["interaction_type"] in ["input", "output"]:
                     input_output_interactions.append(interaction)
-            interactions.extend(input_output_interactions)
+            interactions.extend(input_output_interactions)
         if self.auto_instrument_file_io:
             file_io_interactions = []
             for interaction in self.component_user_interaction.get(component_id, []):
@@ -425,13 +590,24 @@ class LLMTracerMixin:
             list(parameters_to_display.items())[: self.MAX_PARAMETERS_TO_DISPLAY]
         )

-        #
-
+        # Set the Context and GT
+        span_gt = None
+        span_context = None
+        if name in self.span_attributes_dict:
+            span_gt = self.span_attributes_dict[name].gt
+            span_context = self.span_attributes_dict[name].context
+
+        logger.debug(f"span context {span_context}, span_gt {span_gt}")
+
+        # Tags
         tags = []
         if name in self.span_attributes_dict:
             tags = self.span_attributes_dict[name].tags or []

-        #
+        # Get End Time
+        end_time = datetime.now().astimezone().isoformat()
+
+        # Metrics
         metrics = []
         if name in self.span_attributes_dict:
             raw_metrics = self.span_attributes_dict[name].metrics or []
@@ -440,17 +616,32 @@ class LLMTracerMixin:
                 counter = sum(1 for x in self.visited_metrics if x.startswith(base_metric_name))
                 metric_name = f'{base_metric_name}_{counter}' if counter > 0 else base_metric_name
                 self.visited_metrics.append(metric_name)
-                metric["name"] = metric_name
+                metric["name"] = metric_name
                 metrics.append(metric)

+        # TODO TO check i/p and o/p is according or not
+        input = input_data["args"] if hasattr(input_data, "args") else input_data
+        output = output_data.output_response if output_data else None
+        #print("Prompt input:",input)
+        prompt = self.convert_to_content(input)
+        #print("Prompt Output: ",prompt)
+        #print("Response input: ",output)
+        response = self.convert_to_content(output)
+        #print("Response output: ",response)
+
+        # TODO: Execute & Add the User requested metrics here
+        formatted_metric = BaseTracer.get_formatted_metric(self.span_attributes_dict, self.project_id, name, prompt, span_context, response, span_gt)
+        if formatted_metric is not None:
+            metrics.append(formatted_metric)
+
         component = {
             "id": component_id,
             "hash_id": hash_id,
             "source_hash_id": None,
             "type": "llm",
             "name": name,
-            "start_time": start_time
-            "end_time":
+            "start_time": start_time,
+            "end_time": end_time,
             "error": error,
             "parent_id": self.current_agent_id.get(),
             "info": {
@@ -464,10 +655,8 @@ class LLMTracerMixin:
             },
             "extra_info": parameters,
             "data": {
-                "input":
-
-                ),
-                "output": output_data.output_response if output_data else None,
+                "input": input,
+                "output": output,
                 "memory_used": memory_used,
             },
             "metrics": metrics,
@@ -475,14 +664,79 @@ class LLMTracerMixin:
             "interactions": interactions,
         }

-        if
-
+        # Assign context and gt if available
+        component["data"]["gt"] = span_gt
+        component["data"]["context"] = span_context

         # Reset the SpanAttributes context variable
         self.span_attributes_dict[name] = SpanAttributes(name)

         return component

+    # def convert_to_content(self, input_data):
+    #     if isinstance(input_data, dict):
+    #         messages = input_data.get("kwargs", {}).get("messages", [])
+    #     elif isinstance(input_data, list):
+    #         messages = input_data
+    #     else:
+    #         return ""
+    #     return "\n".join(process_content(msg.get("content", "")) for msg in messages if msg.get("content"))
+
+    def convert_to_content(self, input_data):
+        if isinstance(input_data, dict):
+            messages = input_data.get("kwargs", {}).get("messages", [])
+        elif isinstance(input_data, list):
+            if len(input_data)>0 and isinstance(input_data[0]['content'],ChatResponse):
+                extracted_messages = []
+
+                for item in input_data:
+                    chat_response = item.get('content')
+                    if hasattr(chat_response, 'message') and hasattr(chat_response.message, 'blocks'):
+                        for block in chat_response.message.blocks:
+                            if hasattr(block, 'text'):
+                                extracted_messages.append(block.text)
+                messages=extracted_messages
+                if isinstance(messages,list):
+                    return "\n".join(messages)
+
+                #messages=[msg["content"] for msg in input_data if isinstance(msg, dict) and "content" in msg]
+                #messages = [msg["content"].message for msg in input_data if isinstance(msg, dict) and "content" in msg and isinstance(msg["content"], ChatResponse)]
+            else:
+                messages = input_data
+        elif isinstance(input_data,ChatResponse):
+            messages=input_data['content']
+        else:
+            return ""
+        res=""
+        # try:
+        res="\n".join(msg.get("content", "").strip() for msg in messages if msg.get("content"))
+        # except Exception as e:
+        #     print("Exception occured for: ",e)
+        #     print("Input: ",input_data,"Meeage: ",messages)
+        #     # import sys
+        #     # sys.exit()
+        return res
+
+    def process_content(content):
+        if isinstance(content, str):
+            return content.strip()
+        elif isinstance(content, list):
+            # Handle list of content blocks
+            text_parts = []
+            for block in content:
+                if hasattr(block, 'text'):
+                    # Handle TextBlock-like objects
+                    text_parts.append(block.text.strip())
+                elif isinstance(block, dict) and 'text' in block:
+                    # Handle dictionary with text field
+                    text_parts.append(block['text'].strip())
+            return " ".join(text_parts)
+        elif isinstance(content, dict):
+            # Handle dictionary content
+            return content.get('text', '').strip()
+        return ""
+
+
     def start_component(self, component_id):
         """Start tracking network calls for a component"""
         self.component_network_calls[component_id] = []
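The fallback branch of the new `convert_to_content` reduces a plain chat-message list to newline-joined content strings. Reproduced standalone for clarity (the messages are illustrative):

    messages = [
        {"role": "system", "content": "You are helpful."},
        {"role": "user", "content": "Hi"},
    ]
    # Mirrors the final join in convert_to_content above.
    res = "\n".join(msg.get("content", "").strip() for msg in messages if msg.get("content"))
    assert res == "You are helpful.\nHi"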
@@ -500,10 +754,10 @@ class LLMTracerMixin:
         if not self.auto_instrument_llm:
             return await original_func(*args, **kwargs)

-        start_time = datetime.now().astimezone()
+        start_time = datetime.now().astimezone().isoformat()
         start_memory = psutil.Process().memory_info().rss
         component_id = str(uuid.uuid4())
-        hash_id =
+        hash_id = generate_unique_hash(original_func, args, kwargs)

         # Start tracking network calls for this component
         self.start_component(component_id)
@@ -523,7 +777,8 @@ class LLMTracerMixin:
             if stream:
                 prompt_messages = kwargs['messages']
                 # Create response message for streaming case
-                response_message = {"role": "assistant", "content": result} if result else {"role": "assistant",
+                response_message = {"role": "assistant", "content": result} if result else {"role": "assistant",
+                                                                                            "content": ""}
                 token_usage = num_tokens_from_messages(model_name, prompt_messages, response_message)
             else:
                 token_usage = extract_token_usage(result)
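When the response is streamed there is no usage block to read, so both wrappers fall back to `num_tokens_from_messages`. The real helper lives in `..utils.llm_utils`; the snippet below is only an illustrative stand-in for the kind of tiktoken-based counting such a helper performs:

    import tiktoken

    def count_tokens(model_name: str, text: str) -> int:
        # Illustrative stand-in, not the package's actual helper.
        try:
            enc = tiktoken.encoding_for_model(model_name)
        except KeyError:
            enc = tiktoken.get_encoding("cl100k_base")  # generic fallback
        return len(enc.encode(text))

    print(count_tokens("gpt-4", "Hello, world"))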
@@ -603,9 +858,9 @@ class LLMTracerMixin:
         if not self.auto_instrument_llm:
             return original_func(*args, **kwargs)

-        start_time = datetime.now().astimezone()
+        start_time = datetime.now().astimezone().isoformat()
         component_id = str(uuid.uuid4())
-        hash_id =
+        hash_id = generate_unique_hash(original_func, args, kwargs)

         # Start tracking network calls for this component
         self.start_component(component_id)
@@ -625,13 +880,14 @@ class LLMTracerMixin:

         # Extract token usage and calculate cost
         model_name = extract_model_name(args, kwargs, result)
-
+
         if 'stream' in kwargs:
             stream = kwargs['stream']
             if stream:
                 prompt_messages = kwargs['messages']
                 # Create response message for streaming case
-                response_message = {"role": "assistant", "content": result} if result else {"role": "assistant",
+                response_message = {"role": "assistant", "content": result} if result else {"role": "assistant",
+                                                                                            "content": ""}
                 token_usage = num_tokens_from_messages(model_name, prompt_messages, response_message)
             else:
                 token_usage = extract_token_usage(result)
@@ -704,12 +960,12 @@ class LLMTracerMixin:
             raise

     def trace_llm(
-
-
-
-
-
-
+        self,
+        name: str = None,
+        tags: List[str] = [],
+        metadata: Dict[str, Any] = {},
+        metrics: List[Dict[str, Any]] = [],
+        feedback: Optional[Any] = None,
     ):
         if name not in self.span_attributes_dict:
             self.span_attributes_dict[name] = SpanAttributes(name)
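For orientation, the reworked `trace_llm` signature is applied as a decorator to the function that makes the model call, and the `gt` keyword handled by the wrappers below can be passed at call time. A hedged usage sketch (constructor arguments and the decorated body are illustrative; consult the RagaAI Catalyst docs for exact setup):

    from ragaai_catalyst import RagaAICatalyst, Tracer  # assumed public imports

    catalyst = RagaAICatalyst(access_key="...", secret_key="...")  # placeholder credentials
    tracer = Tracer(project_name="demo", dataset_name="runs", tracer_type="agentic")

    @tracer.trace_llm(name="summarize", tags=["demo"])
    def summarize(text, gt=None):
        # gt, if supplied, is picked up by the wrapper and attached to the span.
        return text[:100]  # stand-in for a real model call

    summarize("Long document ...", gt="reference summary")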
@@ -735,7 +991,7 @@ class LLMTracerMixin:
                 logger.error(f"Validation Error: {e}")
             except Exception as e:
                 logger.error(f"Error adding metric: {e}")
-
+
         if feedback:
             self.span(name).add_feedback(feedback)

@@ -745,12 +1001,14 @@ class LLMTracerMixin:
         @self.file_tracker.trace_decorator
         @functools.wraps(func)
         async def async_wrapper(*args, **kwargs):
-
+            gt = kwargs.get("gt") if kwargs else None
+            if gt is not None:
+                span = self.span(name)
+                span.add_gt(gt)
             self.current_llm_call_name.set(name)
             if not self.is_active:
                 return await func(*args, **kwargs)

-            hash_id = generate_unique_hash_simple(func)
             component_id = str(uuid.uuid4())
             parent_agent_id = self.current_agent_id.get()
             self.start_component(component_id)
@@ -777,13 +1035,19 @@ class LLMTracerMixin:
             if (name is not None) or (name != ""):
                 llm_component["name"] = name

-            if self.
-
+            if name in self.span_attributes_dict:
+                span_gt = self.span_attributes_dict[name].gt
+                if span_gt is not None:
+                    llm_component["data"]["gt"] = span_gt
+                span_context = self.span_attributes_dict[name].context
+                if span_context:
+                    llm_component["data"]["context"] = span_context

             if error_info:
                 llm_component["error"] = error_info["error"]
-
+
             self.end_component(component_id)
+
             # metrics
             metrics = []
             if name in self.span_attributes_dict:
@@ -793,7 +1057,7 @@ class LLMTracerMixin:
                     counter = sum(1 for x in self.visited_metrics if x.startswith(base_metric_name))
                     metric_name = f'{base_metric_name}_{counter}' if counter > 0 else base_metric_name
                     self.visited_metrics.append(metric_name)
-                    metric["name"] = metric_name
+                    metric["name"] = metric_name
                     metrics.append(metric)
                 llm_component["metrics"] = metrics
             if parent_agent_id:
@@ -811,13 +1075,14 @@ class LLMTracerMixin:
         @self.file_tracker.trace_decorator
         @functools.wraps(func)
         def sync_wrapper(*args, **kwargs):
-
+            gt = kwargs.get("gt") if kwargs else None
+            if gt is not None:
+                span = self.span(name)
+                span.add_gt(gt)
             self.current_llm_call_name.set(name)
             if not self.is_active:
                 return func(*args, **kwargs)

-            hash_id = generate_unique_hash_simple(func)
-
             component_id = str(uuid.uuid4())
             parent_agent_id = self.current_agent_id.get()
             self.start_component(component_id)
@@ -856,9 +1121,9 @@ class LLMTracerMixin:
                     counter = sum(1 for x in self.visited_metrics if x.startswith(base_metric_name))
                     metric_name = f'{base_metric_name}_{counter}' if counter > 0 else base_metric_name
                     self.visited_metrics.append(metric_name)
-                    metric["name"] = metric_name
+                    metric["name"] = metric_name
                     metrics.append(metric)
-                llm_component["metrics"] = metrics
+                llm_component["metrics"] = metrics
             if parent_agent_id:
                 children = self.agent_children.get()
                 children.append(llm_component)
ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py

@@ -119,9 +119,6 @@ class AgenticTracing(
         self.component_network_calls = {} # Store network calls per component
         self.component_user_interaction = {}

-        # Create output directory if it doesn't exist
-        self.output_dir = Path("./traces") # Using default traces directory
-        self.output_dir.mkdir(exist_ok=True)

     def start_component(self, component_id: str):
         """Start tracking network calls for a component"""
|