ragaai-catalyst 2.1.4.1b0__py3-none-any.whl → 2.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ragaai_catalyst/__init__.py +23 -2
- ragaai_catalyst/dataset.py +462 -1
- ragaai_catalyst/evaluation.py +76 -7
- ragaai_catalyst/ragaai_catalyst.py +52 -10
- ragaai_catalyst/redteaming/__init__.py +7 -0
- ragaai_catalyst/redteaming/config/detectors.toml +13 -0
- ragaai_catalyst/redteaming/data_generator/scenario_generator.py +95 -0
- ragaai_catalyst/redteaming/data_generator/test_case_generator.py +120 -0
- ragaai_catalyst/redteaming/evaluator.py +125 -0
- ragaai_catalyst/redteaming/llm_generator.py +136 -0
- ragaai_catalyst/redteaming/llm_generator_old.py +83 -0
- ragaai_catalyst/redteaming/red_teaming.py +331 -0
- ragaai_catalyst/redteaming/requirements.txt +4 -0
- ragaai_catalyst/redteaming/tests/grok.ipynb +97 -0
- ragaai_catalyst/redteaming/tests/stereotype.ipynb +2258 -0
- ragaai_catalyst/redteaming/upload_result.py +38 -0
- ragaai_catalyst/redteaming/utils/issue_description.py +114 -0
- ragaai_catalyst/redteaming/utils/rt.png +0 -0
- ragaai_catalyst/redteaming_old.py +171 -0
- ragaai_catalyst/synthetic_data_generation.py +400 -22
- ragaai_catalyst/tracers/__init__.py +17 -1
- ragaai_catalyst/tracers/agentic_tracing/data/data_structure.py +4 -2
- ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py +212 -148
- ragaai_catalyst/tracers/agentic_tracing/tracers/base.py +657 -247
- ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py +50 -19
- ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py +588 -177
- ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py +99 -100
- ragaai_catalyst/tracers/agentic_tracing/tracers/network_tracer.py +3 -3
- ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py +230 -29
- ragaai_catalyst/tracers/agentic_tracing/upload/trace_uploader.py +358 -0
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_agentic_traces.py +75 -20
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_code.py +55 -11
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_local_metric.py +74 -0
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_trace_metric.py +47 -16
- ragaai_catalyst/tracers/agentic_tracing/utils/create_dataset_schema.py +4 -2
- ragaai_catalyst/tracers/agentic_tracing/utils/file_name_tracker.py +26 -3
- ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py +182 -17
- ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json +1233 -497
- ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py +81 -10
- ragaai_catalyst/tracers/agentic_tracing/utils/supported_llm_provider.toml +34 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/system_monitor.py +215 -0
- ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py +0 -32
- ragaai_catalyst/tracers/agentic_tracing/utils/unique_decorator.py +3 -1
- ragaai_catalyst/tracers/agentic_tracing/utils/zip_list_of_unique_files.py +73 -47
- ragaai_catalyst/tracers/distributed.py +300 -0
- ragaai_catalyst/tracers/exporters/__init__.py +3 -1
- ragaai_catalyst/tracers/exporters/dynamic_trace_exporter.py +160 -0
- ragaai_catalyst/tracers/exporters/ragaai_trace_exporter.py +129 -0
- ragaai_catalyst/tracers/langchain_callback.py +809 -0
- ragaai_catalyst/tracers/llamaindex_instrumentation.py +424 -0
- ragaai_catalyst/tracers/tracer.py +301 -55
- ragaai_catalyst/tracers/upload_traces.py +24 -7
- ragaai_catalyst/tracers/utils/convert_langchain_callbacks_output.py +61 -0
- ragaai_catalyst/tracers/utils/convert_llama_instru_callback.py +69 -0
- ragaai_catalyst/tracers/utils/extraction_logic_llama_index.py +74 -0
- ragaai_catalyst/tracers/utils/langchain_tracer_extraction_logic.py +82 -0
- ragaai_catalyst/tracers/utils/model_prices_and_context_window_backup.json +9365 -0
- ragaai_catalyst/tracers/utils/trace_json_converter.py +269 -0
- {ragaai_catalyst-2.1.4.1b0.dist-info → ragaai_catalyst-2.1.5.dist-info}/METADATA +367 -45
- ragaai_catalyst-2.1.5.dist-info/RECORD +97 -0
- {ragaai_catalyst-2.1.4.1b0.dist-info → ragaai_catalyst-2.1.5.dist-info}/WHEEL +1 -1
- ragaai_catalyst-2.1.4.1b0.dist-info/RECORD +0 -67
- {ragaai_catalyst-2.1.4.1b0.dist-info → ragaai_catalyst-2.1.5.dist-info}/LICENSE +0 -0
- {ragaai_catalyst-2.1.4.1b0.dist-info → ragaai_catalyst-2.1.5.dist-info}/top_level.txt +0 -0
The hunks below are from ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py (+588/-177), identified by the `LLMTracerMixin` context in the hunk headers. Lines that the registry viewer truncated (e.g. `hash_id =`, `from ..utils.`) are kept as extracted.

```diff
@@ -10,7 +10,15 @@ from datetime import datetime
 import uuid
 import contextvars
 import traceback
+import importlib
+import sys
+import logging
 
+try:
+    from llama_index.core.base.llms.types import ChatResponse,TextBlock, ChatMessage
+except ImportError:
+    logging.warning("Failed to import ChatResponse, TextBlock, ChatMessage. Some features from llamaindex may not work. Please upgrade to the latest version of llama_index or version (>=0.12)")
+from .base import BaseTracer
 from ..utils.llm_utils import (
     extract_model_name,
     extract_parameters,
```
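The guarded import keeps the tracer importable when `llama_index` is absent, but note that it leaves `ChatResponse`, `TextBlock`, and `ChatMessage` unbound on the failure path. A minimal sketch of the same pattern with explicit fallback bindings (the `None` sentinels are our illustration, not what the diff ships):

```python
import logging

try:
    from llama_index.core.base.llms.types import ChatResponse, TextBlock, ChatMessage
except ImportError:
    # Fallback bindings so later type checks can be guarded with a None test
    # instead of raising NameError when llama_index is not installed.
    ChatResponse = TextBlock = ChatMessage = None
    logging.warning("llama_index >= 0.12 not installed; llama-index tracing is disabled")
```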
```diff
@@ -20,12 +28,12 @@ from ..utils.llm_utils import (
     sanitize_api_keys,
     sanitize_input,
     extract_llm_output,
+    num_tokens_from_messages,
+    get_model_cost
 )
-from ..utils.
-from ..utils.unique_decorator import generate_unique_hash_simple
+from ..utils.unique_decorator import generate_unique_hash
 from ..utils.file_name_tracker import TrackName
 from ..utils.span_attributes import SpanAttributes
-import logging
 
 logger = logging.getLogger(__name__)
 logging_level = (
```
```diff
@@ -41,10 +49,9 @@ class LLMTracerMixin:
         self.file_tracker = TrackName()
         self.patches = []
         try:
-            self.model_costs =
+            self.model_costs = get_model_cost()
         except Exception as e:
             self.model_costs = {
-                # TODO: Default cost handling needs to be improved
                 "default": {"input_cost_per_token": 0.0, "output_cost_per_token": 0.0}
             }
         self.MAX_PARAMETERS_TO_DISPLAY = 10
```
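`get_model_cost()` replaces the old inline cost loading, and the `except` branch pins a zero-cost default so tracing never fails on pricing lookups. A hedged sketch of what such a loader can look like; the package path and file name are assumptions based on the `model_costs.json` entry in the file list above, not the library's actual implementation:

```python
import json
from importlib import resources

def load_model_costs_with_fallback():
    try:
        # model_costs.json ships inside the wheel (see +1233/-497 above)
        pkg = resources.files("ragaai_catalyst.tracers.agentic_tracing.utils")
        with pkg.joinpath("model_costs.json").open() as f:
            return json.load(f)
    except Exception:
        # Same shape as the diff's default: zero cost per token
        return {"default": {"input_cost_per_token": 0.0, "output_cost_per_token": 0.0}}
```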
```diff
@@ -58,25 +65,32 @@ class LLMTracerMixin:
         self.total_cost = 0.0
         self.llm_data = {}
 
-        # Add auto_instrument options
         self.auto_instrument_llm = False
         self.auto_instrument_user_interaction = False
+        self.auto_instrument_file_io = False
         self.auto_instrument_network = False
 
+    def check_package_available(self, package_name):
+        """Check if a package is available in the environment"""
+        try:
+            importlib.import_module(package_name)
+            return True
+        except ImportError:
+            return False
+
+    def validate_openai_key(self):
+        """Validate if OpenAI API key is available"""
+        return bool(os.getenv("OPENAI_API_KEY"))
+
     def instrument_llm_calls(self):
         """Enable LLM instrumentation"""
         self.auto_instrument_llm = True
-
-        # Handle modules that are already imported
-        import sys
-
+        # Check currently loaded modules
         if "vertexai" in sys.modules:
             self.patch_vertex_ai_methods(sys.modules["vertexai"])
-        if "vertexai.generative_models" in sys.modules:
-            self.patch_vertex_ai_methods(sys.modules["vertexai.generative_models"])
-
-        if "openai" in sys.modules:
+        if "openai" in sys.modules and self.validate_openai_key():
             self.patch_openai_methods(sys.modules["openai"])
+            self.patch_openai_beta_methods(sys.modules["openai"])
         if "litellm" in sys.modules:
             self.patch_litellm_methods(sys.modules["litellm"])
         if "anthropic" in sys.modules:
```
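`check_package_available` probes by actually importing the package, which executes its top-level code. An alternative probe (our illustration, not the diff's code) asks the import machinery without importing:

```python
import importlib.util

def package_importable(name: str) -> bool:
    try:
        # find_spec returns None for missing packages without importing them;
        # it can itself raise for dotted names whose parent package is absent.
        return importlib.util.find_spec(name) is not None
    except (ImportError, ValueError):
        return False

assert package_importable("json") is True
assert package_importable("no_such_pkg") is False
```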
```diff
@@ -84,31 +98,63 @@ class LLMTracerMixin:
         if "google.generativeai" in sys.modules:
             self.patch_google_genai_methods(sys.modules["google.generativeai"])
         if "langchain_google_vertexai" in sys.modules:
-            self.patch_langchain_google_methods(
-                sys.modules["langchain_google_vertexai"]
-            )
+            self.patch_langchain_google_methods(sys.modules["langchain_google_vertexai"])
         if "langchain_google_genai" in sys.modules:
             self.patch_langchain_google_methods(sys.modules["langchain_google_genai"])
 
-
-
-
-        self.
-        )
-        wrapt.register_post_import_hook(self.patch_openai_methods, "openai")
-        wrapt.register_post_import_hook(self.patch_litellm_methods, "litellm")
-        wrapt.register_post_import_hook(self.patch_anthropic_methods, "anthropic")
-        wrapt.register_post_import_hook(
-            self.patch_google_genai_methods, "google.generativeai"
-        )
+        if "langchain_openai" in sys.modules:
+            self.patch_langchain_openai_methods(sys.modules["langchain_openai"])
+        if "langchain_anthropic" in sys.modules:
+            self.patch_langchain_anthropic_methods(sys.modules["langchain_anthropic"])
 
-
-
-
-
-
-        self.
-
+        if "llama_index" in sys.modules:
+            self.patch_llama_index_methods(sys.modules["llama_index"])
+
+        # Register hooks for future imports with availability checks
+        if self.check_package_available("vertexai"):
+            wrapt.register_post_import_hook(self.patch_vertex_ai_methods, "vertexai")
+            wrapt.register_post_import_hook(
+                self.patch_vertex_ai_methods, "vertexai.generative_models"
+            )
+
+        if self.check_package_available("openai") and self.validate_openai_key():
+            wrapt.register_post_import_hook(self.patch_openai_methods, "openai")
+            wrapt.register_post_import_hook(self.patch_openai_beta_methods, "openai")
+
+        if self.check_package_available("litellm"):
+            wrapt.register_post_import_hook(self.patch_litellm_methods, "litellm")
+
+        if self.check_package_available("anthropic"):
+            wrapt.register_post_import_hook(self.patch_anthropic_methods, "anthropic")
+
+        if self.check_package_available("google.generativeai"):
+            wrapt.register_post_import_hook(
+                self.patch_google_genai_methods, "google.generativeai"
+            )
+
+        # Add hooks for LangChain integrations with availability checks
+        if self.check_package_available("langchain_google_vertexai"):
+            wrapt.register_post_import_hook(
+                self.patch_langchain_google_methods, "langchain_google_vertexai"
+            )
+
+
+        # Add hooks for llama-index
+        wrapt.register_post_import_hook(self.patch_llama_index_methods, "llama_index")
+
+        if self.check_package_available("langchain_google_genai"):
+            wrapt.register_post_import_hook(
+                self.patch_langchain_google_methods, "langchain_google_genai"
+            )
+
+        if self.check_package_available("langchain_openai"):
+            wrapt.register_post_import_hook(
+                self.patch_langchain_openai_methods, "langchain_openai"
+            )
+        if self.check_package_available("langchain_anthropic"):
+            wrapt.register_post_import_hook(
+                self.patch_langchain_anthropic_methods, "langchain_anthropic"
+            )
 
     def instrument_user_interaction_calls(self):
         """Enable user interaction instrumentation for LLM calls"""
```
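Everything above hinges on `wrapt.register_post_import_hook`: the callback fires immediately if the module is already loaded, otherwise at the moment of first import, so patching works regardless of import order. A self-contained demonstration against the stdlib (requires `wrapt`):

```python
import wrapt

def on_import(module):
    # In the tracer, this is where a patch_*_methods call would run
    print(f"patching {module.__name__}")

wrapt.register_post_import_hook(on_import, "decimal")

import decimal  # hook fires now, or fired at registration if already imported
```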
```diff
@@ -118,6 +164,87 @@ class LLMTracerMixin:
         """Enable network instrumentation for LLM calls"""
         self.auto_instrument_network = True
 
+    def instrument_file_io_calls(self):
+        """Enable file IO instrumentation for LLM calls"""
+        self.auto_instrument_file_io = True
+
+    def patch_llama_index_methods(self, module):
+        """Patch llama-index LLM methods"""
+        try:
+            # Handle OpenAI LLM from llama-index
+            if hasattr(module, "llms"):
+                # OpenAI
+                if hasattr(module.llms, "openai"):
+                    openai_module = module.llms.openai
+                    if hasattr(openai_module, "OpenAI"):
+                        llm_class = getattr(openai_module, "OpenAI")
+                        self.wrap_method(llm_class, "complete")
+                        self.wrap_method(llm_class, "acomplete")
+                        self.wrap_method(llm_class, "chat")
+                        self.wrap_method(llm_class, "achat")
+                        self.wrap_method(llm_class, "stream_chat")
+                        # self.wrap_method(llm_class, "stream_achat")
+                        self.wrap_method(llm_class, "stream_complete")
+                        # self.wrap_method(llm_class, "stream_acomplete")
+
+                # Anthropic
+                if hasattr(module.llms, "anthropic"):
+                    anthropic_module = module.llms.anthropic
+                    if hasattr(anthropic_module, "Anthropic"):
+                        llm_class = getattr(anthropic_module, "Anthropic")
+                        self.wrap_method(llm_class, "complete")
+                        self.wrap_method(llm_class, "acomplete")
+                        self.wrap_method(llm_class, "chat")
+                        self.wrap_method(llm_class, "achat")
+                        self.wrap_method(llm_class, "stream_chat")
+                        # self.wrap_method(llm_class, "stream_achat")
+
+                # Azure OpenAI
+                if hasattr(module.llms, "azure_openai"):
+                    azure_module = module.llms.azure_openai
+                    if hasattr(azure_module, "AzureOpenAI"):
+                        llm_class = getattr(azure_module, "AzureOpenAI")
+                        self.wrap_method(llm_class, "complete")
+                        self.wrap_method(llm_class, "acomplete")
+                        self.wrap_method(llm_class, "chat")
+                        self.wrap_method(llm_class, "achat")
+                        self.wrap_method(llm_class, "stream_chat")
+                        # self.wrap_method(llm_class, "stream_achat")
+
+                # LiteLLM
+                if hasattr(module.llms, "litellm"):
+                    litellm_module = module.llms.litellm
+                    if hasattr(litellm_module, "LiteLLM"):
+                        llm_class = getattr(litellm_module, "LiteLLM")
+                        self.wrap_method(llm_class, "complete")
+                        self.wrap_method(llm_class, "acomplete")
+                        self.wrap_method(llm_class, "chat")
+                        self.wrap_method(llm_class, "achat")
+
+                # Vertex AI
+                if hasattr(module.llms, "vertex"):
+                    vertex_module = module.llms.vertex
+                    if hasattr(vertex_module, "Vertex"):
+                        llm_class = getattr(vertex_module, "Vertex")
+                        self.wrap_method(llm_class, "complete")
+                        self.wrap_method(llm_class, "acomplete")
+                        self.wrap_method(llm_class, "chat")
+                        self.wrap_method(llm_class, "achat")
+
+                # Gemini
+                if hasattr(module.llms, "gemini"):
+                    gemini_module = module.llms.gemini
+                    if hasattr(gemini_module, "Gemini"):
+                        llm_class = getattr(gemini_module, "Gemini")
+                        self.wrap_method(llm_class, "complete")
+                        self.wrap_method(llm_class, "acomplete")
+                        self.wrap_method(llm_class, "chat")
+                        self.wrap_method(llm_class, "achat")
+
+        except Exception as e:
+            # Log the error but continue execution
+            print(f"Warning: Failed to patch llama-index methods: {str(e)}")
+
     def patch_openai_methods(self, module):
         try:
             if hasattr(module, "OpenAI"):
```
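All of the per-provider patching funnels through `self.wrap_method`, which, judging from the `self.patches` bookkeeping seen earlier in this file, swaps in a wrapper and remembers the original so it can be restored. A standalone sketch of that mechanic, with a dummy class standing in for a provider client:

```python
import functools

_patches = []  # (owner, method_name, original) triples for a later restore

def wrap_method(owner, method_name):
    original = getattr(owner, method_name)

    @functools.wraps(original)
    def wrapper(*args, **kwargs):
        print(f"traced call: {method_name}")  # tracing side-channel
        return original(*args, **kwargs)

    setattr(owner, method_name, wrapper)
    _patches.append((owner, method_name, original))

class DummyLLM:
    def chat(self, msg):
        return msg.upper()

wrap_method(DummyLLM, "chat")
assert DummyLLM().chat("hi") == "HI"
```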
```diff
@@ -130,6 +257,78 @@ class LLMTracerMixin:
             # Log the error but continue execution
             print(f"Warning: Failed to patch OpenAI methods: {str(e)}")
 
+    def patch_langchain_openai_methods(self, module):
+        try:
+            if hasattr(module, 'ChatOpenAI'):
+                client_class = getattr(module, "ChatOpenAI")
+
+                if hasattr(client_class, "invoke"):
+                    self.wrap_langchain_openai_method(client_class, f"{client_class.__name__}.invoke")
+                elif hasattr(client_class, "run"):
+                    self.wrap_langchain_openai_method(client_class, f"{client_class.__name__}.run")
+            if hasattr(module, 'AsyncChatOpenAI'):
+                if hasattr(client_class, "ainvoke"):
+                    self.wrap_langchain_openai_method(client_class, f"{client_class.__name__}.ainvoke")
+                elif hasattr(client_class, "arun"):
+                    self.wrap_langchain_openai_method(client_class, f"{client_class.__name__}.arun")
+        except Exception as e:
+            # Log the error but continue execution
+            print(f"Warning: Failed to patch OpenAI methods: {str(e)}")
+
+    def patch_langchain_anthropic_methods(self, module):
+        try:
+            if hasattr(module, 'ChatAnthropic'):
+                client_class = getattr(module, "ChatAnthropic")
+                if hasattr(client_class, "invoke"):
+                    self.wrap_langchain_anthropic_method(client_class, f"{client_class.__name__}.invoke")
+                if hasattr(client_class, "ainvoke"):
+                    self.wrap_langchain_anthropic_method(client_class, f"{client_class.__name__}.ainvoke")
+            if hasattr(module, 'AsyncChatAnthropic'):
+                async_client_class = getattr(module, "AsyncChatAnthropic")
+                if hasattr(async_client_class, "ainvoke"):
+                    self.wrap_langchain_anthropic_method(async_client_class, f"{async_client_class.__name__}.ainvoke")
+                if hasattr(async_client_class, "arun"):
+                    self.wrap_langchain_anthropic_method(async_client_class, f"{async_client_class.__name__}.arun")
+        except Exception as e:
+            # Log the error but continue execution
+            print(f"Warning: Failed to patch Anthropic methods: {str(e)}")
+
+    def patch_openai_beta_methods(self, openai_module):
+        """
+        Patch the new openai.beta endpoints (threads, runs, messages, etc.)
+        so that calls like openai.beta.threads.create(...) or
+        openai.beta.threads.runs.create(...) are automatically traced.
+        """
+        # Make sure openai_module has a 'beta' attribute
+        openai_module.api_type = "openai"
+        if not hasattr(openai_module, "beta"):
+            return
+
+        beta_module = openai_module.beta
+
+        # Patch openai.beta.threads
+        import openai
+        openai.api_type = "openai"
+        if hasattr(beta_module, "threads"):
+            threads_obj = beta_module.threads
+            # Patch top-level methods on openai.beta.threads
+            for method_name in ["create", "list"]:
+                if hasattr(threads_obj, method_name):
+                    self.wrap_method(threads_obj, method_name)
+
+            # Patch the nested objects: messages, runs
+            if hasattr(threads_obj, "messages"):
+                messages_obj = threads_obj.messages
+                for method_name in ["create", "list"]:
+                    if hasattr(messages_obj, method_name):
+                        self.wrap_method(messages_obj, method_name)
+
+            if hasattr(threads_obj, "runs"):
+                runs_obj = threads_obj.runs
+                for method_name in ["create", "retrieve", "list"]:
+                    if hasattr(runs_obj, method_name):
+                        self.wrap_method(runs_obj, method_name)
+
     def patch_anthropic_methods(self, module):
         if hasattr(module, "Anthropic"):
             client_class = getattr(module, "Anthropic")
```
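`patch_openai_beta_methods` walks nested resource objects (`threads`, `threads.messages`, `threads.runs`) and wraps whichever methods exist. The same idea generalizes to a small path-walking helper; this is illustrative only, with a stub standing in for the OpenAI client:

```python
def patch_nested(root, path, method_names, wrap):
    """Follow an attribute path like 'threads.runs', wrapping methods if present."""
    obj = root
    for part in path.split("."):
        if not hasattr(obj, part):
            return
        obj = getattr(obj, part)
    for m in method_names:
        if hasattr(obj, m):
            wrap(obj, m)

class Runs:
    def create(self): return "run"
class Threads:
    runs = Runs()
class Beta:
    threads = Threads()

wrapped = []
patch_nested(Beta(), "threads.runs", ["create", "retrieve"],
             lambda obj, m: wrapped.append(m))
assert wrapped == ["create"]  # 'retrieve' is absent on the stub, so skipped
```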
```diff
@@ -249,6 +448,36 @@ class LLMTracerMixin:
 
         setattr(client_class, "__init__", patched_init)
 
+    def wrap_langchain_openai_method(self, client_class, method_name):
+        method = method_name.split(".")[-1]
+        original_init = getattr(client_class, method)
+
+        @functools.wraps(original_init)
+        def patched_init(*args, **kwargs):
+            # Check if this is AsyncOpenAI or OpenAI
+            is_async = "AsyncChatOpenAI" in client_class.__name__
+
+            if is_async:
+                return self.trace_llm_call(original_init, *args, **kwargs)
+            else:
+                return self.trace_llm_call_sync(original_init, *args, **kwargs)
+
+        setattr(client_class, method, patched_init)
+
+    def wrap_langchain_anthropic_method(self, client_class, method_name):
+        original_init = getattr(client_class, method_name)
+
+        @functools.wraps(original_init)
+        def patched_init(*args, **kwargs):
+            is_async = "AsyncChatAnthropic" in client_class.__name__
+
+            if is_async:
+                return self.trace_llm_call(original_init, *args, **kwargs)
+            else:
+                return self.trace_llm_call_sync(original_init, *args, **kwargs)
+
+        setattr(client_class, method_name, patched_init)
+
     def wrap_anthropic_client_methods(self, client_class):
         original_init = client_class.__init__
 
```
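Both wrappers pick the async or sync tracing path by substring-matching the class name (`"AsyncChatOpenAI" in client_class.__name__`). A more direct alternative, shown here as our sketch rather than the diff's approach, asks the coroutine machinery:

```python
import asyncio
import functools
import inspect

def traced(original):
    if inspect.iscoroutinefunction(original):
        @functools.wraps(original)
        async def awrapper(*args, **kwargs):
            # async tracing path would go here
            return await original(*args, **kwargs)
        return awrapper

    @functools.wraps(original)
    def wrapper(*args, **kwargs):
        # sync tracing path would go here
        return original(*args, **kwargs)
    return wrapper

async def acall(x):
    return x

assert asyncio.run(traced(acall)(42)) == 42
```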
```diff
@@ -309,109 +538,195 @@ class LLMTracerMixin:
         self.patches.append((obj, method_name, original_method))
 
     def create_llm_component(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        self,
+        component_id,
+        hash_id,
+        name,
+        llm_type,
+        version,
+        memory_used,
+        start_time,
+        input_data,
+        output_data,
+        cost={},
+        usage={},
+        error=None,
+        parameters={},
     ):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        try:
+            # Update total metrics
+            self.total_tokens += usage.get("total_tokens", 0)
+            self.total_cost += cost.get("total_cost", 0)
+
+            network_calls = []
+            if self.auto_instrument_network:
+                network_calls = self.component_network_calls.get(component_id, [])
+
+            interactions = []
+            if self.auto_instrument_user_interaction:
+                input_output_interactions = []
+                for interaction in self.component_user_interaction.get(component_id, []):
+                    if interaction["interaction_type"] in ["input", "output"]:
+                        input_output_interactions.append(interaction)
+                interactions.extend(input_output_interactions)
+            if self.auto_instrument_file_io:
+                file_io_interactions = []
+                for interaction in self.component_user_interaction.get(component_id, []):
+                    if interaction["interaction_type"] in ["file_read", "file_write"]:
+                        file_io_interactions.append(interaction)
+                interactions.extend(file_io_interactions)
+
+            parameters_to_display = {}
+            if "run_manager" in parameters:
+                parameters_obj = parameters["run_manager"]
+                if hasattr(parameters_obj, "metadata"):
+                    metadata = parameters_obj.metadata
+                    # parameters = {'metadata': metadata}
+                    parameters_to_display.update(metadata)
+
+            # Add only those keys in parameters that are single values and not objects, dict or list
+            for key, value in parameters.items():
+                if isinstance(value, (str, int, float, bool)):
+                    parameters_to_display[key] = value
+
+            # Limit the number of parameters to display
+            parameters_to_display = dict(
+                list(parameters_to_display.items())[: self.MAX_PARAMETERS_TO_DISPLAY]
+            )
+
+            # Set the Context and GT
+            span_gt = None
+            span_context = None
+            if name in self.span_attributes_dict:
+                span_gt = self.span_attributes_dict[name].gt
+                span_context = self.span_attributes_dict[name].context
+
+            logger.debug(f"span context {span_context}, span_gt {span_gt}")
+
+            # Tags
+            tags = []
+            if name in self.span_attributes_dict:
+                tags = self.span_attributes_dict[name].tags or []
+
+            # Get End Time
+            end_time = datetime.now().astimezone().isoformat()
+
+            # Metrics
+            metrics = []
+            if name in self.span_attributes_dict:
+                raw_metrics = self.span_attributes_dict[name].metrics or []
+                for metric in raw_metrics:
+                    base_metric_name = metric["name"]
+                    counter = sum(1 for x in self.visited_metrics if x.startswith(base_metric_name))
+                    metric_name = f'{base_metric_name}_{counter}' if counter > 0 else base_metric_name
+                    self.visited_metrics.append(metric_name)
+                    metric["name"] = metric_name
+                    metrics.append(metric)
+
+            # TODO TO check i/p and o/p is according or not
+            input = input_data["args"] if hasattr(input_data, "args") else input_data
+            output = output_data.output_response if output_data else None
+            prompt = self.convert_to_content(input)
+            response = self.convert_to_content(output)
+
+            # TODO: Execute & Add the User requested metrics here
+            formatted_metrics = BaseTracer.get_formatted_metric(self.span_attributes_dict, self.project_id, name)
+            if formatted_metrics:
+                metrics.extend(formatted_metrics)
+
+            component = {
+                "id": component_id,
+                "hash_id": hash_id,
+                "source_hash_id": None,
+                "type": "llm",
+                "name": name,
+                "start_time": start_time,
+                "end_time": end_time,
+                "error": error,
+                "parent_id": self.current_agent_id.get(),
+                "info": {
+                    "model": llm_type,
+                    "version": version,
+                    "memory_used": memory_used,
+                    "cost": cost,
+                    "tokens": usage,
+                    "tags": tags,
+                    **parameters_to_display,
+                },
+                "extra_info": parameters,
+                "data": {
+                    "input": input,
+                    "output": output,
+                    "memory_used": memory_used,
+                },
+                "metrics": metrics,
+                "network_calls": network_calls,
+                "interactions": interactions,
+            }
+
+            # Assign context and gt if available
+            component["data"]["gt"] = span_gt
+            component["data"]["context"] = span_context
+
+            # Reset the SpanAttributes context variable
+            self.span_attributes_dict[name] = SpanAttributes(name)
 
-
-
-
-        if name in self.span_attributes_dict:
-            tags = self.span_attributes_dict[name].tags or []
-
-        # metrics
-        metrics = []
-        if name in self.span_attributes_dict:
-            raw_metrics = self.span_attributes_dict[name].metrics or []
-            for metric in raw_metrics:
-                base_metric_name = metric["name"]
-                counter = sum(1 for x in self.visited_metrics if x.startswith(base_metric_name))
-                metric_name = f'{base_metric_name}_{counter}' if counter > 0 else base_metric_name
-                self.visited_metrics.append(metric_name)
-                metric["name"] = metric_name
-                metrics.append(metric)
-
-        component = {
-            "id": component_id,
-            "hash_id": hash_id,
-            "source_hash_id": None,
-            "type": "llm",
-            "name": name,
-            "start_time": start_time.isoformat(),
-            "end_time": datetime.now().astimezone().isoformat(),
-            "error": error,
-            "parent_id": self.current_agent_id.get(),
-            "info": {
-                "model": llm_type,
-                "version": version,
-                "memory_used": memory_used,
-                "cost": cost,
-                "tokens": usage,
-                "tags": tags,
-                **parameters_to_display,
-            },
-            "extra_info": parameters,
-            "data": {
-                "input": (
-                    input_data["args"] if hasattr(input_data, "args") else input_data
-                ),
-                "output": output_data.output_response if output_data else None,
-                "memory_used": memory_used,
-            },
-            "metrics": metrics,
-            "network_calls": network_calls,
-            "interactions": interactions,
-        }
-
-        if self.gt:
-            component["data"]["gt"] = self.gt
-
-        # Reset the SpanAttributes context variable
-        self.span_attributes_dict[name] = SpanAttributes(name)
-
-        return component
+            return component
+        except Exception as e:
+            raise Exception("Failed to create LLM component")
 
+    def convert_to_content(self, input_data):
+        try:
+            if isinstance(input_data, dict):
+                messages = input_data.get("kwargs", {}).get("messages", [])
+            elif isinstance(input_data, list):
+                if len(input_data)>0 and isinstance(input_data[0]['content'],ChatResponse):
+                    extracted_messages = []
+                    for item in input_data:
+                        chat_response = item.get('content')
+                        if hasattr(chat_response, 'message') and hasattr(chat_response.message, 'blocks'):
+                            for block in chat_response.message.blocks:
+                                if hasattr(block, 'text'):
+                                    extracted_messages.append(block.text)
+                    messages=extracted_messages
+                    if isinstance(messages,list):
+                        return "\n".join(messages)
+                elif len(input_data)>0 and isinstance(input_data[0]['content'],TextBlock):
+                    return " ".join(block.text for item in input_data for block in item['content'] if isinstance(block, TextBlock))
+                elif len(input_data)>0 and isinstance(input_data[0]['content'],ChatMessage):
+                    return " ".join(block.text for block in input_data[0]['content'].blocks if isinstance(block, TextBlock))
+                else:
+                    messages = input_data
+            elif isinstance(input_data,ChatResponse):
+                messages=input_data['content']
+            else:
+                return ""
+            res=""
+            res="\n".join(msg.get("content", "").strip() for msg in messages if msg.get("content"))
+        except Exception as e:
+            res=str(input_data)
+        return res
+
+    def process_content(content):
+        if isinstance(content, str):
+            return content.strip()
+        elif isinstance(content, list):
+            # Handle list of content blocks
+            text_parts = []
+            for block in content:
+                if hasattr(block, 'text'):
+                    # Handle TextBlock-like objects
+                    text_parts.append(block.text.strip())
+                elif isinstance(block, dict) and 'text' in block:
+                    # Handle dictionary with text field
+                    text_parts.append(block['text'].strip())
+            return " ".join(text_parts)
+        elif isinstance(content, dict):
+            # Handle dictionary content
+            return content.get('text', '').strip()
+        return ""
+
+
     def start_component(self, component_id):
         """Start tracking network calls for a component"""
         self.component_network_calls[component_id] = []
```
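`convert_to_content` normalizes several input shapes (OpenAI-style `{"kwargs": {"messages": [...]}}` dicts, lists of message dicts, and llama-index `ChatResponse`/`TextBlock`/`ChatMessage` objects) into one prompt string. A reduced sketch covering just the dict and list-of-dicts cases:

```python
def flatten_messages(input_data):
    if isinstance(input_data, dict):
        messages = input_data.get("kwargs", {}).get("messages", [])
    elif isinstance(input_data, list):
        messages = input_data
    else:
        return ""
    # Join non-empty message contents, one per line, as the diff does
    return "\n".join(
        msg.get("content", "").strip() for msg in messages if msg.get("content")
    )

assert flatten_messages([{"role": "user", "content": "hi"}]) == "hi"
```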
```diff
@@ -429,10 +744,10 @@ class LLMTracerMixin:
         if not self.auto_instrument_llm:
             return await original_func(*args, **kwargs)
 
-        start_time = datetime.now().astimezone()
+        start_time = datetime.now().astimezone().isoformat()
         start_memory = psutil.Process().memory_info().rss
         component_id = str(uuid.uuid4())
-        hash_id =
+        hash_id = generate_unique_hash(original_func, args, kwargs)
 
         # Start tracking network calls for this component
         self.start_component(component_id)
```
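`generate_unique_hash` now receives the call's `args` and `kwargs`, so the hash identifies a specific invocation rather than just the function, as `generate_unique_hash_simple(func)` did. The real helper lives in `..utils.unique_decorator`; a plausible sketch of the idea, not its actual implementation:

```python
import hashlib

def call_hash(func, args, kwargs):
    # Hash the qualified name together with a stable rendering of the arguments
    payload = f"{func.__module__}.{func.__qualname__}|{args!r}|{sorted(kwargs.items())!r}"
    return hashlib.md5(payload.encode()).hexdigest()
```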
```diff
@@ -447,8 +762,19 @@ class LLMTracerMixin:
 
         # Extract token usage and calculate cost
         model_name = extract_model_name(args, kwargs, result)
-
-
+        if 'stream' in kwargs:
+            stream = kwargs['stream']
+            if stream:
+                prompt_messages = kwargs['messages']
+                # Create response message for streaming case
+                response_message = {"role": "assistant", "content": result} if result else {"role": "assistant",
+                                                                                            "content": ""}
+                token_usage = num_tokens_from_messages(model_name, prompt_messages, response_message)
+            else:
+                token_usage = extract_token_usage(result)
+        else:
+            token_usage = extract_token_usage(result)
+        cost = calculate_llm_cost(token_usage, model_name, self.model_costs, self.model_custom_cost)
         parameters = extract_parameters(kwargs)
         input_data = extract_input_data(args, kwargs, result)
 
```
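With `stream=True` the provider response carries no usage block, so the diff reconstructs counts from the prompt messages plus the accumulated streamed text via `num_tokens_from_messages`. A rough standalone equivalent using `tiktoken`; this is an assumption about the approach, not the library's exact helper:

```python
import tiktoken

def count_stream_tokens(model, prompt_messages, response_text):
    try:
        enc = tiktoken.encoding_for_model(model)
    except KeyError:
        enc = tiktoken.get_encoding("cl100k_base")  # fallback for unknown models
    prompt_tokens = sum(len(enc.encode(m.get("content") or "")) for m in prompt_messages)
    completion_tokens = len(enc.encode(response_text or ""))
    return {
        "prompt_tokens": prompt_tokens,
        "completion_tokens": completion_tokens,
        "total_tokens": prompt_tokens + completion_tokens,
    }
```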
```diff
@@ -475,7 +801,7 @@ class LLMTracerMixin:
             parameters=parameters,
         )
 
-
+        self.add_component(llm_component)
         self.llm_data = llm_component
 
         return result
```
```diff
@@ -522,9 +848,9 @@ class LLMTracerMixin:
         if not self.auto_instrument_llm:
             return original_func(*args, **kwargs)
 
-        start_time = datetime.now().astimezone()
+        start_time = datetime.now().astimezone().isoformat()
         component_id = str(uuid.uuid4())
-        hash_id =
+        hash_id = generate_unique_hash(original_func, args, kwargs)
 
         # Start tracking network calls for this component
         self.start_component(component_id)
```
```diff
@@ -544,8 +870,20 @@ class LLMTracerMixin:
 
         # Extract token usage and calculate cost
         model_name = extract_model_name(args, kwargs, result)
-
-
+
+        if 'stream' in kwargs:
+            stream = kwargs['stream']
+            if stream:
+                prompt_messages = kwargs['messages']
+                # Create response message for streaming case
+                response_message = {"role": "assistant", "content": result} if result else {"role": "assistant",
+                                                                                            "content": ""}
+                token_usage = num_tokens_from_messages(model_name, prompt_messages, response_message)
+            else:
+                token_usage = extract_token_usage(result)
+        else:
+            token_usage = extract_token_usage(result)
+        cost = calculate_llm_cost(token_usage, model_name, self.model_costs, self.model_custom_cost)
         parameters = extract_parameters(kwargs)
         input_data = extract_input_data(args, kwargs, result)
 
```
```diff
@@ -612,13 +950,17 @@ class LLMTracerMixin:
             raise
 
     def trace_llm(
-
-
-
-
-
-
+        self,
+        name: str = None,
+        tags: List[str] = [],
+        metadata: Dict[str, Any] = {},
+        metrics: List[Dict[str, Any]] = [],
+        feedback: Optional[Any] = None,
     ):
+
+        start_memory = psutil.Process().memory_info().rss
+        start_time = datetime.now().astimezone().isoformat()
+
         if name not in self.span_attributes_dict:
             self.span_attributes_dict[name] = SpanAttributes(name)
         if tags:
```
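`trace_llm` is a decorator factory: span attributes are bound when the decorator is applied, and a sync or async wrapper is chosen per function (see the wrappers in the following hunks). Note the mutable default arguments (`tags: List[str] = []`, `metadata: Dict[str, Any] = {}`) in the new signature, a classic Python pitfall; the sketch below uses `None` defaults instead. Illustration of the shape only:

```python
import functools
import inspect

def trace_llm(name=None, tags=None):
    tags = tags or []

    def decorator(func):
        label = name or func.__name__

        if inspect.iscoroutinefunction(func):
            @functools.wraps(func)
            async def async_wrapper(*args, **kwargs):
                print(f"start {label} tags={tags}")
                try:
                    return await func(*args, **kwargs)
                finally:
                    print(f"end {label}")
            return async_wrapper

        @functools.wraps(func)
        def sync_wrapper(*args, **kwargs):
            print(f"start {label} tags={tags}")
            try:
                return func(*args, **kwargs)
            finally:
                print(f"end {label}")
        return sync_wrapper

    return decorator

@trace_llm(name="demo")
def ask(q):
    return f"echo:{q}"

assert ask("hi") == "echo:hi"
```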
```diff
@@ -643,22 +985,23 @@ class LLMTracerMixin:
                 logger.error(f"Validation Error: {e}")
             except Exception as e:
                 logger.error(f"Error adding metric: {e}")
-
+
         if feedback:
             self.span(name).add_feedback(feedback)
 
         self.current_llm_call_name.set(name)
 
         def decorator(func):
-            @self.file_tracker.trace_decorator
             @functools.wraps(func)
             async def async_wrapper(*args, **kwargs):
-
+                gt = kwargs.get("gt") if kwargs else None
+                if gt is not None:
+                    span = self.span(name)
+                    span.add_gt(gt)
                 self.current_llm_call_name.set(name)
                 if not self.is_active:
                     return await func(*args, **kwargs)
 
-                hash_id = generate_unique_hash_simple(func)
                 component_id = str(uuid.uuid4())
                 parent_agent_id = self.current_agent_id.get()
                 self.start_component(component_id)
```
```diff
@@ -670,14 +1013,34 @@ class LLMTracerMixin:
                     result = await func(*args, **kwargs)
                     return result
                 except Exception as e:
-
-                    "
-
-
-
-                    "timestamp": datetime.now().astimezone().isoformat(),
-                    }
+                    error_component = {
+                        "type": type(e).__name__,
+                        "message": str(e),
+                        "traceback": traceback.format_exc(),
+                        "timestamp": datetime.now().astimezone().isoformat(),
                     }
+
+                    # End tracking network calls for this component
+                    self.end_component(component_id)
+
+                    end_memory = psutil.Process().memory_info().rss
+                    memory_used = max(0, end_memory - start_memory)
+
+                    llm_component = self.create_llm_component(
+                        component_id=component_id,
+                        hash_id=generate_unique_hash(func, args, kwargs),
+                        name=name,
+                        llm_type="unknown",
+                        version=None,
+                        memory_used=memory_used,
+                        start_time=start_time,
+                        input_data=extract_input_data(args, kwargs, None),
+                        output_data=None,
+                        error=error_component,
+                    )
+                    self.llm_data = llm_component
+                    self.add_component(llm_component, is_error=True)
+
                     raise
                 finally:
 
```
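The new error path serializes the exception into a plain dict, emits an error component (note `is_error=True` on `add_component`), and still re-raises so caller behavior is unchanged. The dict shape, standalone:

```python
import traceback
from datetime import datetime

def build_error_component(exc: Exception) -> dict:
    return {
        "type": type(exc).__name__,
        "message": str(exc),
        "traceback": traceback.format_exc(),  # valid while handling the exception
        "timestamp": datetime.now().astimezone().isoformat(),
    }

try:
    1 / 0
except Exception as e:
    err = build_error_component(e)
    assert err["type"] == "ZeroDivisionError"
```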
```diff
@@ -685,12 +1048,31 @@ class LLMTracerMixin:
                     if (name is not None) or (name != ""):
                         llm_component["name"] = name
 
-                    if self.
-
+                    if name in self.span_attributes_dict:
+                        span_gt = self.span_attributes_dict[name].gt
+                        if span_gt is not None:
+                            llm_component["data"]["gt"] = span_gt
+                        span_context = self.span_attributes_dict[name].context
+                        if span_context:
+                            llm_component["data"]["context"] = span_context
 
                     if error_info:
                         llm_component["error"] = error_info["error"]
 
+                    self.end_component(component_id)
+
+                    # metrics
+                    metrics = []
+                    if name in self.span_attributes_dict:
+                        raw_metrics = self.span_attributes_dict[name].metrics or []
+                        for metric in raw_metrics:
+                            base_metric_name = metric["name"]
+                            counter = sum(1 for x in self.visited_metrics if x.startswith(base_metric_name))
+                            metric_name = f'{base_metric_name}_{counter}' if counter > 0 else base_metric_name
+                            self.visited_metrics.append(metric_name)
+                            metric["name"] = metric_name
+                            metrics.append(metric)
+                    llm_component["metrics"] = metrics
                     if parent_agent_id:
                         children = self.agent_children.get()
                         children.append(llm_component)
```
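The metric-renaming loop (repeated in the sync wrapper below and in `create_llm_component`) suffixes duplicate metric names so each reported key is unique: `accuracy`, then `accuracy_1`, and so on. Extracted as a function; note the `startswith` counter also counts prefix collisions (`accuracy` vs. `accuracy_top1`), a quirk inherited from the diff's logic:

```python
def dedupe_metric_names(raw_metrics, visited):
    deduped = []
    for metric in raw_metrics:
        base = metric["name"]
        counter = sum(1 for x in visited if x.startswith(base))
        new_name = f"{base}_{counter}" if counter > 0 else base
        visited.append(new_name)
        deduped.append({**metric, "name": new_name})
    return deduped

seen = []
out = dedupe_metric_names([{"name": "accuracy"}, {"name": "accuracy"}], seen)
assert [m["name"] for m in out] == ["accuracy", "accuracy_1"]
```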
```diff
@@ -698,22 +1080,21 @@ class LLMTracerMixin:
                     else:
                         self.add_component(llm_component)
 
-                    self.end_component(component_id)
                     llm_component["interactions"] = self.component_user_interaction.get(
                         component_id, []
                     )
                     self.add_component(llm_component)
 
-            @self.file_tracker.trace_decorator
             @functools.wraps(func)
             def sync_wrapper(*args, **kwargs):
-
+                gt = kwargs.get("gt") if kwargs else None
+                if gt is not None:
+                    span = self.span(name)
+                    span.add_gt(gt)
                 self.current_llm_call_name.set(name)
                 if not self.is_active:
                     return func(*args, **kwargs)
 
-                hash_id = generate_unique_hash_simple(func)
-
                 component_id = str(uuid.uuid4())
                 parent_agent_id = self.current_agent_id.get()
                 self.start_component(component_id)
```
```diff
@@ -726,17 +1107,36 @@ class LLMTracerMixin:
                     result = func(*args, **kwargs)
                     return result
                 except Exception as e:
-
-                    "
-
-
-
-                    "timestamp": datetime.now().astimezone().isoformat(),
-                    }
+                    error_component = {
+                        "type": type(e).__name__,
+                        "message": str(e),
+                        "traceback": traceback.format_exc(),
+                        "timestamp": datetime.now().astimezone().isoformat(),
                     }
+
+                    # End tracking network calls for this component
+                    self.end_component(component_id)
+
+                    end_memory = psutil.Process().memory_info().rss
+                    memory_used = max(0, end_memory - start_memory)
+
+                    llm_component = self.create_llm_component(
+                        component_id=component_id,
+                        hash_id=generate_unique_hash(func, args, kwargs),
+                        name=name,
+                        llm_type="unknown",
+                        version=None,
+                        memory_used=memory_used,
+                        start_time=start_time,
+                        input_data=extract_input_data(args, kwargs, None),
+                        output_data=None,
+                        error=error_component,
+                    )
+                    self.llm_data = llm_component
+                    self.add_component(llm_component, is_error=True)
+
                     raise
                 finally:
-
                     llm_component = self.llm_data
                     if (name is not None) or (name != ""):
                         llm_component["name"] = name
```
```diff
@@ -744,6 +1144,18 @@ class LLMTracerMixin:
                     if error_info:
                         llm_component["error"] = error_info["error"]
 
+                    self.end_component(component_id)
+                    metrics = []
+                    if name in self.span_attributes_dict:
+                        raw_metrics = self.span_attributes_dict[name].metrics or []
+                        for metric in raw_metrics:
+                            base_metric_name = metric["name"]
+                            counter = sum(1 for x in self.visited_metrics if x.startswith(base_metric_name))
+                            metric_name = f'{base_metric_name}_{counter}' if counter > 0 else base_metric_name
+                            self.visited_metrics.append(metric_name)
+                            metric["name"] = metric_name
+                            metrics.append(metric)
+                    llm_component["metrics"] = metrics
                     if parent_agent_id:
                         children = self.agent_children.get()
                         children.append(llm_component)
```
```diff
@@ -751,7 +1163,6 @@ class LLMTracerMixin:
                     else:
                         self.add_component(llm_component)
 
-                    self.end_component(component_id)
                     llm_component["interactions"] = self.component_user_interaction.get(
                         component_id, []
                     )
```