ragaai-catalyst 2.1.5b14__py3-none-any.whl → 2.1.5b15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -598,27 +598,23 @@ class AgentTracerMixin:
         component_network_calls[component_id] = []
         self.component_network_calls.set(component_network_calls)
 
-    def _sanitize_input(self, args: tuple, kwargs: dict) -> str:
-        """Convert input arguments to text format.
-
-        Args:
-            args: Input arguments tuple
-            kwargs: Input keyword arguments dict
-
-        Returns:
-            str: Text representation of the input arguments
-        """
-
-        def _sanitize_value(value):
-            if isinstance(value, dict):
-                return str({k: _sanitize_value(v) for k, v in value.items()})
-            elif isinstance(value, (list, tuple)):
-                return str([_sanitize_value(item) for item in value])
-            return str(value)
-
-        sanitized_args = [_sanitize_value(arg) for arg in args]
-        sanitized_kwargs = {k: _sanitize_value(v) for k, v in kwargs.items()}
-        return str({"args": sanitized_args, "kwargs": sanitized_kwargs})
+    def _sanitize_input(self, args: tuple, kwargs: dict) -> dict:
+        """Sanitize and format input data, including handling of nested lists and dictionaries."""
+
+        def sanitize_value(value):
+            if isinstance(value, (int, float, bool, str)):
+                return value
+            elif isinstance(value, list):
+                return [sanitize_value(item) for item in value]
+            elif isinstance(value, dict):
+                return {key: sanitize_value(val) for key, val in value.items()}
+            else:
+                return str(value)  # Convert non-standard types to string
+
+        return {
+            "args": [sanitize_value(arg) for arg in args],
+            "kwargs": {key: sanitize_value(val) for key, val in kwargs.items()},
+        }
 
     def _sanitize_output(self, output: Any) -> Any:
         """Sanitize and format output data"""
@@ -297,15 +297,23 @@ class CustomTracerMixin:
         """End tracking network calls for a component"""
         pass
 
-    def _sanitize_input(self, args: tuple, kwargs: dict) -> Dict:
-        """Sanitize and format input data"""
-        return {
-            "args": [str(arg) if not isinstance(arg, (int, float, bool, str, list, dict)) else arg for arg in args],
-            "kwargs": {
-                k: str(v) if not isinstance(v, (int, float, bool, str, list, dict)) else v
-                for k, v in kwargs.items()
+    def _sanitize_input(self, args: tuple, kwargs: dict) -> dict:
+        """Sanitize and format input data, including handling of nested lists and dictionaries."""
+
+        def sanitize_value(value):
+            if isinstance(value, (int, float, bool, str)):
+                return value
+            elif isinstance(value, list):
+                return [sanitize_value(item) for item in value]
+            elif isinstance(value, dict):
+                return {key: sanitize_value(val) for key, val in value.items()}
+            else:
+                return str(value)  # Convert non-standard types to string
+
+        return {
+            "args": [sanitize_value(arg) for arg in args],
+            "kwargs": {key: sanitize_value(val) for key, val in kwargs.items()},
         }
-        }
 
     def _sanitize_output(self, output: Any) -> Any:
         """Sanitize and format output data"""
@@ -20,6 +20,7 @@ from ..utils.llm_utils import (
     sanitize_api_keys,
     sanitize_input,
     extract_llm_output,
+    num_tokens_from_messages
 )
 from ..utils.trace_utils import load_model_costs
 from ..utils.unique_decorator import generate_unique_hash_simple
@@ -150,6 +151,8 @@ class LLMTracerMixin:
             beta_module = openai_module.beta
 
             # Patch openai.beta.threads
+            import openai
+            openai.api_type = "openai"
             if hasattr(beta_module, "threads"):
                 threads_obj = beta_module.threads
                 # Patch top-level methods on openai.beta.threads
@@ -498,7 +501,17 @@ class LLMTracerMixin:
 
            # Extract token usage and calculate cost
            model_name = extract_model_name(args, kwargs, result)
-           token_usage = extract_token_usage(result)
+           if 'stream' in kwargs:
+               stream = kwargs['stream']
+               if stream:
+                   prompt_messages = kwargs['messages']
+                   # Create response message for streaming case
+                   response_message = {"role": "assistant", "content": result} if result else {"role": "assistant", "content": ""}
+                   token_usage = num_tokens_from_messages(model_name, prompt_messages, response_message)
+               else:
+                   token_usage = extract_token_usage(result)
+           else:
+               token_usage = extract_token_usage(result)
            cost = calculate_llm_cost(token_usage, model_name, self.model_costs)
            parameters = extract_parameters(kwargs)
            input_data = extract_input_data(args, kwargs, result)
@@ -595,7 +608,18 @@ class LLMTracerMixin:
 
            # Extract token usage and calculate cost
            model_name = extract_model_name(args, kwargs, result)
-           token_usage = extract_token_usage(result)
+
+           if 'stream' in kwargs:
+               stream = kwargs['stream']
+               if stream:
+                   prompt_messages = kwargs['messages']
+                   # Create response message for streaming case
+                   response_message = {"role": "assistant", "content": result} if result else {"role": "assistant", "content": ""}
+                   token_usage = num_tokens_from_messages(model_name, prompt_messages, response_message)
+               else:
+                   token_usage = extract_token_usage(result)
+           else:
+               token_usage = extract_token_usage(result)
            cost = calculate_llm_cost(token_usage, model_name, self.model_costs)
            parameters = extract_parameters(kwargs)
            input_data = extract_input_data(args, kwargs, result)
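Both wrapped call paths (sync and async) gain the same branch: a streamed OpenAI-style response carries no `usage` block, so when `stream=True` the tokens are counted client-side from the prompt messages plus the accumulated response text. A hedged sketch of the decision in isolation (`resolve_token_usage` is our illustrative name; it assumes the two helpers from `llm_utils` shown later in this diff are importable):

```python
def resolve_token_usage(kwargs: dict, result, model_name: str) -> dict:
    if kwargs.get("stream"):
        # Streaming: no usage block on the response; count with tiktoken.
        prompt_messages = kwargs["messages"]
        response_message = {"role": "assistant", "content": result or ""}
        return num_tokens_from_messages(model_name, prompt_messages, response_message)
    # Non-streaming: read token counts from the response's usage block.
    return extract_token_usage(result)
```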
@@ -741,7 +765,20 @@ class LLMTracerMixin:
 
            if error_info:
                llm_component["error"] = error_info["error"]
-
+
+           self.end_component(component_id)
+           # metrics
+           metrics = []
+           if name in self.span_attributes_dict:
+               raw_metrics = self.span_attributes_dict[name].metrics or []
+               for metric in raw_metrics:
+                   base_metric_name = metric["name"]
+                   counter = sum(1 for x in self.visited_metrics if x.startswith(base_metric_name))
+                   metric_name = f'{base_metric_name}_{counter}' if counter > 0 else base_metric_name
+                   self.visited_metrics.append(metric_name)
+                   metric["name"] = metric_name
+                   metrics.append(metric)
+           llm_component["metrics"] = metrics
            if parent_agent_id:
                children = self.agent_children.get()
                children.append(llm_component)
@@ -749,7 +786,6 @@ class LLMTracerMixin:
            else:
                self.add_component(llm_component)
 
-           self.end_component(component_id)
            llm_component["interactions"] = self.component_user_interaction.get(
                component_id, []
            )
@@ -787,7 +823,6 @@ class LLMTracerMixin:
                }
                raise
            finally:
-
                llm_component = self.llm_data
                if (name is not None) or (name != ""):
                    llm_component["name"] = name
@@ -795,6 +830,18 @@ class LLMTracerMixin:
                if error_info:
                    llm_component["error"] = error_info["error"]
 
+               self.end_component(component_id)
+               metrics = []
+               if name in self.span_attributes_dict:
+                   raw_metrics = self.span_attributes_dict[name].metrics or []
+                   for metric in raw_metrics:
+                       base_metric_name = metric["name"]
+                       counter = sum(1 for x in self.visited_metrics if x.startswith(base_metric_name))
+                       metric_name = f'{base_metric_name}_{counter}' if counter > 0 else base_metric_name
+                       self.visited_metrics.append(metric_name)
+                       metric["name"] = metric_name
+                       metrics.append(metric)
+               llm_component["metrics"] = metrics
                if parent_agent_id:
                    children = self.agent_children.get()
                    children.append(llm_component)
@@ -802,7 +849,6 @@ class LLMTracerMixin:
                else:
                    self.add_component(llm_component)
 
-               self.end_component(component_id)
                llm_component["interactions"] = self.component_user_interaction.get(
                    component_id, []
                )
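The metric-attachment block added to both finally paths deduplicates metric names with a counter derived from `visited_metrics` (note also that `end_component` now runs before the component is attached, not after). The renaming logic in isolation, runnable as-is:

```python
visited_metrics = []

def unique_metric_name(base_metric_name):
    counter = sum(1 for x in visited_metrics if x.startswith(base_metric_name))
    metric_name = f"{base_metric_name}_{counter}" if counter > 0 else base_metric_name
    visited_metrics.append(metric_name)
    return metric_name

print(unique_metric_name("accuracy"))  # accuracy
print(unique_metric_name("accuracy"))  # accuracy_1
print(unique_metric_name("accuracy"))  # accuracy_2
```

Because the count uses `startswith`, a metric such as `accuracy_top5` would also advance the counter for `accuracy`; exact-match counting would avoid that, but the shipped code counts prefixes.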
@@ -6,6 +6,7 @@ import uuid
 import os
 import builtins
 from pathlib import Path
+import logging
 
 from .base import BaseTracer
 from .llm_tracer import LLMTracerMixin
@@ -57,10 +58,12 @@ class AgenticTracing(
 
        self.project_name = user_detail["project_name"]
        self.project_id = user_detail["project_id"]
-       # self.dataset_name = user_detail["dataset_name"]
        self.trace_user_detail = user_detail["trace_user_detail"]
        self.base_url = f"{RagaAICatalyst.BASE_URL}"
        self.timeout = 10
+
+       # Add warning flag
+       self._warning_shown = False
 
        BaseTracer.__init__(self, user_detail)
@@ -174,18 +177,6 @@ class AgenticTracing(
            self.network_tracer.activate_patches()
 
        # take care of the auto instrumentation
-       if self.auto_instrument_llm:
-           self.instrument_llm_calls()
-
-       if self.auto_instrument_tool:
-           self.instrument_tool_calls()
-
-       if self.auto_instrument_agent:
-           self.instrument_agent_calls()
-
-       if self.auto_instrument_custom:
-           self.instrument_custom_calls()
-
        if self.auto_instrument_user_interaction:
            ToolTracerMixin.instrument_user_interaction_calls(self)
            LLMTracerMixin.instrument_user_interaction_calls(self)
@@ -206,6 +197,18 @@ class AgenticTracing(
            AgentTracerMixin.instrument_file_io_calls(self)
            CustomTracerMixin.instrument_file_io_calls(self)
            builtins.open = self.user_interaction_tracer.traced_open
+
+       if self.auto_instrument_llm:
+           self.instrument_llm_calls()
+
+       if self.auto_instrument_tool:
+           self.instrument_tool_calls()
+
+       if self.auto_instrument_agent:
+           self.instrument_agent_calls()
+
+       if self.auto_instrument_custom:
+           self.instrument_custom_calls()
 
    def stop(self):
        """Stop tracing and save results"""
@@ -310,6 +313,47 @@ class AgenticTracing(
            ]
        }
 
+       if component_data == None or component_data == {} or component_data.get("type", None) == None:
+           # Only show warning if it hasn't been shown before
+           if not self._warning_shown:
+               import toml
+               import os
+               from pathlib import Path
+
+               # Load supported LLM calls from TOML file
+               current_dir = Path(__file__).parent
+               toml_path = current_dir / "../utils/supported_llm_provider.toml"
+               try:
+                   with open(toml_path, "r") as f:
+                       config = toml.load(f)
+                   supported_calls = ", ".join(config["supported_llm_calls"])
+               except Exception as e:
+                   supported_calls = "Error loading supported LLM calls"
+
+               # ANSI escape codes for colors and formatting
+               RED = "\033[91m"
+               BOLD = "\033[1m"
+               RESET = "\033[0m"
+               BIG = "\033[1;2m"  # Makes text slightly larger in supported terminals
+
+               warning_msg = f"""{RED}{BOLD}{BIG}
+               ╔════════════════════════ COMPONENT DATA INCOMPLETE ════════════════════════╗
+               ║                                                                            ║
+               ║  Please ensure these requirements:                                         ║
+               ║  ✗ trace_llm decorator must have a stand alone llm call                    ║
+               ║  ✗ trace_tool decorator must be a stand alone tool/function call           ║
+               ║  ✗ trace_agent decorator can have multiple/nested llm/tool/agent calls     ║
+               ║                                                                            ║
+               ║  Supported LLM calls:                                                      ║
+               ║  {supported_calls}                                                         ║
+               ║                                                                            ║
+               ╚══════════════════════════════════════════════════════════════════════════╝
+               {RESET}"""
+               # Use logger.warning for the message
+               logging.warning(warning_msg)
+               self._warning_shown = True
+               return
+
        if component_data["type"] == "llm":
            component = LLMComponent(**filtered_data)
        elif component_data["type"] == "agent":
@@ -1,6 +1,7 @@
 import os
 import uuid
 from datetime import datetime
+from langchain_core.tools import tool
 import psutil
 import functools
 from typing import Optional, Any, Dict, List
@@ -10,6 +11,8 @@ import asyncio
 from ..utils.file_name_tracker import TrackName
 from ..utils.span_attributes import SpanAttributes
 import logging
+import wrapt
+import time
 
 logger = logging.getLogger(__name__)
 logging_level = (
@@ -34,10 +37,122 @@ class ToolTracerMixin:
        self.auto_instrument_user_interaction = False
        self.auto_instrument_file_io = False
        self.auto_instrument_network = False
+       self._instrumented_tools = set()  # Track which tools we've instrumented
 
    # take care of auto_instrument
    def instrument_tool_calls(self):
+       """Enable tool instrumentation"""
        self.auto_instrument_tool = True
+
+       # Handle modules that are already imported
+       import sys
+
+       if "langchain_community.tools" in sys.modules:
+           self.patch_langchain_community_tools(sys.modules["langchain_community.tools"])
+
+       if "langchain.tools" in sys.modules:
+           self.patch_langchain_community_tools(sys.modules["langchain.tools"])
+
+       if "langchain_core.tools" in sys.modules:
+           self.patch_langchain_core_tools(sys.modules["langchain_core.tools"])
+
+       # Register hooks for future imports
+       wrapt.register_post_import_hook(
+           self.patch_langchain_community_tools, "langchain_community.tools"
+       )
+       wrapt.register_post_import_hook(
+           self.patch_langchain_community_tools, "langchain.tools"
+       )
+
+       wrapt.register_post_import_hook(
+           self.patch_langchain_core_tools, "langchain_core.tools"
+       )
+
+   def patch_langchain_core_tools(self, module):
+       """Patch langchain core tools by wrapping @tool decorated functions"""
+       from langchain_core.tools import BaseTool, StructuredTool, Tool
+
+       # Patch the tool decorator
+       original_tool = module.tool
+
+       def wrapped_tool(*args, **kwargs):
+           # Get the original decorated function
+           decorated = original_tool(*args, **kwargs)
+
+           def wrapper(func):
+               tool_instance = decorated(func)
+               # Wrap the tool's run/arun methods
+               if hasattr(tool_instance, 'run'):
+                   self.wrap_tool_method(tool_instance.__class__, 'run')
+               if hasattr(tool_instance, 'arun'):
+                   self.wrap_tool_method(tool_instance.__class__, 'arun')
+               if hasattr(tool_instance, 'invoke'):
+                   self.wrap_tool_method(tool_instance.__class__, 'invoke')
+               if hasattr(tool_instance, 'ainvoke'):
+                   self.wrap_tool_method(tool_instance.__class__, 'ainvoke')
+               return tool_instance
+
+           return wrapper
+
+       # Replace the original decorator
+       module.tool = wrapped_tool
+
+       # Patch base tool classes
+       for tool_class in [BaseTool, StructuredTool, Tool]:
+           if tool_class in self._instrumented_tools:
+               continue
+           if hasattr(tool_class, 'run'):
+               self.wrap_tool_method(tool_class, f'{tool_class.__name__}.run')
+           if hasattr(tool_class, 'arun'):
+               self.wrap_tool_method(tool_class, f'{tool_class.__name__}.arun')
+           if hasattr(tool_class, 'invoke'):
+               self.wrap_tool_method(tool_class, f'{tool_class.__name__}.invoke')
+           if hasattr(tool_class, 'ainvoke'):
+               self.wrap_tool_method(tool_class, f'{tool_class.__name__}.ainvoke')
+           self._instrumented_tools.add(tool_class)
+
+   def patch_langchain_community_tools(self, module):
+       """Patch langchain-community tool methods"""
+       for directory in dir(module):
+           dir_class = getattr(module, directory)
+           tools = getattr(dir_class, "__all__", None)
+           if tools is None:
+               continue
+           for tool in tools:
+               tool_class = getattr(dir_class, tool)
+               # Skip if already instrumented
+               if tool_class in self._instrumented_tools:
+                   continue
+
+               # Prefer invoke/ainvoke over run/arun
+               if hasattr(tool_class, "invoke"):
+                   self.wrap_tool_method(tool_class, f"{tool}.invoke")
+               elif hasattr(tool_class, "run"):  # Only wrap run if invoke doesn't exist
+                   self.wrap_tool_method(tool_class, f"{tool}.run")
+
+               if hasattr(tool_class, "ainvoke"):
+                   self.wrap_tool_method(tool_class, f"{tool}.ainvoke")
+               elif hasattr(tool_class, "arun"):  # Only wrap arun if ainvoke doesn't exist
+                   self.wrap_tool_method(tool_class, f"{tool}.arun")
+
+               self._instrumented_tools.add(tool_class)
+
+   def wrap_tool_method(self, obj, method_name):
+       """Wrap a method with tracing functionality"""
+       method_name = method_name.split(".")[-1]
+       tool_name = obj.__name__.split(".")[0]
+       original_method = getattr(obj, method_name)
+
+       @functools.wraps(original_method)
+       def wrapper(*args, **kwargs):
+           name = tool_name
+           tool_type = "langchain"
+           version = None
+           if asyncio.iscoroutinefunction(original_method):
+               return self._trace_tool_execution(original_method, name, tool_type, version, *args, **kwargs)
+           return self._trace_sync_tool_execution(original_method, name, tool_type, version, *args, **kwargs)
+
+       setattr(obj, method_name, wrapper)
 
    def instrument_user_interaction_calls(self):
        self.auto_instrument_user_interaction = True
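The patching relies on wrapt's post-import hook machinery: if the target module is already in `sys.modules` the patch runs immediately, otherwise it runs the moment the module is first imported. A minimal, self-contained demonstration (the module name and patch function are illustrative, not part of the tracer):

```python
import wrapt

def patch_tools(module):
    # Stand-in for the method wrapping done by patch_langchain_*_tools.
    print(f"patching {module.__name__}")

wrapt.register_post_import_hook(patch_tools, "json")

import json  # hook fires here (or at registration if json was already loaded)
```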
@@ -362,25 +477,22 @@ class ToolTracerMixin:
    def end_component(self, component_id):
        pass
 
-   def _sanitize_input(self, args: tuple, kwargs: dict) -> Dict:
-       """Sanitize and format input data"""
+   def _sanitize_input(self, args: tuple, kwargs: dict) -> dict:
+       """Sanitize and format input data, including handling of nested lists and dictionaries."""
+
+       def sanitize_value(value):
+           if isinstance(value, (int, float, bool, str)):
+               return value
+           elif isinstance(value, list):
+               return [sanitize_value(item) for item in value]
+           elif isinstance(value, dict):
+               return {key: sanitize_value(val) for key, val in value.items()}
+           else:
+               return str(value)  # Convert non-standard types to string
+
        return {
-           "args": [
-               (
-                   str(arg)
-                   if not isinstance(arg, (int, float, bool, str, list, dict))
-                   else arg
-               )
-               for arg in args
-           ],
-           "kwargs": {
-               k: (
-                   str(v)
-                   if not isinstance(v, (int, float, bool, str, list, dict))
-                   else v
-               )
-               for k, v in kwargs.items()
-           },
+           "args": [sanitize_value(arg) for arg in args],
+           "kwargs": {key: sanitize_value(val) for key, val in kwargs.items()},
        }
 
    def _sanitize_output(self, output: Any) -> Any:
@@ -9,7 +9,10 @@ import json
 import os
 import asyncio
 import psutil
+import tiktoken
+import logging
 
+logger = logging.getLogger(__name__)
 
 def extract_model_name(args, kwargs, result):
    """Extract model name from kwargs or result"""
@@ -173,6 +176,96 @@ def extract_token_usage(result):
        "total_tokens": 0
    }
 
+def num_tokens_from_messages(model="gpt-4o-mini-2024-07-18", prompt_messages=None, response_message=None):
+    """Calculate the number of tokens used by messages.
+
+    Args:
+        messages: Optional list of messages (deprecated, use prompt_messages and response_message instead)
+        model: The model name to use for token calculation
+        prompt_messages: List of prompt messages
+        response_message: Response message from the assistant
+
+    Returns:
+        dict: A dictionary containing:
+            - prompt_tokens: Number of tokens in the prompt
+            - completion_tokens: Number of tokens in the completion
+            - total_tokens: Total number of tokens
+    """
+    try:
+        encoding = tiktoken.encoding_for_model(model)
+    except KeyError:
+        logging.warning("Warning: model not found. Using o200k_base encoding.")
+        encoding = tiktoken.get_encoding("o200k_base")
+
+    if model in {
+        "gpt-3.5-turbo-0125",
+        "gpt-4-0314",
+        "gpt-4-32k-0314",
+        "gpt-4-0613",
+        "gpt-4-32k-0613",
+        "gpt-4o-mini-2024-07-18",
+        "gpt-4o-2024-08-06"
+    }:
+        tokens_per_message = 3
+        tokens_per_name = 1
+    elif "gpt-3.5-turbo" in model:
+        logging.warning("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0125.")
+        return num_tokens_from_messages(model="gpt-3.5-turbo-0125",
+                                        prompt_messages=prompt_messages, response_message=response_message)
+    elif "gpt-4o-mini" in model:
+        logging.warning("Warning: gpt-4o-mini may update over time. Returning num tokens assuming gpt-4o-mini-2024-07-18.")
+        return num_tokens_from_messages(model="gpt-4o-mini-2024-07-18",
+                                        prompt_messages=prompt_messages, response_message=response_message)
+    elif "gpt-4o" in model:
+        logging.warning("Warning: gpt-4o and gpt-4o-mini may update over time. Returning num tokens assuming gpt-4o-2024-08-06.")
+        return num_tokens_from_messages(model="gpt-4o-2024-08-06",
+                                        prompt_messages=prompt_messages, response_message=response_message)
+    elif "gpt-4" in model:
+        logging.warning("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
+        return num_tokens_from_messages(model="gpt-4-0613",
+                                        prompt_messages=prompt_messages, response_message=response_message)
+    else:
+        raise NotImplementedError(
+            f"""num_tokens_from_messages() is not implemented for model {model}."""
+        )
+
+    all_messages = []
+    if prompt_messages:
+        all_messages.extend(prompt_messages)
+    if response_message:
+        if isinstance(response_message, dict):
+            all_messages.append(response_message)
+        else:
+            all_messages.append({"role": "assistant", "content": response_message})
+
+    prompt_tokens = 0
+    completion_tokens = 0
+
+    for message in all_messages:
+        num_tokens = tokens_per_message
+        for key, value in message.items():
+            token_count = len(encoding.encode(str(value)))  # Convert value to string for safety
+            num_tokens += token_count
+            if key == "name":
+                num_tokens += tokens_per_name
+
+        # Add tokens to prompt or completion based on role
+        if message.get("role") == "assistant":
+            completion_tokens += num_tokens
+        else:
+            prompt_tokens += num_tokens
+
+    # Add the assistant message prefix tokens to completion tokens if we have a response
+    if completion_tokens > 0:
+        completion_tokens += 3  # <|start|>assistant<|message|>
+
+    total_tokens = prompt_tokens + completion_tokens
+
+    return {
+        "prompt_tokens": prompt_tokens,
+        "completion_tokens": completion_tokens,
+        "total_tokens": total_tokens
+    }
 
def extract_input_data(args, kwargs, result):
    """Extract input data from function call"""
@@ -14,8 +14,7 @@
        "supports_audio_output": true,
        "supports_prompt_caching": true,
        "supports_response_schema": true,
-       "supports_system_messages": true,
-       "deprecation_date": "date when the model becomes deprecated in the format YYYY-MM-DD"
+       "supports_system_messages": true
    },
    "omni-moderation-latest": {
        "max_tokens": 32768,
@@ -442,8 +441,7 @@
        "mode": "chat",
        "supports_function_calling": true,
        "supports_prompt_caching": true,
-       "supports_system_messages": true,
-       "deprecation_date": "2025-06-06"
+       "supports_system_messages": true
    },
    "gpt-4-32k": {
        "max_tokens": 4096,
@@ -542,8 +540,7 @@
        "mode": "chat",
        "supports_vision": true,
        "supports_prompt_caching": true,
-       "supports_system_messages": true,
-       "deprecation_date": "2024-12-06"
+       "supports_system_messages": true
    },
    "gpt-4-1106-vision-preview": {
        "max_tokens": 4096,
@@ -555,8 +552,7 @@
        "mode": "chat",
        "supports_vision": true,
        "supports_prompt_caching": true,
-       "supports_system_messages": true,
-       "deprecation_date": "2024-12-06"
+       "supports_system_messages": true
    },
    "gpt-3.5-turbo": {
        "max_tokens": 4097,
@@ -1227,8 +1223,7 @@
        "litellm_provider": "azure",
        "mode": "chat",
        "supports_function_calling": true,
-       "supports_parallel_function_calling": true,
-       "deprecation_date": "2025-03-31"
+       "supports_parallel_function_calling": true
    },
    "azure/gpt-35-turbo-0613": {
        "max_tokens": 4097,
@@ -1239,8 +1234,7 @@
        "litellm_provider": "azure",
        "mode": "chat",
        "supports_function_calling": true,
-       "supports_parallel_function_calling": true,
-       "deprecation_date": "2025-02-13"
+       "supports_parallel_function_calling": true
    },
    "azure/gpt-35-turbo-0301": {
        "max_tokens": 4097,
@@ -1251,8 +1245,7 @@
        "litellm_provider": "azure",
        "mode": "chat",
        "supports_function_calling": true,
-       "supports_parallel_function_calling": true,
-       "deprecation_date": "2025-02-13"
+       "supports_parallel_function_calling": true
    },
    "azure/gpt-35-turbo-0125": {
        "max_tokens": 4096,
@@ -1263,8 +1256,7 @@
        "litellm_provider": "azure",
        "mode": "chat",
        "supports_function_calling": true,
-       "supports_parallel_function_calling": true,
-       "deprecation_date": "2025-03-31"
+       "supports_parallel_function_calling": true
    },
    "azure/gpt-35-turbo-16k": {
        "max_tokens": 4096,
@@ -2050,84 +2042,6 @@
        "supports_function_calling": true,
        "supports_vision": true
    },
-   "xai/grok-2-vision-1212": {
-       "max_tokens": 32768,
-       "max_input_tokens": 32768,
-       "max_output_tokens": 32768,
-       "input_cost_per_token": 2e-06,
-       "input_cost_per_image": 2e-06,
-       "output_cost_per_token": 1e-05,
-       "litellm_provider": "xai",
-       "mode": "chat",
-       "supports_function_calling": true,
-       "supports_vision": true
-   },
-   "xai/grok-2-vision-latest": {
-       "max_tokens": 32768,
-       "max_input_tokens": 32768,
-       "max_output_tokens": 32768,
-       "input_cost_per_token": 2e-06,
-       "input_cost_per_image": 2e-06,
-       "output_cost_per_token": 1e-05,
-       "litellm_provider": "xai",
-       "mode": "chat",
-       "supports_function_calling": true,
-       "supports_vision": true
-   },
-   "xai/grok-2-vision": {
-       "max_tokens": 32768,
-       "max_input_tokens": 32768,
-       "max_output_tokens": 32768,
-       "input_cost_per_token": 2e-06,
-       "input_cost_per_image": 2e-06,
-       "output_cost_per_token": 1e-05,
-       "litellm_provider": "xai",
-       "mode": "chat",
-       "supports_function_calling": true,
-       "supports_vision": true
-   },
-   "xai/grok-vision-beta": {
-       "max_tokens": 8192,
-       "max_input_tokens": 8192,
-       "max_output_tokens": 8192,
-       "input_cost_per_token": 5e-06,
-       "input_cost_per_image": 5e-06,
-       "output_cost_per_token": 1.5e-05,
-       "litellm_provider": "xai",
-       "mode": "chat",
-       "supports_function_calling": true,
-       "supports_vision": true
-   },
-   "xai/grok-2-1212": {
-       "max_tokens": 131072,
-       "max_input_tokens": 131072,
-       "max_output_tokens": 131072,
-       "input_cost_per_token": 2e-06,
-       "output_cost_per_token": 1e-05,
-       "litellm_provider": "xai",
-       "mode": "chat",
-       "supports_function_calling": true
-   },
-   "xai/grok-2": {
-       "max_tokens": 131072,
-       "max_input_tokens": 131072,
-       "max_output_tokens": 131072,
-       "input_cost_per_token": 2e-06,
-       "output_cost_per_token": 1e-05,
-       "litellm_provider": "xai",
-       "mode": "chat",
-       "supports_function_calling": true
-   },
-   "xai/grok-2-latest": {
-       "max_tokens": 131072,
-       "max_input_tokens": 131072,
-       "max_output_tokens": 131072,
-       "input_cost_per_token": 2e-06,
-       "output_cost_per_token": 1e-05,
-       "litellm_provider": "xai",
-       "mode": "chat",
-       "supports_function_calling": true
-   },
    "deepseek/deepseek-coder": {
        "max_tokens": 4096,
        "max_input_tokens": 128000,
@@ -2439,8 +2353,7 @@
        "tool_use_system_prompt_tokens": 264,
        "supports_assistant_prefill": true,
        "supports_prompt_caching": true,
-       "supports_response_schema": true,
-       "deprecation_date": "2025-03-01"
+       "supports_response_schema": true
    },
    "claude-3-5-haiku-20241022": {
        "max_tokens": 8192,
@@ -2456,8 +2369,7 @@
        "tool_use_system_prompt_tokens": 264,
        "supports_assistant_prefill": true,
        "supports_prompt_caching": true,
-       "supports_response_schema": true,
-       "deprecation_date": "2025-10-01"
+       "supports_response_schema": true
    },
    "claude-3-opus-20240229": {
        "max_tokens": 4096,
@@ -2474,8 +2386,7 @@
        "tool_use_system_prompt_tokens": 395,
        "supports_assistant_prefill": true,
        "supports_prompt_caching": true,
-       "supports_response_schema": true,
-       "deprecation_date": "2025-03-01"
+       "supports_response_schema": true
    },
    "claude-3-sonnet-20240229": {
        "max_tokens": 4096,
@@ -2490,8 +2401,7 @@
        "tool_use_system_prompt_tokens": 159,
        "supports_assistant_prefill": true,
        "supports_prompt_caching": true,
-       "supports_response_schema": true,
-       "deprecation_date": "2025-07-21"
+       "supports_response_schema": true
    },
    "claude-3-5-sonnet-20240620": {
        "max_tokens": 8192,
@@ -2508,8 +2418,7 @@
        "tool_use_system_prompt_tokens": 159,
        "supports_assistant_prefill": true,
        "supports_prompt_caching": true,
-       "supports_response_schema": true,
-       "deprecation_date": "2025-06-01"
+       "supports_response_schema": true
    },
    "claude-3-5-sonnet-20241022": {
        "max_tokens": 8192,
@@ -2527,8 +2436,7 @@
        "supports_assistant_prefill": true,
        "supports_pdf_input": true,
        "supports_prompt_caching": true,
-       "supports_response_schema": true,
-       "deprecation_date": "2025-10-01"
+       "supports_response_schema": true
    },
    "text-bison": {
        "max_tokens": 2048,
@@ -2638,8 +2546,7 @@
        "output_cost_per_character": 5e-07,
        "litellm_provider": "vertex_ai-chat-models",
        "mode": "chat",
-       "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models",
-       "deprecation_date": "2025-04-09"
+       "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models"
    },
    "chat-bison-32k": {
        "max_tokens": 8192,
@@ -2880,8 +2787,7 @@
        "litellm_provider": "vertex_ai-language-models",
        "mode": "chat",
        "supports_function_calling": true,
-       "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models",
-       "deprecation_date": "2025-04-09"
+       "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models"
    },
    "gemini-1.0-ultra": {
        "max_tokens": 8192,
@@ -2926,8 +2832,7 @@
        "litellm_provider": "vertex_ai-language-models",
        "mode": "chat",
        "supports_function_calling": true,
-       "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models",
-       "deprecation_date": "2025-04-09"
+       "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models"
    },
    "gemini-1.5-pro": {
        "max_tokens": 8192,
@@ -3009,8 +2914,7 @@
        "supports_function_calling": true,
        "supports_tool_choice": true,
        "supports_response_schema": true,
-       "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models",
-       "deprecation_date": "2025-05-24"
+       "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models"
    },
    "gemini-1.5-pro-preview-0514": {
        "max_tokens": 8192,
@@ -3215,8 +3119,7 @@
        "supports_function_calling": true,
        "supports_vision": true,
        "supports_response_schema": true,
-       "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models",
-       "deprecation_date": "2025-05-24"
+       "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models"
    },
    "gemini-1.5-flash-preview-0514": {
        "max_tokens": 8192,
@@ -3320,8 +3223,7 @@
        "mode": "chat",
        "supports_function_calling": true,
        "supports_vision": true,
-       "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models",
-       "deprecation_date": "2025-04-09"
+       "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models"
    },
    "medlm-medium": {
        "max_tokens": 8192,
@@ -4027,8 +3929,7 @@
        "supports_prompt_caching": true,
        "tpm": 4000000,
        "rpm": 2000,
-       "source": "https://ai.google.dev/pricing",
-       "deprecation_date": "2025-05-24"
+       "source": "https://ai.google.dev/pricing"
    },
    "gemini/gemini-1.5-flash": {
        "max_tokens": 8192,
@@ -4304,8 +4205,7 @@
        "supports_prompt_caching": true,
        "tpm": 4000000,
        "rpm": 1000,
-       "source": "https://ai.google.dev/pricing",
-       "deprecation_date": "2025-05-24"
+       "source": "https://ai.google.dev/pricing"
    },
    "gemini/gemini-1.5-pro-exp-0801": {
        "max_tokens": 8192,
@@ -5323,24 +5223,6 @@
        "mode": "chat",
        "supports_system_messages": true
    },
-   "ai21.jamba-1-5-large-v1:0": {
-       "max_tokens": 256000,
-       "max_input_tokens": 256000,
-       "max_output_tokens": 256000,
-       "input_cost_per_token": 2e-06,
-       "output_cost_per_token": 8e-06,
-       "litellm_provider": "bedrock",
-       "mode": "chat"
-   },
-   "ai21.jamba-1-5-mini-v1:0": {
-       "max_tokens": 256000,
-       "max_input_tokens": 256000,
-       "max_output_tokens": 256000,
-       "input_cost_per_token": 2e-07,
-       "output_cost_per_token": 4e-07,
-       "litellm_provider": "bedrock",
-       "mode": "chat"
-   },
    "amazon.titan-text-lite-v1": {
        "max_tokens": 4000,
        "max_input_tokens": 42000,
@@ -5659,8 +5541,8 @@
        "max_tokens": 8192,
        "max_input_tokens": 200000,
        "max_output_tokens": 8192,
-       "input_cost_per_token": 8e-07,
-       "output_cost_per_token": 4e-06,
+       "input_cost_per_token": 1e-06,
+       "output_cost_per_token": 5e-06,
        "litellm_provider": "bedrock",
        "mode": "chat",
        "supports_assistant_prefill": true,
@@ -5729,8 +5611,8 @@
        "max_tokens": 8192,
        "max_input_tokens": 200000,
        "max_output_tokens": 8192,
-       "input_cost_per_token": 8e-07,
-       "output_cost_per_token": 4e-06,
+       "input_cost_per_token": 1e-06,
+       "output_cost_per_token": 5e-06,
        "litellm_provider": "bedrock",
        "mode": "chat",
        "supports_assistant_prefill": true,
@@ -5799,8 +5681,8 @@
        "max_tokens": 8192,
        "max_input_tokens": 200000,
        "max_output_tokens": 8192,
-       "input_cost_per_token": 2.5e-07,
-       "output_cost_per_token": 1.25e-06,
+       "input_cost_per_token": 1e-06,
+       "output_cost_per_token": 5e-06,
        "litellm_provider": "bedrock",
        "mode": "chat",
        "supports_function_calling": true,
@@ -6174,8 +6056,8 @@
        "max_tokens": 8191,
        "max_input_tokens": 100000,
        "max_output_tokens": 8191,
-       "input_cost_per_token": 8e-07,
-       "output_cost_per_token": 2.4e-06,
+       "input_cost_per_token": 1.63e-06,
+       "output_cost_per_token": 5.51e-06,
        "litellm_provider": "bedrock",
        "mode": "chat"
    },
@@ -7441,8 +7323,7 @@
        "input_cost_per_token": 5e-06,
        "output_cost_per_token": 5e-06,
        "litellm_provider": "perplexity",
-       "mode": "chat",
-       "deprecation_date": "2025-02-22"
+       "mode": "chat"
    },
    "perplexity/llama-3.1-sonar-large-128k-online": {
        "max_tokens": 127072,
@@ -7451,8 +7332,7 @@
        "input_cost_per_token": 1e-06,
        "output_cost_per_token": 1e-06,
        "litellm_provider": "perplexity",
-       "mode": "chat",
-       "deprecation_date": "2025-02-22"
+       "mode": "chat"
    },
    "perplexity/llama-3.1-sonar-large-128k-chat": {
        "max_tokens": 131072,
@@ -7461,8 +7341,7 @@
        "input_cost_per_token": 1e-06,
        "output_cost_per_token": 1e-06,
        "litellm_provider": "perplexity",
-       "mode": "chat",
-       "deprecation_date": "2025-02-22"
+       "mode": "chat"
    },
    "perplexity/llama-3.1-sonar-small-128k-chat": {
        "max_tokens": 131072,
@@ -7471,8 +7350,7 @@
        "input_cost_per_token": 2e-07,
        "output_cost_per_token": 2e-07,
        "litellm_provider": "perplexity",
-       "mode": "chat",
-       "deprecation_date": "2025-02-22"
+       "mode": "chat"
    },
    "perplexity/llama-3.1-sonar-small-128k-online": {
        "max_tokens": 127072,
@@ -7481,8 +7359,7 @@
        "input_cost_per_token": 2e-07,
        "output_cost_per_token": 2e-07,
        "litellm_provider": "perplexity",
-       "mode": "chat",
-       "deprecation_date": "2025-02-22"
+       "mode": "chat"
    },
    "perplexity/pplx-7b-chat": {
        "max_tokens": 8192,
@@ -0,0 +1,34 @@
+# List of all supported LLM method calls
+
+supported_llm_calls = [
+    # OpenAI
+    "OpenAI.chat.completions.create()",
+    "AsyncOpenAI.chat.completions.create()",
+
+    # OpenAI Beta
+    "OpenAI.beta.threads.create()",
+    "OpenAI.beta.threads.messages.create()",
+    "OpenAI.beta.threads.runs.create()",
+
+    # Anthropic
+    "Anthropic.messages.create()",
+    "Anthropic.messages.acreate()",
+
+    # Google VertexAI/PaLM
+    "GenerativeModel.generate_content()",
+    "GenerativeModel.generate_content_async()",
+    "ChatVertexAI._generate()",
+    "ChatVertexAI._agenerate()",
+    "ChatVertexAI.complete()",
+    "ChatVertexAI.acomplete()",
+
+    # Google GenerativeAI
+    "ChatGoogleGenerativeAI._generate()",
+    "ChatGoogleGenerativeAI._agenerate()",
+    "ChatGoogleGenerativeAI.complete()",
+    "ChatGoogleGenerativeAI.acomplete()",
+
+    # LiteLLM
+    "litellm.completion()",
+    "litellm.acompletion()"
+]
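This new TOML file backs the COMPONENT DATA INCOMPLETE warning in main_tracer.py above, which joins the list into the banner text. How it is read, with path resolution simplified for the sketch:

```python
import toml

with open("supported_llm_provider.toml", "r") as f:
    config = toml.load(f)

print(", ".join(config["supported_llm_calls"]))
# OpenAI.chat.completions.create(), AsyncOpenAI.chat.completions.create(), ...
```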
@@ -12,6 +12,9 @@ import asyncio
 from langchain_core.documents import Document
 import logging
 import tempfile
+import sys
+import importlib
+from importlib.util import find_spec
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -367,6 +370,7 @@ class LangchainTracer(BaseCallbackHandler):
                logger.error(f"Error restoring {name}: {e}")
                self.on_error(e, context=f"restore_{name}")
 
+       # Restore original methods and functions
        for name, original in self._original_methods.items():
            try:
                if "." in name:
@@ -280,45 +280,64 @@ class Tracer(AgenticTracing):
            data, additional_metadata = self.langchain_tracer.stop()
 
            # Add cost if possible
-           if additional_metadata['model_name']:
+           if additional_metadata.get('model_name'):
                try:
                    model_cost_data = self.model_cost_dict[additional_metadata['model_name']]
-                   prompt_cost = additional_metadata["tokens"]["prompt"]*model_cost_data["input_cost_per_token"]
-                   completion_cost = additional_metadata["tokens"]["completion"]*model_cost_data["output_cost_per_token"]
-                   # additional_metadata.setdefault('cost', {})["prompt_cost"] = prompt_cost
-                   # additional_metadata.setdefault('cost', {})["completion_cost"] = completion_cost
-                   additional_metadata.setdefault('cost', {})["total_cost"] = prompt_cost + completion_cost
+                   if 'tokens' in additional_metadata and all(k in additional_metadata['tokens'] for k in ['prompt', 'completion']):
+                       prompt_cost = additional_metadata["tokens"]["prompt"]*model_cost_data["input_cost_per_token"]
+                       completion_cost = additional_metadata["tokens"]["completion"]*model_cost_data["output_cost_per_token"]
+                       additional_metadata.setdefault('cost', {})["total_cost"] = prompt_cost + completion_cost
+                   else:
+                       logger.warning("Token information missing in additional_metadata")
                except Exception as e:
                    logger.warning(f"Error adding cost: {e}")
+           else:
+               logger.debug("Model name not available in additional_metadata, skipping cost calculation")
+
+           # Safely get total tokens and cost
+           if 'tokens' in additional_metadata and 'total' in additional_metadata['tokens']:
+               additional_metadata["total_tokens"] = float(additional_metadata["tokens"]["total"])
+           else:
+               additional_metadata["total_tokens"] = 0.0
+               logger.warning("Total tokens information not available")
 
-           additional_metadata["total_tokens"] = float(additional_metadata["tokens"]["total"])
-           del additional_metadata["tokens"]
-           if "cost" in additional_metadata:
+           if 'cost' in additional_metadata and 'total_cost' in additional_metadata['cost']:
               additional_metadata["total_cost"] = float(additional_metadata["cost"]["total_cost"])
-               del additional_metadata["cost"]
           else:
-               additional_metadata["total_cost"] = float(0.0)
+               additional_metadata["total_cost"] = 0.0
+               logger.warning("Total cost information not available")
+
+           # Safely remove tokens and cost dictionaries if they exist
+           additional_metadata.pop("tokens", None)
+           additional_metadata.pop("cost", None)
 
-           combined_metadata = user_detail['trace_user_detail']['metadata'].copy()
-           combined_metadata.update(additional_metadata)
-           combined_metadata
+           # Safely merge metadata
+           combined_metadata = {}
+           if user_detail.get('trace_user_detail', {}).get('metadata'):
+               combined_metadata.update(user_detail['trace_user_detail']['metadata'])
+           if additional_metadata:
+               combined_metadata.update(additional_metadata)
 
            langchain_traces = langchain_tracer_extraction(data)
            final_result = convert_langchain_callbacks_output(langchain_traces)
-           final_result[0]['project_name'] = user_detail['project_name']
-           final_result[0]['trace_id'] = str(uuid.uuid4())
-           final_result[0]['session_id'] = None
-           final_result[0]['metadata'] = combined_metadata
-           final_result[0]['pipeline'] = user_detail['trace_user_detail']['pipeline']
-
-           filepath_3 = os.path.join(os.getcwd(), "final_result.json")
-           with open(filepath_3, 'w') as f:
-               json.dump(final_result, f, indent=2)
-
 
-           print(filepath_3)
+           # Safely set required fields in final_result
+           if final_result and isinstance(final_result, list) and len(final_result) > 0:
+               final_result[0]['project_name'] = user_detail.get('project_name', '')
+               final_result[0]['trace_id'] = str(uuid.uuid4())
+               final_result[0]['session_id'] = None
+               final_result[0]['metadata'] = combined_metadata
+               final_result[0]['pipeline'] = user_detail.get('trace_user_detail', {}).get('pipeline')
+
+               filepath_3 = os.path.join(os.getcwd(), "final_result.json")
+               with open(filepath_3, 'w') as f:
+                   json.dump(final_result, f, indent=2)
+
+               print(filepath_3)
+           else:
+               logger.warning("No valid langchain traces found in final_result")
 
-           additional_metadata_keys = additional_metadata.keys() if additional_metadata else None
+           additional_metadata_keys = list(additional_metadata.keys()) if additional_metadata else None
 
            UploadTraces(json_file_path=filepath_3,
                         project_name=self.project_name,
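The rewrite above systematically swaps subscripting and `del` for `get`/`pop` with defaults, so missing keys degrade to log warnings instead of raising. The pattern in isolation:

```python
additional_metadata = {"model_name": "gpt-4o-mini"}  # no 'tokens' or 'cost' yet

# .get chains with defaults never raise, unlike metadata["tokens"]["total"]
total = additional_metadata.get("tokens", {}).get("total", 0.0)

# .pop(key, None) is a no-op for absent keys, unlike del metadata["tokens"]
additional_metadata.pop("tokens", None)
additional_metadata.pop("cost", None)

print(total)  # 0.0
```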
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: ragaai_catalyst
-Version: 2.1.5b14
+Version: 2.1.5b15
 Summary: RAGA AI CATALYST
 Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>
 Requires-Python: <3.13,>=3.9
@@ -35,6 +35,7 @@ Requires-Dist: py-cpuinfo~=9.0.0
 Requires-Dist: requests~=2.32.3
 Requires-Dist: GPUtil~=1.4.0
 Requires-Dist: ipynbname
+Requires-Dist: tiktoken>=0.7.0
 Provides-Extra: dev
 Requires-Dist: pytest; extra == "dev"
 Requires-Dist: pytest-cov; extra == "dev"
@@ -13,9 +13,9 @@ ragaai_catalyst/synthetic_data_generation.py,sha256=uDV9tNwto2xSkWg5XHXUvjErW-4P
 ragaai_catalyst/utils.py,sha256=TlhEFwLyRU690HvANbyoRycR3nQ67lxVUQoUOfTPYQ0,3772
 ragaai_catalyst/tracers/__init__.py,sha256=LfgTes-nHpazssbGKnn8kyLZNr49kIPrlkrqqoTFTfc,301
 ragaai_catalyst/tracers/distributed.py,sha256=AIRvS5Ur4jbFDXsUkYuCTmtGoHHx3LOG4n5tWOh610U,10330
-ragaai_catalyst/tracers/langchain_callback.py,sha256=v004nQuim4qgb3k4iWyfhvSsMPoI3ZSS26hzbBQT8CI,30572
+ragaai_catalyst/tracers/langchain_callback.py,sha256=ZXN378gloGh5EVpTJuUScHD964WuIeVeE4_hp60gxG4,30686
 ragaai_catalyst/tracers/llamaindex_callback.py,sha256=ZY0BJrrlz-P9Mg2dX-ZkVKG3gSvzwqBtk7JL_05MiYA,14028
-ragaai_catalyst/tracers/tracer.py,sha256=dn2Abz9Or7F2lp3hgUf7AUKzp2Dq-WDtg-_dW6gf1e0,19005
+ragaai_catalyst/tracers/tracer.py,sha256=TftNxiYO13Qb6AA4qst__zGS2-Xn_COkQOce1gFgjjc,20169
 ragaai_catalyst/tracers/upload_traces.py,sha256=2TWdRTN6FMaX-dqDv8BJWQS0xrCGYKkXEYOi2kK3Z3Y,5487
 ragaai_catalyst/tracers/agentic_tracing/README.md,sha256=X4QwLb7-Jg7GQMIXj-SerZIgDETfw-7VgYlczOR8ZeQ,4508
 ragaai_catalyst/tracers/agentic_tracing/__init__.py,sha256=yf6SKvOPSpH-9LiKaoLKXwqj5sez8F_5wkOb91yp0oE,260
@@ -28,14 +28,14 @@ ragaai_catalyst/tracers/agentic_tracing/tests/__init__.py,sha256=47DEQpj8HBSa-_T
 ragaai_catalyst/tracers/agentic_tracing/tests/ai_travel_agent.py,sha256=S4rCcKzU_5SB62BYEbNn_1VbbTdG4396N8rdZ3ZNGcE,5654
 ragaai_catalyst/tracers/agentic_tracing/tests/unique_decorator_test.py,sha256=Xk1cLzs-2A3dgyBwRRnCWs7Eubki40FVonwd433hPN8,4805
 ragaai_catalyst/tracers/agentic_tracing/tracers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py,sha256=--wvhOJ-J2433WPatIS3wx6VFeCUIcgRT5_ZjGQDv2c,26364
+ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py,sha256=8d6YovuWiyRZ_h3GQwV6G5WdHzr14ukXtxE_TK1WIUY,26365
 ragaai_catalyst/tracers/agentic_tracing/tracers/base.py,sha256=88rX7OkOGEyVNECUrc4bYqODyulXve_-99d9ku5hBeQ,37373
-ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py,sha256=l3x3uFO5ov93I7UUrUX1M06WVGy2ug2jEZ1G7o315z4,13075
+ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py,sha256=OHet_Cphyrsq2CP4WiooTsWSgg3Rc1n8QsOl1s2vqdY,13480
 ragaai_catalyst/tracers/agentic_tracing/tracers/langgraph_tracer.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py,sha256=91aWXJGb3GDfyDfJyA7Irnk3XSyfkQaQppW_NMORGJQ,31725
-ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py,sha256=6hsg-Yw11v4qeELI1CWrdX8BXf-wJrTF5smBI5prgoo,15873
+ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py,sha256=NklCHbkG_ITlJ0zxGm-QHdk6it5xeu5gUHnK18YuLGo,34499
+ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py,sha256=JEwvFV6KdYcg8zt2qUTKmmPvrkEft0YNAyUYK6FWF7c,18335
 ragaai_catalyst/tracers/agentic_tracing/tracers/network_tracer.py,sha256=m8CxYkl7iMiFya_lNwN1ykBc3Pmo-2pR_2HmpptwHWQ,10352
-ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py,sha256=4rWL7fIJE5wN0nwh6fMWyh3OrrenZHJkNzyQXikyzQI,13771
+ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py,sha256=n03vRN5Lwp--B_dt2ZGt2WWXYRIymTqCVrdg-Hf5rd4,19119
 ragaai_catalyst/tracers/agentic_tracing/tracers/user_interaction_tracer.py,sha256=bhSUhNQCuJXKjgJAXhjKEYjnHMpYN90FSZdR84fNIKU,4614
 ragaai_catalyst/tracers/agentic_tracing/upload/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ragaai_catalyst/tracers/agentic_tracing/upload/upload_agentic_traces.py,sha256=1MDKXAAPzOEdxFKWWQrRgrmM3kz--DGXSywGXQmR3lQ,6041
@@ -47,9 +47,10 @@ ragaai_catalyst/tracers/agentic_tracing/utils/create_dataset_schema.py,sha256=lg
 ragaai_catalyst/tracers/agentic_tracing/utils/file_name_tracker.py,sha256=515NNDQJTyy3O-2rdlUYUoWL9qSwLIfvV3sMB9BtHp8,1366
 ragaai_catalyst/tracers/agentic_tracing/utils/generic.py,sha256=WwXT01xmp8MSr7KinuDCSK9a1ifpLcT7ajFkvYviG_A,1190
 ragaai_catalyst/tracers/agentic_tracing/utils/get_user_trace_metrics.py,sha256=vPZ4dn4EHFW0kqd1GyRpsYXbfrRrd0DXCmh-pzsDBNE,1109
-ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py,sha256=wlXCuaRe81s-7FWdJ_MquXFGRZZfNrZxLIIxl-Ohbqk,15541
-ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json,sha256=kQwC8AYTfJCqPm1F_heR7FoEhIpEZgBRWvkHRncfhzU,298689
+ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py,sha256=67AS2s51qUnIt4hhcDRNe2Mmi5DSikR2XgmtinaVJjY,19549
+ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json,sha256=GXV1s349reRMpYF_EkK-b6peSb4SY-17WnlkvpuQ4sM,294430
 ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py,sha256=MqeRNGxzeuh9qTK0NbYMftl9V9Z0V7gMgBoHkrXP56k,1592
+ragaai_catalyst/tracers/agentic_tracing/utils/supported_llm_provider.toml,sha256=LvFDivDIE96Zasp-fgDEqUJ5GEQZUawQucR3aOcSUTY,926
 ragaai_catalyst/tracers/agentic_tracing/utils/system_monitor.py,sha256=H8WNsk4v_5T6OUw4TFOzlDLjQhJwjh1nAMyMAoqMEi4,6946
 ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py,sha256=RciiDdo2riibEoM8X0FKHaXi78y3bWwNkV8U0leqigk,3508
 ragaai_catalyst/tracers/agentic_tracing/utils/unique_decorator.py,sha256=DQHjcEuqEKsNSWaNs7SoOaq50yK4Jsl966S7mBnV-zA,5723
@@ -65,8 +66,8 @@ ragaai_catalyst/tracers/utils/__init__.py,sha256=KeMaZtYaTojilpLv65qH08QmpYclfpa
 ragaai_catalyst/tracers/utils/convert_langchain_callbacks_output.py,sha256=ofrNrxf2b1hpjDh_zeaxiYq86azn1MF3kW8-ViYPEg0,1641
 ragaai_catalyst/tracers/utils/langchain_tracer_extraction_logic.py,sha256=qK67fdUBz5Xr99ajqXbYf1ueKS1V3a3_XR0zCcN4iGI,3061
 ragaai_catalyst/tracers/utils/utils.py,sha256=ViygfJ7vZ7U0CTSA1lbxVloHp4NSlmfDzBRNCJuMhis,2374
-ragaai_catalyst-2.1.5b14.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-ragaai_catalyst-2.1.5b14.dist-info/METADATA,sha256=xCMz9BuQQxwXe_FO4ZWN2_q_WYVwk_opJYIQN6GMt6g,12765
-ragaai_catalyst-2.1.5b14.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-ragaai_catalyst-2.1.5b14.dist-info/top_level.txt,sha256=HpgsdRgEJMk8nqrU6qdCYk3di7MJkDL0B19lkc7dLfM,16
-ragaai_catalyst-2.1.5b14.dist-info/RECORD,,
+ragaai_catalyst-2.1.5b15.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ragaai_catalyst-2.1.5b15.dist-info/METADATA,sha256=_mdTGHCMe9WtrUydgSdzhnmvRZYzN6pfcxnWkpV5QHs,12796
+ragaai_catalyst-2.1.5b15.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ragaai_catalyst-2.1.5b15.dist-info/top_level.txt,sha256=HpgsdRgEJMk8nqrU6qdCYk3di7MJkDL0B19lkc7dLfM,16
+ragaai_catalyst-2.1.5b15.dist-info/RECORD,,