ragaai-catalyst 2.0.7.2__py3-none-any.whl → 2.0.7.2b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. ragaai_catalyst/evaluation.py +107 -153
  2. ragaai_catalyst/tracers/agentic_tracing/Untitled-1.json +660 -0
  3. ragaai_catalyst/tracers/agentic_tracing/__init__.py +3 -0
  4. ragaai_catalyst/tracers/agentic_tracing/agent_tracer.py +311 -0
  5. ragaai_catalyst/tracers/agentic_tracing/agentic_tracing.py +212 -0
  6. ragaai_catalyst/tracers/agentic_tracing/base.py +270 -0
  7. ragaai_catalyst/tracers/agentic_tracing/data_structure.py +239 -0
  8. ragaai_catalyst/tracers/agentic_tracing/llm_tracer.py +906 -0
  9. ragaai_catalyst/tracers/agentic_tracing/network_tracer.py +286 -0
  10. ragaai_catalyst/tracers/agentic_tracing/sample.py +197 -0
  11. ragaai_catalyst/tracers/agentic_tracing/tool_tracer.py +235 -0
  12. ragaai_catalyst/tracers/agentic_tracing/unique_decorator.py +221 -0
  13. ragaai_catalyst/tracers/agentic_tracing/unique_decorator_test.py +172 -0
  14. ragaai_catalyst/tracers/agentic_tracing/user_interaction_tracer.py +67 -0
  15. ragaai_catalyst/tracers/agentic_tracing/utils/__init__.py +3 -0
  16. ragaai_catalyst/tracers/agentic_tracing/utils/api_utils.py +18 -0
  17. ragaai_catalyst/tracers/agentic_tracing/utils/data_classes.py +61 -0
  18. ragaai_catalyst/tracers/agentic_tracing/utils/generic.py +32 -0
  19. ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py +181 -0
  20. ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json +5946 -0
  21. ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py +74 -0
  22. ragaai_catalyst/tracers/tracer.py +26 -4
  23. ragaai_catalyst/tracers/upload_traces.py +127 -0
  24. ragaai_catalyst-2.0.7.2b0.dist-info/METADATA +39 -0
  25. ragaai_catalyst-2.0.7.2b0.dist-info/RECORD +50 -0
  26. ragaai_catalyst-2.0.7.2.dist-info/METADATA +0 -386
  27. ragaai_catalyst-2.0.7.2.dist-info/RECORD +0 -29
  28. {ragaai_catalyst-2.0.7.2.dist-info → ragaai_catalyst-2.0.7.2b0.dist-info}/WHEEL +0 -0
  29. {ragaai_catalyst-2.0.7.2.dist-info → ragaai_catalyst-2.0.7.2b0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,18 @@
1
+ import requests
2
+
3
def fetch_analysis_trace(base_url, trace_id, timeout=10):
    """
    Fetches the analysis trace data from the server.

    :param base_url: The base URL of the server (e.g., "http://localhost:3000").
    :param trace_id: The ID of the trace to fetch.
    :param timeout: Seconds to wait for the server before giving up. ``requests``
        has no default timeout, so without this the call could hang forever; the
        default keeps existing call sites working unchanged.
    :return: The JSON response from the server if successful, otherwise None.
    """
    try:
        url = f"{base_url}/api/analysis_traces/{trace_id}"
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()  # Raise an error for bad responses (4xx, 5xx)
        return response.json()
    except requests.exceptions.RequestException as e:
        # Best-effort fetch: report and return None rather than propagate.
        print(f"Error fetching analysis trace: {e}")
        return None
@@ -0,0 +1,61 @@
1
+ from dataclasses import dataclass, field
2
+ from typing import Dict, List, Any, Optional
3
+
4
+
5
@dataclass
class ProjectInfo:
    """Top-level accounting for one traced project run.

    ``start_time`` is required; everything else starts at zero and is
    filled in as the run progresses / finishes.
    """

    project_name: str
    start_time: float
    end_time: float = 0        # set when the run completes
    duration: float = 0        # end_time - start_time, in the same units
    total_cost: float = 0      # aggregated LLM spend for the run
    total_tokens: int = 0      # aggregated token count for the run
13
+
14
+
15
@dataclass
class SystemInfo:
    """Snapshot of the host environment captured for a project trace."""

    project_id: int            # id of the project this snapshot belongs to
    os_name: str               # e.g. "Linux", "Darwin" — presumably platform.system(); confirm at capture site
    os_version: str
    python_version: str
    cpu_info: str
    memory_total: float        # total RAM; units not established here — TODO confirm (bytes vs GB)
    installed_packages: str    # serialized package list (a single string, not a collection)
24
+
25
+
26
@dataclass
class LLMCall:
    """Record of a single LLM invocation: prompt/response, any tool call,
    token usage, computed cost, and wall-clock timing."""

    name: str
    model_name: str
    input_prompt: str
    output_response: str
    tool_call: Dict                 # parsed tool/function call, if the model made one
    token_usage: Dict[str, int]    # e.g. prompt/completion/total token counts
    cost: Dict[str, float]         # per-component cost breakdown
    start_time: float = 0
    end_time: float = 0
    duration: float = 0
38
+
39
+
40
@dataclass
class ToolCall:
    """Record of a single tool invocation inside an agent run."""

    name: str                          # tool name as invoked
    input_parameters: Dict[str, Any]   # keyword arguments the tool was called with
    output: Any                        # whatever the tool returned, unmodified
    start_time: float
    end_time: float
    duration: float                    # end_time - start_time
    errors: Optional[str] = None       # error description when the call failed, else None
49
+
50
+
51
@dataclass
class AgentCall:
    """Record of one agent invocation, including the nested tool and LLM
    calls it made (stored as plain dicts, not ToolCall/LLMCall instances)."""

    name: str
    input_parameters: Dict[str, Any]
    output: Any
    start_time: float
    end_time: float
    duration: float                      # end_time - start_time
    tool_calls: List[Dict[str, Any]]     # nested tool-call records, dict-serialized
    llm_calls: List[Dict[str, Any]]      # nested LLM-call records, dict-serialized
    errors: Optional[str] = None         # error description when the agent failed, else None
@@ -0,0 +1,32 @@
1
+ import os
2
+ import logging
3
+
4
+
5
def get_db_path():
    """Return a SQLAlchemy-style sqlite URL for the trace database.

    Candidate locations are tried in order, mirroring the original intent:
    1. the package's ``../ui/dist`` directory,
    2. ``<cwd>/agentneo/ui/dist``,
    3. ``<cwd>/dist``.
    The first candidate whose parent directory already exists wins; if none
    exists, the package directory is created and used.

    BUG FIXED: the original called ``os.makedirs(..., exist_ok=True)`` on the
    package path *before* checking it, so the first check always succeeded and
    the two fallback locations were unreachable dead code.

    :return: A ``sqlite:///<path>`` URL string.
    """
    db_filename = "trace_data.db"

    # Candidate 1: alongside the installed package's UI bundle.
    package_dir = os.path.dirname(os.path.abspath(__file__))
    package_db_path = os.path.join(package_dir, "..", "ui", "dist", db_filename)

    # Candidates 2 and 3: relative to the current working directory.
    local_db_path = os.path.join(os.getcwd(), "agentneo", "ui", "dist", db_filename)
    local_dist_path = os.path.join(os.getcwd(), "dist", db_filename)

    for candidate in (package_db_path, local_db_path, local_dist_path):
        # A candidate is usable when its parent directory already exists.
        if os.path.exists(os.path.dirname(candidate)):
            logging.debug(f"Using database: {candidate}")
            return f"sqlite:///{candidate}"

    # Nothing exists yet: fall back to the package location, creating it.
    os.makedirs(os.path.dirname(package_db_path), exist_ok=True)
    return f"sqlite:///{package_db_path}"
@@ -0,0 +1,181 @@
1
+ from .data_classes import LLMCall
2
+ from .trace_utils import (
3
+ calculate_cost,
4
+ convert_usage_to_dict,
5
+ load_model_costs,
6
+ )
7
+ from importlib import resources
8
+ import json
9
+ import os
10
+
11
+
12
+ # Load the Json configuration
13
# Load the JSON model-cost configuration shipped next to this module.
try:
    current_dir = os.path.dirname(os.path.abspath(__file__))
    model_costs_path = os.path.join(current_dir, "model_costs.json")
    with open(model_costs_path, "r") as file:
        config = json.load(file)
except FileNotFoundError:
    # Fallback for installations where the JSON is not on the filesystem
    # next to this module (e.g. zipped distributions).
    # BUG FIXED: the original used files(""), and importlib.resources.files
    # raises ValueError ("Empty module name") for an empty package name —
    # the fallback could never succeed. Use this module's own package.
    from importlib.resources import files

    with (files(__package__) / "model_costs.json").open("r") as file:
        config = json.load(file)
22
+
23
+
24
+
25
def _json_content_field(result, key, default):
    """Return ``key`` from a JSON object encoded in ``result.content``, else ``default``."""
    try:
        return json.loads(result.content).get(key, default)
    except (json.JSONDecodeError, TypeError):
        return default


def _extract_choice_fields(choices):
    """Pull ``(output_response, function_call, tool_call)`` from the first choice.

    Handles both attribute-style objects and plain dicts, and both chat
    completions (``message``) and legacy completions (``text``).
    """
    if not (choices and len(choices) > 0):
        return "", None, None
    first_choice = choices[0]

    message = None
    if hasattr(first_choice, "message"):
        message = first_choice.message
    elif isinstance(first_choice, dict) and "message" in first_choice:
        message = first_choice["message"]

    if message:
        # Chat completion: content plus optional function/tool calls.
        output_response = ""
        if hasattr(message, "content"):
            output_response = message.content
        elif isinstance(message, dict) and "content" in message:
            output_response = message["content"]

        function_call = None
        if hasattr(message, "function_call"):
            function_call = message.function_call
        elif isinstance(message, dict) and "function_call" in message:
            function_call = message["function_call"]

        tool_call = None
        if hasattr(message, "tool_calls"):
            tool_call = message.tool_calls
        elif isinstance(message, dict) and "tool_calls" in message:
            tool_call = message["tool_calls"]
        return output_response, function_call, tool_call

    # Legacy completion: plain text, no function or tool calls.
    if hasattr(first_choice, "text"):
        return first_choice.text, None, None
    if isinstance(first_choice, dict) and "text" in first_choice:
        return first_choice["text"], None, None
    return "", None, None


def _parse_tool_call(tool_call):
    """Normalise a tool/function call into ``{"arguments": dict, "name": str}``.

    Returns None when there is no call. String arguments are JSON-decoded;
    undecodable strings become ``{}``, matching the original behaviour.
    """
    if not tool_call:
        return None
    if isinstance(tool_call, dict):
        arguments = tool_call.get("arguments", "{}")
        name = tool_call.get("name", "")
    else:
        # Maybe it's an object with attributes.
        arguments = getattr(tool_call, "arguments", "{}")
        name = getattr(tool_call, "name", "")
    if isinstance(arguments, str):
        try:
            arguments = json.loads(arguments)
        except json.JSONDecodeError:
            arguments = {}
    return {"arguments": arguments, "name": name}


def _compute_cost(model_name, token_usage):
    """Price ``token_usage`` using the per-token rates for ``model_name``.

    Unknown models are priced at zero (with a warning), exactly as before.
    """
    model_costs = load_model_costs()
    if model_name in model_costs:
        model_config = model_costs[model_name]
        input_cost_per_token = model_config.get("input_cost_per_token", 0.0)
        output_cost_per_token = model_config.get("output_cost_per_token", 0.0)
        # Reasoning tokens default to the output rate when not listed.
        reasoning_cost_per_token = model_config.get(
            "reasoning_cost_per_token", output_cost_per_token
        )
    else:
        # Default costs or log a warning
        print(
            f"Warning: Model '{model_name}' not found in config. Using default costs."
        )
        input_cost_per_token = 0.0
        output_cost_per_token = 0.0
        reasoning_cost_per_token = 0.0

    return calculate_cost(
        token_usage,
        input_cost_per_token=input_cost_per_token,
        output_cost_per_token=output_cost_per_token,
        reasoning_cost_per_token=reasoning_cost_per_token,
    )


def extract_llm_output(result):
    """Build an :class:`LLMCall` record from a raw LLM API result.

    ``result`` may expose ``model`` / ``choices`` / ``usage`` as attributes
    (SDK objects) or carry them inside a JSON string at ``result.content``;
    both shapes are handled. The input prompt and call name are not available
    at this point and are left empty.

    :param result: The raw completion/chat-completion response.
    :return: An ``LLMCall`` with model name, output text, parsed tool call,
        token usage, and computed cost populated.
    """
    # Model name: prefer the attribute, else look inside JSON-encoded content.
    model_name = None
    if hasattr(result, "model"):
        model_name = result.model
    elif hasattr(result, "content"):
        model_name = _json_content_field(result, "model", None)

    # Choices: same attribute-or-content resolution.
    choices = None
    if hasattr(result, "choices"):
        choices = result.choices
    elif hasattr(result, "content"):
        choices = _json_content_field(result, "choices", None)

    output_response, function_call, tool_call = _extract_choice_fields(choices)

    # Fall back to the legacy function_call when tool_calls is absent/empty.
    if not tool_call:
        tool_call = function_call

    parsed_tool_call = _parse_tool_call(tool_call)

    # Token usage: attribute, JSON content, or empty.
    usage = {}
    if hasattr(result, "usage"):
        usage = result.usage
    elif hasattr(result, "content"):
        usage = _json_content_field(result, "usage", {})
    token_usage = convert_usage_to_dict(usage)

    cost = _compute_cost(model_name, token_usage)

    return LLMCall(
        name="",
        model_name=model_name,
        input_prompt="",  # Not available here
        output_response=output_response,
        token_usage=token_usage,
        cost=cost,
        tool_call=parsed_tool_call,
    )