ragaai-catalyst 2.1.4.1b0__py3-none-any.whl → 2.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. ragaai_catalyst/__init__.py +23 -2
  2. ragaai_catalyst/dataset.py +462 -1
  3. ragaai_catalyst/evaluation.py +76 -7
  4. ragaai_catalyst/ragaai_catalyst.py +52 -10
  5. ragaai_catalyst/redteaming/__init__.py +7 -0
  6. ragaai_catalyst/redteaming/config/detectors.toml +13 -0
  7. ragaai_catalyst/redteaming/data_generator/scenario_generator.py +95 -0
  8. ragaai_catalyst/redteaming/data_generator/test_case_generator.py +120 -0
  9. ragaai_catalyst/redteaming/evaluator.py +125 -0
  10. ragaai_catalyst/redteaming/llm_generator.py +136 -0
  11. ragaai_catalyst/redteaming/llm_generator_old.py +83 -0
  12. ragaai_catalyst/redteaming/red_teaming.py +331 -0
  13. ragaai_catalyst/redteaming/requirements.txt +4 -0
  14. ragaai_catalyst/redteaming/tests/grok.ipynb +97 -0
  15. ragaai_catalyst/redteaming/tests/stereotype.ipynb +2258 -0
  16. ragaai_catalyst/redteaming/upload_result.py +38 -0
  17. ragaai_catalyst/redteaming/utils/issue_description.py +114 -0
  18. ragaai_catalyst/redteaming/utils/rt.png +0 -0
  19. ragaai_catalyst/redteaming_old.py +171 -0
  20. ragaai_catalyst/synthetic_data_generation.py +400 -22
  21. ragaai_catalyst/tracers/__init__.py +17 -1
  22. ragaai_catalyst/tracers/agentic_tracing/data/data_structure.py +4 -2
  23. ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py +212 -148
  24. ragaai_catalyst/tracers/agentic_tracing/tracers/base.py +657 -247
  25. ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py +50 -19
  26. ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py +588 -177
  27. ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py +99 -100
  28. ragaai_catalyst/tracers/agentic_tracing/tracers/network_tracer.py +3 -3
  29. ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py +230 -29
  30. ragaai_catalyst/tracers/agentic_tracing/upload/trace_uploader.py +358 -0
  31. ragaai_catalyst/tracers/agentic_tracing/upload/upload_agentic_traces.py +75 -20
  32. ragaai_catalyst/tracers/agentic_tracing/upload/upload_code.py +55 -11
  33. ragaai_catalyst/tracers/agentic_tracing/upload/upload_local_metric.py +74 -0
  34. ragaai_catalyst/tracers/agentic_tracing/upload/upload_trace_metric.py +47 -16
  35. ragaai_catalyst/tracers/agentic_tracing/utils/create_dataset_schema.py +4 -2
  36. ragaai_catalyst/tracers/agentic_tracing/utils/file_name_tracker.py +26 -3
  37. ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py +182 -17
  38. ragaai_catalyst/tracers/agentic_tracing/utils/model_costs.json +1233 -497
  39. ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py +81 -10
  40. ragaai_catalyst/tracers/agentic_tracing/utils/supported_llm_provider.toml +34 -0
  41. ragaai_catalyst/tracers/agentic_tracing/utils/system_monitor.py +215 -0
  42. ragaai_catalyst/tracers/agentic_tracing/utils/trace_utils.py +0 -32
  43. ragaai_catalyst/tracers/agentic_tracing/utils/unique_decorator.py +3 -1
  44. ragaai_catalyst/tracers/agentic_tracing/utils/zip_list_of_unique_files.py +73 -47
  45. ragaai_catalyst/tracers/distributed.py +300 -0
  46. ragaai_catalyst/tracers/exporters/__init__.py +3 -1
  47. ragaai_catalyst/tracers/exporters/dynamic_trace_exporter.py +160 -0
  48. ragaai_catalyst/tracers/exporters/ragaai_trace_exporter.py +129 -0
  49. ragaai_catalyst/tracers/langchain_callback.py +809 -0
  50. ragaai_catalyst/tracers/llamaindex_instrumentation.py +424 -0
  51. ragaai_catalyst/tracers/tracer.py +301 -55
  52. ragaai_catalyst/tracers/upload_traces.py +24 -7
  53. ragaai_catalyst/tracers/utils/convert_langchain_callbacks_output.py +61 -0
  54. ragaai_catalyst/tracers/utils/convert_llama_instru_callback.py +69 -0
  55. ragaai_catalyst/tracers/utils/extraction_logic_llama_index.py +74 -0
  56. ragaai_catalyst/tracers/utils/langchain_tracer_extraction_logic.py +82 -0
  57. ragaai_catalyst/tracers/utils/model_prices_and_context_window_backup.json +9365 -0
  58. ragaai_catalyst/tracers/utils/trace_json_converter.py +269 -0
  59. {ragaai_catalyst-2.1.4.1b0.dist-info → ragaai_catalyst-2.1.5.dist-info}/METADATA +367 -45
  60. ragaai_catalyst-2.1.5.dist-info/RECORD +97 -0
  61. {ragaai_catalyst-2.1.4.1b0.dist-info → ragaai_catalyst-2.1.5.dist-info}/WHEEL +1 -1
  62. ragaai_catalyst-2.1.4.1b0.dist-info/RECORD +0 -67
  63. {ragaai_catalyst-2.1.4.1b0.dist-info → ragaai_catalyst-2.1.5.dist-info}/LICENSE +0 -0
  64. {ragaai_catalyst-2.1.4.1b0.dist-info → ragaai_catalyst-2.1.5.dist-info}/top_level.txt +0 -0
ragaai_catalyst/tracers/agentic_tracing/tracers/base.py
@@ -1,45 +1,29 @@
  import json
  import os
- import platform
- import psutil
- import pkg_resources
  from datetime import datetime
  from pathlib import Path
- from typing import List, Any
+ from typing import List, Any, Dict, Optional
  import uuid
  import sys
  import tempfile
  import threading
  import time
- from ....ragaai_catalyst import RagaAICatalyst
- from ..data.data_structure import (
+
+ from ragaai_catalyst.tracers.agentic_tracing.upload.upload_local_metric import calculate_metric
+ from ragaai_catalyst import RagaAICatalyst
+ from ragaai_catalyst.tracers.agentic_tracing.data.data_structure import (
  Trace,
  Metadata,
  SystemInfo,
- OSInfo,
- EnvironmentInfo,
  Resources,
- CPUResource,
- MemoryResource,
- DiskResource,
- NetworkResource,
- ResourceInfo,
- MemoryInfo,
- DiskInfo,
- NetworkInfo,
  Component,
  )
+ from ragaai_catalyst.tracers.agentic_tracing.utils.file_name_tracker import TrackName
+ from ragaai_catalyst.tracers.agentic_tracing.utils.zip_list_of_unique_files import zip_list_of_unique_files
+ from ragaai_catalyst.tracers.agentic_tracing.utils.span_attributes import SpanAttributes
+ from ragaai_catalyst.tracers.agentic_tracing.utils.system_monitor import SystemMonitor
+ from ragaai_catalyst.tracers.agentic_tracing.upload.trace_uploader import submit_upload_task, get_task_status, ensure_uploader_running

- from ..upload.upload_agentic_traces import UploadAgenticTraces
- from ..upload.upload_code import upload_code
- from ..upload.upload_trace_metric import upload_trace_metric
- from ..utils.file_name_tracker import TrackName
- from ..utils.zip_list_of_unique_files import zip_list_of_unique_files
- from ..utils.span_attributes import SpanAttributes
- from ..utils.create_dataset_schema import create_dataset_schema_with_trace
-
-
- # Configure logging to show debug messages (which includes info messages as well)
  import logging

  logger = logging.getLogger(__name__)
@@ -76,11 +60,13 @@ class TracerJSONEncoder(json.JSONEncoder):
  class BaseTracer:
  def __init__(self, user_details):
  self.user_details = user_details
- self.project_name = self.user_details["project_name"] # Access the project_name
- self.dataset_name = self.user_details["dataset_name"] # Access the dataset_name
- self.project_id = self.user_details["project_id"] # Access the project_id
- self.trace_name = self.user_details["trace_name"] # Access the trace_name
+ self.project_name = self.user_details["project_name"]
+ self.dataset_name = self.user_details["dataset_name"]
+ self.project_id = self.user_details["project_id"]
+ self.trace_name = self.user_details["trace_name"]
+ self.base_url = self.user_details.get("base_url", RagaAICatalyst.BASE_URL) # Get base_url from user_details or fallback to default
  self.visited_metrics = []
+ self.trace_metrics = []

  # Initialize trace data
  self.trace_id = None
@@ -96,121 +82,80 @@ class BaseTracer:
  self.network_usage_list = []
  self.tracking_thread = None
  self.tracking = False
+ self.system_monitor = None
+ self.gt = None

- def _get_system_info(self) -> SystemInfo:
- # Get OS info
- os_info = OSInfo(
- name=platform.system(),
- version=platform.version(),
- platform=platform.machine(),
- kernel_version=platform.release(),
- )
-
- # Get Python environment info
- installed_packages = [
- f"{pkg.key}=={pkg.version}" for pkg in pkg_resources.working_set
- ]
- env_info = EnvironmentInfo(
- name="Python",
- version=platform.python_version(),
- packages=installed_packages,
- env_path=sys.prefix,
- command_to_run=f"python {sys.argv[0]}",
- )
+ # For upload tracking
+ self.upload_task_id = None
+
+ # For backward compatibility
+ self._upload_tasks = []
+ self._is_uploading = False
+ self._upload_completed_callback = None
+
+ ensure_uploader_running()

- return SystemInfo(
- id=f"sys_{self.trace_id}",
- os=os_info,
- environment=env_info,
- source_code="Path to the source code .zip file in format hashid.zip", # TODO: Implement source code archiving
- )
+ def _get_system_info(self) -> SystemInfo:
+ return self.system_monitor.get_system_info()

  def _get_resources(self) -> Resources:
- # CPU info
- cpu_info = ResourceInfo(
- name=platform.processor(),
- cores=psutil.cpu_count(logical=False),
- threads=psutil.cpu_count(logical=True),
- )
- cpu = CPUResource(info=cpu_info, interval="5s", values=[psutil.cpu_percent()])
-
- # Memory info
- memory = psutil.virtual_memory()
- mem_info = MemoryInfo(
- total=memory.total / (1024**3), # Convert to GB
- free=memory.available / (1024**3),
- )
- mem = MemoryResource(info=mem_info, interval="5s", values=[memory.percent])
-
- # Disk info
- disk = psutil.disk_usage("/")
- disk_info = DiskInfo(total=disk.total / (1024**3), free=disk.free / (1024**3))
- disk_io = psutil.disk_io_counters()
- disk_resource = DiskResource(
- info=disk_info,
- interval="5s",
- read=[disk_io.read_bytes / (1024**2)], # MB
- write=[disk_io.write_bytes / (1024**2)],
- )
-
- # Network info
- net_io = psutil.net_io_counters()
- net_info = NetworkInfo(
- upload_speed=net_io.bytes_sent / (1024**2), # MB
- download_speed=net_io.bytes_recv / (1024**2),
- )
- net = NetworkResource(
- info=net_info,
- interval="5s",
- uploads=[net_io.bytes_sent / (1024**2)],
- downloads=[net_io.bytes_recv / (1024**2)],
- )
-
- return Resources(cpu=cpu, memory=mem, disk=disk_resource, network=net)
+ return self.system_monitor.get_resources()

  def _track_memory_usage(self):
  self.memory_usage_list = []
  while self.tracking:
- memory_usage = psutil.Process().memory_info().rss
- self.memory_usage_list.append(memory_usage / (1024 * 1024)) # Convert to MB and append to the list
- time.sleep(self.interval_time)
+ usage = self.system_monitor.track_memory_usage()
+ self.memory_usage_list.append(usage)
+ try:
+ time.sleep(self.interval_time)
+ except Exception as e:
+ logger.warning(f"Sleep interrupted in memory tracking: {str(e)}")

  def _track_cpu_usage(self):
  self.cpu_usage_list = []
  while self.tracking:
- cpu_usage = psutil.cpu_percent(interval=self.interval_time)
- self.cpu_usage_list.append(cpu_usage)
- time.sleep(self.interval_time)
+ usage = self.system_monitor.track_cpu_usage(self.interval_time)
+ self.cpu_usage_list.append(usage)
+ try:
+ time.sleep(self.interval_time)
+ except Exception as e:
+ logger.warning(f"Sleep interrupted in CPU tracking: {str(e)}")

  def _track_disk_usage(self):
  self.disk_usage_list = []
  while self.tracking:
- disk_io = psutil.disk_io_counters()
- self.disk_usage_list.append({
- 'disk_read': disk_io.read_bytes / (1024 * 1024), # Convert to MB
- 'disk_write': disk_io.write_bytes / (1024 * 1024) # Convert to MB
- })
- time.sleep(self.interval_time)
+ usage = self.system_monitor.track_disk_usage()
+ self.disk_usage_list.append(usage)
+ try:
+ time.sleep(self.interval_time)
+ except Exception as e:
+ logger.warning(f"Sleep interrupted in disk tracking: {str(e)}")

  def _track_network_usage(self):
  self.network_usage_list = []
  while self.tracking:
- net_io = psutil.net_io_counters()
- self.network_usage_list.append({
- 'uploads': net_io.bytes_sent / (1024 * 1024), # Convert to MB
- 'downloads': net_io.bytes_recv / (1024 * 1024) # Convert to MB
- })
- time.sleep(self.interval_time)
+ usage = self.system_monitor.track_network_usage()
+ self.network_usage_list.append(usage)
+ try:
+ time.sleep(self.interval_time)
+ except Exception as e:
+ logger.warning(f"Sleep interrupted in network tracking: {str(e)}")

  def start(self):
  """Initialize a new trace"""
  self.tracking = True
- self.tracking_thread = threading.Thread(target=self._track_memory_usage)
- self.tracking_thread.start()
+ self.trace_id = str(uuid.uuid4())
+ self.file_tracker.trace_main_file()
+ self.system_monitor = SystemMonitor(self.trace_id)
+ threading.Thread(target=self._track_memory_usage).start()
  threading.Thread(target=self._track_cpu_usage).start()
  threading.Thread(target=self._track_disk_usage).start()
  threading.Thread(target=self._track_network_usage).start()

+ # Reset metrics
+ self.visited_metrics = []
+ self.trace_metrics = []
+
  metadata = Metadata(
  cost={},
  tokens={},
@@ -218,9 +163,6 @@ class BaseTracer:
  resources=self._get_resources(),
  )

- # Generate a unique trace ID, when trace starts
- self.trace_id = str(uuid.uuid4())
-
  # Get the start time
  self.start_time = datetime.now().astimezone().isoformat()

@@ -241,122 +183,224 @@ class BaseTracer:
  metadata=metadata,
  data=self.data_key,
  replays={"source": None},
+ metrics=[] # Initialize empty metrics list
  )

+ def on_upload_completed(self, callback_fn):
+ """
+ Register a callback function to be called when all uploads are completed.
+ For backward compatibility - simulates the old callback mechanism.
+
+ Args:
+ callback_fn: A function that takes a single argument (the tracer instance)
+ """
+ self._upload_completed_callback = callback_fn
+
+ # Check for status periodically and call callback when complete
+ def check_status_and_callback():
+ if self.upload_task_id:
+ status = self.get_upload_status()
+ if status.get("status") in ["completed", "failed"]:
+ self._is_uploading = False
+ # Execute callback
+ try:
+ if self._upload_completed_callback:
+ self._upload_completed_callback(self)
+ except Exception as e:
+ logger.error(f"Error in upload completion callback: {e}")
+ return
+
+ # Schedule next check
+ threading.Timer(5.0, check_status_and_callback).start()
+
+ # Start status checking if we already have a task
+ if self.upload_task_id:
+ threading.Timer(5.0, check_status_and_callback).start()
+
+ return self
+
+ def wait_for_uploads(self, timeout=None):
+ """
+ Wait for all async uploads to complete.
+ This provides backward compatibility with the old API.
+
+ Args:
+ timeout: Maximum time to wait in seconds (None means wait indefinitely)
+
+ Returns:
+ True if all uploads completed successfully, False otherwise
+ """
+ if not self.upload_task_id:
+ return True
+
+ start_time = time.time()
+ while True:
+ # Check if timeout expired
+ if timeout is not None and time.time() - start_time > timeout:
+ logger.warning(f"Upload wait timed out after {timeout} seconds")
+ return False
+
+ # Get current status
+ status = self.get_upload_status()
+ if status.get("status") == "completed":
+ return True
+ elif status.get("status") == "failed":
+ logger.error(f"Upload failed: {status.get('error')}")
+ return False
+ elif status.get("status") == "unknown":
+ logger.warning("Upload task not found, assuming completed")
+ return True
+
+ # Sleep before checking again
+ time.sleep(1.0)
+
  def stop(self):
- """Stop the trace and save to JSON file"""
+ """Stop the trace and save to JSON file, then submit to background uploader"""
  if hasattr(self, "trace"):
+ # Set end times
  self.trace.data[0]["end_time"] = datetime.now().astimezone().isoformat()
  self.trace.end_time = datetime.now().astimezone().isoformat()

- #track memory usage
+ # Stop tracking metrics
  self.tracking = False
- if self.tracking_thread is not None:
- self.tracking_thread.join()
- self.trace.metadata.resources.memory.values = self.memory_usage_list
-
- #track cpu usage
- self.trace.metadata.resources.cpu.values = self.cpu_usage_list
-
- #track network and disk usage
- network_upoloads, network_downloads = 0, 0
- disk_read, disk_write = 0, 0
- for network_usage, disk_usage in zip(self.network_usage_list, self.disk_usage_list):
- network_upoloads += network_usage['uploads']
- network_downloads += network_usage['downloads']
- disk_read += disk_usage['disk_read']
- disk_write += disk_usage['disk_write']
-
- #track disk usage
- self.trace.metadata.resources.disk.read = [disk_read / len(self.disk_usage_list)]
- self.trace.metadata.resources.disk.write = [disk_write / len(self.disk_usage_list)]
-
- #track network usage
- self.trace.metadata.resources.network.uploads = [network_upoloads / len(self.network_usage_list)]
- self.trace.metadata.resources.network.downloads = [network_downloads / len(self.network_usage_list)]
-
- # update interval time
- self.trace.metadata.resources.cpu.interval = float(self.interval_time)
- self.trace.metadata.resources.memory.interval = float(self.interval_time)
- self.trace.metadata.resources.disk.interval = float(self.interval_time)
- self.trace.metadata.resources.network.interval = float(self.interval_time)
-
- # Change span ids to int
+
+ # Process and aggregate metrics
+ self._process_resource_metrics()
+
+ # Process trace spans
  self.trace = self._change_span_ids_to_int(self.trace)
  self.trace = self._change_agent_input_output(self.trace)
  self.trace = self._extract_cost_tokens(self.trace)

- # Create traces directory if it doesn't exist
+ # Create traces directory and prepare file paths
  self.traces_dir = tempfile.gettempdir()
  filename = self.trace.id + ".json"
  filepath = f"{self.traces_dir}/{filename}"

- # get unique files and zip it. Generate a unique hash ID for the contents of the files
+ # Process source files
  list_of_unique_files = self.file_tracker.get_unique_files()
  hash_id, zip_path = zip_list_of_unique_files(
  list_of_unique_files, output_dir=self.traces_dir
  )
-
- # replace source code with zip_path
  self.trace.metadata.system_info.source_code = hash_id

- # Clean up trace_data before saving
- trace_data = self.trace.__dict__
+ # Prepare trace data for saving
+ trace_data = self.trace.to_dict()
+ trace_data["metrics"] = self.trace_metrics
  cleaned_trace_data = self._clean_trace(trace_data)
-
- # Format interactions and add to trace
+
+ # Add interactions
  interactions = self.format_interactions()
- self.trace.workflow = interactions["workflow"]
+ cleaned_trace_data["workflow"] = interactions["workflow"]

+ # Save trace data to file
  with open(filepath, "w") as f:
  json.dump(cleaned_trace_data, f, cls=TracerJSONEncoder, indent=2)

- logger.info(" Traces saved successfully.")
+ logger.info("Traces saved successfully.")
  logger.debug(f"Trace saved to {filepath}")
- # Upload traces
-
- json_file_path = str(filepath)
- project_name = self.project_name
- project_id = self.project_id
- dataset_name = self.dataset_name
- user_detail = self.user_details
- base_url = RagaAICatalyst.BASE_URL
-
- ## create dataset schema
- response = create_dataset_schema_with_trace(
- dataset_name=dataset_name, project_name=project_name
- )
-
- ##Upload trace metrics
- response = upload_trace_metric(
- json_file_path=json_file_path,
- dataset_name=self.dataset_name,
- project_name=self.project_name,
- )
-
- upload_traces = UploadAgenticTraces(
- json_file_path=json_file_path,
- project_name=project_name,
- project_id=project_id,
- dataset_name=dataset_name,
- user_detail=user_detail,
- base_url=base_url,
- )
- upload_traces.upload_agentic_traces()
+
+ # Make sure uploader process is available
+ ensure_uploader_running()

- # Upload Codehash
- response = upload_code(
+ logger.debug("Base URL used for uploading: {}".format(self.base_url))
+
+ # Submit to background process for uploading using futures
+ self.upload_task_id = submit_upload_task(
+ filepath=filepath,
  hash_id=hash_id,
  zip_path=zip_path,
- project_name=project_name,
- dataset_name=dataset_name,
+ project_name=self.project_name,
+ project_id=self.project_id,
+ dataset_name=self.dataset_name,
+ user_details=self.user_details,
+ base_url=self.base_url
  )
- print(response)
+
+ # For backward compatibility
+ self._is_uploading = True
+
+ # Start checking for completion if a callback is registered
+ if self._upload_completed_callback:
+ # Start a thread to check status and call callback when complete
+ def check_status_and_callback():
+ status = self.get_upload_status()
+ if status.get("status") in ["completed", "failed"]:
+ self._is_uploading = False
+ # Execute callback
+ try:
+ self._upload_completed_callback(self)
+ except Exception as e:
+ logger.error(f"Error in upload completion callback: {e}")
+ return
+
+ # Check again after a delay
+ threading.Timer(5.0, check_status_and_callback).start()
+
+ # Start checking
+ threading.Timer(5.0, check_status_and_callback).start()
+
+ logger.info(f"Submitted upload task with ID: {self.upload_task_id}")

- # Cleanup
+ # Cleanup local resources
  self.components = []
  self.file_tracker.reset()
+
+ def get_upload_status(self):
+ """
+ Get the status of the upload task.
+
+ Returns:
+ dict: Status information
+ """
+ if not self.upload_task_id:
+ return {"status": "not_started", "message": "No upload has been initiated"}
+
+ return get_task_status(self.upload_task_id)

+ def _process_resource_metrics(self):
+ """Process and aggregate all resource metrics"""
+ # Process memory metrics
+ self.trace.metadata.resources.memory.values = self.memory_usage_list
+
+ # Process CPU metrics
+ self.trace.metadata.resources.cpu.values = self.cpu_usage_list
+
+ # Process network and disk metrics
+ network_uploads, network_downloads = 0, 0
+ disk_read, disk_write = 0, 0
+
+ # Handle cases where lists might have different lengths
+ min_len = min(len(self.network_usage_list), len(self.disk_usage_list)) if self.network_usage_list and self.disk_usage_list else 0
+ for i in range(min_len):
+ network_usage = self.network_usage_list[i]
+ disk_usage = self.disk_usage_list[i]
+
+ # Safely get network usage values with defaults of 0
+ network_uploads += network_usage.get('uploads', 0) or 0
+ network_downloads += network_usage.get('downloads', 0) or 0
+
+ # Safely get disk usage values with defaults of 0
+ disk_read += disk_usage.get('disk_read', 0) or 0
+ disk_write += disk_usage.get('disk_write', 0) or 0
+
+ # Set aggregate values
+ disk_list_len = len(self.disk_usage_list)
+ self.trace.metadata.resources.disk.read = [disk_read / disk_list_len if disk_list_len > 0 else 0]
+ self.trace.metadata.resources.disk.write = [disk_write / disk_list_len if disk_list_len > 0 else 0]
+
+ network_list_len = len(self.network_usage_list)
+ self.trace.metadata.resources.network.uploads = [
+ network_uploads / network_list_len if network_list_len > 0 else 0]
+ self.trace.metadata.resources.network.downloads = [
+ network_downloads / network_list_len if network_list_len > 0 else 0]
+
+ # Set interval times
+ self.trace.metadata.resources.cpu.interval = float(self.interval_time)
+ self.trace.metadata.resources.memory.interval = float(self.interval_time)
+ self.trace.metadata.resources.disk.interval = float(self.interval_time)
+ self.trace.metadata.resources.network.interval = float(self.interval_time)
+
  def add_component(self, component: Component):
  """Add a component to the trace"""
  self.components.append(component)
@@ -424,38 +468,44 @@ class BaseTracer:
  def _extract_cost_tokens(self, trace):
  cost = {}
  tokens = {}
- for span in trace.data[0]["spans"]:
- if span.type == "llm":
- info = span.info
- if isinstance(info, dict):
- cost_info = info.get("cost", {})
- for key, value in cost_info.items():
- if key not in cost:
- cost[key] = 0
- cost[key] += value
- token_info = info.get("tokens", {})
- for key, value in token_info.items():
- if key not in tokens:
- tokens[key] = 0
- tokens[key] += value
- if span.type == "agent":
- for children in span.data["children"]:
- if "type" not in children:
- continue
- if children["type"] != "llm":
- continue
- info = children["info"]
- if isinstance(info, dict):
- cost_info = info.get("cost", {})
- for key, value in cost_info.items():
- if key not in cost:
- cost[key] = 0
- cost[key] += value
- token_info = info.get("tokens", {})
- for key, value in token_info.items():
- if key not in tokens:
- tokens[key] = 0
- tokens[key] += value
+
+ def process_span_info(info):
+ if not isinstance(info, dict):
+ return
+ cost_info = info.get("cost", {})
+ for key, value in cost_info.items():
+ if key not in cost:
+ cost[key] = 0
+ cost[key] += value
+ token_info = info.get("tokens", {})
+ for key, value in token_info.items():
+ if key not in tokens:
+ tokens[key] = 0
+ tokens[key] += value
+
+ def process_spans(spans):
+ for span in spans:
+ # Get span type, handling both span objects and dictionaries
+ span_type = span.type if hasattr(span, 'type') else span.get('type')
+ span_info = span.info if hasattr(span, 'info') else span.get('info', {})
+ span_data = span.data if hasattr(span, 'data') else span.get('data', {})
+
+ # Process direct LLM spans
+ if span_type == "llm":
+ process_span_info(span_info)
+ # Process agent spans recursively
+ elif span_type == "agent":
+ # Process LLM children in the current agent span
+ children = span_data.get("children", [])
+ for child in children:
+ child_type = child.get("type")
+ if child_type == "llm":
+ process_span_info(child.get("info", {}))
+ # Recursively process nested agent spans
+ elif child_type == "agent":
+ process_spans([child])
+
+ process_spans(trace.data[0]["spans"])
  trace.metadata.cost = cost
  trace.metadata.tokens = tokens
  return trace
@@ -503,15 +553,16 @@ class BaseTracer:
  else existing_span.__dict__
  )
  if (
- existing_dict.get("hash_id")
- == span_dict.get("hash_id")
- and str(existing_dict.get("data", {}).get("input"))
- == str(span_dict.get("data", {}).get("input"))
- and str(existing_dict.get("data", {}).get("output"))
- == str(span_dict.get("data", {}).get("output"))
+ existing_dict.get("hash_id")
+ == span_dict.get("hash_id")
+ and str(existing_dict.get("data", {}).get("input"))
+ == str(span_dict.get("data", {}).get("input"))
+ and str(existing_dict.get("data", {}).get("output"))
+ == str(span_dict.get("data", {}).get("output"))
  ):
  unique_spans[i] = span
  break
+
  else:
  # For non-LLM spans, process their children if they exist
  if "data" in span_dict and "children" in span_dict["data"]:
@@ -522,8 +573,44 @@ class BaseTracer:
  span["data"]["children"] = filtered_children
  else:
  span.data["children"] = filtered_children
- unique_spans.append(span)
-
+ unique_spans.append(span)
+
+ # Process spans to update model information for LLM spans with same name
+ llm_spans_by_name = {}
+ for i, span in enumerate(unique_spans):
+ span_dict = span if isinstance(span, dict) else span.__dict__
+
+ if span_dict.get('type') == 'llm':
+ span_name = span_dict.get('name')
+ if span_name:
+ if span_name not in llm_spans_by_name:
+ llm_spans_by_name[span_name] = []
+ llm_spans_by_name[span_name].append((i, span_dict))
+
+ # Update model information for spans with same name
+ for spans_with_same_name in llm_spans_by_name.values():
+ if len(spans_with_same_name) > 1:
+ # Check if any span has non-default model
+ has_custom_model = any(
+ span[1].get('info', {}).get('model') != 'default'
+ for span in spans_with_same_name
+ )
+
+ # If we have a custom model, update all default models to 'custom'
+ if has_custom_model:
+ for idx, span_dict in spans_with_same_name:
+ if span_dict.get('info', {}).get('model') == 'default':
+ if isinstance(unique_spans[idx], dict):
+ if 'info' not in unique_spans[idx]:
+ unique_spans[idx]['info'] = {}
+ # unique_spans[idx]['info']['model'] = 'custom'
+ unique_spans[idx]['type'] = 'custom'
+ else:
+ if not hasattr(unique_spans[idx], 'info'):
+ unique_spans[idx].info = {}
+ # unique_spans[idx].info['model'] = 'custom'
+ unique_spans[idx].type = 'custom'
+
  return unique_spans

  # Remove any spans without hash ids
@@ -550,7 +637,7 @@ class BaseTracer:
  int: Next interaction ID to use
  """
  child_type = child.get("type")
-
+
  if child_type == "tool":
  # Tool call start
  interactions.append(
@@ -609,9 +696,7 @@ class BaseTracer:
  "span_id": child.get("id"),
  "interaction_type": "llm_call_end",
  "name": child.get("name"),
- "content": {
- "response": child.get("data", {}).get("output")
- },
+ "content": {"response": child.get("data", {}).get("output")},
  "timestamp": child.get("end_time"),
  "error": child.get("error"),
  }
@@ -657,7 +742,7 @@ class BaseTracer:
  {
  "id": str(interaction_id),
  "span_id": child.get("id"),
- "interaction_type": child_type,
+ "interaction_type": f"{child_type}_call_start",
  "name": child.get("name"),
  "content": child.get("data", {}),
  "timestamp": child.get("start_time"),
@@ -666,6 +751,19 @@ class BaseTracer:
  )
  interaction_id += 1

+ interactions.append(
+ {
+ "id": str(interaction_id),
+ "span_id": child.get("id"),
+ "interaction_type": f"{child_type}_call_end",
+ "name": child.get("name"),
+ "content": child.get("data", {}),
+ "timestamp": child.get("end_time"),
+ "error": child.get("error"),
+ }
+ )
+ interaction_id += 1
+
  # Process additional interactions and network calls
  if "interactions" in child:
  for interaction in child["interactions"]:
@@ -825,7 +923,7 @@ class BaseTracer:
  {
  "id": str(interaction_id),
  "span_id": span.id,
- "interaction_type": span.type,
+ "interaction_type": f"{span.type}_call_start",
  "name": span.name,
  "content": span.data,
  "timestamp": span.start_time,
@@ -834,6 +932,19 @@ class BaseTracer:
  )
  interaction_id += 1

+ interactions.append(
+ {
+ "id": str(interaction_id),
+ "span_id": span.id,
+ "interaction_type": f"{span.type}_call_end",
+ "name": span.name,
+ "content": span.data,
+ "timestamp": span.end_time,
+ "error": span.error,
+ }
+ )
+ interaction_id += 1
+
  # Process interactions from span.data if they exist
  if span.interactions:
  for span_interaction in span.interactions:
@@ -882,8 +993,307 @@ class BaseTracer:

  return {"workflow": sorted_interactions}

+ # TODO: Add support for execute metrics. Maintain list of all metrics to be added for this span
+
+ def execute_metrics(self,
+ name: str,
+ model: str,
+ provider: str,
+ prompt: str,
+ context: str,
+ response: str
+ ):
+ if not hasattr(self, 'trace'):
+ logger.warning("Cannot add metrics before trace is initialized. Call start() first.")
+ return
+
+ # Convert individual parameters to metric dict if needed
+ if isinstance(name, str):
+ metrics = [{
+ "name": name
+ }]
+ else:
+ # Handle dict or list input
+ metrics = name if isinstance(name, list) else [name] if isinstance(name, dict) else []
+
+ try:
+ for metric in metrics:
+ if not isinstance(metric, dict):
+ raise ValueError(f"Expected dict, got {type(metric)}")
+
+ if "name" not in metric :
+ raise ValueError("Metric must contain 'name'") #score was written not required here
+
+ # Handle duplicate metric names on executing metric
+ metric_name = metric["name"]
+ if metric_name in self.visited_metrics:
+ count = sum(1 for m in self.visited_metrics if m.startswith(metric_name))
+ metric_name = f"{metric_name}_{count + 1}"
+ self.visited_metrics.append(metric_name)
+
+ result = calculate_metric(project_id=self.project_id,
+ metric_name=metric_name,
+ model=model,
+ org_domain="raga",
+ provider=provider,
+ user_id="1", # self.user_details['id'],
+ prompt=prompt,
+ context=context,
+ response=response
+ )
+
+ result = result['data']
+ formatted_metric = {
+ "name": metric_name,
+ "score": result.get("score"),
+ "reason": result.get("reason", ""),
+ "source": "user",
+ "cost": result.get("cost"),
+ "latency": result.get("latency"),
+ "mappings": [],
+ "config": result.get("metric_config", {})
+ }
+
+ logger.debug(f"Executed metric: {formatted_metric}")
+
+ except ValueError as e:
+ logger.error(f"Validation Error: {e}")
+ except Exception as e:
+ logger.error(f"Error adding metric: {e}")
+
+ def add_metrics(
+ self,
+ name: str | List[Dict[str, Any]] | Dict[str, Any] = None,
+ score: float | int = None,
+ reasoning: str = "",
+ cost: float = None,
+ latency: float = None,
+ metadata: Dict[str, Any] = None,
+ config: Dict[str, Any] = None,
+ ):
+ """Add metrics at the trace level.
+
+ Can be called in two ways:
+ 1. With individual parameters:
+ tracer.add_metrics(name="metric_name", score=0.9, reasoning="Good performance")
+
+ 2. With a dictionary or list of dictionaries:
+ tracer.add_metrics({"name": "metric_name", "score": 0.9})
+ tracer.add_metrics([{"name": "metric1", "score": 0.9}, {"name": "metric2", "score": 0.8}])
+
+ Args:
+ name: Either the metric name (str) or a metric dictionary/list of dictionaries
+ score: Score value (float or int) when using individual parameters
+ reasoning: Optional explanation for the score
+ cost: Optional cost associated with the metric
+ latency: Optional latency measurement
+ metadata: Optional additional metadata as key-value pairs
+ config: Optional configuration parameters
+ """
+ if not hasattr(self, 'trace'):
+ logger.warning("Cannot add metrics before trace is initialized. Call start() first.")
+ return
+
+ # Convert individual parameters to metric dict if needed
+ if isinstance(name, str):
+ metrics = [{
+ "name": name,
+ "score": score,
+ "reasoning": reasoning,
+ "cost": cost,
+ "latency": latency,
+ "metadata": metadata or {},
+ "config": config or {}
+ }]
+ else:
+ # Handle dict or list input
+ metrics = name if isinstance(name, list) else [name] if isinstance(name, dict) else []
+
+ try:
+ for metric in metrics:
+ if not isinstance(metric, dict):
+ raise ValueError(f"Expected dict, got {type(metric)}")
+
+ if "name" not in metric or "score" not in metric:
+ raise ValueError("Metric must contain 'name' and 'score' fields")
+
+ # Handle duplicate metric names
+ metric_name = metric["name"]
+ if metric_name in self.visited_metrics:
+ count = sum(1 for m in self.visited_metrics if m.startswith(metric_name))
+ metric_name = f"{metric_name}_{count + 1}"
+ self.visited_metrics.append(metric_name)
+
+ formatted_metric = {
+ "name": metric_name,
+ "score": metric["score"],
+ "reason": metric.get("reasoning", ""),
+ "source": "user",
+ "cost": metric.get("cost"),
+ "latency": metric.get("latency"),
+ "metadata": metric.get("metadata", {}),
+ "mappings": [],
+ "config": metric.get("config", {})
+ }
+
+ self.trace_metrics.append(formatted_metric)
+ logger.debug(f"Added trace-level metric: {formatted_metric}")
+
+ except ValueError as e:
+ logger.error(f"Validation Error: {e}")
+ except Exception as e:
+ logger.error(f"Error adding metric: {e}")
+
  def span(self, span_name):
  if span_name not in self.span_attributes_dict:
- self.span_attributes_dict[span_name] = SpanAttributes(span_name)
+ self.span_attributes_dict[span_name] = SpanAttributes(span_name, self.project_id)
  return self.span_attributes_dict[span_name]
-
+
+ @staticmethod
+ def get_formatted_metric(span_attributes_dict, project_id, name):
+ if name in span_attributes_dict:
+ local_metrics = span_attributes_dict[name].local_metrics or []
+ local_metrics_results = []
+ for metric in local_metrics:
+ try:
+ logger.info("calculating the metric, please wait....")
+
+ mapping = metric.get("mapping", {})
+ result = calculate_metric(project_id=project_id,
+ metric_name=metric.get("name"),
+ model=metric.get("model"),
+ provider=metric.get("provider"),
+ **mapping
+ )
+
+ result = result['data']['data'][0]
+ config = result['metric_config']
+ metric_config = {
+ "job_id": config.get("job_id"),
+ "metric_name": config.get("displayName"),
+ "model": config.get("model"),
+ "org_domain": config.get("orgDomain"),
+ "provider": config.get("provider"),
+ "reason": config.get("reason"),
+ "request_id": config.get("request_id"),
+ "user_id": config.get("user_id"),
+ "threshold": {
+ "is_editable": config.get("threshold").get("isEditable"),
+ "lte": config.get("threshold").get("lte")
+ }
+ }
+ formatted_metric = {
+ "name": metric.get("displayName"),
+ "displayName": metric.get("displayName"),
+ "score": result.get("score"),
+ "reason": result.get("reason", ""),
+ "source": "user",
+ "cost": result.get("cost"),
+ "latency": result.get("latency"),
+ "mappings": [],
+ "config": metric_config
+ }
+ local_metrics_results.append(formatted_metric)
+ except ValueError as e:
+ logger.error(f"Validation Error: {e}")
+ except Exception as e:
+ logger.error(f"Error executing metric: {e}")
+
+ return local_metrics_results
+
+
+ def upload_directly(self):
+ """Upload trace directly without using the background process"""
+ # Check if we have necessary details
+ if not hasattr(self, 'trace') or not self.trace_id:
+ print("No trace to upload")
+ return False
+
+ # Get the filepath from the last trace
+ trace_dir = tempfile.gettempdir()
+ trace_file = os.path.join(trace_dir, f"{self.trace_id}.json")
+
+ # If filepath wasn't saved from previous stop() call, try to find it
+ if not os.path.exists(trace_file):
+ print(f"Looking for trace file for {self.trace_id}")
+ # Try to find the trace file by pattern
+ for file in os.listdir(trace_dir):
+ if file.endswith(".json") and self.trace_id in file:
+ trace_file = os.path.join(trace_dir, file)
+ print(f"Found trace file: {trace_file}")
+ break
+
+ if not os.path.exists(trace_file):
+ print(f"Trace file not found for ID {self.trace_id}")
+ return False
+
+ print(f"Starting direct upload of {trace_file}")
+
+ try:
+ # 1. Create the dataset schema
+ print("Creating dataset schema...")
+ from ragaai_catalyst.tracers.agentic_tracing.utils.create_dataset_schema import create_dataset_schema_with_trace
+ response = create_dataset_schema_with_trace(
+ dataset_name=self.dataset_name,
+ project_name=self.project_name
+ )
+ print(f"Schema created: {response}")
+
+ # 2. Upload trace metrics
+ print("Uploading trace metrics...")
+ from ragaai_catalyst.tracers.agentic_tracing.upload.upload_trace_metric import upload_trace_metric
+ response = upload_trace_metric(
+ json_file_path=trace_file,
+ dataset_name=self.dataset_name,
+ project_name=self.project_name,
+ )
+ print(f"Metrics uploaded: {response}")
+
+ # 3. Get code hash and zip path if available
+ code_hash = None
+ zip_path = None
+ try:
+ with open(trace_file, 'r') as f:
+ data = json.load(f)
+ code_hash = data.get("metadata", {}).get("system_info", {}).get("source_code")
+ if code_hash:
+ zip_path = os.path.join(trace_dir, f"{code_hash}.zip")
+ print(f"Found code hash: {code_hash}")
+ print(f"Zip path: {zip_path}")
+ except Exception as e:
+ print(f"Error getting code hash: {e}")
+
+ # 4. Upload agentic traces
+ print("Uploading agentic traces...")
+ from ragaai_catalyst.tracers.agentic_tracing.upload.upload_agentic_traces import UploadAgenticTraces
+ from ragaai_catalyst import RagaAICatalyst
+ upload_traces = UploadAgenticTraces(
+ json_file_path=trace_file,
+ project_name=self.project_name,
+ project_id=self.project_id,
+ dataset_name=self.dataset_name,
+ user_detail=self.user_details,
+ base_url=RagaAICatalyst.BASE_URL,
+ )
+ upload_traces.upload_agentic_traces()
+ print("Agentic traces uploaded successfully")
+
+ # 5. Upload code hash if available
+ if code_hash and zip_path and os.path.exists(zip_path):
+ print(f"Uploading code hash: {code_hash}")
+ from ragaai_catalyst.tracers.agentic_tracing.upload.upload_code import upload_code
+ response = upload_code(
+ hash_id=code_hash,
+ zip_path=zip_path,
+ project_name=self.project_name,
+ dataset_name=self.dataset_name,
+ )
+ print(f"Code uploaded: {response}")
+
+ print("Upload completed successfully - check UI now")
+ return True
+ except Exception as e:
+ print(f"Error during direct upload: {e}")
+ import traceback
+ traceback.print_exc()
+ return False
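
For reference, a minimal usage sketch of the trace-level metric and background-upload APIs introduced in this hunk. It assumes `tracer` is an instance whose class provides these BaseTracer methods (the public entry point is outside this hunk), and the metric names are placeholders, not part of the package:

    # Hypothetical usage based on the BaseTracer methods added in 2.1.5.
    tracer.start()                                # creates a trace_id and starts the system-monitoring threads

    # ... run the instrumented application ...

    # Trace-level metrics: individual keyword arguments or a dict / list of dicts.
    tracer.add_metrics(name="answer_quality", score=0.9, reasoning="matches reference")
    tracer.add_metrics([{"name": "toxicity", "score": 0.02}])

    # stop() writes the trace JSON to a temp directory and hands it to the
    # background uploader via submit_upload_task().
    tracer.stop()

    print(tracer.get_upload_status())             # dict with a "status" key
    if not tracer.wait_for_uploads(timeout=300):  # block until the async upload finishes or times out
        tracer.upload_directly()                  # synchronous fallback path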