ragaai-catalyst 2.1.5b32__py3-none-any.whl → 2.1.5b33__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,7 @@
 import os
 import logging
 import requests
+import time
 from typing import Dict, Optional, Union
 import re
 logger = logging.getLogger("RagaAICatalyst")
@@ -116,12 +117,17 @@ class RagaAICatalyst:
             for service, key in self.api_keys.items()
         ]
         json_data = {"secrets": secrets}
+        start_time = time.time()
+        endpoint = f"{RagaAICatalyst.BASE_URL}/v1/llm/secrets/upload"
         response = requests.post(
-            f"{RagaAICatalyst.BASE_URL}/v1/llm/secrets/upload",
+            endpoint,
             headers=headers,
             json=json_data,
             timeout=RagaAICatalyst.TIMEOUT,
         )
+        elapsed_ms = (time.time() - start_time) * 1000
+        logger.debug(
+            f"API Call: [POST] {endpoint} | Status: {response.status_code} | Time: {elapsed_ms:.2f}ms")
         if response.status_code == 200:
             print("API keys uploaded successfully")
         else:
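Note on the pattern: every request in this module now gets the same `start_time` / `elapsed_ms` / `logger.debug` wrapper, added inline at each call site. A minimal sketch of how the repetition could be factored into a single helper; the `_timed_request` name and signature are illustrative, not part of the package:

```python
import logging
import time

import requests

logger = logging.getLogger("RagaAICatalyst")


def _timed_request(method, endpoint, **kwargs):
    """Illustrative helper (not in ragaai-catalyst): issue a request and
    emit the same debug line the diff adds inline around each call."""
    start_time = time.time()
    response = requests.request(method, endpoint, **kwargs)
    elapsed_ms = (time.time() - start_time) * 1000
    logger.debug(
        f"API Call: [{method.upper()}] {endpoint} | "
        f"Status: {response.status_code} | Time: {elapsed_ms:.2f}ms")
    return response
```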
@@ -162,12 +168,17 @@ class RagaAICatalyst:
         headers = {"Content-Type": "application/json"}
         json_data = {"accessKey": access_key, "secretKey": secret_key}

+        start_time = time.time()
+        endpoint = f"{RagaAICatalyst.BASE_URL}/token"
         response = requests.post(
-            f"{ RagaAICatalyst.BASE_URL}/token",
+            endpoint,
             headers=headers,
             json=json_data,
             timeout=RagaAICatalyst.TIMEOUT,
         )
+        elapsed_ms = (time.time() - start_time) * 1000
+        logger.debug(
+            f"API Call: [POST] {endpoint} | Status: {response.status_code} | Time: {elapsed_ms:.2f}ms")

         # Handle specific status codes before raising an error
         if response.status_code == 400:
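A side note on the measurement itself: `time.time()` reads the wall clock, which can jump if the system clock is adjusted mid-request. For pure interval timing, Python's `time.monotonic()` is the conventional choice. An equivalent, illustrative measurement:

```python
import time

start = time.monotonic()
time.sleep(0.05)  # stand-in for the requests.post(...) call above
elapsed_ms = (time.monotonic() - start) * 1000
print(f"Time: {elapsed_ms:.2f}ms")
```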
@@ -202,11 +213,16 @@ class RagaAICatalyst:
         headers = {
             "Authorization": f'Bearer {os.getenv("RAGAAI_CATALYST_TOKEN")}',
         }
+        start_time = time.time()
+        endpoint = f"{RagaAICatalyst.BASE_URL}/v2/llm/usecase"
         response = requests.get(
-            f"{RagaAICatalyst.BASE_URL}/v2/llm/usecase",
+            endpoint,
             headers=headers,
             timeout=self.TIMEOUT
         )
+        elapsed_ms = (time.time() - start_time) * 1000
+        logger.debug(
+            f"API Call: [GET] {endpoint} | Status: {response.status_code} | Time: {elapsed_ms:.2f}ms")
         response.raise_for_status()  # Use raise_for_status to handle HTTP errors
         usecase = response.json()["data"]["usecase"]
         return usecase
@@ -241,12 +257,17 @@ class RagaAICatalyst:
             "Authorization": f'Bearer {os.getenv("RAGAAI_CATALYST_TOKEN")}',
         }
         try:
+            start_time = time.time()
+            endpoint = f"{RagaAICatalyst.BASE_URL}/v2/llm/project"
             response = requests.post(
-                f"{RagaAICatalyst.BASE_URL}/v2/llm/project",
+                endpoint,
                 headers=headers,
                 json=json_data,
                 timeout=self.TIMEOUT,
             )
+            elapsed_ms = (time.time() - start_time) * 1000
+            logger.debug(
+                f"API Call: [POST] {endpoint} | Status: {response.status_code} | Time: {elapsed_ms:.2f}ms")
             response.raise_for_status()
             print(
                 f"Project Created Successfully with name {response.json()['data']['name']} & usecase {usecase}"
@@ -310,11 +331,16 @@ class RagaAICatalyst:
             "Authorization": f'Bearer {os.getenv("RAGAAI_CATALYST_TOKEN")}',
         }
         try:
+            start_time = time.time()
+            endpoint = f"{RagaAICatalyst.BASE_URL}/v2/llm/projects?size={num_projects}"
             response = requests.get(
-                f"{RagaAICatalyst.BASE_URL}/v2/llm/projects?size={num_projects}",
+                endpoint,
                 headers=headers,
                 timeout=self.TIMEOUT,
             )
+            elapsed_ms = (time.time() - start_time) * 1000
+            logger.debug(
+                f"API Call: [GET] {endpoint} | Status: {response.status_code} | Time: {elapsed_ms:.2f}ms")
             response.raise_for_status()
             logger.debug("Projects list retrieved successfully")

@@ -378,11 +404,16 @@ class RagaAICatalyst:
             "Authorization": f'Bearer {os.getenv("RAGAAI_CATALYST_TOKEN")}',
         }
         try:
+            start_time = time.time()
+            endpoint = f"{RagaAICatalyst.BASE_URL}/v1/llm/llm-metrics"
             response = requests.get(
-                f"{RagaAICatalyst.BASE_URL}/v1/llm/llm-metrics",
+                endpoint,
                 headers=headers,
                 timeout=RagaAICatalyst.TIMEOUT,
             )
+            elapsed_ms = (time.time() - start_time) * 1000
+            logger.debug(
+                f"API Call: [GET] {endpoint} | Status: {response.status_code} | Time: {elapsed_ms:.2f}ms")
             response.raise_for_status()
             logger.debug("Metrics list retrieved successfully")

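All of these new timing lines are emitted at DEBUG level through the `RagaAICatalyst` logger, so they stay silent under the default log level. One way to surface them, using only the standard library:

```python
import logging

logging.basicConfig()
logging.getLogger("RagaAICatalyst").setLevel(logging.DEBUG)
# Subsequent SDK calls now print lines like:
#   API Call: [POST] .../v1/llm/secrets/upload | Status: 200 | Time: 142.37ms
```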
@@ -18,14 +18,11 @@ from ragaai_catalyst.tracers.agentic_tracing.data.data_structure import (
     Resources,
     Component,
 )
-from ragaai_catalyst.tracers.agentic_tracing.upload.upload_agentic_traces import UploadAgenticTraces
-from ragaai_catalyst.tracers.agentic_tracing.upload.upload_code import upload_code
-from ragaai_catalyst.tracers.agentic_tracing.upload.upload_trace_metric import upload_trace_metric
 from ragaai_catalyst.tracers.agentic_tracing.utils.file_name_tracker import TrackName
 from ragaai_catalyst.tracers.agentic_tracing.utils.zip_list_of_unique_files import zip_list_of_unique_files
 from ragaai_catalyst.tracers.agentic_tracing.utils.span_attributes import SpanAttributes
-from ragaai_catalyst.tracers.agentic_tracing.utils.create_dataset_schema import create_dataset_schema_with_trace
 from ragaai_catalyst.tracers.agentic_tracing.utils.system_monitor import SystemMonitor
+from ragaai_catalyst.tracers.agentic_tracing.upload.trace_uploader import submit_upload_task, get_task_status, ensure_uploader_running

 import logging

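The synchronous upload imports (`UploadAgenticTraces`, `upload_code`, `upload_trace_metric`, `create_dataset_schema_with_trace`) leave module scope; `stop()` now hands work to a background uploader through three functions. Their internals are not part of this diff, but the call sites imply roughly the following contract. This is a sketch under that assumption, not the actual `trace_uploader` module:

```python
# Contract implied by the call sites in this diff; the real
# trace_uploader internals are not shown here.

def ensure_uploader_running() -> None:
    """Start the background uploader if it is not already running."""
    ...

def submit_upload_task(filepath, hash_id, zip_path, project_name,
                       project_id, dataset_name, user_details, base_url) -> str:
    """Queue one saved trace for upload and return a task id."""
    ...

def get_task_status(task_id) -> dict:
    """Return a dict with a "status" key; the polling code in this diff
    expects "completed", "failed" (with an "error" key), or "unknown"."""
    ...
```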
@@ -67,6 +64,7 @@ class BaseTracer:
         self.dataset_name = self.user_details["dataset_name"]
         self.project_id = self.user_details["project_id"]
         self.trace_name = self.user_details["trace_name"]
+        self.base_url = self.user_details.get("base_url", RagaAICatalyst.BASE_URL)  # Get base_url from user_details or fallback to default
         self.visited_metrics = []
         self.trace_metrics = []

@@ -87,6 +85,14 @@ class BaseTracer:
         self.system_monitor = None
         self.gt = None

+        # For upload tracking
+        self.upload_task_id = None
+
+        # For backward compatibility
+        self._upload_tasks = []
+        self._is_uploading = False
+        self._upload_completed_callback = None
+
     def _get_system_info(self) -> SystemInfo:
         return self.system_monitor.get_system_info()

@@ -178,135 +184,221 @@ class BaseTracer:
             metrics=[]  # Initialize empty metrics list
         )

+    def on_upload_completed(self, callback_fn):
+        """
+        Register a callback function to be called when all uploads are completed.
+        For backward compatibility - simulates the old callback mechanism.
+
+        Args:
+            callback_fn: A function that takes a single argument (the tracer instance)
+        """
+        self._upload_completed_callback = callback_fn
+
+        # Check for status periodically and call callback when complete
+        def check_status_and_callback():
+            if self.upload_task_id:
+                status = self.get_upload_status()
+                if status.get("status") in ["completed", "failed"]:
+                    self._is_uploading = False
+                    # Execute callback
+                    try:
+                        if self._upload_completed_callback:
+                            self._upload_completed_callback(self)
+                    except Exception as e:
+                        logger.error(f"Error in upload completion callback: {e}")
+                    return
+
+            # Schedule next check
+            threading.Timer(5.0, check_status_and_callback).start()
+
+        # Start status checking if we already have a task
+        if self.upload_task_id:
+            threading.Timer(5.0, check_status_and_callback).start()
+
+        return self
+
+    def wait_for_uploads(self, timeout=None):
+        """
+        Wait for all async uploads to complete.
+        This provides backward compatibility with the old API.
+
+        Args:
+            timeout: Maximum time to wait in seconds (None means wait indefinitely)
+
+        Returns:
+            True if all uploads completed successfully, False otherwise
+        """
+        if not self.upload_task_id:
+            return True
+
+        start_time = time.time()
+        while True:
+            # Check if timeout expired
+            if timeout is not None and time.time() - start_time > timeout:
+                logger.warning(f"Upload wait timed out after {timeout} seconds")
+                return False
+
+            # Get current status
+            status = self.get_upload_status()
+            if status.get("status") == "completed":
+                return True
+            elif status.get("status") == "failed":
+                logger.error(f"Upload failed: {status.get('error')}")
+                return False
+            elif status.get("status") == "unknown":
+                logger.warning("Upload task not found, assuming completed")
+                return True
+
+            # Sleep before checking again
+            time.sleep(1.0)
+
     def stop(self):
-        """Stop the trace and save to JSON file"""
+        """Stop the trace and save to JSON file, then submit to background uploader"""
         if hasattr(self, "trace"):
+            # Set end times
             self.trace.data[0]["end_time"] = datetime.now().astimezone().isoformat()
             self.trace.end_time = datetime.now().astimezone().isoformat()

-            # track memory usage
+            # Stop tracking metrics
             self.tracking = False
-            self.trace.metadata.resources.memory.values = self.memory_usage_list
-
-            # track cpu usage
-            self.trace.metadata.resources.cpu.values = self.cpu_usage_list
-
-            # track network and disk usage
-            network_uploads, network_downloads = 0, 0
-            disk_read, disk_write = 0, 0
-
-            # Handle cases where lists might have different lengths
-            min_len = min(len(self.network_usage_list), len(self.disk_usage_list))
-            for i in range(min_len):
-                network_usage = self.network_usage_list[i]
-                disk_usage = self.disk_usage_list[i]
-
-                # Safely get network usage values with defaults of 0
-                network_uploads += network_usage.get('uploads', 0) or 0
-                network_downloads += network_usage.get('downloads', 0) or 0
-
-                # Safely get disk usage values with defaults of 0
-                disk_read += disk_usage.get('disk_read', 0) or 0
-                disk_write += disk_usage.get('disk_write', 0) or 0
-
-            # track disk usage
-            disk_list_len = len(self.disk_usage_list)
-            self.trace.metadata.resources.disk.read = [disk_read / disk_list_len if disk_list_len > 0 else 0]
-            self.trace.metadata.resources.disk.write = [disk_write / disk_list_len if disk_list_len > 0 else 0]
-
-            # track network usage
-            network_list_len = len(self.network_usage_list)
-            self.trace.metadata.resources.network.uploads = [
-                network_uploads / network_list_len if network_list_len > 0 else 0]
-            self.trace.metadata.resources.network.downloads = [
-                network_downloads / network_list_len if network_list_len > 0 else 0]
-
-            # update interval time
-            self.trace.metadata.resources.cpu.interval = float(self.interval_time)
-            self.trace.metadata.resources.memory.interval = float(self.interval_time)
-            self.trace.metadata.resources.disk.interval = float(self.interval_time)
-            self.trace.metadata.resources.network.interval = float(self.interval_time)
-
-            # Change span ids to int
+
+            # Process and aggregate metrics
+            self._process_resource_metrics()
+
+            # Process trace spans
             self.trace = self._change_span_ids_to_int(self.trace)
             self.trace = self._change_agent_input_output(self.trace)
             self.trace = self._extract_cost_tokens(self.trace)

-            # Create traces directory if it doesn't exist
+            # Create traces directory and prepare file paths
             self.traces_dir = tempfile.gettempdir()
             filename = self.trace.id + ".json"
             filepath = f"{self.traces_dir}/{filename}"

-            # get unique files and zip it. Generate a unique hash ID for the contents of the files
+            # Process source files
             list_of_unique_files = self.file_tracker.get_unique_files()
             hash_id, zip_path = zip_list_of_unique_files(
                 list_of_unique_files, output_dir=self.traces_dir
             )
-
-            # replace source code with zip_path
             self.trace.metadata.system_info.source_code = hash_id

-            # Add metrics to trace before saving
+            # Prepare trace data for saving
             trace_data = self.trace.to_dict()
             trace_data["metrics"] = self.trace_metrics
-
-            # Clean up trace_data before saving
             cleaned_trace_data = self._clean_trace(trace_data)
-
-            # Format interactions and add to trace
+
+            # Add interactions
             interactions = self.format_interactions()
-            # trace_data["workflow"] = interactions["workflow"]
             cleaned_trace_data["workflow"] = interactions["workflow"]

+            # Save trace data to file
             with open(filepath, "w") as f:
                 json.dump(cleaned_trace_data, f, cls=TracerJSONEncoder, indent=2)

-            logger.info(" Traces saved successfully.")
+            logger.info("Traces saved successfully.")
             logger.debug(f"Trace saved to {filepath}")
-            # Upload traces
-
-            json_file_path = str(filepath)
-            project_name = self.project_name
-            project_id = self.project_id
-            dataset_name = self.dataset_name
-            user_detail = self.user_details
-            base_url = RagaAICatalyst.BASE_URL
-
-            ## create dataset schema
-            response = create_dataset_schema_with_trace(
-                dataset_name=dataset_name, project_name=project_name
-            )
-
-            ## Upload trace metrics
-            response = upload_trace_metric(
-                json_file_path=json_file_path,
-                dataset_name=self.dataset_name,
-                project_name=self.project_name,
-            )
-
-            upload_traces = UploadAgenticTraces(
-                json_file_path=json_file_path,
-                project_name=project_name,
-                project_id=project_id,
-                dataset_name=dataset_name,
-                user_detail=user_detail,
-                base_url=base_url,
-            )
-            upload_traces.upload_agentic_traces()
+
+            # Make sure uploader process is available
+            ensure_uploader_running()

-            # Upload Codehash
-            response = upload_code(
+            logger.debug("Base URL used for uploading: {}".format(self.base_url))
+
+            # Submit to background process for uploading
+            self.upload_task_id = submit_upload_task(
+                filepath=filepath,
                 hash_id=hash_id,
                 zip_path=zip_path,
-                project_name=project_name,
-                dataset_name=dataset_name,
+                project_name=self.project_name,
+                project_id=self.project_id,
+                dataset_name=self.dataset_name,
+                user_details=self.user_details,
+                base_url=self.base_url
             )
-            print(response)
+
+            # # For backward compatibility
+            # self._is_uploading = True
+
+            # # Start checking for completion if a callback is registered
+            # if self._upload_completed_callback:
+            #     # Start a thread to check status and call callback when complete
+            #     def check_status_and_callback():
+            #         status = self.get_upload_status()
+            #         if status.get("status") in ["completed", "failed"]:
+            #             self._is_uploading = False
+            #             # Execute callback
+            #             try:
+            #                 self._upload_completed_callback(self)
+            #             except Exception as e:
+            #                 logger.error(f"Error in upload completion callback: {e}")
+            #             return
+
+            #         # Check again after a delay
+            #         threading.Timer(5.0, check_status_and_callback).start()
+
+            #     # Start checking
+            #     threading.Timer(5.0, check_status_and_callback).start()
+
+            logger.info(f"Submitted upload task with ID: {self.upload_task_id}")

-        # Cleanup
+        # Cleanup local resources
         self.components = []
         self.file_tracker.reset()
+
+    def get_upload_status(self):
+        """
+        Get the status of the upload task.
+
+        Returns:
+            dict: Status information
+        """
+        if not self.upload_task_id:
+            return {"status": "not_started", "message": "No upload has been initiated"}
+
+        return get_task_status(self.upload_task_id)

+    def _process_resource_metrics(self):
+        """Process and aggregate all resource metrics"""
+        # Process memory metrics
+        self.trace.metadata.resources.memory.values = self.memory_usage_list
+
+        # Process CPU metrics
+        self.trace.metadata.resources.cpu.values = self.cpu_usage_list
+
+        # Process network and disk metrics
+        network_uploads, network_downloads = 0, 0
+        disk_read, disk_write = 0, 0
+
+        # Handle cases where lists might have different lengths
+        min_len = min(len(self.network_usage_list), len(self.disk_usage_list)) if self.network_usage_list and self.disk_usage_list else 0
+        for i in range(min_len):
+            network_usage = self.network_usage_list[i]
+            disk_usage = self.disk_usage_list[i]
+
+            # Safely get network usage values with defaults of 0
+            network_uploads += network_usage.get('uploads', 0) or 0
+            network_downloads += network_usage.get('downloads', 0) or 0
+
+            # Safely get disk usage values with defaults of 0
+            disk_read += disk_usage.get('disk_read', 0) or 0
+            disk_write += disk_usage.get('disk_write', 0) or 0
+
+        # Set aggregate values
+        disk_list_len = len(self.disk_usage_list)
+        self.trace.metadata.resources.disk.read = [disk_read / disk_list_len if disk_list_len > 0 else 0]
+        self.trace.metadata.resources.disk.write = [disk_write / disk_list_len if disk_list_len > 0 else 0]
+
+        network_list_len = len(self.network_usage_list)
+        self.trace.metadata.resources.network.uploads = [
+            network_uploads / network_list_len if network_list_len > 0 else 0]
+        self.trace.metadata.resources.network.downloads = [
+            network_downloads / network_list_len if network_list_len > 0 else 0]
+
+        # Set interval times
+        self.trace.metadata.resources.cpu.interval = float(self.interval_time)
+        self.trace.metadata.resources.memory.interval = float(self.interval_time)
+        self.trace.metadata.resources.disk.interval = float(self.interval_time)
+        self.trace.metadata.resources.network.interval = float(self.interval_time)
+
     def add_component(self, component: Component):
         """Add a component to the trace"""
         self.components.append(component)
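Taken together, the new methods give `stop()` a fire-and-forget flavor with optional synchronization. An illustrative usage sketch against the methods added above; `tracer` stands in for a BaseTracer-derived instance constructed elsewhere, which is outside this diff:

```python
# `tracer` is a BaseTracer-derived instance constructed elsewhere.
tracer.stop()                       # saves the trace and submits the upload task

print(tracer.get_upload_status())   # dict with at least a "status" key

# Either block until the background upload finishes (or 120 s elapse)...
if tracer.wait_for_uploads(timeout=120):
    print("trace uploaded")

# ...or register a completion callback instead of blocking:
tracer.on_upload_completed(lambda t: print(f"upload done: {t.upload_task_id}"))
```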
@@ -1107,3 +1199,99 @@

         return local_metrics_results

+
+    def upload_directly(self):
+        """Upload trace directly without using the background process"""
+        # Check if we have necessary details
+        if not hasattr(self, 'trace') or not self.trace_id:
+            print("No trace to upload")
+            return False
+
+        # Get the filepath from the last trace
+        trace_dir = tempfile.gettempdir()
+        trace_file = os.path.join(trace_dir, f"{self.trace_id}.json")
+
+        # If filepath wasn't saved from previous stop() call, try to find it
+        if not os.path.exists(trace_file):
+            print(f"Looking for trace file for {self.trace_id}")
+            # Try to find the trace file by pattern
+            for file in os.listdir(trace_dir):
+                if file.endswith(".json") and self.trace_id in file:
+                    trace_file = os.path.join(trace_dir, file)
+                    print(f"Found trace file: {trace_file}")
+                    break
+
+        if not os.path.exists(trace_file):
+            print(f"Trace file not found for ID {self.trace_id}")
+            return False
+
+        print(f"Starting direct upload of {trace_file}")
+
+        try:
+            # 1. Create the dataset schema
+            print("Creating dataset schema...")
+            from ragaai_catalyst.tracers.agentic_tracing.utils.create_dataset_schema import create_dataset_schema_with_trace
+            response = create_dataset_schema_with_trace(
+                dataset_name=self.dataset_name,
+                project_name=self.project_name
+            )
+            print(f"Schema created: {response}")
+
+            # 2. Upload trace metrics
+            print("Uploading trace metrics...")
+            from ragaai_catalyst.tracers.agentic_tracing.upload.upload_trace_metric import upload_trace_metric
+            response = upload_trace_metric(
+                json_file_path=trace_file,
+                dataset_name=self.dataset_name,
+                project_name=self.project_name,
+            )
+            print(f"Metrics uploaded: {response}")
+
+            # 3. Get code hash and zip path if available
+            code_hash = None
+            zip_path = None
+            try:
+                with open(trace_file, 'r') as f:
+                    data = json.load(f)
+                    code_hash = data.get("metadata", {}).get("system_info", {}).get("source_code")
+                    if code_hash:
+                        zip_path = os.path.join(trace_dir, f"{code_hash}.zip")
+                        print(f"Found code hash: {code_hash}")
+                        print(f"Zip path: {zip_path}")
+            except Exception as e:
+                print(f"Error getting code hash: {e}")
+
+            # 4. Upload agentic traces
+            print("Uploading agentic traces...")
+            from ragaai_catalyst.tracers.agentic_tracing.upload.upload_agentic_traces import UploadAgenticTraces
+            from ragaai_catalyst import RagaAICatalyst
+            upload_traces = UploadAgenticTraces(
+                json_file_path=trace_file,
+                project_name=self.project_name,
+                project_id=self.project_id,
+                dataset_name=self.dataset_name,
+                user_detail=self.user_details,
+                base_url=RagaAICatalyst.BASE_URL,
+            )
+            upload_traces.upload_agentic_traces()
+            print("Agentic traces uploaded successfully")
+
+            # 5. Upload code hash if available
+            if code_hash and zip_path and os.path.exists(zip_path):
+                print(f"Uploading code hash: {code_hash}")
+                from ragaai_catalyst.tracers.agentic_tracing.upload.upload_code import upload_code
+                response = upload_code(
+                    hash_id=code_hash,
+                    zip_path=zip_path,
+                    project_name=self.project_name,
+                    dataset_name=self.dataset_name,
+                )
+                print(f"Code uploaded: {response}")
+
+            print("Upload completed successfully - check UI now")
+            return True
+        except Exception as e:
+            print(f"Error during direct upload: {e}")
+            import traceback
+            traceback.print_exc()
+            return False
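`upload_directly()` replays the old synchronous pipeline (schema, metrics, traces, code zip) with its imports deferred to call time, which makes it a manual fallback when the background path misbehaves. An illustrative combination, with `tracer` as in the earlier sketch:

```python
# Retry synchronously if the background upload does not finish in time.
if not tracer.wait_for_uploads(timeout=300):
    tracer.upload_directly()
```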
@@ -12,7 +12,6 @@ import contextvars
 import traceback
 import importlib
 import sys
-from litellm import model_cost
 import logging

 try:
@@ -29,7 +28,8 @@ from ..utils.llm_utils import (
     sanitize_api_keys,
     sanitize_input,
     extract_llm_output,
-    num_tokens_from_messages
+    num_tokens_from_messages,
+    get_model_cost
 )
 from ..utils.unique_decorator import generate_unique_hash
 from ..utils.file_name_tracker import TrackName
@@ -49,7 +49,7 @@ class LLMTracerMixin:
         self.file_tracker = TrackName()
         self.patches = []
         try:
-            self.model_costs = model_cost
+            self.model_costs = get_model_cost()
         except Exception as e:
             self.model_costs = {
                 "default": {"input_cost_per_token": 0.0, "output_cost_per_token": 0.0}