ragaai-catalyst 2.2.3b0__py3-none-any.whl → 2.2.3b2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ragaai_catalyst/dataset.py +41 -43
- ragaai_catalyst/evaluation.py +15 -17
- ragaai_catalyst/guard_executor.py +8 -8
- ragaai_catalyst/tracers/agentic_tracing/upload/upload_code.py +8 -12
- ragaai_catalyst/tracers/agentic_tracing/utils/api_utils.py +4 -1
- ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py +3 -2
- ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py +6 -3
- ragaai_catalyst/tracers/distributed.py +6 -3
- ragaai_catalyst/tracers/exporters/dynamic_trace_exporter.py +4 -4
- ragaai_catalyst/tracers/exporters/file_span_exporter.py +1 -3
- ragaai_catalyst/tracers/exporters/raga_exporter.py +2 -2
- ragaai_catalyst/tracers/exporters/ragaai_trace_exporter.py +3 -3
- ragaai_catalyst/tracers/tracer.py +9 -12
- ragaai_catalyst/tracers/utils/model_prices_and_context_window_backup.json +6131 -2218
- ragaai_catalyst/tracers/utils/trace_json_converter.py +3 -1
- ragaai_catalyst/tracers/utils/utils.py +1 -1
- {ragaai_catalyst-2.2.3b0.dist-info → ragaai_catalyst-2.2.3b2.dist-info}/METADATA +1 -1
- {ragaai_catalyst-2.2.3b0.dist-info → ragaai_catalyst-2.2.3b2.dist-info}/RECORD +21 -21
- {ragaai_catalyst-2.2.3b0.dist-info → ragaai_catalyst-2.2.3b2.dist-info}/WHEEL +0 -0
- {ragaai_catalyst-2.2.3b0.dist-info → ragaai_catalyst-2.2.3b2.dist-info}/licenses/LICENSE +0 -0
- {ragaai_catalyst-2.2.3b0.dist-info → ragaai_catalyst-2.2.3b2.dist-info}/top_level.txt +0 -0
ragaai_catalyst/dataset.py
CHANGED
@@ -42,7 +42,7 @@ class Dataset:
 ]

 if project_name not in project_list:
-
+logger.error("Project not found. Please enter a valid project name")

 self.project_id = [
 project["id"] for project in response.json()["data"]["content"] if project["name"] == project_name
@@ -50,7 +50,7 @@ class Dataset:

 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to retrieve projects list: {e}")
-
+pass

 def list_datasets(self):
 """
@@ -81,7 +81,7 @@ class Dataset:
 return response
 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to list datasets: {e}")
-
+pass

 try:
 response = make_request()
@@ -99,7 +99,7 @@ class Dataset:
 return dataset_list
 except Exception as e:
 logger.error(f"Error in list_datasets: {e}")
-
+pass

 def get_schema_mapping(self):
 headers = {
@@ -115,18 +115,18 @@ class Dataset:
 response.raise_for_status()
 response_data = response.json()["data"]["schemaElements"]
 if not response.json()['success']:
-
+logger.error('Unable to fetch Schema Elements for the CSV')
 return response_data
 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to get CSV schema: {e}")
-
+pass

 ###################### CSV Upload APIs ###################

 def get_dataset_columns(self, dataset_name):
 list_dataset = self.list_datasets()
 if dataset_name not in list_dataset:
-
+logger.error(f"Dataset {dataset_name} does not exists. Please enter a valid dataset name")

 headers = {
 "Authorization": f"Bearer {os.getenv('RAGAAI_CATALYST_TOKEN')}",
@@ -150,7 +150,7 @@ class Dataset:
 dataset_id = [dataset["id"] for dataset in datasets if dataset["name"]==dataset_name][0]
 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to list datasets: {e}")
-
+pass

 try:
 response = requests.get(
@@ -163,16 +163,16 @@ class Dataset:
 dataset_columns = [item["displayName"] for item in dataset_columns]
 dataset_columns = [data for data in dataset_columns if not data.startswith('_')]
 if not response.json()['success']:
-
+logger.error('Unable to fetch details of for the CSV')
 return dataset_columns
 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to get CSV columns: {e}")
-
+pass

 def create_from_csv(self, csv_path, dataset_name, schema_mapping):
 list_dataset = self.list_datasets()
 if dataset_name in list_dataset:
-
+logger.error(f"Dataset name {dataset_name} already exists. Please enter a unique dataset name")

 #### get presigned URL
 def get_presignedUrl():
@@ -190,7 +190,7 @@ class Dataset:
 return response.json()
 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to get presigned URL: {e}")
-
+pass

 try:
 presignedUrl = get_presignedUrl()
@@ -198,10 +198,10 @@ class Dataset:
 url = presignedUrl['data']['presignedUrl']
 filename = presignedUrl['data']['fileName']
 else:
-
+logger.error('Unable to fetch presignedUrl')
 except Exception as e:
 logger.error(f"Error in get_presignedUrl: {e}")
-
+pass

 #### put csv to presigned URL
 def put_csv_to_presignedUrl(url):
@@ -221,16 +221,16 @@ class Dataset:
 return response
 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to put CSV to presigned URL: {e}")
-
+pass

 try:

 put_csv_response = put_csv_to_presignedUrl(url)
 if put_csv_response.status_code not in (200, 201):
-
+logger.error('Unable to put csv to the presignedUrl')
 except Exception as e:
 logger.error(f"Error in put_csv_to_presignedUrl: {e}")
-
+pass

 ## Upload csv to elastic
 def upload_csv_to_elastic(data):
@@ -247,12 +247,12 @@ class Dataset:
 timeout=Dataset.TIMEOUT,
 )
 if response.status_code==400:
-
+logger.error(response.json()["message"])
 response.raise_for_status()
 return response.json()
 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to upload CSV to elastic: {e}")
-
+pass

 def generate_schema(mapping):
 result = {}
@@ -275,13 +275,13 @@ class Dataset:
 }
 upload_csv_response = upload_csv_to_elastic(data)
 if not upload_csv_response['success']:
-
+logger.error('Unable to upload csv')
 else:
 print(upload_csv_response['message'])
 self.jobId = upload_csv_response['data']['jobId']
 except Exception as e:
 logger.error(f"Error in create_from_csv: {e}")
-
+pass

 def add_rows(self, csv_path, dataset_name):
 """
@@ -304,7 +304,6 @@ class Dataset:
 csv_columns = df.columns.tolist()
 except Exception as e:
 logger.error(f"Failed to read CSV file: {e}")
-raise ValueError(f"Unable to read CSV file: {e}")

 # Check column compatibility
 for column in existing_columns:
@@ -327,7 +326,7 @@ class Dataset:
 return response.json()
 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to get presigned URL: {e}")
-
+pass

 try:
 presignedUrl = get_presignedUrl()
@@ -335,10 +334,10 @@ class Dataset:
 url = presignedUrl['data']['presignedUrl']
 filename = presignedUrl['data']['fileName']
 else:
-
+logger.error('Unable to fetch presignedUrl')
 except Exception as e:
 logger.error(f"Error in get_presignedUrl: {e}")
-
+pass

 # Upload CSV to presigned URL
 def put_csv_to_presignedUrl(url):
@@ -358,15 +357,15 @@ class Dataset:
 return response
 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to put CSV to presigned URL: {e}")
-
+pass

 try:
 put_csv_response = put_csv_to_presignedUrl(url)
 if put_csv_response.status_code not in (200, 201):
-
+logger.error('Unable to put csv to the presignedUrl')
 except Exception as e:
 logger.error(f"Error in put_csv_to_presignedUrl: {e}")
-
+pass

 # Prepare schema mapping (assuming same mapping as original dataset)
 def generate_schema_mapping(dataset_name):
@@ -409,7 +408,7 @@ class Dataset:
 return schema_mapping
 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to get schema mapping: {e}")
-
+pass

 # Upload CSV to elastic
 try:
@@ -438,7 +437,7 @@ class Dataset:
 )

 if response.status_code == 400:
-
+logger.error(response.json().get("message", "Failed to add rows"))

 response.raise_for_status()

@@ -448,11 +447,11 @@ class Dataset:
 print(f"{response_data['message']}")
 self.jobId = response_data['data']['jobId']
 else:
-
+logger.error(response_data.get('message', 'Failed to add rows'))

 except Exception as e:
 logger.error(f"Error in add_rows_to_dataset: {e}")
-
+pass

 def add_columns(self, text_fields, dataset_name, column_name, provider, model, variables={}):
 """
@@ -469,11 +468,11 @@ class Dataset:

 # Validate text_fields input
 if not isinstance(text_fields, list):
-
+logger.error("text_fields must be a list of dictionaries")

 for field in text_fields:
 if not isinstance(field, dict) or 'role' not in field or 'content' not in field:
-
+logger.error("Each text field must be a dictionary with 'role' and 'content' keys")

 # First, get the dataset ID
 headers = {
@@ -498,7 +497,7 @@ class Dataset:
 dataset_id = next((dataset["id"] for dataset in datasets if dataset["name"] == dataset_name), None)

 if dataset_id is None:
-
+logger.error(f"Dataset {dataset_name} not found")



@@ -551,7 +550,7 @@ class Dataset:
 elif param_type == "string":
 value = str(value) # Ensure value is converted to string
 else:
-
+logger.error(f"Unsupported parameter type: {param_type}") # Handle unsupported types

 formatted_param = {
 "name": param.get('name'),
@@ -607,11 +606,11 @@ class Dataset:
 print(f"Column '{column_name}' added successfully to dataset '{dataset_name}'")
 self.jobId = response_data['data']['jobId']
 else:
-
+logger.error(response_data.get('message', 'Failed to add column'))

 except requests.exceptions.RequestException as e:
-
-
+logger.error(f"Error adding column: {e}")
+pass

 def get_status(self):
 headers = {
@@ -683,7 +682,7 @@ class Dataset:
 self.create_from_csv(tmp_csv_path, dataset_name, schema_mapping)
 except (IOError, UnicodeError) as e:
 logger.error(f"Error converting JSONL to CSV: {e}")
-
+pass
 finally:
 if os.path.exists(tmp_csv_path):
 try:
@@ -698,7 +697,6 @@ class Dataset:
 self.add_rows(tmp_csv_path, dataset_name)
 except (IOError, UnicodeError) as e:
 logger.error(f"Error converting JSONL to CSV: {e}")
-raise
 finally:
 if os.path.exists(tmp_csv_path):
 try:
@@ -713,7 +711,7 @@ class Dataset:
 self.create_from_csv(tmp_csv_path, dataset_name, schema_mapping)
 except (IOError, UnicodeError) as e:
 logger.error(f"Error converting DataFrame to CSV: {e}")
-
+pass
 finally:
 if os.path.exists(tmp_csv_path):
 try:
@@ -728,7 +726,7 @@ class Dataset:
 self.add_rows(tmp_csv_path, dataset_name)
 except (IOError, UnicodeError) as e:
 logger.error(f"Error converting DataFrame to CSV: {e}")
-
+pass
 finally:
 if os.path.exists(tmp_csv_path):
 try:
ragaai_catalyst/evaluation.py
CHANGED
@@ -38,7 +38,7 @@ class Evaluation:
 project["name"] for project in response.json()["data"]["content"]
 ]
 if project_name not in project_list:
-
+logger.error("Project not found. Please enter a valid project name")

 self.project_id = [
 project["id"] for project in response.json()["data"]["content"] if project["name"] == project_name
@@ -46,7 +46,7 @@ class Evaluation:

 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to retrieve projects list: {e}")
-
+pass

 try:

@@ -68,14 +68,13 @@ class Evaluation:
 dataset_list = [dataset["name"] for dataset in datasets_content]

 if dataset_name not in dataset_list:
-
+logger.error("Dataset not found. Please enter a valid dataset name")

 self.dataset_id = [dataset["id"] for dataset in datasets_content if dataset["name"]==dataset_name][0]

 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to retrieve dataset list: {e}")
-
-
+pass

 def list_metrics(self):
 headers = {
@@ -126,8 +125,7 @@ class Evaluation:
 return dataset["derivedDatasetId"]
 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to retrieve dataset list: {e}")
-
-
+pass

 def _get_dataset_schema(self, metric_to_evaluate=None):
 #this dataset_id is based on which type of metric_to_evaluate
@@ -175,11 +173,11 @@ class Evaluation:
 if key in user_dataset_columns:
 variableName=key
 else:
-
+logger.error(f"Column '{key}' is not present in '{self.dataset_name}' dataset")
 if variableName:
 return variableName
 else:
-
+logger.error(f"Map '{schemaName}' column in schema_mapping for {metric_name} metric evaluation")


 def _get_mapping(self, metric_name, metrics_schema, schema_mapping):
@@ -257,11 +255,11 @@ class Evaluation:
 for key, value in metric["config"].items():
 #checking if provider is one of the allowed providers
 if key.lower()=="provider" and value.lower() not in sub_providers:
-
+logger.error("Enter a valid provider name. The following Provider names are supported: openai, azure, gemini, groq, anthropic, bedrock")

 if key.lower()=="threshold":
 if len(value)>1:
-
+logger.error("'threshold' can only take one argument gte/lte/eq")
 else:
 for key_thres, value_thres in value.items():
 base_json["metricSpec"]["config"]["params"][key] = {f"{key_thres}":value_thres}
@@ -314,18 +312,18 @@ class Evaluation:
 for metric in metrics:
 missing_keys = required_keys - metric.keys()
 if missing_keys:
-
+logger.error(f"{missing_keys} required for each metric evaluation.")

 executed_metric_list = self._get_executed_metrics_list()
 metrics_name = self.list_metrics()
 user_metric_names = [metric["name"] for metric in metrics]
 for user_metric in user_metric_names:
 if user_metric not in metrics_name:
-
+logger.error("Enter a valid metric name")
 column_names = [metric["column_name"] for metric in metrics]
 for column_name in column_names:
 if column_name in executed_metric_list:
-
+logger.error(f"Column name '{column_name}' already exists.")

 headers = {
 'Content-Type': 'application/json',
@@ -341,7 +339,7 @@ class Evaluation:
 timeout=self.timeout
 )
 if response.status_code == 400:
-
+logger.error(response.json()["message"])
 response.raise_for_status()
 if response.json()["success"]:
 print(response.json()["message"])
@@ -360,7 +358,7 @@ class Evaluation:

 def append_metrics(self, display_name):
 if not isinstance(display_name, str):
-
+logger.error("display_name should be a string")

 headers = {
 "Authorization": f"Bearer {os.getenv('RAGAAI_CATALYST_TOKEN')}",
@@ -387,7 +385,7 @@ class Evaluation:
 data=payload,
 timeout=self.timeout)
 if response.status_code == 400:
-
+logger.error(response.json()["message"])
 response.raise_for_status()
 if response.json()["success"]:
 print(response.json()["message"])
ragaai_catalyst/guard_executor.py
CHANGED
@@ -24,18 +24,18 @@ class GuardExecutor:
 if input_deployment_id and output_deployment_id:
 # check if 2 deployments are mapped to same dataset
 if self.input_deployment_details['data']['datasetId'] != self.output_deployment_details['data']['datasetId']:
-
+logger.error('Input deployment and output deployment should be mapped to same dataset')
 for guardrail in self.input_deployment_details['data']['guardrailsResponse']:
 maps = guardrail['metricSpec']['config']['mappings']
 for _map in maps:
 if _map['schemaName']=='Response':
-
+logger.error('Response field should be mapped only in output guardrails')
 except Exception as e:
-
+logger.error(str(e))
 self.base_url = guard_manager.base_url
 for key in field_map.keys():
 if key not in ['prompt','context','response','instruction']:
-
+logger.error('Keys in field map should be in ["prompt","context","response","instruction"]')
 self.current_trace_id = None
 self.id_2_doc = {}

@@ -52,10 +52,10 @@ class GuardExecutor:
 try:
 response = requests.request("POST", api, headers=headers, data=payload,timeout=self.guard_manager.timeout)
 except Exception as e:
-
+logger.error('Failed running guardrail: ',str(e))
 return None
 if response.status_code!=200:
-
+logger.error('Error in running deployment ',response.json()['message'])
 if response.json()['success']:
 return response.json()
 else:
@@ -152,7 +152,7 @@ class GuardExecutor:
 for key in self.field_map:
 if key not in ['prompt', 'response']:
 if self.field_map[key] not in prompt_params:
-
+logger.error(f'{key} added as field map but not passed as prompt parameter')
 context_var = self.field_map.get('context', None)

 doc = dict()
@@ -177,7 +177,7 @@ class GuardExecutor:
 def execute_output_guardrails(self, llm_response: str, prompt=None, prompt_params=None) -> None:
 if not prompt: # user has not passed input
 if self.current_trace_id not in self.id_2_doc:
-
+logger.error(f'No input doc found for trace_id: {self.current_trace_id}')
 else:
 doc = self.id_2_doc[self.current_trace_id]
 doc['response'] = llm_response
ragaai_catalyst/tracers/agentic_tracing/upload/upload_code.py
CHANGED
@@ -76,12 +76,12 @@ def _fetch_dataset_code_hashes(project_name, dataset_name, base_url=None, timeou
 if response.status_code == 200:
 return response.json()["data"]["codeHashes"]
 else:
-
+logger.error(
 f"Failed to fetch code hashes: {response.json()['message']}"
 )
 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to list datasets: {e}")
-
+pass


 def update_presigned_url(presigned_url, base_url):
@@ -164,19 +164,13 @@ def _fetch_presigned_url(project_name, dataset_name, base_url=None, timeout=120)
 logger.error(
 f"Failed to fetch code hashes: {response.json()['message']}"
 )
-raise Exception(
-f"Failed to fetch code hashes: {response.json()['message']}"
-)
 else:
 logger.error(
 f"Failed to fetch code hashes: {response.json()['message']}"
 )
-raise Exception(
-f"Failed to fetch code hashes: {response.json()['message']}"
-)
 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to list datasets: {e}")
-
+pass


 def _put_zip_presigned_url(project_name, presignedUrl, filename, timeout=120):
@@ -252,9 +246,11 @@ def _insert_code(
 if response.status_code == 200:
 return response.json()["message"]
 else:
-
+logger.error(f"Failed to insert code: {response.json()['message']}")
+pass
 else:
-
+logger.error(f"Failed to insert code: {response.json()['message']}")
+pass
 except requests.exceptions.RequestException as e:
 logger.error(f"Failed to insert code: {e}")
-
+pass
ragaai_catalyst/tracers/agentic_tracing/utils/api_utils.py
CHANGED
@@ -1,4 +1,7 @@
 import requests
+import logging
+
+logger = logging.getLogger(__name__)

 def fetch_analysis_trace(base_url, trace_id):
 """
@@ -14,5 +17,5 @@ def fetch_analysis_trace(base_url, trace_id):
 response.raise_for_status()  # Raise an error for bad responses (4xx, 5xx)
 return response.json()
 except requests.exceptions.RequestException as e:
-
+logger.error(f"Error fetching analysis trace: {e}")
 return None
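api_utils.py now sets up a module-level logger, and fetch_analysis_trace logs request failures and returns None instead of letting the exception escape. A sketch of how calling code might adapt; the import path is derived from the file location, and the base URL and trace id are hypothetical:

    import logging

    from ragaai_catalyst.tracers.agentic_tracing.utils.api_utils import fetch_analysis_trace

    logger = logging.getLogger(__name__)

    # Hypothetical values; the real base URL and trace id come from the caller's setup.
    trace = fetch_analysis_trace("https://catalyst.example.com", "trace-123")
    if trace is None:
        # fetch_analysis_trace already logged the underlying error; degrade locally.
        logger.warning("Analysis trace unavailable; continuing without it")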
ragaai_catalyst/tracers/agentic_tracing/utils/llm_utils.py
CHANGED
@@ -297,7 +297,7 @@ def num_tokens_from_messages(
 response_message=response_message,
 )
 else:
-
+logger.error(
 f"""num_tokens_from_messages() is not implemented for model {model}."""
 )

@@ -653,4 +653,5 @@ def count_tokens(input_str: str) -> int:
 tokens = encoding.encode(input_str)
 return len(tokens)
 except Exception:
-
+logger.error("Failed to count tokens")
+return 0
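count_tokens now degrades to a zero count on failure rather than propagating the exception. A minimal sketch of that behaviour, assuming a tiktoken-based encoder (the encoding lookup in llm_utils.py is not shown in this hunk, so cl100k_base is an assumption):

    import logging

    import tiktoken

    logger = logging.getLogger(__name__)

    def count_tokens(input_str: str) -> int:
        # Sketch of the fallback in the hunk above: any failure is logged and the
        # caller gets 0 tokens instead of an exception.
        try:
            encoding = tiktoken.get_encoding("cl100k_base")
            tokens = encoding.encode(input_str)
            return len(tokens)
        except Exception:
            logger.error("Failed to count tokens")
            return 0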
ragaai_catalyst/tracers/agentic_tracing/utils/span_attributes.py
CHANGED
@@ -79,10 +79,12 @@ class SpanAttributes:

 for metric in metrics:
 if not isinstance(metric, dict):
-
+logger.error(f"Expected dict, got {type(metric)}")
+continue

 if "name" not in metric:
-
+logger.error("Metric must contain 'name'")
+continue

 metric_name = metric["name"]
 if metric_name in self.local_metrics:
@@ -111,7 +113,8 @@ class SpanAttributes:

 def add_gt(self, gt: Any):
 if not isinstance(gt, (str, int, float, bool, list, dict)):
-
+logger.error(f"Unsupported type for gt: {type(gt)}")
+return
 if self.gt:
 logger.warning(f"GT already exists: {self.gt} \n Overwriting...")
 self.gt = gt
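In span_attributes.py the validation now logs and skips the offending entry (continue) or bails out of the setter (return) rather than failing the whole call. A small sketch of the same log-and-skip loop with illustrative names only:

    import logging

    logger = logging.getLogger(__name__)

    def accept_metrics(metrics):
        # Log-and-skip validation: malformed entries are reported and dropped,
        # well-formed ones are still collected. Names here are illustrative.
        accepted = []
        for metric in metrics:
            if not isinstance(metric, dict):
                logger.error(f"Expected dict, got {type(metric)}")
                continue
            if "name" not in metric:
                logger.error("Metric must contain 'name'")
                continue
            accepted.append(metric["name"])
        return accepted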
ragaai_catalyst/tracers/distributed.py
CHANGED
@@ -16,6 +16,9 @@ import asyncio
 from .tracer import Tracer
 from ..ragaai_catalyst import RagaAICatalyst

+import logging
+logger = logging.getLogger(__name__)
+
 # Global state
 _global_tracer: Optional[Tracer] = None
 _global_catalyst: Optional[RagaAICatalyst] = None
@@ -60,9 +63,9 @@ def init_tracing(
 _global_tracer = tracer
 _global_catalyst = catalyst
 else:
-
+logger.error("Both Tracer and Catalyst objects must be instances of Tracer and RagaAICatalyst, respectively.")
 else:
-
+logger.error("Both Tracer and Catalyst objects must be provided.")


 def trace_agent(name: str = None, agent_type: str = "generic", version: str = "1.0.0", **kwargs):
@@ -295,6 +298,6 @@ def current_span():
 # Finally fall back to agent context
 agent_name = tracer.current_agent_name.get()
 if not agent_name:
-
+logger.error("No active span found. Make sure you're calling this within a traced function.")

 return tracer.span(agent_name)
ragaai_catalyst/tracers/exporters/dynamic_trace_exporter.py
CHANGED
@@ -76,14 +76,14 @@ class DynamicTraceExporter(SpanExporter):
 # Update the exporter's properties
 self._update_exporter_properties()
 except Exception as e:
-
+logger.error(f"Error updating exporter properties: {e}")

 try:
 # Forward the call to the underlying exporter
 result = self._exporter.export(spans)
 return result
 except Exception as e:
-
+logger.error(f"Error exporting trace: {e}")



@@ -96,13 +96,13 @@ class DynamicTraceExporter(SpanExporter):
 # Update the exporter's properties
 self._update_exporter_properties()
 except Exception as e:
-
+logger.error(f"Error updating exporter properties: {e}")

 try:
 # Forward the call to the underlying exporter
 return self._exporter.shutdown()
 except Exception as e:
-
+logger.error(f"Error shutting down exporter: {e}")

 def _update_exporter_properties(self):
 """