ragaai-catalyst 2.0.6b0__py3-none-any.whl → 2.0.6b1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in the public registry to which they were published. It is provided for informational purposes only.
- ragaai_catalyst/__init__.py +1 -0
- ragaai_catalyst/guard_executor.py +97 -0
- ragaai_catalyst/guardrails_manager.py +39 -13
- ragaai_catalyst/synthetic_data_generation.py +6 -0
- {ragaai_catalyst-2.0.6b0.dist-info → ragaai_catalyst-2.0.6b1.dist-info}/METADATA +1 -1
- {ragaai_catalyst-2.0.6b0.dist-info → ragaai_catalyst-2.0.6b1.dist-info}/RECORD +8 -7
- {ragaai_catalyst-2.0.6b0.dist-info → ragaai_catalyst-2.0.6b1.dist-info}/WHEEL +1 -1
- {ragaai_catalyst-2.0.6b0.dist-info → ragaai_catalyst-2.0.6b1.dist-info}/top_level.txt +0 -0
ragaai_catalyst/__init__.py
CHANGED
@@ -7,6 +7,7 @@ from .prompt_manager import PromptManager
 from .evaluation import Evaluation
 from .synthetic_data_generation import SyntheticDataGeneration
 from .guardrails_manager import GuardrailsManager
+from .guard_executor import GuardExecutor
 
 
 __all__ = ["Experiment", "RagaAICatalyst", "Tracer", "PromptManager", "Evaluation","SyntheticDataGeneration", "GuardrailsManager"]
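With this import added, GuardExecutor is available from the package root alongside GuardrailsManager, even though `__all__` is left unchanged in this release. A minimal import sketch:

```python
# GuardExecutor can be imported from the package root after this change,
# even though it is not listed in __all__.
from ragaai_catalyst import GuardrailsManager, GuardExecutor
```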
ragaai_catalyst/guard_executor.py
ADDED
@@ -0,0 +1,97 @@
+import litellm
+import json
+import requests
+import os
+import logging
+logger = logging.getLogger('LiteLLM')
+logger.setLevel(logging.ERROR)
+
+class GuardExecutor:
+
+    def __init__(self,id,guard_manager,field_map={}):
+        self.deployment_id = id
+        self.field_map = field_map
+        self.guard_manager = guard_manager
+        self.deployment_details = self.guard_manager.get_deployment(id)
+        if not self.deployment_details:
+            raise ValueError('Error in getting deployment details')
+        self.base_url = guard_manager.base_url
+        for key in field_map.keys():
+            if key not in ['prompt','context','response','instruction']:
+                print('Keys in field map should be in ["prompt","context","response","instruction"]')
+
+    def execute_deployment(self,payload):
+        api = self.base_url + f'/guardrail/deployment/{self.deployment_id}/ingest'
+
+        payload = json.dumps(payload)
+        headers = {
+            'x-project-id': str(self.guard_manager.project_id),
+            'Content-Type': 'application/json',
+            'Authorization': f'Bearer {os.getenv("RAGAAI_CATALYST_TOKEN")}'
+        }
+        try:
+            response = requests.request("POST", api, headers=headers, data=payload,timeout=self.guard_manager.timeout)
+        except Exception as e:
+            print('Failed running guardrail: ',str(e))
+            return None
+        if response.status_code!=200:
+            print('Error in running deployment ',response.json()['message'])
+        if response.json()['success']:
+            return response.json()
+        else:
+            print(response.json()['message'])
+            return None
+
+    def llm_executor(self,messages,model_params,llm_caller):
+        if llm_caller == 'litellm':
+            model_params['messages'] = messages
+            response = litellm.completion(**model_params)
+            return response
+        else:
+            print(f"{llm_caller} not supported currently, use litellm as llm caller")
+
+
+    def __call__(self,messages,prompt_params,model_params,llm_caller='litellm'):
+        for key in self.field_map:
+            if key not in ['prompt','response']:
+                if self.field_map[key] not in prompt_params:
+                    raise ValueError(f'{key} added as field map but not passed as prompt parameter')
+        context_var = self.field_map.get('context',None)
+        prompt = None
+        for msg in messages:
+            if 'role' in msg:
+                if msg['role'] == 'user':
+                    prompt = msg['content']
+                    if not context_var:
+                        msg['content'] += '\n' + prompt_params[context_var]
+        doc = dict()
+        doc['prompt'] = prompt
+        doc['context'] = prompt_params[context_var]
+
+        # inactive the guardrails that needs Response variable
+        #deployment_response = self.execute_deployment(doc)
+
+        # activate only guardrails that require response
+        try:
+            llm_response = self.llm_executor(messages,model_params,llm_caller)
+        except Exception as e:
+            print('Error in running llm:',str(e))
+            return None
+        doc['response'] = llm_response['choices'][0].message.content
+        if 'instruction' in self.field_map:
+            instruction = prompt_params[self.field_map['instruction']]
+            doc['instruction'] = instruction
+        response = self.execute_deployment(doc)
+        if response and response['data']['status'] == 'FAIL':
+            print('Guardrail deployment run retured failed status, replacing with alternate response')
+            return response['data']['alternateResponse'],llm_response,response
+        else:
+            return None,llm_response,response
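Taken together, the new module wires a guardrail deployment into an LLM call: the executor validates the field map, runs the request through litellm, assembles a document with prompt, context, response and optional instruction, posts it to the deployment's `/ingest` endpoint, and surfaces the alternate response when the guardrail run fails. A hedged usage sketch based only on the code above; the project name, deployment id, prompt parameters, and the GuardrailsManager constructor argument are assumptions, not values from the package:

```python
# Illustrative sketch; "my-project", deployment id 12 and the prompt/context
# strings are hypothetical, and the GuardrailsManager constructor is assumed.
from ragaai_catalyst import GuardrailsManager, GuardExecutor

guard_manager = GuardrailsManager(project_name="my-project")
executor = GuardExecutor(12, guard_manager, field_map={"context": "document"})

messages = [{"role": "user", "content": "Summarise the attached document."}]
prompt_params = {"document": "source text used to ground the answer"}
model_params = {"model": "gpt-4o-mini", "temperature": 0}

# Returns (alternate_response, llm_response, deployment_response) on success;
# alternate_response is None unless the guardrail run reports FAIL, and the
# whole call returns None if the LLM call itself raises.
result = executor(messages, prompt_params, model_params, llm_caller="litellm")
if result:
    alternate, llm_response, guardrail_run = result
```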
ragaai_catalyst/guardrails_manager.py
CHANGED
@@ -68,10 +68,11 @@ class GuardrailsManager:
             'X-Project-Id': str(self.project_id)
         }
         response = requests.request("GET", f"{self.base_url}/guardrail/deployment/{deployment_id}", headers=headers, data=payload, timeout=self.timeout)
-
-
-
-
+        if response.json()['success']:
+            return response.json()
+        else:
+            print('Error in retrieving deployment details:',response.json()['message'])
+            return None
 
 
     def list_guardrails(self):
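After this change, get_deployment returns the parsed JSON payload when the API call succeeds and None otherwise, which is what both GuardExecutor and the reworked add_guardrails below rely on. A small hedged caller sketch; the project name and deployment id are placeholders, and the GuardrailsManager constructor argument is an assumption:

```python
# Hypothetical caller; constructor argument and deployment id 12 are placeholders.
from ragaai_catalyst import GuardrailsManager

guard_manager = GuardrailsManager(project_name="my-project")
deployment = guard_manager.get_deployment(12)
if deployment:
    print(deployment["data"]["name"])                # deployment metadata
    print(deployment["data"]["guardrailsResponse"])  # guardrails attached to it
else:
    print("Deployment lookup failed; get_deployment printed the error message.")
```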
@@ -132,11 +133,12 @@ class GuardrailsManager:
             print(response.json()["message"])
             deployment_ids = self.list_deployment_ids()
             self.deployment_id = [_["id"] for _ in deployment_ids if _["name"]==self.deployment_name][0]
+            return self.deployment_id
         else:
             print(response)
 
 
-    def add_guardrails(self, guardrails, guardrails_config={}):
+    def add_guardrails(self, deployment_id, guardrails, guardrails_config={}):
         """
         Add guardrails to the current deployment.
 
@@ -145,16 +147,21 @@ class GuardrailsManager:
         :raises ValueError: If a guardrail name or type is invalid.
         """
         # Checking if guardrails names given already exist or not
-
+        self.deployment_id = deployment_id
+        deployment_details = self.get_deployment(self.deployment_id)
+        if not deployment_details:
+            return None
+        deployment_id_name = deployment_details["data"]["name"]
+        deployment_id_guardrails = deployment_details["data"]["guardrailsResponse"]
+        guardrails_type_name_exists = [{_['metricSpec']["name"]:_['metricSpec']["displayName"]} for _ in deployment_id_guardrails]
         guardrails_type_name_exists = [list(d.values())[0] for d in guardrails_type_name_exists]
         user_guardrails_name_list = [_["name"] for _ in guardrails]
         for g_name in user_guardrails_name_list:
             if g_name in guardrails_type_name_exists:
                 raise ValueError(f"Guardrail with '{g_name} already exists, choose a unique name'")
-
         # Checking if guardrails type is correct or not
         available_guardrails_list = self.list_guardrails()
-        user_guardrails_type_list = [_["
+        user_guardrails_type_list = [_["name"] for _ in guardrails]
         for g_type in user_guardrails_type_list:
             if g_type not in available_guardrails_list:
                 raise ValueError(f"Guardrail type '{g_type} does not exists, choose a correct type'")
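add_guardrails now takes the target deployment_id as an explicit argument, fetches that deployment via get_deployment, and validates the requested guardrail names and types against it before configuring anything. A hedged call sketch; the guardrail name, display name, deployment id, and the GuardrailsManager constructor argument are illustrative placeholders, not values shipped with the package:

```python
# Hypothetical invocation of the new signature; every concrete value is a placeholder.
from ragaai_catalyst import GuardrailsManager

guard_manager = GuardrailsManager(project_name="my-project")  # constructor argument assumed

guardrails = [
    {
        "name": "Faithfulness",               # must match a type from list_guardrails()
        "displayName": "Faithfulness check",  # must not collide with an existing displayName
        "config": {"model": "gpt-4o-mini"},
    }
]
guard_manager.add_guardrails(deployment_id=12, guardrails=guardrails, guardrails_config={})
```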
@@ -170,6 +177,8 @@ class GuardrailsManager:
         response = requests.request("POST", f"{self.base_url}/guardrail/deployment/{str(self.deployment_id)}/configure", headers=headers, data=payload)
         if response.json()["success"]:
             print(response.json()["message"])
+        else:
+            print('Error updating guardrail ',response.json()['message'])
 
     def _get_guardrail_config_payload(self, guardrails_config):
         """
@@ -209,13 +218,30 @@ class GuardrailsManager:
         :param guardrail: A dictionary containing the guardrail's attributes.
         :return: A dictionary representing the guardrail's data.
         """
+        if 'config' in guardrail:
+            if 'mappings' in guardrail.get('config'):
+                for mapping in guardrail.get('config',{}).get('mappings',{}):
+                    if mapping['schemaName'] not in ['Text','Prompt','Context','Response']:
+                        raise(ValueError('Invalid schemaName in guardrail mapping schema'))
+                    if mapping['variableName'] not in ['Instruction','Prompt','Context','Response']:
+                        raise(ValueError('Invalid variableName in guardrail mapping schema'))
+            if 'model' in guardrail.get('config'):
+                if guardrail.get('config',{}).get('model','') not in ['gpt-4o-mini','gpt-4o','gpt-4-turbo']:
+                    raise(ValueError('Invalid model name in guardrail model schema'))
+            if 'params' not in guardrail.get('config'):
+                guardrail['config']['params'] = {
+                    "isActive": {"value": False},
+                    "isHighRisk": {"value": False},
+                    "threshold": {"lt": 1}
+                }
+
+
         data = {
+            "displayName": guardrail["displayName"],
             "name": guardrail["name"],
-            "
-            "isHighRisk": guardrail.get("isHighRisk", False),
-            "isActive": guardrail.get("isActive", False),
-            "threshold": {}
+            "config": guardrail.get("config", {})
         }
+        '''
         if "lte" in guardrail["threshold"]:
             data["threshold"]["lte"] = guardrail["threshold"]["lte"]
         elif "gte" in guardrail["threshold"]:
@@ -223,7 +249,7 @@ class GuardrailsManager:
         elif "eq" in guardrail["threshold"]:
             data["threshold"]["eq"] = guardrail["threshold"]["eq"]
         else:
-            data["threshold"]["gte"] = 0.0
+            data["threshold"]["gte"] = 0.0'''
         return data
 
 
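The payload builder now forwards a per-guardrail config block instead of the old top-level isHighRisk/isActive/threshold fields (the threshold handling is commented out above), validating mapping names and the model name and injecting default params when none are supplied. An illustrative guardrail entry that would pass these checks; the guardrail and display names are placeholders:

```python
# Illustrative guardrail entry satisfying the new validation in _get_guardrail_data.
guardrail = {
    "name": "Response Correctness",          # placeholder type name
    "displayName": "Response Correctness v1",
    "config": {
        "mappings": [
            {"schemaName": "Prompt", "variableName": "Prompt"},
            {"schemaName": "Response", "variableName": "Response"},
        ],
        "model": "gpt-4o-mini",              # must be gpt-4o-mini, gpt-4o or gpt-4-turbo
        # "params" is optional; when omitted the code injects
        # {"isActive": {"value": False}, "isHighRisk": {"value": False}, "threshold": {"lt": 1}}
    },
}
```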
ragaai_catalyst/synthetic_data_generation.py
CHANGED
@@ -141,6 +141,9 @@ class SyntheticDataGeneration:
 
     def _initialize_client(self, provider, api_key, api_base=None, internal_llm_proxy=None):
         """Initialize the appropriate client based on provider."""
+        if not provider:
+            raise ValueError("Model configuration must be provided with a valid provider and model.")
+
         if provider == "groq":
             if api_key is None and os.getenv("GROQ_API_KEY") is None:
                 raise ValueError("API key must be provided for Groq.")
@@ -155,6 +158,9 @@ class SyntheticDataGeneration:
             if api_key is None and os.getenv("OPENAI_API_KEY") is None and internal_llm_proxy is None:
                 raise ValueError("API key must be provided for OpenAI.")
             openai.api_key = api_key or os.getenv("OPENAI_API_KEY")
+
+        else:
+            raise ValueError(f"Provider is not recognized.")
 
     def _generate_batch_response(self, text, system_message, provider, model_config, api_key, api_base):
         """Generate a batch of responses using the specified provider."""
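_initialize_client now fails fast on a missing or unrecognized provider instead of silently falling through. A hedged sketch of the observable behaviour, assuming SyntheticDataGeneration can be constructed without arguments (in normal use the check is reached through the library's public generation methods rather than this private helper):

```python
# Assumes a no-argument constructor; both calls below should raise ValueError
# with the messages added in this release.
from ragaai_catalyst import SyntheticDataGeneration

sdg = SyntheticDataGeneration()

try:
    sdg._initialize_client(provider=None, api_key=None)
except ValueError as err:
    print(err)  # Model configuration must be provided with a valid provider and model.

try:
    sdg._initialize_client(provider="not-a-provider", api_key="dummy")
except ValueError as err:
    print(err)  # Provider is not recognized.
```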
{ragaai_catalyst-2.0.6b0.dist-info → ragaai_catalyst-2.0.6b1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ragaai_catalyst
-Version: 2.0.
+Version: 2.0.6b1
 Summary: RAGA AI CATALYST
 Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>
 Requires-Python: >=3.9
{ragaai_catalyst-2.0.6b0.dist-info → ragaai_catalyst-2.0.6b1.dist-info}/RECORD
CHANGED
@@ -1,14 +1,15 @@
-ragaai_catalyst/__init__.py,sha256=
+ragaai_catalyst/__init__.py,sha256=BdIJ_UUre0uEnRTsLw_hE0C0muWk6XWNZqdVOel22R4,537
 ragaai_catalyst/_version.py,sha256=JKt9KaVNOMVeGs8ojO6LvIZr7ZkMzNN-gCcvryy4x8E,460
 ragaai_catalyst/dataset.py,sha256=bDNZkcji22sg-zJqMHEwueTO8A2f_GJu70WcEHESwQk,10729
 ragaai_catalyst/evaluation.py,sha256=ZS5G5RjmATjljQhAKYCrDXW2mUNXscpRRoL8cseDjAA,20283
 ragaai_catalyst/experiment.py,sha256=8KvqgJg5JVnt9ghhGDJvdb4mN7ETBX_E5gNxBT0Nsn8,19010
-ragaai_catalyst/
+ragaai_catalyst/guard_executor.py,sha256=llPbE3DyVtrybojXknzBZj8-dtUrGBQwi9-ZiPJxGRo,3762
+ragaai_catalyst/guardrails_manager.py,sha256=EsuQuZOPraLzZMJD502B1-wb642Sm3vqxWM4Nv9Q8Jc,11892
 ragaai_catalyst/internal_api_completion.py,sha256=51YwXcas5NviC1wjr8EX5Y6BOyTbJ4FlKHM8gE46Wtk,2916
 ragaai_catalyst/prompt_manager.py,sha256=ZMIHrmsnPMq20YfeNxWXLtrxnJyMcxpeJ8Uya7S5dUA,16411
 ragaai_catalyst/proxy_call.py,sha256=CHxldeceZUaLU-to_hs_Kf1z_b2vHMssLS_cOBedu78,5499
 ragaai_catalyst/ragaai_catalyst.py,sha256=5Q1VCE7P33DtjaOtVGRUgBL8dpDL9kjisWGIkOyX4nE,17426
-ragaai_catalyst/synthetic_data_generation.py,sha256=
+ragaai_catalyst/synthetic_data_generation.py,sha256=957UYz58uX13i8vn24rzZief5FgtfOEnEH7S8VtXtVw,19157
 ragaai_catalyst/utils.py,sha256=TlhEFwLyRU690HvANbyoRycR3nQ67lxVUQoUOfTPYQ0,3772
 ragaai_catalyst/tracers/__init__.py,sha256=NppmJhD3sQ5R1q6teaZLS7rULj08Gb6JT8XiPRIe_B0,49
 ragaai_catalyst/tracers/llamaindex_callback.py,sha256=vPE7MieKjfwLrLUnnPs20Df0xNYqoCCj-Mt2NbiuiKU,14023
@@ -22,7 +23,7 @@ ragaai_catalyst/tracers/instrumentators/llamaindex.py,sha256=SMrRlR4xM7k9HK43hak
 ragaai_catalyst/tracers/instrumentators/openai.py,sha256=14R4KW9wQCR1xysLfsP_nxS7cqXrTPoD8En4MBAaZUU,379
 ragaai_catalyst/tracers/utils/__init__.py,sha256=KeMaZtYaTojilpLv65qH08QmpYclfpacDA0U3wg6Ybw,64
 ragaai_catalyst/tracers/utils/utils.py,sha256=ViygfJ7vZ7U0CTSA1lbxVloHp4NSlmfDzBRNCJuMhis,2374
-ragaai_catalyst-2.0.
-ragaai_catalyst-2.0.
-ragaai_catalyst-2.0.
-ragaai_catalyst-2.0.
+ragaai_catalyst-2.0.6b1.dist-info/METADATA,sha256=jDHkbahJ8JAcsVi0su6KtWbQoPgdZt07jxYxgT4LXDM,8525
+ragaai_catalyst-2.0.6b1.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
+ragaai_catalyst-2.0.6b1.dist-info/top_level.txt,sha256=HpgsdRgEJMk8nqrU6qdCYk3di7MJkDL0B19lkc7dLfM,16
+ragaai_catalyst-2.0.6b1.dist-info/RECORD,,

{ragaai_catalyst-2.0.6b0.dist-info → ragaai_catalyst-2.0.6b1.dist-info}/top_level.txt
File without changes