ragaai-catalyst 2.0.6b1__tar.gz → 2.0.7b1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41) hide show
  1. {ragaai_catalyst-2.0.6b1/ragaai_catalyst.egg-info → ragaai_catalyst-2.0.7b1}/PKG-INFO +95 -4
  2. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/README.md +93 -3
  3. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/pyproject.toml +3 -2
  4. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/dataset.py +1 -1
  5. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/evaluation.py +1 -1
  6. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/guardrails_manager.py +2 -2
  7. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/internal_api_completion.py +1 -1
  8. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/prompt_manager.py +7 -2
  9. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/ragaai_catalyst.py +1 -1
  10. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/synthetic_data_generation.py +1 -0
  11. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1/ragaai_catalyst.egg-info}/PKG-INFO +95 -4
  12. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst.egg-info/requires.txt +1 -0
  13. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/requirements.txt +1 -1
  14. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/.gitignore +0 -0
  15. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/__init__.py +0 -0
  16. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/docs/dataset_management.md +0 -0
  17. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/docs/prompt_management.md +0 -0
  18. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/examples/prompt_management_litellm.ipynb +0 -0
  19. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/examples/prompt_management_openai.ipynb +0 -0
  20. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/__init__.py +0 -0
  21. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/_version.py +0 -0
  22. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/experiment.py +0 -0
  23. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/guard_executor.py +0 -0
  24. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/proxy_call.py +0 -0
  25. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/tracers/__init__.py +0 -0
  26. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/tracers/exporters/__init__.py +0 -0
  27. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/tracers/exporters/file_span_exporter.py +0 -0
  28. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/tracers/exporters/raga_exporter.py +0 -0
  29. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/tracers/instrumentators/__init__.py +0 -0
  30. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/tracers/instrumentators/langchain.py +0 -0
  31. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/tracers/instrumentators/llamaindex.py +0 -0
  32. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/tracers/instrumentators/openai.py +0 -0
  33. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/tracers/llamaindex_callback.py +0 -0
  34. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/tracers/tracer.py +0 -0
  35. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/tracers/utils/__init__.py +0 -0
  36. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/tracers/utils/utils.py +0 -0
  37. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst/utils.py +0 -0
  38. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst.egg-info/SOURCES.txt +0 -0
  39. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst.egg-info/dependency_links.txt +0 -0
  40. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/ragaai_catalyst.egg-info/top_level.txt +0 -0
  41. {ragaai_catalyst-2.0.6b1 → ragaai_catalyst-2.0.7b1}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: ragaai_catalyst
3
- Version: 2.0.6b1
3
+ Version: 2.0.7b1
4
4
  Summary: RAGA AI CATALYST
5
5
  Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>
6
6
  Requires-Python: >=3.9
@@ -28,6 +28,7 @@ Requires-Dist: litellm==1.51.1
28
28
  Requires-Dist: tenacity==8.3.0
29
29
  Requires-Dist: tqdm>=4.66.5
30
30
  Requires-Dist: llama-index==0.10.0
31
+ Requires-Dist: pyopenssl==24.2.1
31
32
  Provides-Extra: dev
32
33
  Requires-Dist: pytest; extra == "dev"
33
34
  Requires-Dist: pytest-cov; extra == "dev"
@@ -38,7 +39,7 @@ Requires-Dist: flake8; extra == "dev"
38
39
 
39
40
  # RagaAI Catalyst
40
41
 
41
- RagaAI Catalyst is a powerful tool for managing and optimizing LLM projects. It provides functionalities for project management, trace recording, and experiment management, allowing you to fine-tune and evaluate your LLM applications effectively.
42
+ RagaAI Catalyst is a comprehensive platform designed to enhance the management and optimization of LLM projects. It offers a wide range of features, including project management, dataset management, evaluation management, trace management, prompt management, synthetic data generation, and guardrail management. These functionalities enable you to efficiently evaluate and safeguard your LLM applications.
42
43
 
43
44
  ## Table of Contents
44
45
 
@@ -53,6 +54,7 @@ RagaAI Catalyst is a powerful tool for managing and optimizing LLM projects. It
53
54
  - [Trace Management](#trace-management)
54
55
  - [Prompt Management](#prompt-management)
55
56
  - [Synthetic Data Generation](#synthetic-data-generation)
57
+ - [Guardrail Management](#guardrail-management)
56
58
 
57
59
  ## Installation
58
60
 
@@ -144,6 +146,7 @@ evaluation = Evaluation(
144
146
  evaluation.list_metrics()
145
147
 
146
148
  # Add metrics to the experiment
149
+
147
150
  schema_mapping={
148
151
  'Query': 'prompt',
149
152
  'response': 'response',
@@ -201,8 +204,12 @@ tracer = Tracer(
201
204
 
202
205
  # Your code here
203
206
 
207
+
204
208
  # Stop the trace recording
205
209
  tracer.stop()
210
+
211
+ # Get upload status
212
+ tracer.get_upload_status()
206
213
  ```
207
214
 
208
215
 
@@ -237,7 +244,7 @@ print("variable:",variable)
237
244
  prompt_content = prompt.get_prompt_content()
238
245
  print("prompt_content:", prompt_content)
239
246
 
240
- # Compile a prompt with variables
247
+ # Compile the prompt with variables
241
248
  compiled_prompt = prompt.compile(query="What's the weather?", context="sunny", llm_response="It's sunny today")
242
249
  print("Compiled prompt:", compiled_prompt)
243
250
 
@@ -280,7 +287,9 @@ sdg = SyntheticDataGeneration()
280
287
  text = sdg.process_document(input_data="file_path")
281
288
 
282
289
  # Generate results
283
- result = sdg.generate_qna(text, question_type ='simple',model_config={"provider":"openai","model":"gpt-4o-mini"},n=20)
290
+ result = sdg.generate_qna(text, question_type ='complex',model_config={"provider":"openai","model":"openai/gpt-3.5-turbo"},n=5)
291
+
292
+ print(result.head())
284
293
 
285
294
  # Get supported Q&A types
286
295
  sdg.get_supported_qna()
@@ -291,5 +300,87 @@ sdg.get_supported_providers()
291
300
 
292
301
 
293
302
 
303
+ ### Guardrail Management
304
+
305
+ ```py
306
+ from ragaai_catalyst import GuardrailsManager
307
+
308
+ # Initialize Guardrails Manager
309
+ gdm = GuardrailsManager(project_name=project_name)
310
+
311
+ # Get list of Guardrails available
312
+ guardrails_list = gdm.list_guardrails()
313
+ print('guardrails_list:', guardrails_list)
314
+
315
+ # Get list of fail condition for guardrails
316
+ fail_conditions = gdm.list_fail_condition()
317
+ print('fail_conditions:', fail_conditions)
318
+
319
+ # Get list of deployment ids
320
+ deployment_list = gdm.list_deployment_ids()
321
+ print('deployment_list:', deployment_list)
322
+
323
+ # Get specific deployment id with guardrails information
324
+ deployment_id_detail = gdm.get_deployment(17)
325
+ print('deployment_id_detail:', deployment_id_detail)
326
+
327
+ # Add guardrails to a deployment id
328
+ guardrails_config = {"guardrailFailConditions": ["FAIL"],
329
+ "deploymentFailCondition": "ALL_FAIL",
330
+ "alternateResponse": "Your alternate response"}
331
+
332
+ guardrails = [
333
+ {
334
+ "displayName": "Response_Evaluator",
335
+ "name": "Response Evaluator",
336
+ "config":{
337
+ "mappings": [{
338
+ "schemaName": "Text",
339
+ "variableName": "Response"
340
+ }],
341
+ "params": {
342
+ "isActive": {"value": False},
343
+ "isHighRisk": {"value": True},
344
+ "threshold": {"eq": 0},
345
+ "competitors": {"value": ["Google","Amazon"]}
346
+ }
347
+ }
348
+ },
349
+ {
350
+ "displayName": "Regex_Check",
351
+ "name": "Regex Check",
352
+ "config":{
353
+ "mappings": [{
354
+ "schemaName": "Text",
355
+ "variableName": "Response"
356
+ }],
357
+ "params":{
358
+ "isActive": {"value": False},
359
+ "isHighRisk": {"value": True},
360
+ "threshold": {"lt1": 1}
361
+ }
362
+ }
363
+ }
364
+ ]
365
+
366
+ gdm.add_guardrails(deployment_id, guardrails, guardrails_config)
294
367
 
295
368
 
369
+ # Import GuardExecutor
370
+ from ragaai_catalyst import GuardExecutor
371
+
372
+ # Initialise GuardExecutor with required params and Evaluate
373
+ executor = GuardExecutor(deployment_id,gdm,field_map={'context':'document'})
374
+
375
+
376
+ message={'role':'user',
377
+ 'content':'What is the capital of France'
378
+ }
379
+ prompt_params={'document':' France'}
380
+
381
+ model_params = {'temperature':.7,'model':'gpt-4o-mini'}
382
+ llm_caller = 'litellm'
383
+
384
+ executor([message],prompt_params,model_params,llm_caller)
385
+
386
+ ```
@@ -1,6 +1,6 @@
1
1
  # RagaAI Catalyst
2
2
 
3
- RagaAI Catalyst is a powerful tool for managing and optimizing LLM projects. It provides functionalities for project management, trace recording, and experiment management, allowing you to fine-tune and evaluate your LLM applications effectively.
3
+ RagaAI Catalyst is a comprehensive platform designed to enhance the management and optimization of LLM projects. It offers a wide range of features, including project management, dataset management, evaluation management, trace management, prompt management, synthetic data generation, and guardrail management. These functionalities enable you to efficiently evaluate and safeguard your LLM applications.
4
4
 
5
5
  ## Table of Contents
6
6
 
@@ -15,6 +15,7 @@ RagaAI Catalyst is a powerful tool for managing and optimizing LLM projects. It
15
15
  - [Trace Management](#trace-management)
16
16
  - [Prompt Management](#prompt-management)
17
17
  - [Synthetic Data Generation](#synthetic-data-generation)
18
+ - [Guardrail Management](#guardrail-management)
18
19
 
19
20
  ## Installation
20
21
 
@@ -106,6 +107,7 @@ evaluation = Evaluation(
106
107
  evaluation.list_metrics()
107
108
 
108
109
  # Add metrics to the experiment
110
+
109
111
  schema_mapping={
110
112
  'Query': 'prompt',
111
113
  'response': 'response',
@@ -163,8 +165,12 @@ tracer = Tracer(
163
165
 
164
166
  # Your code here
165
167
 
168
+
166
169
  # Stop the trace recording
167
170
  tracer.stop()
171
+
172
+ # Get upload status
173
+ tracer.get_upload_status()
168
174
  ```
169
175
 
170
176
 
@@ -199,7 +205,7 @@ print("variable:",variable)
199
205
  prompt_content = prompt.get_prompt_content()
200
206
  print("prompt_content:", prompt_content)
201
207
 
202
- # Compile a prompt with variables
208
+ # Compile the prompt with variables
203
209
  compiled_prompt = prompt.compile(query="What's the weather?", context="sunny", llm_response="It's sunny today")
204
210
  print("Compiled prompt:", compiled_prompt)
205
211
 
@@ -242,7 +248,9 @@ sdg = SyntheticDataGeneration()
242
248
  text = sdg.process_document(input_data="file_path")
243
249
 
244
250
  # Generate results
245
- result = sdg.generate_qna(text, question_type ='simple',model_config={"provider":"openai","model":"gpt-4o-mini"},n=20)
251
+ result = sdg.generate_qna(text, question_type ='complex',model_config={"provider":"openai","model":"openai/gpt-3.5-turbo"},n=5)
252
+
253
+ print(result.head())
246
254
 
247
255
  # Get supported Q&A types
248
256
  sdg.get_supported_qna()
@@ -253,5 +261,87 @@ sdg.get_supported_providers()
253
261
 
254
262
 
255
263
 
264
+ ### Guardrail Management
265
+
266
+ ```py
267
+ from ragaai_catalyst import GuardrailsManager
268
+
269
+ # Initialize Guardrails Manager
270
+ gdm = GuardrailsManager(project_name=project_name)
271
+
272
+ # Get list of Guardrails available
273
+ guardrails_list = gdm.list_guardrails()
274
+ print('guardrails_list:', guardrails_list)
275
+
276
+ # Get list of fail condition for guardrails
277
+ fail_conditions = gdm.list_fail_condition()
278
+ print('fail_conditions;', fail_conditions)
279
+
280
+ #Get list of deployment ids
281
+ deployment_list = gdm.list_deployment_ids()
282
+ print('deployment_list:', deployment_list)
283
+
284
+ # Get specific deployment id with guardrails information
285
+ deployment_id_detail = gdm.get_deployment(17)
286
+ print('deployment_id_detail:', deployment_id_detail)
287
+
288
+ # Add guardrails to a deployment id
289
+ guardrails_config = {"guardrailFailConditions": ["FAIL"],
290
+ "deploymentFailCondition": "ALL_FAIL",
291
+ "alternateResponse": "Your alternate response"}
292
+
293
+ guardrails = [
294
+ {
295
+ "displayName": "Response_Evaluator",
296
+ "name": "Response Evaluator",
297
+ "config":{
298
+ "mappings": [{
299
+ "schemaName": "Text",
300
+ "variableName": "Response"
301
+ }],
302
+ "params": {
303
+ "isActive": {"value": False},
304
+ "isHighRisk": {"value": True},
305
+ "threshold": {"eq": 0},
306
+ "competitors": {"value": ["Google","Amazon"]}
307
+ }
308
+ }
309
+ },
310
+ {
311
+ "displayName": "Regex_Check",
312
+ "name": "Regex Check",
313
+ "config":{
314
+ "mappings": [{
315
+ "schemaName": "Text",
316
+ "variableName": "Response"
317
+ }],
318
+ "params":{
319
+ "isActive": {"value": False},
320
+ "isHighRisk": {"value": True},
321
+ "threshold": {"lt1": 1}
322
+ }
323
+ }
324
+ }
325
+ ]
326
+
327
+ gdm.add_guardrails(deployment_id, guardrails, guardrails_config)
328
+
329
+
330
+ # Import GuardExecutor
331
+ from ragaai_catalyst import GuardExecutor
332
+
333
+ # Initialise GuardExecutor with required params and Evaluate
334
+ executor = GuardExecutor(deployment_id,gdm,field_map={'context':'document'})
335
+
336
+
337
+ message={'role':'user',
338
+ 'content':'What is the capital of France'
339
+ }
340
+ prompt_params={'document':' France'}
341
+
342
+ model_params = {'temperature':.7,'model':'gpt-4o-mini'}
343
+ llm_caller = 'litellm'
256
344
 
345
+ executor([message],prompt_params,model_params,llm_caller)
257
346
 
347
+ ```
@@ -8,7 +8,7 @@ description = "RAGA AI CATALYST"
8
8
  readme = "README.md"
9
9
  requires-python = ">=3.9"
10
10
  # license = {file = "LICENSE"}
11
- version = "2.0.6.beta.1"
11
+ version = "2.0.7.beta.1"
12
12
  authors = [
13
13
  {name = "Kiran Scaria", email = "kiran.scaria@raga.ai"},
14
14
  {name = "Kedar Gaikwad", email = "kedar.gaikwad@raga.ai"},
@@ -41,7 +41,8 @@ dependencies = [
41
41
  "litellm==1.51.1",
42
42
  "tenacity==8.3.0",
43
43
  "tqdm>=4.66.5",
44
- "llama-index==0.10.0"
44
+ "llama-index==0.10.0",
45
+ "pyopenssl==24.2.1"
45
46
  ]
46
47
 
47
48
  [project.optional-dependencies]
@@ -16,7 +16,7 @@ class Dataset:
16
16
 
17
17
  def __init__(self, project_name):
18
18
  self.project_name = project_name
19
- self.num_projects = 100
19
+ self.num_projects = 99999
20
20
  Dataset.BASE_URL = (
21
21
  os.getenv("RAGAAI_CATALYST_BASE_URL")
22
22
  if os.getenv("RAGAAI_CATALYST_BASE_URL")
@@ -16,7 +16,7 @@ class Evaluation:
16
16
  self.base_url = f"{RagaAICatalyst.BASE_URL}"
17
17
  self.timeout = 10
18
18
  self.jobId = None
19
- self.num_projects=100
19
+ self.num_projects=99999
20
20
 
21
21
  try:
22
22
  response = requests.get(
@@ -13,7 +13,7 @@ class GuardrailsManager:
13
13
  """
14
14
  self.project_name = project_name
15
15
  self.timeout = 10
16
- self.num_projects = 100
16
+ self.num_projects = 99999
17
17
  self.deployment_name = "NA"
18
18
  self.deployment_id = "NA"
19
19
  self.base_url = f"{RagaAICatalyst.BASE_URL}"
@@ -31,7 +31,7 @@ class GuardrailsManager:
31
31
  :return: A tuple containing a list of project names and a list of dictionaries with project IDs and names.
32
32
  """
33
33
  headers = {'Authorization': f'Bearer {os.getenv("RAGAAI_CATALYST_TOKEN")}'}
34
- response = requests.request("GET", f"{self.base_url}/v2/llm/projects?size=12&page=0", headers=headers, timeout=self.timeout)
34
+ response = requests.request("GET", f"{self.base_url}/v2/llm/projects?size={self.num_projects}", headers=headers, timeout=self.timeout)
35
35
  project_content = response.json()["data"]["content"]
36
36
  list_project = [_["name"] for _ in project_content]
37
37
  project_name_with_id = [{"id": _["id"], "name": _["name"]} for _ in project_content]
@@ -36,7 +36,7 @@ def api_completion(messages, model_config, kwargs):
36
36
  raise ValueError(response["error"]["message"])
37
37
  else:
38
38
  result= response["choices"][0]["message"]["content"]
39
- response1 = result.replace('\n', '')
39
+ response1 = result.replace('\n', '').replace('```json','').replace('```', '').strip()
40
40
  try:
41
41
  json_data = json.loads(response1)
42
42
  df = pd.DataFrame(json_data)
@@ -23,7 +23,7 @@ class PromptManager:
23
23
  self.project_name = project_name
24
24
  self.base_url = f"{RagaAICatalyst.BASE_URL}/playground/prompt"
25
25
  self.timeout = 10
26
- self.size = 100 #Number of projects to fetch
26
+ self.size = 99999 #Number of projects to fetch
27
27
 
28
28
  try:
29
29
  response = requests.get(
@@ -432,7 +432,12 @@ class PromptObject:
432
432
  Returns:
433
433
  dict: A dictionary of parameters found in the prompt text.
434
434
  """
435
- parameters = {param["name"]: self._convert_value(param["value"], param["type"]) for param in self.parameters}
435
+ parameters = {}
436
+ for param in self.parameters:
437
+ if "value" in param:
438
+ parameters[param["name"]] = self._convert_value(param["value"], param["type"])
439
+ else:
440
+ parameters[param["name"]] = ""
436
441
  parameters["model"] = self.model
437
442
  return parameters
438
443
 
@@ -287,7 +287,7 @@ class RagaAICatalyst:
287
287
  def get_project_id(self, project_name):
288
288
  pass
289
289
 
290
- def list_projects(self, num_projects=100):
290
+ def list_projects(self, num_projects=99999):
291
291
  """
292
292
  Retrieves a list of projects with the specified number of projects.
293
293
 
@@ -289,6 +289,7 @@ class SyntheticDataGeneration:
289
289
 
290
290
  # Extract the content from the response
291
291
  content = response.choices[0].message.content
292
+ content = content.replace('\n', '').replace('```json','').replace('```', '').strip()
292
293
 
293
294
  # Clean the response if needed (remove any prefix before the JSON list)
294
295
  list_start_index = content.find('[')
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: ragaai_catalyst
3
- Version: 2.0.6b1
3
+ Version: 2.0.7b1
4
4
  Summary: RAGA AI CATALYST
5
5
  Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>
6
6
  Requires-Python: >=3.9
@@ -28,6 +28,7 @@ Requires-Dist: litellm==1.51.1
28
28
  Requires-Dist: tenacity==8.3.0
29
29
  Requires-Dist: tqdm>=4.66.5
30
30
  Requires-Dist: llama-index==0.10.0
31
+ Requires-Dist: pyopenssl==24.2.1
31
32
  Provides-Extra: dev
32
33
  Requires-Dist: pytest; extra == "dev"
33
34
  Requires-Dist: pytest-cov; extra == "dev"
@@ -38,7 +39,7 @@ Requires-Dist: flake8; extra == "dev"
38
39
 
39
40
  # RagaAI Catalyst
40
41
 
41
- RagaAI Catalyst is a powerful tool for managing and optimizing LLM projects. It provides functionalities for project management, trace recording, and experiment management, allowing you to fine-tune and evaluate your LLM applications effectively.
42
+ RagaAI Catalyst is a comprehensive platform designed to enhance the management and optimization of LLM projects. It offers a wide range of features, including project management, dataset management, evaluation management, trace management, prompt management, synthetic data generation, and guardrail management. These functionalities enable you to efficiently evaluate and safeguard your LLM applications.
42
43
 
43
44
  ## Table of Contents
44
45
 
@@ -53,6 +54,7 @@ RagaAI Catalyst is a powerful tool for managing and optimizing LLM projects. It
53
54
  - [Trace Management](#trace-management)
54
55
  - [Prompt Management](#prompt-management)
55
56
  - [Synthetic Data Generation](#synthetic-data-generation)
57
+ - [Guardrail Management](#guardrail-management)
56
58
 
57
59
  ## Installation
58
60
 
@@ -144,6 +146,7 @@ evaluation = Evaluation(
144
146
  evaluation.list_metrics()
145
147
 
146
148
  # Add metrics to the experiment
149
+
147
150
  schema_mapping={
148
151
  'Query': 'prompt',
149
152
  'response': 'response',
@@ -201,8 +204,12 @@ tracer = Tracer(
201
204
 
202
205
  # Your code here
203
206
 
207
+
204
208
  # Stop the trace recording
205
209
  tracer.stop()
210
+
211
+ # Get upload status
212
+ tracer.get_upload_status()
206
213
  ```
207
214
 
208
215
 
@@ -237,7 +244,7 @@ print("variable:",variable)
237
244
  prompt_content = prompt.get_prompt_content()
238
245
  print("prompt_content:", prompt_content)
239
246
 
240
- # Compile a prompt with variables
247
+ # Compile the prompt with variables
241
248
  compiled_prompt = prompt.compile(query="What's the weather?", context="sunny", llm_response="It's sunny today")
242
249
  print("Compiled prompt:", compiled_prompt)
243
250
 
@@ -280,7 +287,9 @@ sdg = SyntheticDataGeneration()
280
287
  text = sdg.process_document(input_data="file_path")
281
288
 
282
289
  # Generate results
283
- result = sdg.generate_qna(text, question_type ='simple',model_config={"provider":"openai","model":"gpt-4o-mini"},n=20)
290
+ result = sdg.generate_qna(text, question_type ='complex',model_config={"provider":"openai","model":"openai/gpt-3.5-turbo"},n=5)
291
+
292
+ print(result.head())
284
293
 
285
294
  # Get supported Q&A types
286
295
  sdg.get_supported_qna()
@@ -291,5 +300,87 @@ sdg.get_supported_providers()
291
300
 
292
301
 
293
302
 
303
+ ### Guardrail Management
304
+
305
+ ```py
306
+ from ragaai_catalyst import GuardrailsManager
307
+
308
+ # Initialize Guardrails Manager
309
+ gdm = GuardrailsManager(project_name=project_name)
310
+
311
+ # Get list of Guardrails available
312
+ guardrails_list = gdm.list_guardrails()
313
+ print('guardrails_list:', guardrails_list)
314
+
315
+ # Get list of fail condition for guardrails
316
+ fail_conditions = gdm.list_fail_condition()
317
+ print('fail_conditions:', fail_conditions)
318
+
319
+ # Get list of deployment ids
320
+ deployment_list = gdm.list_deployment_ids()
321
+ print('deployment_list:', deployment_list)
322
+
323
+ # Get specific deployment id with guardrails information
324
+ deployment_id_detail = gdm.get_deployment(17)
325
+ print('deployment_id_detail:', deployment_id_detail)
326
+
327
+ # Add guardrails to a deployment id
328
+ guardrails_config = {"guardrailFailConditions": ["FAIL"],
329
+ "deploymentFailCondition": "ALL_FAIL",
330
+ "alternateResponse": "Your alternate response"}
331
+
332
+ guardrails = [
333
+ {
334
+ "displayName": "Response_Evaluator",
335
+ "name": "Response Evaluator",
336
+ "config":{
337
+ "mappings": [{
338
+ "schemaName": "Text",
339
+ "variableName": "Response"
340
+ }],
341
+ "params": {
342
+ "isActive": {"value": False},
343
+ "isHighRisk": {"value": True},
344
+ "threshold": {"eq": 0},
345
+ "competitors": {"value": ["Google","Amazon"]}
346
+ }
347
+ }
348
+ },
349
+ {
350
+ "displayName": "Regex_Check",
351
+ "name": "Regex Check",
352
+ "config":{
353
+ "mappings": [{
354
+ "schemaName": "Text",
355
+ "variableName": "Response"
356
+ }],
357
+ "params":{
358
+ "isActive": {"value": False},
359
+ "isHighRisk": {"value": True},
360
+ "threshold": {"lt1": 1}
361
+ }
362
+ }
363
+ }
364
+ ]
365
+
366
+ gdm.add_guardrails(deployment_id, guardrails, guardrails_config)
294
367
 
295
368
 
369
+ # Import GuardExecutor
370
+ from ragaai_catalyst import GuardExecutor
371
+
372
+ # Initialise GuardExecutor with required params and Evaluate
373
+ executor = GuardExecutor(deployment_id,gdm,field_map={'context':'document'})
374
+
375
+
376
+ message={'role':'user',
377
+ 'content':'What is the capital of France'
378
+ }
379
+ prompt_params={'document':' France'}
380
+
381
+ model_params = {'temperature':.7,'model':'gpt-4o-mini'}
382
+ llm_caller = 'litellm'
383
+
384
+ executor([message],prompt_params,model_params,llm_caller)
385
+
386
+ ```
@@ -21,6 +21,7 @@ litellm==1.51.1
21
21
  tenacity==8.3.0
22
22
  tqdm>=4.66.5
23
23
  llama-index==0.10.0
24
+ pyopenssl==24.2.1
24
25
 
25
26
  [dev]
26
27
  pytest
@@ -1,5 +1,5 @@
1
1
  toml==0.10.2
2
- aiohttp==3.9.5
2
+ aiohttp==3.10.2
3
3
  opentelemetry-api==1.25.0
4
4
  opentelemetry-sdk==1.25.0
5
5
  opentelemetry-instrumentation==0.46b0