ragaai-catalyst 2.0.5__py3-none-any.whl → 2.0.6b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
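The highlights of this bump are a pre-release version (2.0.6b0), three new runtime dependencies (litellm, tqdm, llama-index), and a reorganized README covering Dataset, Evaluation, Trace, Prompt, and Synthetic Data Generation workflows. As a quick sanity check after upgrading, the installed distribution can be inspected with the standard library; this is a generic sketch using `importlib.metadata`, not part of the package's own API:

```python
# Minimal sketch: confirm which ragaai_catalyst build is installed after upgrading.
# Uses only the standard library; "ragaai_catalyst" is the distribution name taken
# from the METADATA shown below.
from importlib.metadata import version, requires

print(version("ragaai_catalyst"))   # expected "2.0.6b0" for the new wheel
print(requires("ragaai_catalyst"))  # should now include the litellm, tqdm and llama-index pins
```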
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ragaai_catalyst
- Version: 2.0.5
+ Version: 2.0.6b0
  Summary: RAGA AI CATALYST
  Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>
  Requires-Python: >=3.9
@@ -24,7 +24,10 @@ Requires-Dist: groq>=0.11.0
  Requires-Dist: PyPDF2>=3.0.1
  Requires-Dist: google-generativeai>=0.8.2
  Requires-Dist: Markdown>=3.7
+ Requires-Dist: litellm==1.51.1
  Requires-Dist: tenacity==8.3.0
+ Requires-Dist: tqdm>=4.66.5
+ Requires-Dist: llama-index==0.10.0
  Provides-Extra: dev
  Requires-Dist: pytest; extra == "dev"
  Requires-Dist: pytest-cov; extra == "dev"
@@ -45,10 +48,11 @@ RagaAI Catalyst is a powerful tool for managing and optimizing LLM projects. It
  - [Configuration](#configuration)
  - [Usage](#usage)
  - [Project Management](#project-management)
- - [Trace Management](#trace-management)
- - [Experiment Management](#experiment-management)
  - [Dataset Management](#dataset-management)
+ - [Evaluation Management](#evaluation)
+ - [Trace Management](#trace-management)
  - [Prompt Management](#prompt-management)
+ - [Synthetic Data Generation](#synthetic-data-generation)

  ## Installation

@@ -84,145 +88,208 @@ Create and manage projects using RagaAI Catalyst:
  # Create a project
  project = catalyst.create_project(
      project_name="Test-RAG-App-1",
-     description="Description of the project"
+     usecase="Chatbot"
  )

+ # Get project usecases
+ catalyst.project_use_cases()
+
  # List projects
  projects = catalyst.list_projects()
  print(projects)
  ```

- ### Trace Management
+ ### Dataset Management
+ Manage datasets efficiently for your projects:

- Record and analyze traces of your RAG application:
+ ```py
+ from ragaai_catalyst import Dataset

- ```python
- from ragaai_catalyst import Tracer
+ # Initialize Dataset management for a specific project
+ dataset_manager = Dataset(project_name="project_name")

- # Start a trace recording
- tracer = Tracer(
-     project_name="Test-RAG-App-1",
-     metadata={"key1": "value1", "key2": "value2"},
-     tracer_type="langchain",
-     pipeline={
-         "llm_model": "gpt-3.5-turbo",
-         "vector_store": "faiss",
-         "embed_model": "text-embedding-ada-002",
-     }
- ).start()
+ # List existing datasets
+ datasets = dataset_manager.list_datasets()
+ print("Existing Datasets:", datasets)

- # Your code here
+ # Create a dataset from CSV
+ dataset_manager.create_from_csv(
+     csv_path='path/to/your.csv',
+     dataset_name='MyDataset',
+     schema_mapping={'column1': 'schema_element1', 'column2': 'schema_element2'}
+ )

- # Stop the trace recording
- tracer.stop()
+ # Get project schema mapping
+ dataset_manager.get_schema_mapping()

- # Alternatively, use a context manager
- with tracer.trace():
-     # Your code here
  ```

- ### Experiment Management
+ For more detailed information on Dataset Management, including CSV schema handling and advanced usage, please refer to the [Dataset Management documentation](docs/dataset_management.md).
+
+
+ ### Evaluation

- Create and manage experiments to evaluate your RAG application:
+ Create and manage metric evaluation of your RAG application:

  ```python
- from ragaai_catalyst import Experiment
+ from ragaai_catalyst import Evaluation

  # Create an experiment
- experiment_manager = Experiment(
+ evaluation = Evaluation(
      project_name="Test-RAG-App-1",
-     experiment_name="Exp-01",
-     experiment_description="Experiment Description",
-     dataset_name="Dataset Created from UI",
+     dataset_name="MyDataset",
  )

+ # Get list of available metrics
+ evaluation.list_metrics()
+
  # Add metrics to the experiment
- experiment_manager.add_metrics(
+ schema_mapping={
+     'Query': 'prompt',
+     'response': 'response',
+     'Context': 'context',
+     'expectedResponse': 'expected_response'
+ }
+
+ # Add single metric
+ evaluation.add_metrics(
      metrics=[
-         {"name": "hallucination", "config": {"model": "gpt-4o", "provider":"OpenAI"}}
+         {"name": "Faithfulness", "config": {"model": "gpt-4o-mini", "provider": "openai", "threshold": {"gte": 0.232323}}, "column_name": "Faithfulness_v1", "schema_mapping": schema_mapping},
+
      ]
  )

  # Add multiple metrics
- experiment_manager.add_metrics(
+ evaluation.add_metrics(
      metrics=[
-         {"name": "hallucination", "config": {"model": "gpt-4o", "provider":"OpenAI"}},
-         {"name": "hallucination", "config": {"model": "gpt-4", "provider":"OpenAI"}},
-         {"name": "hallucination", "config": {"model": "gpt-3.5-turbo", "provider":"OpenAI"}}
+         {"name": "Faithfulness", "config": {"model": "gpt-4o-mini", "provider": "openai", "threshold": {"gte": 0.323}}, "column_name": "Faithfulness_gte", "schema_mapping": schema_mapping},
+         {"name": "Hallucination", "config": {"model": "gpt-4o-mini", "provider": "openai", "threshold": {"lte": 0.323}}, "column_name": "Hallucination_lte", "schema_mapping": schema_mapping},
+         {"name": "Hallucination", "config": {"model": "gpt-4o-mini", "provider": "openai", "threshold": {"eq": 0.323}}, "column_name": "Hallucination_eq", "schema_mapping": schema_mapping},
      ]
  )

  # Get the status of the experiment
- status = experiment_manager.get_status()
+ status = evaluation.get_status()
  print("Experiment Status:", status)

  # Get the results of the experiment
- results = experiment_manager.get_results()
+ results = evaluation.get_results()
  print("Experiment Results:", results)
  ```



- ## Dataset Management
- Manage datasets efficiently for your projects:
+ ### Trace Management

- ```py
- from ragaai_catalyst import Dataset
+ Record and analyze traces of your RAG application:

- # Initialize Dataset management for a specific project
- dataset_manager = Dataset(project_name="project_name")
+ ```python
+ from ragaai_catalyst import Tracer

- # List existing datasets
- datasets = dataset_manager.list_datasets()
- print("Existing Datasets:", datasets)
+ # Start a trace recording
+ tracer = Tracer(
+     project_name="Test-RAG-App-1",
+     dataset_name="tracer_dataset_name",
+     metadata={"key1": "value1", "key2": "value2"},
+     tracer_type="langchain",
+     pipeline={
+         "llm_model": "gpt-3.5-turbo",
+         "vector_store": "faiss",
+         "embed_model": "text-embedding-ada-002",
+     }
+ ).start()

- # Create a dataset from trace
- dataset_manager.create_from_trace(
-     dataset_name='Test-dataset-1',
-     filter_list=[
-         {"name": "llm_model", "values": ["gpt-3.5-turbo", "gpt-4"]},
-         {"name": "prompt_length", "lte": 27, "gte": 23}
-     ]
- )
+ # Your code here

- # Create a dataset from CSV
- dataset_manager.create_from_csv(
-     csv_path='path/to/your.csv',
-     dataset_name='MyDataset',
-     schema_mapping={'column1': 'schema_element1', 'column2': 'schema_element2'}
- )
+ # Stop the trace recording
+ tracer.stop()
  ```

- For more detailed information on Dataset Management, including CSV schema handling and advanced usage, please refer to the [Dataset Management documentation](docs/dataset_management.md).

- ## Prompt Management
+ ### Prompt Management

  Manage and use prompts efficiently in your projects:

  ```py
- from ragaai_catalyst.prompt_manager import PromptManager
+ from ragaai_catalyst import PromptManager

  # Initialize PromptManager
- prompt_manager = PromptManager("your-project-name")
+ prompt_manager = PromptManager(project_name="Test-RAG-App-1")

  # List available prompts
  prompts = prompt_manager.list_prompts()
  print("Available prompts:", prompts)

- # Get a specific prompt
+ # Get default prompt by prompt_name
  prompt_name = "your_prompt_name"
  prompt = prompt_manager.get_prompt(prompt_name)

+ # Get specific version of prompt by prompt_name and version
+ prompt_name = "your_prompt_name"
+ version = "v1"
+ prompt = prompt_manager.get_prompt(prompt_name, version)
+
+ # Get variables in a prompt
+ variable = prompt.get_variables()
+ print("variable:", variable)
+
+ # Get prompt content
+ prompt_content = prompt.get_prompt_content()
+ print("prompt_content:", prompt_content)
+
  # Compile a prompt with variables
  compiled_prompt = prompt.compile(query="What's the weather?", context="sunny", llm_response="It's sunny today")
  print("Compiled prompt:", compiled_prompt)

- # Get prompt parameters
- parameters = prompt.get_parameters()
- print("Prompt parameters:", parameters)
- ```
+ # implement compiled_prompt with openai
+ import openai
+ def get_openai_response(prompt):
+     client = openai.OpenAI()
+     response = client.chat.completions.create(
+         model="gpt-4o-mini",
+         messages=prompt
+     )
+     return response.choices[0].message.content
+ openai_response = get_openai_response(compiled_prompt)
+ print("openai_response:", openai_response)
+
+ # implement compiled_prompt with litellm
+ import litellm
+ def get_litellm_response(prompt):
+     response = litellm.completion(
+         model="gpt-4o-mini",
+         messages=prompt
+     )
+     return response.choices[0].message.content
+ litellm_response = get_litellm_response(compiled_prompt)
+ print("litellm_response:", litellm_response)

+ ```
  For more detailed information on Prompt Management, please refer to the [Prompt Management documentation](docs/prompt_management.md).


+ ### Synthetic Data Generation
+
+ ```py
+ from ragaai_catalyst import SyntheticDataGeneration
+
+ # Initialize Synthetic Data Generation
+ sdg = SyntheticDataGeneration()
+
+ # Process your file
+ text = sdg.process_document(input_data="file_path")
+
+ # Generate results
+ result = sdg.generate_qna(text, question_type='simple', model_config={"provider": "openai", "model": "gpt-4o-mini"}, n=20)
+
+ # Get supported Q&A types
+ sdg.get_supported_qna()
+
+ # Get supported providers
+ sdg.get_supported_providers()
+ ```
+
+
+
+

@@ -1,15 +1,18 @@
- ragaai_catalyst/__init__.py,sha256=T0-X4yfIAe26-tWx6kLwNkKIjaFoQL2aNLIRp5wBG5w,424
+ ragaai_catalyst/__init__.py,sha256=Gwhhw9Q-ze2BTwvo2FsXCS6ad9Dy4RktMRL3R_SbCR0,495
  ragaai_catalyst/_version.py,sha256=JKt9KaVNOMVeGs8ojO6LvIZr7ZkMzNN-gCcvryy4x8E,460
- ragaai_catalyst/dataset.py,sha256=XjI06Exs6-64pQPQlky4mtcUllNMCgKP-bnM_t9EWkY,10920
- ragaai_catalyst/evaluation.py,sha256=PR7rMkvZ4km26B24sSc60GPNS0JkrUMIYo5CPEqX2Qw,19315
+ ragaai_catalyst/dataset.py,sha256=bDNZkcji22sg-zJqMHEwueTO8A2f_GJu70WcEHESwQk,10729
+ ragaai_catalyst/evaluation.py,sha256=ZS5G5RjmATjljQhAKYCrDXW2mUNXscpRRoL8cseDjAA,20283
  ragaai_catalyst/experiment.py,sha256=8KvqgJg5JVnt9ghhGDJvdb4mN7ETBX_E5gNxBT0Nsn8,19010
+ ragaai_catalyst/guardrails_manager.py,sha256=4G8RkZH4QXXZXPGFDlJ7_93dSBCtVwZOrLvg2jawtHc,10533
+ ragaai_catalyst/internal_api_completion.py,sha256=51YwXcas5NviC1wjr8EX5Y6BOyTbJ4FlKHM8gE46Wtk,2916
  ragaai_catalyst/prompt_manager.py,sha256=ZMIHrmsnPMq20YfeNxWXLtrxnJyMcxpeJ8Uya7S5dUA,16411
- ragaai_catalyst/proxy_call.py,sha256=nlMdJCSW73sfN0fMbCbtIk6W992Nac5FJvcfNd6UDJk,5497
+ ragaai_catalyst/proxy_call.py,sha256=CHxldeceZUaLU-to_hs_Kf1z_b2vHMssLS_cOBedu78,5499
  ragaai_catalyst/ragaai_catalyst.py,sha256=5Q1VCE7P33DtjaOtVGRUgBL8dpDL9kjisWGIkOyX4nE,17426
- ragaai_catalyst/synthetic_data_generation.py,sha256=STpZF-a1mYT3GR4CGdDvhBdctf2ciSLyvDANqJxnQp8,12989
+ ragaai_catalyst/synthetic_data_generation.py,sha256=49DtTzYVGaGvye-g6cgpYNf7YMf2wZFLQ7ACxtqu9_E,18945
  ragaai_catalyst/utils.py,sha256=TlhEFwLyRU690HvANbyoRycR3nQ67lxVUQoUOfTPYQ0,3772
  ragaai_catalyst/tracers/__init__.py,sha256=NppmJhD3sQ5R1q6teaZLS7rULj08Gb6JT8XiPRIe_B0,49
- ragaai_catalyst/tracers/tracer.py,sha256=eaGJdLEIjadHpbWBXBl5AhMa2vL97SVjik4U1L8gros,9591
+ ragaai_catalyst/tracers/llamaindex_callback.py,sha256=vPE7MieKjfwLrLUnnPs20Df0xNYqoCCj-Mt2NbiuiKU,14023
+ ragaai_catalyst/tracers/tracer.py,sha256=Y7eGoUDU1tAF3adccfn1ukE38zMs38azUKfO7hB4Zto,11300
  ragaai_catalyst/tracers/exporters/__init__.py,sha256=kVA8zp05h3phu4e-iHSlnznp_PzMRczB7LphSsZgUjg,138
  ragaai_catalyst/tracers/exporters/file_span_exporter.py,sha256=RgGteu-NVGprXKkynvyIO5yOjpbtA41R3W_NzCjnkwE,6445
  ragaai_catalyst/tracers/exporters/raga_exporter.py,sha256=rQ5Wj71f2Ke3qLlV8KiWCskbGBR-ia_hlzDx86rPrEo,18188
@@ -19,7 +22,7 @@ ragaai_catalyst/tracers/instrumentators/llamaindex.py,sha256=SMrRlR4xM7k9HK43hak
  ragaai_catalyst/tracers/instrumentators/openai.py,sha256=14R4KW9wQCR1xysLfsP_nxS7cqXrTPoD8En4MBAaZUU,379
  ragaai_catalyst/tracers/utils/__init__.py,sha256=KeMaZtYaTojilpLv65qH08QmpYclfpacDA0U3wg6Ybw,64
  ragaai_catalyst/tracers/utils/utils.py,sha256=ViygfJ7vZ7U0CTSA1lbxVloHp4NSlmfDzBRNCJuMhis,2374
- ragaai_catalyst-2.0.5.dist-info/METADATA,sha256=tWppjo0sERHjjugIOAWdwD1p05HO6T6N_E1KYd9G9hY,6625
- ragaai_catalyst-2.0.5.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
- ragaai_catalyst-2.0.5.dist-info/top_level.txt,sha256=HpgsdRgEJMk8nqrU6qdCYk3di7MJkDL0B19lkc7dLfM,16
- ragaai_catalyst-2.0.5.dist-info/RECORD,,
+ ragaai_catalyst-2.0.6b0.dist-info/METADATA,sha256=jx1IOXpDN1OeHqmHrwMucgmzHcIer7saIPSU2923JHI,8525
+ ragaai_catalyst-2.0.6b0.dist-info/WHEEL,sha256=a7TGlA-5DaHMRrarXjVbQagU3Man_dCnGIWMJr5kRWo,91
+ ragaai_catalyst-2.0.6b0.dist-info/top_level.txt,sha256=HpgsdRgEJMk8nqrU6qdCYk3di7MJkDL0B19lkc7dLfM,16
+ ragaai_catalyst-2.0.6b0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.1.0)
+ Generator: setuptools (75.4.0)
  Root-Is-Purelib: true
  Tag: py3-none-any