ragaai-catalyst 2.1.3__py3-none-any.whl → 2.1.4b0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
@@ -191,6 +191,9 @@ class Tracer(AgenticTracing):
             print("Stopping tracer and initiating trace upload...")
             self._cleanup()
             self._upload_task = self._run_async(self._upload_traces())
+            self.is_active = False
+            self.dataset_name = None
+
             return "Trace upload initiated. Use get_upload_status() to check the status."
         elif self.tracer_type == "llamaindex":
             from ragaai_catalyst.tracers.llamaindex_callback import LlamaIndexTracer
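The added lines above mean that `Tracer.stop()` now deactivates the tracer and clears its dataset binding after kicking off the trace upload. A minimal sketch of the resulting behavior, assuming a `Tracer` instance configured as in the README included below:

```python
# Behavior after this change (attribute names taken from the hunk above).
status_message = tracer.stop()   # still triggers the trace upload, as before
print(status_message)            # "Trace upload initiated. Use get_upload_status() to check the status."
print(tracer.is_active)          # False -- reset by the new line in stop()
print(tracer.dataset_name)       # None  -- reset by the new line in stop()
tracer.get_upload_status()       # poll the upload started by stop()
```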
@@ -0,0 +1,430 @@
+ Metadata-Version: 2.2
+ Name: ragaai_catalyst
+ Version: 2.1.4b0
+ Summary: RAGA AI CATALYST
+ Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>
+ Requires-Python: <3.13,>=3.9
+ Description-Content-Type: text/markdown
+ Requires-Dist: aiohttp>=3.10.2
+ Requires-Dist: opentelemetry-api==1.25.0
+ Requires-Dist: opentelemetry-sdk==1.25.0
+ Requires-Dist: opentelemetry-exporter-otlp-proto-grpc==1.25.0
+ Requires-Dist: opentelemetry-instrumentation==0.46b0
+ Requires-Dist: opentelemetry-instrumentation-fastapi==0.46b0
+ Requires-Dist: opentelemetry-instrumentation-asgi==0.46b0
+ Requires-Dist: opentelemetry-semantic-conventions==0.46b0
+ Requires-Dist: opentelemetry-util-http==0.46b0
+ Requires-Dist: opentelemetry-instrumentation-langchain~=0.24.0
+ Requires-Dist: opentelemetry-instrumentation-openai~=0.24.0
+ Requires-Dist: langchain-core>=0.2.11
+ Requires-Dist: langchain>=0.2.11
+ Requires-Dist: openai>=1.57.0
+ Requires-Dist: pandas
+ Requires-Dist: groq>=0.11.0
+ Requires-Dist: PyPDF2>=3.0.1
+ Requires-Dist: google-generativeai>=0.8.2
+ Requires-Dist: Markdown>=3.7
+ Requires-Dist: litellm==1.51.1
+ Requires-Dist: tenacity==8.3.0
+ Requires-Dist: tqdm>=4.66.5
+ Requires-Dist: llama-index<0.11.0,>=0.10.0
+ Requires-Dist: pyopenssl>=24.2.1
+ Requires-Dist: psutil~=6.0.0
+ Requires-Dist: py-cpuinfo~=9.0.0
+ Requires-Dist: requests~=2.32.3
+ Requires-Dist: GPUtil~=1.4.0
+ Requires-Dist: astor>=0.8.1
+ Provides-Extra: dev
+ Requires-Dist: pytest; extra == "dev"
+ Requires-Dist: pytest-cov; extra == "dev"
+ Requires-Dist: black; extra == "dev"
+ Requires-Dist: isort; extra == "dev"
+ Requires-Dist: mypy; extra == "dev"
+ Requires-Dist: flake8; extra == "dev"
+
+ # RagaAI Catalyst&nbsp; ![GitHub release (latest by date)](https://img.shields.io/github/v/release/raga-ai-hub/ragaai-catalyst) ![GitHub stars](https://img.shields.io/github/stars/raga-ai-hub/ragaai-catalyst?style=social) ![Issues](https://img.shields.io/github/issues/raga-ai-hub/ragaai-catalyst)
+
+ RagaAI Catalyst is a comprehensive platform designed to enhance the management and optimization of LLM projects. It offers a wide range of features, including project management, dataset management, evaluation management, trace management, prompt management, synthetic data generation, and guardrail management. These functionalities enable you to efficiently evaluate and safeguard your LLM applications.
+
+ ## Table of Contents
+
+ - [RagaAI Catalyst](#ragaai-catalyst)
+ - [Table of Contents](#table-of-contents)
+ - [Installation](#installation)
+ - [Configuration](#configuration)
+ - [Usage](#usage)
+ - [Project Management](#project-management)
+ - [Dataset Management](#dataset-management)
+ - [Evaluation Management](#evaluation)
+ - [Trace Management](#trace-management)
+ - [Prompt Management](#prompt-management)
+ - [Synthetic Data Generation](#synthetic-data-generation)
+ - [Guardrail Management](#guardrail-management)
+ - [Agentic Tracing](#agentic-tracing)
+
+ ## Installation
+
+ To install RagaAI Catalyst, use pip:
+
+ ```bash
+ pip install ragaai-catalyst
+ ```
+
+ ## Configuration
+
+ Before using RagaAI Catalyst, you need to set up your credentials. You can do this by setting environment variables or by passing them directly to the `RagaAICatalyst` class:
+
+ ```python
+ from ragaai_catalyst import RagaAICatalyst
+
+ catalyst = RagaAICatalyst(
+     access_key="YOUR_ACCESS_KEY",
+     secret_key="YOUR_SECRET_KEY",
+     base_url="BASE_URL"
+ )
+ ```
+ **Note**: Authentication with RagaAI Catalyst is required before performing any of the operations below.
+
+
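+ For the environment-variable route mentioned above, here is a minimal sketch that reads the credentials from the environment instead of hard-coding them. The variable names `RAGAAI_CATALYST_ACCESS_KEY`, `RAGAAI_CATALYST_SECRET_KEY`, and `RAGAAI_CATALYST_BASE_URL` are illustrative assumptions, not names mandated by the package:
+
+ ```python
+ import os
+
+ from ragaai_catalyst import RagaAICatalyst
+
+ # Read credentials from the environment to keep secrets out of source control.
+ # The environment variable names below are assumptions; use whatever names your deployment defines.
+ catalyst = RagaAICatalyst(
+     access_key=os.environ["RAGAAI_CATALYST_ACCESS_KEY"],
+     secret_key=os.environ["RAGAAI_CATALYST_SECRET_KEY"],
+     base_url=os.environ["RAGAAI_CATALYST_BASE_URL"],
+ )
+ ```
+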
+ ## Usage
+
+ ### Project Management
+
+ Create and manage projects using RagaAI Catalyst:
+
+ ```python
+ # Create a project
+ project = catalyst.create_project(
+     project_name="Test-RAG-App-1",
+     usecase="Chatbot"
+ )
+
+ # Get project usecases
+ catalyst.project_use_cases()
+
+ # List projects
+ projects = catalyst.list_projects()
+ print(projects)
+ ```
+
+ ### Dataset Management
+
+ Manage datasets efficiently for your projects:
+
+ ```py
+ from ragaai_catalyst import Dataset
+
+ # Initialize Dataset management for a specific project
+ dataset_manager = Dataset(project_name="project_name")
+
+ # List existing datasets
+ datasets = dataset_manager.list_datasets()
+ print("Existing Datasets:", datasets)
+
+ # Create a dataset from CSV
+ dataset_manager.create_from_csv(
+     csv_path='path/to/your.csv',
+     dataset_name='MyDataset',
+     schema_mapping={'column1': 'schema_element1', 'column2': 'schema_element2'}
+ )
+
+ # Get project schema mapping
+ dataset_manager.get_schema_mapping()
+ ```
+
+ For more detailed information on Dataset Management, including CSV schema handling and advanced usage, please refer to the [Dataset Management documentation](docs/dataset_management.md).
+
+
+ ### Evaluation
+
+ Create and manage metric evaluations of your RAG application:
+
+ ```python
+ from ragaai_catalyst import Evaluation
+
+ # Create an experiment
+ evaluation = Evaluation(
+     project_name="Test-RAG-App-1",
+     dataset_name="MyDataset",
+ )
+
+ # Get list of available metrics
+ evaluation.list_metrics()
+
+ # Add metrics to the experiment
+ schema_mapping = {
+     'Query': 'prompt',
+     'response': 'response',
+     'Context': 'context',
+     'expectedResponse': 'expected_response'
+ }
+
+ # Add a single metric
+ evaluation.add_metrics(
+     metrics=[
+         {"name": "Faithfulness", "config": {"model": "gpt-4o-mini", "provider": "openai", "threshold": {"gte": 0.232323}}, "column_name": "Faithfulness_v1", "schema_mapping": schema_mapping},
+     ]
+ )
+
+ # Add multiple metrics
+ evaluation.add_metrics(
+     metrics=[
+         {"name": "Faithfulness", "config": {"model": "gpt-4o-mini", "provider": "openai", "threshold": {"gte": 0.323}}, "column_name": "Faithfulness_gte", "schema_mapping": schema_mapping},
+         {"name": "Hallucination", "config": {"model": "gpt-4o-mini", "provider": "openai", "threshold": {"lte": 0.323}}, "column_name": "Hallucination_lte", "schema_mapping": schema_mapping},
+         {"name": "Hallucination", "config": {"model": "gpt-4o-mini", "provider": "openai", "threshold": {"eq": 0.323}}, "column_name": "Hallucination_eq", "schema_mapping": schema_mapping},
+     ]
+ )
+
+ # Get the status of the experiment
+ status = evaluation.get_status()
+ print("Experiment Status:", status)
+
+ # Get the results of the experiment
+ results = evaluation.get_results()
+ print("Experiment Results:", results)
+ ```
+
+
+ ### Trace Management
+
+ Record and analyze traces of your RAG application:
+
+ ```python
+ from ragaai_catalyst import Tracer
+
+ # Start a trace recording
+ tracer = Tracer(
+     project_name="Test-RAG-App-1",
+     dataset_name="tracer_dataset_name",
+     metadata={"key1": "value1", "key2": "value2"},
+     tracer_type="langchain",
+     pipeline={
+         "llm_model": "gpt-4o-mini",
+         "vector_store": "faiss",
+         "embed_model": "text-embedding-ada-002",
+     }
+ ).start()
+
+ # Your code here
+
+ # Stop the trace recording
+ tracer.stop()
+
+ # Get upload status
+ tracer.get_upload_status()
+ ```
+
+
+ ### Prompt Management
+
+ Manage and use prompts efficiently in your projects:
+
+ ```py
+ from ragaai_catalyst import PromptManager
+
+ # Initialize PromptManager
+ prompt_manager = PromptManager(project_name="Test-RAG-App-1")
+
+ # List available prompts
+ prompts = prompt_manager.list_prompts()
+ print("Available prompts:", prompts)
+
+ # Get the default prompt by prompt_name
+ prompt_name = "your_prompt_name"
+ prompt = prompt_manager.get_prompt(prompt_name)
+
+ # Get a specific version of a prompt by prompt_name and version
+ prompt_name = "your_prompt_name"
+ version = "v1"
+ prompt = prompt_manager.get_prompt(prompt_name, version)
+
+ # Get variables in a prompt
+ variable = prompt.get_variables()
+ print("variable:", variable)
+
+ # Get prompt content
+ prompt_content = prompt.get_prompt_content()
+ print("prompt_content:", prompt_content)
+
+ # Compile the prompt with variables
+ compiled_prompt = prompt.compile(query="What's the weather?", context="sunny", llm_response="It's sunny today")
+ print("Compiled prompt:", compiled_prompt)
+
+ # Use the compiled prompt with OpenAI
+ import openai
+ def get_openai_response(prompt):
+     client = openai.OpenAI()
+     response = client.chat.completions.create(
+         model="gpt-4o-mini",
+         messages=prompt
+     )
+     return response.choices[0].message.content
+ openai_response = get_openai_response(compiled_prompt)
+ print("openai_response:", openai_response)
+
+ # Use the compiled prompt with LiteLLM
+ import litellm
+ def get_litellm_response(prompt):
+     response = litellm.completion(
+         model="gpt-4o-mini",
+         messages=prompt
+     )
+     return response.choices[0].message.content
+ litellm_response = get_litellm_response(compiled_prompt)
+ print("litellm_response:", litellm_response)
+ ```
+
+ For more detailed information on Prompt Management, please refer to the [Prompt Management documentation](docs/prompt_management.md).
+
+
+ ### Synthetic Data Generation
+
+ ```py
+ from ragaai_catalyst import SyntheticDataGeneration
+
+ # Initialize Synthetic Data Generation
+ sdg = SyntheticDataGeneration()
+
+ # Process your file
+ text = sdg.process_document(input_data="file_path")
+
+ # Generate results
+ result = sdg.generate_qna(text, question_type='complex', model_config={"provider": "openai", "model": "openai/gpt-3.5-turbo"}, n=5)
+
+ print(result.head())
+
+ # Get supported Q&A types
+ sdg.get_supported_qna()
+
+ # Get supported providers
+ sdg.get_supported_providers()
+ ```
+
+
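+ Since `get_supported_providers()` indicates that more than one provider can back `generate_qna`, the sketch below shows the same call with a different provider entry in `model_config`. The provider and model strings here are illustrative placeholders, not guaranteed identifiers; check `get_supported_providers()` and your provider's model catalog before using them:
+
+ ```py
+ # Hypothetical example: swap the provider in model_config.
+ # "gemini" and "gemini/gemini-1.5-flash" are placeholder values for illustration only.
+ result_alt = sdg.generate_qna(
+     text,
+     question_type='complex',
+     model_config={"provider": "gemini", "model": "gemini/gemini-1.5-flash"},
+     n=5
+ )
+ print(result_alt.head())
+ ```
+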
+ ### Guardrail Management
+
+ ```py
+ from ragaai_catalyst import GuardrailsManager
+
+ # Initialize Guardrails Manager
+ gdm = GuardrailsManager(project_name="project_name")
+
+ # Get list of available guardrails
+ guardrails_list = gdm.list_guardrails()
+ print('guardrails_list:', guardrails_list)
+
+ # Get list of fail conditions for guardrails
+ fail_conditions = gdm.list_fail_condition()
+ print('fail_conditions:', fail_conditions)
+
+ # Get list of deployment IDs
+ deployment_list = gdm.list_deployment_ids()
+ print('deployment_list:', deployment_list)
+
+ # Get a specific deployment ID with guardrails information
+ deployment_id = 17
+ deployment_id_detail = gdm.get_deployment(deployment_id)
+ print('deployment_id_detail:', deployment_id_detail)
+
+ # Add guardrails to a deployment ID
+ guardrails_config = {"guardrailFailConditions": ["FAIL"],
+                      "deploymentFailCondition": "ALL_FAIL",
+                      "alternateResponse": "Your alternate response"}
+
+ guardrails = [
+     {
+         "displayName": "Response_Evaluator",
+         "name": "Response Evaluator",
+         "config": {
+             "mappings": [{
+                 "schemaName": "Text",
+                 "variableName": "Response"
+             }],
+             "params": {
+                 "isActive": {"value": False},
+                 "isHighRisk": {"value": True},
+                 "threshold": {"eq": 0},
+                 "competitors": {"value": ["Google", "Amazon"]}
+             }
+         }
+     },
+     {
+         "displayName": "Regex_Check",
+         "name": "Regex Check",
+         "config": {
+             "mappings": [{
+                 "schemaName": "Text",
+                 "variableName": "Response"
+             }],
+             "params": {
+                 "isActive": {"value": False},
+                 "isHighRisk": {"value": True},
+                 "threshold": {"lt1": 1}
+             }
+         }
+     }
+ ]
+
+ gdm.add_guardrails(deployment_id, guardrails, guardrails_config)
+
+ # Import GuardExecutor
+ from ragaai_catalyst import GuardExecutor
+
+ # Initialize GuardExecutor with the required params and evaluate
+ executor = GuardExecutor(deployment_id, gdm, field_map={'context': 'document'})
+
+ message = {'role': 'user',
+            'content': 'What is the capital of France'}
+ prompt_params = {'document': 'France'}
+
+ model_params = {'temperature': 0.7, 'model': 'gpt-4o-mini'}
+ llm_caller = 'litellm'
+
+ executor([message], prompt_params, model_params, llm_caller)
+ ```
+
+
+ ### Agentic Tracing
+
+ The Agentic Tracing module provides comprehensive monitoring and analysis capabilities for AI agent systems. It helps track various aspects of agent behavior, including:
+
+ - LLM interactions and token usage
+ - Tool utilization and execution patterns
+ - Network activities and API calls
+ - User interactions and feedback
+ - Agent decision-making processes
+
+ The module includes utilities for cost tracking, performance monitoring, and debugging agent behavior. This helps in understanding and optimizing AI agent performance while maintaining transparency in agent operations.
+
+ ```python
+ from ragaai_catalyst import AgenticTracer
+
+ # Initialize the tracer
+ tracer = AgenticTracer(
+     project_name="project_name",
+     dataset_name="dataset_name",
+     tracer_type="agentic",
+ )
+
+ # Attach tracers to your agent, LLM, and tool functions
+ # (the decorated functions below are placeholders for your own definitions)
+ @tracer.trace_agents("agent_name")
+ def my_agent(query):
+     # Agent definition
+     ...
+
+ @tracer.trace_llm("llm_name")
+ def my_llm_call(prompt):
+     # LLM definition
+     ...
+
+ @tracer.trace_tool("tool_name")
+ def my_tool(data):
+     # Tool definition
+     ...
+
+ # Perform tracing
+ with tracer:
+     # Agent execution code
+     pass
+ ```
@@ -13,7 +13,7 @@ ragaai_catalyst/synthetic_data_generation.py,sha256=uDV9tNwto2xSkWg5XHXUvjErW-4P
  ragaai_catalyst/utils.py,sha256=TlhEFwLyRU690HvANbyoRycR3nQ67lxVUQoUOfTPYQ0,3772
  ragaai_catalyst/tracers/__init__.py,sha256=yxepo7iVjTNI_wFdk3Z6Ghu64SazVyszCPEHYrX5WQk,50
  ragaai_catalyst/tracers/llamaindex_callback.py,sha256=vPE7MieKjfwLrLUnnPs20Df0xNYqoCCj-Mt2NbiuiKU,14023
- ragaai_catalyst/tracers/tracer.py,sha256=NgJrhC-nEbSYeFHYvWtzg73V1XokWVVoTs5z1n-xQUs,12716
+ ragaai_catalyst/tracers/tracer.py,sha256=MPyrmJ1T68jOkbXx3WwnrzJjGbd54C_9ZhGDfu2tfZE,12801
  ragaai_catalyst/tracers/upload_traces.py,sha256=hs0PEmit3n3_uUqrdbwcBdyK5Nbkik3JQVwJMEwYTd4,4796
  ragaai_catalyst/tracers/agentic_tracing/README.md,sha256=X4QwLb7-Jg7GQMIXj-SerZIgDETfw-7VgYlczOR8ZeQ,4508
  ragaai_catalyst/tracers/agentic_tracing/__init__.py,sha256=yf6SKvOPSpH-9LiKaoLKXwqj5sez8F_5wkOb91yp0oE,260
@@ -54,7 +54,7 @@ ragaai_catalyst/tracers/instrumentators/llamaindex.py,sha256=SMrRlR4xM7k9HK43hak
  ragaai_catalyst/tracers/instrumentators/openai.py,sha256=14R4KW9wQCR1xysLfsP_nxS7cqXrTPoD8En4MBAaZUU,379
  ragaai_catalyst/tracers/utils/__init__.py,sha256=KeMaZtYaTojilpLv65qH08QmpYclfpacDA0U3wg6Ybw,64
  ragaai_catalyst/tracers/utils/utils.py,sha256=ViygfJ7vZ7U0CTSA1lbxVloHp4NSlmfDzBRNCJuMhis,2374
- ragaai_catalyst-2.1.3.dist-info/METADATA,sha256=H6mSUPhVI2PggdxVms8ggrA_0Y0J3v3TzbRewfDj5Rw,1802
- ragaai_catalyst-2.1.3.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- ragaai_catalyst-2.1.3.dist-info/top_level.txt,sha256=HpgsdRgEJMk8nqrU6qdCYk3di7MJkDL0B19lkc7dLfM,16
- ragaai_catalyst-2.1.3.dist-info/RECORD,,
+ ragaai_catalyst-2.1.4b0.dist-info/METADATA,sha256=E3Uqt-UmOtCN4VKHgjt_1LA57p9woqutuWce1NDxXaU,12745
+ ragaai_catalyst-2.1.4b0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ ragaai_catalyst-2.1.4b0.dist-info/top_level.txt,sha256=HpgsdRgEJMk8nqrU6qdCYk3di7MJkDL0B19lkc7dLfM,16
+ ragaai_catalyst-2.1.4b0.dist-info/RECORD,,
@@ -1,43 +0,0 @@
- Metadata-Version: 2.2
- Name: ragaai_catalyst
- Version: 2.1.3
- Summary: RAGA AI CATALYST
- Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>
- Requires-Python: <3.13,>=3.9
- Description-Content-Type: text/markdown
- Requires-Dist: aiohttp>=3.10.2
- Requires-Dist: opentelemetry-api==1.25.0
- Requires-Dist: opentelemetry-sdk==1.25.0
- Requires-Dist: opentelemetry-exporter-otlp-proto-grpc==1.25.0
- Requires-Dist: opentelemetry-instrumentation==0.46b0
- Requires-Dist: opentelemetry-instrumentation-fastapi==0.46b0
- Requires-Dist: opentelemetry-instrumentation-asgi==0.46b0
- Requires-Dist: opentelemetry-semantic-conventions==0.46b0
- Requires-Dist: opentelemetry-util-http==0.46b0
- Requires-Dist: opentelemetry-instrumentation-langchain~=0.24.0
- Requires-Dist: opentelemetry-instrumentation-openai~=0.24.0
- Requires-Dist: langchain-core>=0.2.11
- Requires-Dist: langchain>=0.2.11
- Requires-Dist: openai>=1.57.0
- Requires-Dist: pandas
- Requires-Dist: groq>=0.11.0
- Requires-Dist: PyPDF2>=3.0.1
- Requires-Dist: google-generativeai>=0.8.2
- Requires-Dist: Markdown>=3.7
- Requires-Dist: litellm==1.51.1
- Requires-Dist: tenacity==8.3.0
- Requires-Dist: tqdm>=4.66.5
- Requires-Dist: llama-index<0.11.0,>=0.10.0
- Requires-Dist: pyopenssl>=24.2.1
- Requires-Dist: psutil~=6.0.0
- Requires-Dist: py-cpuinfo~=9.0.0
- Requires-Dist: requests~=2.32.3
- Requires-Dist: GPUtil~=1.4.0
- Requires-Dist: astor>=0.8.1
- Provides-Extra: dev
- Requires-Dist: pytest; extra == "dev"
- Requires-Dist: pytest-cov; extra == "dev"
- Requires-Dist: black; extra == "dev"
- Requires-Dist: isort; extra == "dev"
- Requires-Dist: mypy; extra == "dev"
- Requires-Dist: flake8; extra == "dev"