ragaai-catalyst 2.1.5b41__py3-none-any.whl → 2.1.5.1b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/ragaai_catalyst/guard_executor.py
+++ b/ragaai_catalyst/guard_executor.py
@@ -2,27 +2,47 @@ import litellm
  import json
  import requests
  import os
+ from google import genai
+ from google.genai.types import GenerateContentConfig
+ from typing import Optional, List, Dict, Any
  import logging
  logger = logging.getLogger('LiteLLM')
  logger.setLevel(logging.ERROR)

  class GuardExecutor:

-     def __init__(self, id, guard_manager, field_map={}):
-         self.deployment_id = id
+     def __init__(self, guard_manager, input_deployment_id=None, output_deployment_id=None, field_map={}):
          self.field_map = field_map
          self.guard_manager = guard_manager
-         self.deployment_details = self.guard_manager.get_deployment(id)
-         if not self.deployment_details:
-             raise ValueError('Error in getting deployment details')
+         try:
+             if input_deployment_id:
+                 self.input_deployment_id = input_deployment_id
+                 self.input_deployment_details = self.guard_manager.get_deployment(input_deployment_id)
+             if output_deployment_id:
+                 self.output_deployment_id = output_deployment_id
+                 self.output_deployment_details = self.guard_manager.get_deployment(output_deployment_id)
+             if input_deployment_id and output_deployment_id:
+                 # check that both deployments are mapped to the same dataset
+                 if self.input_deployment_details['data']['datasetId'] != self.output_deployment_details['data']['datasetId']:
+                     raise ValueError('Input deployment and output deployment should be mapped to same dataset')
+             if input_deployment_id:
+                 for guardrail in self.input_deployment_details['data']['guardrailsResponse']:
+                     maps = guardrail['metricSpec']['config']['mappings']
+                     for _map in maps:
+                         if _map['schemaName'] == 'Response':
+                             raise ValueError('Response field should be mapped only in output guardrails')
+         except Exception as e:
+             raise ValueError(str(e))
          self.base_url = guard_manager.base_url
          for key in field_map.keys():
              if key not in ['prompt', 'context', 'response', 'instruction']:
                  print('Keys in field map should be in ["prompt","context","response","instruction"]')
+         self.current_trace_id = None
+         self.id_2_doc = {}

-     def execute_deployment(self, payload):
-         api = self.base_url + f'/guardrail/deployment/{self.deployment_id}/ingest'
-
+     def execute_deployment(self, deployment_id, payload):
+         api = self.base_url + f'/guardrail/deployment/{deployment_id}/ingest'
+         if self.current_trace_id:
+             payload['traceId'] = self.current_trace_id
          payload = json.dumps(payload)
          headers = {
              'x-project-id': str(self.guard_manager.project_id),
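
Note: GuardExecutor now takes separate input and output deployment ids instead of a single deployment id, and validates that both map to the same dataset. A minimal construction sketch under the new signature; the manager setup and the ids below are illustrative assumptions, not values from this diff:

    from ragaai_catalyst import GuardrailsManager, GuardExecutor  # import path assumed

    guard_manager = GuardrailsManager(project_name='my_project')  # hypothetical constructor args
    executor = GuardExecutor(
        guard_manager,
        input_deployment_id=101,    # hypothetical; must share a dataset with the output deployment
        output_deployment_id=102,   # hypothetical
        field_map={'context': 'docs'},
    )

The constructor raises ValueError if the two deployments point at different datasets, or if any input guardrail maps the Response schema.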
@@ -42,17 +62,58 @@ class GuardExecutor:
          print(response.json()['message'])
          return None

-     def llm_executor(self, messages, model_params, llm_caller):
+     def llm_executor(self, prompt, model_params, llm_caller):
+         messages = [{
+             'role': 'user',
+             'content': prompt
+         }]
+         if self.current_trace_id:
+             doc = self.id_2_doc[self.current_trace_id]
+             messages[0]['content'] = messages[0]['content'] + '\n' + doc.get('context', '')
          if llm_caller == 'litellm':
              model_params['messages'] = messages
              response = litellm.completion(**model_params)
-             return response
+             return response['choices'][0].message.content
+         elif llm_caller == 'genai':
+             genai_client = genai.Client(api_key=os.getenv('GENAI_API_KEY'))
+             # google-genai generates from `contents`; the chat message built above is flattened back into text
+             response = genai_client.models.generate_content(model=model_params.get('model'), contents=messages[0]['content'])
+             return response.text
          else:
              print(f"{llm_caller} not supported currently, use litellm as llm caller")
+         '''
+         elif llm_caller == 'anthropic':
+             response = anthropic.completion(prompt=messages, **model_params)
+             return response['completion']
+         elif llm_caller == 'langchain':
+             response = langchain.completion(prompt=messages, **model_params)
+             return response['choices'][0].text
+         elif llm_caller == 'azure_openai':
+             response = azure_openai.completion(prompt=messages, **model_params)
+             return response['choices'][0].text
+         elif llm_caller == 'aws_bedrock':
+             response = aws_bedrock.completion(prompt=messages, **model_params)
+             return response['choices'][0].text
+         elif llm_caller == 'meta':
+             response = meta.completion(prompt=messages, **model_params)
+             return response['choices'][0].text
+         elif llm_caller == 'llamaindex':
+             response = llamaindex.completion(prompt=messages, **model_params)
+             return response['choices'][0].text'''
+
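
Note: llm_executor now builds the chat message list from a plain prompt and returns response text rather than the raw completion object. For the new `genai` branch, the google-genai SDK generates from a `contents` string via `generate_content`; a standalone sketch of that call, where the model name is an illustrative assumption:

    import os
    from google import genai
    from google.genai.types import GenerateContentConfig

    client = genai.Client(api_key=os.getenv('GENAI_API_KEY'))
    response = client.models.generate_content(
        model='gemini-2.0-flash',                        # hypothetical model name
        contents='user prompt' + '\n' + 'retrieved context',  # flattened prompt, as llm_executor builds it
        config=GenerateContentConfig(temperature=0.0),   # optional generation settings
    )
    print(response.text)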
+     def set_input_params(self, prompt: Optional[str] = None, context: Optional[str] = None, instruction: Optional[str] = None, **kwargs):
+         if 'latest' not in self.id_2_doc:
+             self.id_2_doc['latest'] = {}
+         if prompt:
+             self.id_2_doc['latest']['prompt'] = prompt
+         if context:
+             self.id_2_doc['latest']['context'] = context
+         if instruction:
+             self.id_2_doc['latest']['instruction'] = instruction


-     def __call__(self, messages, prompt_params, model_params, llm_caller='litellm'):
-         for key in self.field_map:
+     def __call__(self, prompt, prompt_params, model_params, llm_caller='litellm'):
+         '''for key in self.field_map:
              if key not in ['prompt','response']:
                  if self.field_map[key] not in prompt_params:
                      raise ValueError(f'{key} added as field map but not passed as prompt parameter')
@@ -66,32 +127,145 @@ class GuardExecutor:
                  msg['content'] += '\n' + prompt_params[context_var]
          doc = dict()
          doc['prompt'] = prompt
-         doc['context'] = prompt_params[context_var]
+         doc['context'] = prompt_params[context_var]'''

-         # inactive the guardrails that needs Response variable
-         #deployment_response = self.execute_deployment(doc)
+         # Run the input guardrails
+         alternate_response, input_deployment_response = self.execute_input_guardrails(prompt, prompt_params)
+         if input_deployment_response and input_deployment_response['data']['status'].lower() == 'fail':
+             return alternate_response, None, input_deployment_response

          # activate only guardrails that require response
          try:
-             llm_response = self.llm_executor(messages, model_params, llm_caller)
+             llm_response = self.llm_executor(prompt, model_params, llm_caller)
          except Exception as e:
              print('Error in running llm:', str(e))
-             return None
-         doc['response'] = llm_response['choices'][0].message.content
+             return None, None, input_deployment_response
          if 'instruction' in self.field_map:
              instruction = prompt_params[self.field_map['instruction']]
-             doc['instruction'] = instruction
-         response = self.execute_deployment(doc)
-         if response and response['data']['status'] == 'FAIL':
-             print('Guardrail deployment run retured failed status, replacing with alternate response')
-             return response['data']['alternateResponse'],llm_response,response
+         alternate_op_response, output_deployment_response = self.execute_output_guardrails(llm_response)
+         if output_deployment_response and output_deployment_response['data']['status'].lower() == 'fail':
+             return alternate_op_response, llm_response, output_deployment_response
          else:
-             return None,llm_response,response
+             return None, llm_response, output_deployment_response
+
+     def set_variables(self, prompt, prompt_params):
+         for key in self.field_map:
+             if key not in ['prompt', 'response']:
+                 if self.field_map[key] not in prompt_params:
+                     raise ValueError(f'{key} added as field map but not passed as prompt parameter')
+         context_var = self.field_map.get('context', None)
+
+         doc = dict()
+         doc['prompt'] = prompt
+         doc['context'] = prompt_params[context_var]
+         if 'instruction' in self.field_map:
+             instruction = prompt_params[self.field_map['instruction']]
+             doc['instruction'] = instruction
+         return doc

+     def execute_input_guardrails(self, prompt, prompt_params):
+         doc = self.set_variables(prompt, prompt_params)
+         deployment_response = self.execute_deployment(self.input_deployment_id, doc)
+         self.current_trace_id = deployment_response['data']['results'][0]['executionId']
+         self.id_2_doc[self.current_trace_id] = doc
+         if deployment_response and deployment_response['data']['status'].lower() == 'fail':
+             return deployment_response['data']['alternateResponse'], deployment_response
+         elif deployment_response:
+             return None, deployment_response

+     def execute_output_guardrails(self, llm_response: str, prompt=None, prompt_params=None):
+         if not prompt:  # user has not passed input
+             if self.current_trace_id not in self.id_2_doc:
+                 raise Exception(f'No input doc found for trace_id: {self.current_trace_id}')
+             else:
+                 doc = self.id_2_doc[self.current_trace_id]
+                 doc['response'] = llm_response
+         else:
+             doc = self.set_variables(prompt, prompt_params)
+         deployment_response = self.execute_deployment(self.output_deployment_id, doc)
+         del self.id_2_doc[self.current_trace_id]
+         self.current_trace_id = None
+         if deployment_response and deployment_response['data']['status'].lower() == 'fail':
+             return deployment_response['data']['alternateResponse'], deployment_response
+         elif deployment_response:
+             return None, deployment_response


-
+         '''
+         # doc = dict()
+         # doc['response'] = llm_response
+         # if trace_id:
+         #     doc['trace_id'] = trace_id
+         trace_id = self.current_trace_id
+         if not trace_id:
+             for key in self.field_map:
+                 if key not in ['prompt', 'response']:
+                     if not prompt_params or self.field_map[key] not in prompt_params:
+                         if key not in self.id_2_doc.get('latest', {}):
+                             raise ValueError(f'{key} added as field map but not passed as prompt parameter or set in executor')
+                 elif key == 'prompt':
+                     if not messages:
+                         if key not in self.id_2_doc.get('latest', {}):
+                             raise ValueError('messages should be provided when prompt is used as field or prompt should be set in executor')
+             # raise Exception(f'\'doc_id\' not provided and there is no doc_id currently available. Either run \'execute_input_guardrails\' or pass a valid \'doc_id\'')
+             #deployment_details = self.guard_manager.get_deployment(self.output_deployment_id)
+             #deployed_guardrails = deployment_details['data']['guardrailsResponse']

+             for guardrail in deployed_guardrails:
+                 metric_spec_mappings = guardrail['metricSpec']['config']['mappings']
+                 var_names = [mapping['variableName'].lower() for mapping in metric_spec_mappings]
+                 for var_name in var_names:
+                     if var_name not in ['prompt', 'response']:
+                         if var_name not in self.field_map:
+                             raise ValueError(f'{var_name} required for {guardrail} guardrail in deployment {self.deployment_id} but not added as field map')
+                         if not prompt_params or (self.field_map[var_name] not in prompt_params):
+                             if var_name not in self.id_2_doc.get('latest', {}):
+                                 raise ValueError(f'{var_name} added as field map but not passed as prompt parameter')
+                     elif var_name == 'prompt':
+                         if not messages:
+                             if var_name not in self.id_2_doc.get('latest', {}):
+                                 raise ValueError('messages must be provided if doc_id is not provided')
+
+             prompt = None
+             if messages:
+                 for msg in messages:
+                     if 'role' in msg:
+                         if msg['role'] == 'user':
+                             prompt = msg['content']
+             else:
+                 prompt = self.id_2_doc['latest']['prompt']
+             context_var = self.field_map.get('context', None)
+             doc = dict()
+             doc['prompt'] = prompt
+             if context_var and prompt_params and context_var in prompt_params:
+                 doc['context'] = prompt_params[self.field_map[context_var]]
+             elif context_var:
+                 doc['context'] = self.id_2_doc['latest']['context']
+             elif 'latest' in self.id_2_doc and 'context' in self.id_2_doc['latest'] and self.id_2_doc['latest']['context']:
+                 doc['context'] = self.id_2_doc['latest']['context']
+             else:
+                 doc['context'] = ''
+             if 'instruction' in self.field_map:
+                 if prompt_params and 'instruction' in prompt_params:
+                     instruction = prompt_params[self.field_map['instruction']]
+                 elif 'latest' in self.id_2_doc and 'instruction' in self.id_2_doc['latest'] and self.id_2_doc['latest']['instruction']:
+                     instruction = self.id_2_doc['latest']['instruction']
+                 else:
+                     raise ValueError('instruction added as field map but not passed as prompt parameter or set in executor')
+                 doc['instruction'] = instruction
+         elif trace_id not in self.id_2_doc:
+             raise Exception(f'trace_id {trace_id} is not valid. Please run \'execute_input_guardrails\' first')
+         else:
+             doc = self.id_2_doc[trace_id]
+         doc['response'] = llm_response
+         response = self.execute_deployment(doc)
+         if response and response['data']['status'] == 'FAIL':
+             print('Guardrail deployment run returned failed status, replacing with alternate response')
+             return response['data']['alternateResponse'], llm_response, response
+         else:
+             self.current_trace_id = None
+             return None, llm_response, response
+         '''

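Note: the __call__ contract changed from returning a single value to a 3-tuple of (alternate_response, llm_response, deployment_response), and a failing input guardrail now short-circuits before the LLM is ever called. An end-to-end usage sketch, assuming the executor constructed earlier and a litellm-compatible model id:

    # prompt_params keys must match the values declared in field_map
    alternate, llm_response, guard_response = executor(
        'What is the capital of France?',
        {'docs': 'France is a country in Western Europe.'},
        {'model': 'gpt-4o-mini', 'temperature': 0},  # hypothetical litellm model params
        llm_caller='litellm',
    )
    if alternate is not None:
        print(alternate)      # a guardrail failed; the alternate response replaces the LLM output
    else:
        print(llm_response)
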
--- a/ragaai_catalyst/guardrails_manager.py
+++ b/ragaai_catalyst/guardrails_manager.py
@@ -1,6 +1,9 @@
  import requests
  import json
  import os
+ import logging
+ logger = logging.getLogger(__name__)
+ from .utils import response_checker
  from .ragaai_catalyst import RagaAICatalyst

@@ -107,20 +110,82 @@ class GuardrailsManager:
          return response.json()["data"]


-     def create_deployment(self, deployment_name):
+     def list_datasets(self):
+         """
+         Retrieves a list of datasets for a given project.
+
+         Returns:
+             list: A list of dataset names.
+
+         Raises:
+             Exception: If the request fails.
+         """
+
+         def make_request():
+             headers = {
+                 'Content-Type': 'application/json',
+                 "Authorization": f"Bearer {os.getenv('RAGAAI_CATALYST_TOKEN')}",
+                 "X-Project-Id": str(self.project_id),
+             }
+             json_data = {"size": 12, "page": "0", "projectId": str(self.project_id), "search": ""}
+             try:
+                 response = requests.post(
+                     f"{self.base_url}/v2/llm/dataset",
+                     headers=headers,
+                     json=json_data,
+                     timeout=30,
+                 )
+                 response.raise_for_status()
+                 return response
+             except requests.exceptions.RequestException as e:
+                 logger.error(f"Failed to list datasets: {e}")
+                 raise
+
+         try:
+             response = make_request()
+             response_checker(response, "Dataset.list_datasets")
+             if response.status_code == 401:
+                 response = make_request()  # Retry the request
+             if response.status_code != 200:
+                 return {
+                     "status_code": response.status_code,
+                     "message": response.json(),
+                 }
+             datasets = response.json()["data"]["content"]
+             dataset_list = [dataset["name"] for dataset in datasets]
+             return dataset_list
+         except Exception as e:
+             logger.error(f"Error in list_datasets: {e}")
+             raise
+
+
+     def create_deployment(self, deployment_name, deployment_dataset_name):
          """
          Create a new deployment ID with the given name.

          :param deployment_name: The name of the new deployment.
+         :param deployment_dataset_name: The name of the tracking dataset.
          :raises ValueError: If a deployment with the given name already exists.
          """
          self.deployment_name = deployment_name
+         self.deployment_dataset_name = deployment_dataset_name
          list_deployment_ids = self.list_deployment_ids()
          list_deployment_names = [_["name"] for _ in list_deployment_ids]
          if deployment_name in list_deployment_names:
              raise ValueError(f"Deployment with '{deployment_name}' already exists, choose a unique name")

-         payload = json.dumps({"name": str(deployment_name)})
+         # Check whether the tracking dataset already exists in the project
+         list_datasets = self.list_datasets()
+         is_new_dataset = deployment_dataset_name not in list_datasets
+
+         payload = json.dumps({
+             "name": str(deployment_name),
+             "trackingDataset": {
+                 "addNew": is_new_dataset,
+                 "name": str(deployment_dataset_name)
+             }
+         })
          headers = {
              'Authorization': f'Bearer {os.getenv("RAGAAI_CATALYST_TOKEN")}',
              'Content-Type': 'application/json',
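
Note: create_deployment now requires a tracking dataset name and sets `addNew` based on whether that dataset already exists in the project. A usage sketch; both names below are placeholders:

    guard_manager.create_deployment(
        deployment_name='input_guards_v1',           # must be unique across deployments
        deployment_dataset_name='guardrail_traces',  # reused if it exists, created otherwise
    )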
--- a/ragaai_catalyst/tracers/langchain_callback.py
+++ b/ragaai_catalyst/tracers/langchain_callback.py
@@ -337,9 +337,15 @@ class LangchainTracer(BaseCallbackHandler):

          try:
              from langchain.chains import create_retrieval_chain, RetrievalQA
+             from langchain_core.runnables import RunnableBinding
+             from langchain_core.runnables import RunnableSequence
+             from langchain.chains import ConversationalRetrievalChain
              components_to_patch["RetrievalQA"] = (RetrievalQA, "from_chain_type")
              components_to_patch["create_retrieval_chain"] = (create_retrieval_chain, None)
              components_to_patch['RetrievalQA.invoke'] = (RetrievalQA, 'invoke')
+             components_to_patch["RunnableBinding"] = (RunnableBinding, "invoke")
+             components_to_patch["RunnableSequence"] = (RunnableSequence, "invoke")
+             components_to_patch["ConversationalRetrievalChain"] = (ConversationalRetrievalChain, "invoke")
          except ImportError:
              logger.debug("Langchain chains not available for patching")

@@ -407,10 +413,16 @@ class LangchainTracer(BaseCallbackHandler):
              elif name == "ChatOpenAI_ChatModels":
                  from langchain.chat_models import ChatOpenAI as ChatOpenAI_ChatModels
                  imported_components[name] = ChatOpenAI_ChatModels
-             elif name in ["RetrievalQA", "create_retrieval_chain", 'RetrievalQA.invoke']:
+             elif name in ["RetrievalQA", "create_retrieval_chain", 'RetrievalQA.invoke', "RunnableBinding", "RunnableSequence", "ConversationalRetrievalChain"]:
                  from langchain.chains import create_retrieval_chain, RetrievalQA
+                 from langchain_core.runnables import RunnableBinding
+                 from langchain_core.runnables import RunnableSequence
+                 from langchain.chains import ConversationalRetrievalChain
                  imported_components["RetrievalQA"] = RetrievalQA
                  imported_components["create_retrieval_chain"] = create_retrieval_chain
+                 imported_components["RunnableBinding"] = RunnableBinding
+                 imported_components["RunnableSequence"] = RunnableSequence
+                 imported_components["ConversationalRetrievalChain"] = ConversationalRetrievalChain
          except ImportError:
              logger.debug(f"{name} not available for restoration")

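Note: the tracer extends its patch list to the `invoke` methods of RunnableBinding, RunnableSequence, and ConversationalRetrievalChain, and restores the originals on teardown. The wrap-and-restore pattern this relies on, reduced to a simplified sketch (not the tracer's actual implementation):

    from langchain_core.runnables import RunnableSequence

    _original_invoke = RunnableSequence.invoke

    def traced_invoke(self, *args, **kwargs):
        # record inputs/outputs here, then delegate to the original method
        return _original_invoke(self, *args, **kwargs)

    RunnableSequence.invoke = traced_invoke      # patch while tracing
    # ... run the chain ...
    RunnableSequence.invoke = _original_invoke   # restore on teardown
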
--- a/ragaai_catalyst/tracers/tracer.py
+++ b/ragaai_catalyst/tracers/tracer.py
@@ -414,7 +414,7 @@ class Tracer(AgenticTracing):
              with open(filepath_3, 'w') as f:
                  json.dump(final_result, f, indent=2)

-             print(filepath_3)
+             # print(filepath_3)
          else:
              logger.warning("No valid langchain traces found in final_result")

--- a/ragaai_catalyst-2.1.5b41.dist-info/METADATA
+++ b/ragaai_catalyst-2.1.5.1b2.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: ragaai_catalyst
- Version: 2.1.5b41
+ Version: 2.1.5.1b2
  Summary: RAGA AI CATALYST
  Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>, Tushar Kumar <tushar.kumar@raga.ai>
  Requires-Python: <3.13,>=3.9
@@ -24,6 +24,7 @@ Requires-Dist: pandas
  Requires-Dist: groq>=0.11.0
  Requires-Dist: PyPDF2>=3.0.1
  Requires-Dist: google-generativeai>=0.8.2
+ Requires-Dist: google-genai>=1.3.0
  Requires-Dist: Markdown>=3.7
  Requires-Dist: litellm==1.51.1
  Requires-Dist: tenacity==8.3.0
@@ -56,8 +57,6 @@ RagaAI Catalyst is a comprehensive platform designed to enhance the management a

  ![RagaAI Catalyst](docs/img/main.png)

- ![RagaAI Catalyst](docs/img/main.png)
-
  ## Table of Contents

  - [RagaAI Catalyst](#ragaai-catalyst)
--- a/ragaai_catalyst-2.1.5b41.dist-info/RECORD
+++ b/ragaai_catalyst-2.1.5.1b2.dist-info/RECORD
@@ -3,8 +3,8 @@ ragaai_catalyst/_version.py,sha256=JKt9KaVNOMVeGs8ojO6LvIZr7ZkMzNN-gCcvryy4x8E,4
  ragaai_catalyst/dataset.py,sha256=YCj8Ovu6y38KEw-1HCe4xQWkmYPgfNTtMa8Q0g6B62o,29401
  ragaai_catalyst/evaluation.py,sha256=O96CydYVPh3duUmXjY6REIXMOR-tOPixSG-Qhrf636A,22955
  ragaai_catalyst/experiment.py,sha256=8yQo1phCHlpnJ-4CqCaIbLXg_1ZlAuLGI9kqGBl-OTE,18859
- ragaai_catalyst/guard_executor.py,sha256=llPbE3DyVtrybojXknzBZj8-dtUrGBQwi9-ZiPJxGRo,3762
- ragaai_catalyst/guardrails_manager.py,sha256=DILMOAASK57FH9BLq_8yC1AQzRJ8McMFLwCXgYwNAd4,11904
+ ragaai_catalyst/guard_executor.py,sha256=rSdgf_3PB1Eaeoxz0cz6jbAoGvHXAeEfmM1jFUwL8cI,13970
+ ragaai_catalyst/guardrails_manager.py,sha256=_VrARJ1udmCF8TklNKy7XTQUaM8ATDhTOAGDonBkFro,14245
  ragaai_catalyst/internal_api_completion.py,sha256=DdICI5yfEudiOAIC8L4oxH0Qz7kX-BZCdo9IWsi2gNo,2965
  ragaai_catalyst/prompt_manager.py,sha256=W8ypramzOprrJ7-22d5vkBXIuIQ8v9XAzKDGxKsTK28,16550
  ragaai_catalyst/proxy_call.py,sha256=CHxldeceZUaLU-to_hs_Kf1z_b2vHMssLS_cOBedu78,5499
@@ -28,10 +28,10 @@ ragaai_catalyst/redteaming/utils/issue_description.py,sha256=iB0XbeOjdqHTPrikCKS
  ragaai_catalyst/redteaming/utils/rt.png,sha256=HzVC8bz_4UgwafKXuMe8RJVI6CyK_UmSgo53ceAOQK8,282154
  ragaai_catalyst/tracers/__init__.py,sha256=LfgTes-nHpazssbGKnn8kyLZNr49kIPrlkrqqoTFTfc,301
  ragaai_catalyst/tracers/distributed.py,sha256=MwlBwIxCAng-OI-7Ove_rkE1mTLeuW4Jw-wWEVJBNlI,9968
- ragaai_catalyst/tracers/langchain_callback.py,sha256=KooENtkX0Hp0S_d_1WI3iH3qNVt-ZcnwOKVlydv4dUk,33518
+ ragaai_catalyst/tracers/langchain_callback.py,sha256=CB75zzG3-DkYTELj0vI1MOHQTY0MuQJfoHIXz9Cl8S8,34568
  ragaai_catalyst/tracers/llamaindex_callback.py,sha256=ZY0BJrrlz-P9Mg2dX-ZkVKG3gSvzwqBtk7JL_05MiYA,14028
  ragaai_catalyst/tracers/llamaindex_instrumentation.py,sha256=Ys_jLkvVqo12bKgXDmkp4TxJu9HkBATrFE8cIcTYxWw,14329
- ragaai_catalyst/tracers/tracer.py,sha256=ZA57OqwDZblU9iPR4Lj5t7gEeqLUmOi_Wa10NxMGQsc,27825
+ ragaai_catalyst/tracers/tracer.py,sha256=oaag7-VdUufR5LygnKcUgjTvlAEcxToVxNYkQCWEhkg,27827
  ragaai_catalyst/tracers/upload_traces.py,sha256=OKsc-Obf8bJvKBprt3dqj8GQQNkoX3kT_t8TBDi9YDQ,5670
  ragaai_catalyst/tracers/agentic_tracing/README.md,sha256=X4QwLb7-Jg7GQMIXj-SerZIgDETfw-7VgYlczOR8ZeQ,4508
  ragaai_catalyst/tracers/agentic_tracing/__init__.py,sha256=yf6SKvOPSpH-9LiKaoLKXwqj5sez8F_5wkOb91yp0oE,260
@@ -90,8 +90,8 @@ ragaai_catalyst/tracers/utils/langchain_tracer_extraction_logic.py,sha256=XS2_x2
  ragaai_catalyst/tracers/utils/model_prices_and_context_window_backup.json,sha256=C3uwkibJ08C9sOX-54kulZYmJlIpZ-SQpfE6HNGrjbM,343502
  ragaai_catalyst/tracers/utils/trace_json_converter.py,sha256=qXSYKr4JMUpGQsB3mnr9_2qH6FqzUhCynNqlDp1IWTs,12440
  ragaai_catalyst/tracers/utils/utils.py,sha256=ViygfJ7vZ7U0CTSA1lbxVloHp4NSlmfDzBRNCJuMhis,2374
- ragaai_catalyst-2.1.5b41.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- ragaai_catalyst-2.1.5b41.dist-info/METADATA,sha256=CrlkR9TD7BsrlN3EbTlxlCqOKuk2GFWLMGJV3hqkHJQ,22060
- ragaai_catalyst-2.1.5b41.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
- ragaai_catalyst-2.1.5b41.dist-info/top_level.txt,sha256=HpgsdRgEJMk8nqrU6qdCYk3di7MJkDL0B19lkc7dLfM,16
- ragaai_catalyst-2.1.5b41.dist-info/RECORD,,
+ ragaai_catalyst-2.1.5.1b2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ ragaai_catalyst-2.1.5.1b2.dist-info/METADATA,sha256=Nv0jgHG5lZLvef0tdbH7msv7Wb2nkPVEH1GgK_JH-xQ,22057
+ ragaai_catalyst-2.1.5.1b2.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
+ ragaai_catalyst-2.1.5.1b2.dist-info/top_level.txt,sha256=HpgsdRgEJMk8nqrU6qdCYk3di7MJkDL0B19lkc7dLfM,16
+ ragaai_catalyst-2.1.5.1b2.dist-info/RECORD,,