ragaai-catalyst 2.1.5b7__py3-none-any.whl → 2.1.5b8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py
+++ b/ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py
@@ -150,8 +150,6 @@ class LLMTracerMixin:
         beta_module = openai_module.beta

         # Patch openai.beta.threads
-        import openai
-        openai.api_type = "openai"
         if hasattr(beta_module, "threads"):
             threads_obj = beta_module.threads
             # Patch top-level methods on openai.beta.threads
--- a/ragaai_catalyst/tracers/langchain_callback.py
+++ b/ragaai_catalyst/tracers/langchain_callback.py
@@ -48,8 +48,9 @@ class LangchainTracer(BaseCallbackHandler):
         self._original_methods = {}
         self.additional_metadata = {}
         self._save_task = None
-        self._current_query = None  # Add this line to track the current query
+        self._current_query = None
         self.filepath = None
+        self.model_names = {}  # Store model names by component instance
         logger.setLevel(log_level)

         if not os.path.exists(output_path):
@@ -86,7 +87,7 @@ class LangchainTracer(BaseCallbackHandler):
             "retriever_actions": [],
             "tokens": [],
             "errors": [],
-            "query": self._current_query,  # Add this line to include the query in the trace
+            "query": self._current_query,
             "metadata": {
                 "version": "2.0",
                 "trace_all": self.trace_all,
@@ -172,6 +173,14 @@ class LangchainTracer(BaseCallbackHandler):
             kwargs_copy['callbacks'] = [self]
         elif self not in kwargs_copy['callbacks']:
             kwargs_copy['callbacks'].append(self)
+
+        # Store model name if available
+        if component_name in ["OpenAI", "ChatOpenAI_LangchainOpenAI", "ChatOpenAI_ChatModels",
+                              "ChatVertexAI", "ChatGoogleGenerativeAI", "ChatAnthropic", "ChatLiteLLM"]:
+            instance = args[0] if args else None
+            model_name = kwargs.get('model_name') or kwargs.get('model')
+            if instance and model_name:
+                self.model_names[id(instance)] = model_name

         # Try different method signatures
         try:
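The hunk above is the capture side of the new model-name bookkeeping: the patched `__init__` wrapper records the model keyed by `id(instance)`, so `on_llm_end` can recover it later even when the provider omits it from `llm_output`. A minimal sketch of the pattern, using a hypothetical `FakeChatModel` rather than a real LangChain class:

    # Sketch: record a model name per component instance from inside a patched __init__.
    class TracerSketch:
        def __init__(self):
            self.model_names = {}  # id(instance) -> model name

        def wrap_init(self, original_init):
            def patched_init(instance, *args, **kwargs):
                model_name = kwargs.get("model_name") or kwargs.get("model")
                if model_name:
                    self.model_names[id(instance)] = model_name
                return original_init(instance, *args, **kwargs)
            return patched_init

    class FakeChatModel:  # stand-in for ChatOpenAI, ChatAnthropic, ...
        def __init__(self, model=None):
            self.model = model

    tracer = TracerSketch()
    FakeChatModel.__init__ = tracer.wrap_init(FakeChatModel.__init__)
    llm = FakeChatModel(model="gpt-4o-mini")
    assert tracer.model_names[id(llm)] == "gpt-4o-mini"

One caveat of keying by `id()`: the entry is only meaningful while the instance is alive, since CPython can reuse the address of a garbage-collected object.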
@@ -201,28 +210,56 @@ class LangchainTracer(BaseCallbackHandler):

     def _monkey_patch(self):
         """Enhanced monkey-patching with comprehensive component support"""
-        from langchain.llms import OpenAI
-        # from langchain_groq import ChatGroq
-        # from langchain_google_genai import ChatGoogleGenerativeAI
-        # from langchain_anthropic import ChatAnthropic
-        from langchain_community.chat_models import ChatLiteLLM
-        # from langchain_cohere import ChatCohere
-        from langchain_openai import ChatOpenAI as ChatOpenAI_LangchainOpenAI
-        from langchain.chat_models import ChatOpenAI as ChatOpenAI_ChatModels
-        from langchain.chains import create_retrieval_chain, RetrievalQA
-
-        components_to_patch = {
-            "OpenAI": (OpenAI, "__init__"),
-            # "ChatGroq": (ChatGroq, "__init__"),
-            # "ChatGoogleGenerativeAI": (ChatGoogleGenerativeAI, "__init__"),
-            # "ChatAnthropic": (ChatAnthropic, "__init__"),
-            "ChatLiteLLM": (ChatLiteLLM, "__init__"),
-            # "ChatCohere": (ChatCohere, "__init__"),
-            "ChatOpenAI_LangchainOpenAI": (ChatOpenAI_LangchainOpenAI, "__init__"),
-            "ChatOpenAI_ChatModels": (ChatOpenAI_ChatModels, "__init__"),
-            "RetrievalQA": (RetrievalQA, "from_chain_type"),
-            "create_retrieval_chain": (create_retrieval_chain, None),
-        }
+        components_to_patch = {}
+
+        try:
+            from langchain.llms import OpenAI
+            components_to_patch["OpenAI"] = (OpenAI, "__init__")
+        except ImportError:
+            logger.debug("OpenAI not available for patching")
+
+        try:
+            from langchain_google_vertexai import ChatVertexAI
+            components_to_patch["ChatVertexAI"] = (ChatVertexAI, "__init__")
+        except ImportError:
+            logger.debug("ChatVertexAI not available for patching")
+
+        try:
+            from langchain_google_genai import ChatGoogleGenerativeAI
+            components_to_patch["ChatGoogleGenerativeAI"] = (ChatGoogleGenerativeAI, "__init__")
+        except ImportError:
+            logger.debug("ChatGoogleGenerativeAI not available for patching")
+
+        try:
+            from langchain_anthropic import ChatAnthropic
+            components_to_patch["ChatAnthropic"] = (ChatAnthropic, "__init__")
+        except ImportError:
+            logger.debug("ChatAnthropic not available for patching")
+
+        try:
+            from langchain_community.chat_models import ChatLiteLLM
+            components_to_patch["ChatLiteLLM"] = (ChatLiteLLM, "__init__")
+        except ImportError:
+            logger.debug("ChatLiteLLM not available for patching")
+
+        try:
+            from langchain_openai import ChatOpenAI as ChatOpenAI_LangchainOpenAI
+            components_to_patch["ChatOpenAI_LangchainOpenAI"] = (ChatOpenAI_LangchainOpenAI, "__init__")
+        except ImportError:
+            logger.debug("ChatOpenAI_LangchainOpenAI not available for patching")
+
+        try:
+            from langchain.chat_models import ChatOpenAI as ChatOpenAI_ChatModels
+            components_to_patch["ChatOpenAI_ChatModels"] = (ChatOpenAI_ChatModels, "__init__")
+        except ImportError:
+            logger.debug("ChatOpenAI_ChatModels not available for patching")
+
+        try:
+            from langchain.chains import create_retrieval_chain, RetrievalQA
+            components_to_patch["RetrievalQA"] = (RetrievalQA, "from_chain_type")
+            components_to_patch["create_retrieval_chain"] = (create_retrieval_chain, None)
+        except ImportError:
+            logger.debug("Langchain chains not available for patching")

         for name, (component, method_name) in components_to_patch.items():
             try:
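Each provider import above now sits in its own try/except ImportError, so a missing optional dependency disables just that one integration instead of aborting `_monkey_patch` entirely. The same guarded-import pattern in a self-contained sketch (the candidate map below is illustrative, not the package's actual list):

    import importlib
    import logging

    logger = logging.getLogger(__name__)

    def collect_available(candidates):
        """Import each optional module independently; skip whatever is not installed."""
        available = {}
        for name, module_path in candidates.items():
            try:
                available[name] = importlib.import_module(module_path)
            except ImportError:
                logger.debug("%s not available for patching", name)
        return available

    found = collect_available({
        "ChatAnthropic": "langchain_anthropic",
        "ChatVertexAI": "langchain_google_vertexai",
    })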
@@ -249,21 +286,45 @@ class LangchainTracer(BaseCallbackHandler):

     def _restore_original_methods(self):
         """Restore all original methods and functions with enhanced error handling"""
-        from langchain.llms import OpenAI
-        # from langchain_groq import ChatGroq
-        # from langchain_google_genai import ChatGoogleGenerativeAI
-        # from langchain_anthropic import ChatAnthropic
-        from langchain_community.chat_models import ChatLiteLLM
-        # from langchain_cohere import ChatCohere
-        from langchain_openai import ChatOpenAI as ChatOpenAI_LangchainOpenAI
-        from langchain.chat_models import ChatOpenAI as ChatOpenAI_ChatModels
-        from langchain.chains import create_retrieval_chain, RetrievalQA
-
+        # Dynamically import only what we need based on what was patched
+        imported_components = {}
+
+        if self._original_inits or self._original_methods:
+            for name in list(self._original_inits.keys()) + list(self._original_methods.keys()):
+                try:
+                    if name == "OpenAI":
+                        from langchain.llms import OpenAI
+                        imported_components[name] = OpenAI
+                    elif name == "ChatVertexAI":
+                        from langchain_google_vertexai import ChatVertexAI
+                        imported_components[name] = ChatVertexAI
+                    elif name == "ChatGoogleGenerativeAI":
+                        from langchain_google_genai import ChatGoogleGenerativeAI
+                        imported_components[name] = ChatGoogleGenerativeAI
+                    elif name == "ChatAnthropic":
+                        from langchain_anthropic import ChatAnthropic
+                        imported_components[name] = ChatAnthropic
+                    elif name == "ChatLiteLLM":
+                        from langchain_community.chat_models import ChatLiteLLM
+                        imported_components[name] = ChatLiteLLM
+                    elif name == "ChatOpenAI_LangchainOpenAI":
+                        from langchain_openai import ChatOpenAI as ChatOpenAI_LangchainOpenAI
+                        imported_components[name] = ChatOpenAI_LangchainOpenAI
+                    elif name == "ChatOpenAI_ChatModels":
+                        from langchain.chat_models import ChatOpenAI as ChatOpenAI_ChatModels
+                        imported_components[name] = ChatOpenAI_ChatModels
+                    elif name in ["RetrievalQA", "create_retrieval_chain"]:
+                        from langchain.chains import create_retrieval_chain, RetrievalQA
+                        imported_components["RetrievalQA"] = RetrievalQA
+                        imported_components["create_retrieval_chain"] = create_retrieval_chain
+                except ImportError:
+                    logger.debug(f"{name} not available for restoration")

         for name, original in self._original_inits.items():
             try:
-                component = eval(name)
-                component.__init__ = original
+                if name in imported_components:
+                    component = imported_components[name]
+                    component.__init__ = original
             except Exception as e:
                 logger.error(f"Error restoring {name}: {e}")
                 self.on_error(e, context=f"restore_{name}")
@@ -272,10 +333,12 @@ class LangchainTracer(BaseCallbackHandler):
             try:
                 if "." in name:
                     module_name, method_name = name.rsplit(".", 1)
-                    module = eval(module_name)
-                    setattr(module, method_name, original)
+                    if module_name in imported_components:
+                        module = imported_components[module_name]
+                        setattr(module, method_name, original)
                 else:
-                    globals()[name] = original
+                    if name in imported_components:
+                        globals()[name] = original
             except Exception as e:
                 logger.error(f"Error restoring {name}: {e}")
                 self.on_error(e, context=f"restore_{name}")
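With `eval(name)` gone, restoration only touches objects it can re-resolve through an explicit name-to-object map, so stopping the tracer in an environment missing some provider package degrades quietly instead of raising. A self-contained sketch of the registry-based restore, using a hypothetical `Target` class:

    # Sketch: undo a monkey-patch via an explicit registry instead of eval().
    class Target:
        def greet(self):
            return "original"

    originals = {"Target.greet": Target.greet}   # saved before patching
    Target.greet = lambda self: "patched"        # the monkey-patch

    registry = {"Target": Target}                # explicit name -> object map
    for dotted, original in originals.items():
        owner_name, attr = dotted.rsplit(".", 1)
        owner = registry.get(owner_name)
        if owner is not None:                    # skip anything we cannot resolve
            setattr(owner, attr, original)

    assert Target().greet() == "original"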
@@ -359,16 +422,92 @@ class LangchainTracer(BaseCallbackHandler):
                 }
             )

+            # Calculate latency
             end_time = datetime.now()
-            self.additional_metadata["latency"] = (end_time - self.current_trace["start_time"]).total_seconds()
+            latency = (end_time - self.current_trace["start_time"]).total_seconds()

+            model = ""
+            prompt_tokens = 0
+            completion_tokens = 0
+            total_tokens = 0
+
+            # Try to get model name and token usage from llm_output first
             if response and response.llm_output:
-                self.additional_metadata["model_name"] = response.llm_output.get("model_name", "")
-                self.additional_metadata["tokens"] = {}
-                if response.llm_output.get("token_usage"):
-                    self.additional_metadata["tokens"]["total"] = response.llm_output["token_usage"].get("total_tokens", 0)
-                    self.additional_metadata["tokens"]["prompt"] = response.llm_output["token_usage"].get("prompt_tokens", 0)
-                    self.additional_metadata["tokens"]["completion"] = response.llm_output["token_usage"].get("completion_tokens", 0)
+                try:
+                    model = response.llm_output.get("model_name") or response.llm_output.get("model", "")
+                except Exception as e:
+                    logger.debug(f"Error getting model name: {e}")
+                    model = ""
+
+                # Token usage: prefer "token_usage", fall back to "usage";
+                # accept both prompt/completion and input/output key styles
+                try:
+                    token_usage = response.llm_output.get("token_usage") or response.llm_output.get("usage") or {}
+                    if token_usage:
+                        prompt_tokens = token_usage.get("prompt_tokens", 0) or token_usage.get("input_tokens", 0)
+                        completion_tokens = token_usage.get("completion_tokens", 0) or token_usage.get("output_tokens", 0)
+                        total_tokens = prompt_tokens + completion_tokens
+                except Exception as e:
+                    logger.debug(f"Error getting token usage: {e}")
+                    prompt_tokens = completion_tokens = total_tokens = 0
+
+            # Fall back to the usage metadata attached to the first generation
+            if prompt_tokens == 0 and completion_tokens == 0:
+                try:
+                    usage_data = response.generations[0][0].message.usage_metadata
+                    prompt_tokens = usage_data.get("input_tokens", 0)
+                    completion_tokens = usage_data.get("output_tokens", 0)
+                    total_tokens = prompt_tokens + completion_tokens
+                except Exception as e:
+                    logger.debug(f"Error getting usage data: {e}")
+                    prompt_tokens = completion_tokens = total_tokens = 0
+
+            # If llm_output had no model name, fall back to the stored model names
+            if not model:
+                try:
+                    model = list(self.model_names.values())[0]
+                except Exception:
+                    model = ""
+
+            self.additional_metadata = {
+                'latency': latency,
+                'model_name': model,
+                'tokens': {
+                    'prompt': prompt_tokens,
+                    'completion': completion_tokens,
+                    'total': total_tokens
+                }
+            }
+
         except Exception as e:
             self.on_error(e, context="llm_end")
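The token bookkeeping above exists because providers disagree on key names: OpenAI-style `llm_output` uses `prompt_tokens`/`completion_tokens`, while the `usage_metadata` attached to generations (for example by the Anthropic or Gemini integrations) uses `input_tokens`/`output_tokens`; the hunk tries each in turn. A compact sketch of the same normalization, with invented sample payloads:

    # Sketch: normalize provider-specific usage payloads to one shape.
    def normalize_usage(payload):
        prompt = payload.get("prompt_tokens", 0) or payload.get("input_tokens", 0)
        completion = payload.get("completion_tokens", 0) or payload.get("output_tokens", 0)
        return {"prompt": prompt, "completion": completion, "total": prompt + completion}

    print(normalize_usage({"prompt_tokens": 12, "completion_tokens": 30}))  # OpenAI-style keys
    print(normalize_usage({"input_tokens": 12, "output_tokens": 30}))       # Anthropic-style keys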
--- a/ragaai_catalyst/tracers/tracer.py
+++ b/ragaai_catalyst/tracers/tracer.py
@@ -283,64 +283,48 @@ class Tracer(AgenticTracing):
             data, additional_metadata = self.langchain_tracer.stop()

             # Add cost if possible
-            if additional_metadata.get('model_name'):
+            if additional_metadata['model_name']:
                 try:
                     model_cost_data = self.model_cost_dict[additional_metadata['model_name']]
-                    if 'tokens' in additional_metadata and all(k in additional_metadata['tokens'] for k in ['prompt', 'completion']):
-                        prompt_cost = additional_metadata["tokens"]["prompt"] * model_cost_data["input_cost_per_token"]
-                        completion_cost = additional_metadata["tokens"]["completion"] * model_cost_data["output_cost_per_token"]
-                        additional_metadata.setdefault('cost', {})["total_cost"] = prompt_cost + completion_cost
-                    else:
-                        logger.warning("Token information missing in additional_metadata")
+                    prompt_cost = additional_metadata["tokens"]["prompt"] * model_cost_data["input_cost_per_token"]
+                    completion_cost = additional_metadata["tokens"]["completion"] * model_cost_data["output_cost_per_token"]
+                    additional_metadata.setdefault('cost', {})["total_cost"] = prompt_cost + completion_cost
                 except Exception as e:
                     logger.warning(f"Error adding cost: {e}")
-            else:
-                logger.debug("Model name not available in additional_metadata, skipping cost calculation")
-
-            # Safely get total tokens and cost
-            if 'tokens' in additional_metadata and 'total' in additional_metadata['tokens']:
-                additional_metadata["total_tokens"] = additional_metadata["tokens"]["total"]
-            else:
-                additional_metadata["total_tokens"] = 0
-                logger.warning("Total tokens information not available")

-            if 'cost' in additional_metadata and 'total_cost' in additional_metadata['cost']:
+            additional_metadata["total_tokens"] = additional_metadata["tokens"]["total"]
+            del additional_metadata["tokens"]
+            if "cost" in additional_metadata:
                 additional_metadata["total_cost"] = additional_metadata["cost"]["total_cost"]
+                del additional_metadata["cost"]
             else:
                 additional_metadata["total_cost"] = 0.0
-                logger.warning("Total cost information not available")
-
-            # Safely remove tokens and cost dictionaries if they exist
-            additional_metadata.pop("tokens", None)
-            additional_metadata.pop("cost", None)

-            # Safely merge metadata
-            combined_metadata = {}
-            if user_detail.get('trace_user_detail', {}).get('metadata'):
-                combined_metadata.update(user_detail['trace_user_detail']['metadata'])
-            if additional_metadata:
-                combined_metadata.update(additional_metadata)
+            combined_metadata = user_detail['trace_user_detail']['metadata'].copy()
+            combined_metadata.update(additional_metadata)

             langchain_traces = langchain_tracer_extraction(data)
             final_result = convert_langchain_callbacks_output(langchain_traces)
-            # Safely set required fields in final_result
-            if final_result and isinstance(final_result, list) and len(final_result) > 0:
-                final_result[0]['project_name'] = user_detail.get('project_name', '')
-                final_result[0]['trace_id'] = str(uuid.uuid4())
-                final_result[0]['session_id'] = None
-                final_result[0]['metadata'] = combined_metadata
-                final_result[0]['pipeline'] = user_detail.get('trace_user_detail', {}).get('pipeline')
-
-                filepath_3 = os.path.join(os.getcwd(), "final_result.json")
-                with open(filepath_3, 'w') as f:
-                    json.dump(final_result, f, indent=2)
-
-                print(filepath_3)
-            else:
-                logger.warning("No valid langchain traces found in final_result")
+            final_result[0]['project_name'] = user_detail['project_name']
+            final_result[0]['trace_id'] = str(uuid.uuid4())
+            final_result[0]['session_id'] = None
+            final_result[0]['metadata'] = combined_metadata
+            final_result[0]['pipeline'] = user_detail['trace_user_detail']['pipeline']
+
+            filepath_3 = os.path.join(os.getcwd(), "final_result.json")
+            with open(filepath_3, 'w') as f:
+                json.dump(final_result, f, indent=2)
+
+            print(filepath_3)

-            additional_metadata_keys = list(additional_metadata.keys()) if additional_metadata else None
+            additional_metadata_keys = additional_metadata.keys() if additional_metadata else None

             UploadTraces(json_file_path=filepath_3,
                          project_name=self.project_name,
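The cost hunk multiplies the normalized token counts by the per-token prices looked up in `model_cost_dict`. A worked example with placeholder prices (not real catalog values):

    # Sketch: request cost from per-token prices.
    model_cost_data = {"input_cost_per_token": 2.5e-06, "output_cost_per_token": 1.0e-05}
    tokens = {"prompt": 1200, "completion": 350}

    prompt_cost = tokens["prompt"] * model_cost_data["input_cost_per_token"]           # 0.0030
    completion_cost = tokens["completion"] * model_cost_data["output_cost_per_token"]  # 0.0035
    total_cost = prompt_cost + completion_cost                                         # 0.0065
    print(f"total_cost = {total_cost:.4f}")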
--- a/ragaai_catalyst-2.1.5b7.dist-info/METADATA
+++ b/ragaai_catalyst-2.1.5b8.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: ragaai_catalyst
-Version: 2.1.5b7
+Version: 2.1.5b8
 Summary: RAGA AI CATALYST
 Author-email: Kiran Scaria <kiran.scaria@raga.ai>, Kedar Gaikwad <kedar.gaikwad@raga.ai>, Dushyant Mahajan <dushyant.mahajan@raga.ai>, Siddhartha Kosti <siddhartha.kosti@raga.ai>, Ritika Goel <ritika.goel@raga.ai>, Vijay Chaurasia <vijay.chaurasia@raga.ai>
 Requires-Python: <3.13,>=3.9
--- a/ragaai_catalyst-2.1.5b7.dist-info/RECORD
+++ b/ragaai_catalyst-2.1.5b8.dist-info/RECORD
@@ -13,9 +13,9 @@ ragaai_catalyst/synthetic_data_generation.py,sha256=uDV9tNwto2xSkWg5XHXUvjErW-4P
 ragaai_catalyst/utils.py,sha256=TlhEFwLyRU690HvANbyoRycR3nQ67lxVUQoUOfTPYQ0,3772
 ragaai_catalyst/tracers/__init__.py,sha256=LfgTes-nHpazssbGKnn8kyLZNr49kIPrlkrqqoTFTfc,301
 ragaai_catalyst/tracers/distributed.py,sha256=AIRvS5Ur4jbFDXsUkYuCTmtGoHHx3LOG4n5tWOh610U,10330
-ragaai_catalyst/tracers/langchain_callback.py,sha256=LvMBhgvAX8ftyBQ9Naeui46EoDa2nHQZq48Ra6nL-Qg,21991
+ragaai_catalyst/tracers/langchain_callback.py,sha256=lLeED0Eg2kT4-_O9IUw3pAyi_Hm4AaX57VfeSiOwaUw,28134
 ragaai_catalyst/tracers/llamaindex_callback.py,sha256=ZY0BJrrlz-P9Mg2dX-ZkVKG3gSvzwqBtk7JL_05MiYA,14028
-ragaai_catalyst/tracers/tracer.py,sha256=bLgO3lQmoumo-JtqZFi4DUqhu9itM5GaLFNY7hmsI1g,20267
+ragaai_catalyst/tracers/tracer.py,sha256=Yq2HhgT4785t9573kksJ7ngM3qCLPgZbZ0IpgOHdTTo,19223
 ragaai_catalyst/tracers/upload_traces.py,sha256=2TWdRTN6FMaX-dqDv8BJWQS0xrCGYKkXEYOi2kK3Z3Y,5487
 ragaai_catalyst/tracers/agentic_tracing/README.md,sha256=X4QwLb7-Jg7GQMIXj-SerZIgDETfw-7VgYlczOR8ZeQ,4508
 ragaai_catalyst/tracers/agentic_tracing/__init__.py,sha256=yf6SKvOPSpH-9LiKaoLKXwqj5sez8F_5wkOb91yp0oE,260
@@ -32,7 +32,7 @@ ragaai_catalyst/tracers/agentic_tracing/tracers/agent_tracer.py,sha256=--wvhOJ-J
 ragaai_catalyst/tracers/agentic_tracing/tracers/base.py,sha256=88rX7OkOGEyVNECUrc4bYqODyulXve_-99d9ku5hBeQ,37373
 ragaai_catalyst/tracers/agentic_tracing/tracers/custom_tracer.py,sha256=l3x3uFO5ov93I7UUrUX1M06WVGy2ug2jEZ1G7o315z4,13075
 ragaai_catalyst/tracers/agentic_tracing/tracers/langgraph_tracer.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py,sha256=s6BRoBteCRF8XrXGnmZ98ZWPrSONC5RObPXNaq-im3w,31782
+ragaai_catalyst/tracers/agentic_tracing/tracers/llm_tracer.py,sha256=91aWXJGb3GDfyDfJyA7Irnk3XSyfkQaQppW_NMORGJQ,31725
 ragaai_catalyst/tracers/agentic_tracing/tracers/main_tracer.py,sha256=6hsg-Yw11v4qeELI1CWrdX8BXf-wJrTF5smBI5prgoo,15873
 ragaai_catalyst/tracers/agentic_tracing/tracers/network_tracer.py,sha256=m8CxYkl7iMiFya_lNwN1ykBc3Pmo-2pR_2HmpptwHWQ,10352
 ragaai_catalyst/tracers/agentic_tracing/tracers/tool_tracer.py,sha256=4rWL7fIJE5wN0nwh6fMWyh3OrrenZHJkNzyQXikyzQI,13771
@@ -65,8 +65,8 @@ ragaai_catalyst/tracers/utils/__init__.py,sha256=KeMaZtYaTojilpLv65qH08QmpYclfpa
 ragaai_catalyst/tracers/utils/convert_langchain_callbacks_output.py,sha256=ofrNrxf2b1hpjDh_zeaxiYq86azn1MF3kW8-ViYPEg0,1641
 ragaai_catalyst/tracers/utils/langchain_tracer_extraction_logic.py,sha256=cghjCuUe8w-2MZdh9xgtRGe3y219u26GGzpnuY4Wt6Q,3047
 ragaai_catalyst/tracers/utils/utils.py,sha256=ViygfJ7vZ7U0CTSA1lbxVloHp4NSlmfDzBRNCJuMhis,2374
-ragaai_catalyst-2.1.5b7.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-ragaai_catalyst-2.1.5b7.dist-info/METADATA,sha256=cVHEchxtHjkR_9AKHeDlqTpZqYNMSoF5gKR37D9icAY,12764
-ragaai_catalyst-2.1.5b7.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-ragaai_catalyst-2.1.5b7.dist-info/top_level.txt,sha256=HpgsdRgEJMk8nqrU6qdCYk3di7MJkDL0B19lkc7dLfM,16
-ragaai_catalyst-2.1.5b7.dist-info/RECORD,,
+ragaai_catalyst-2.1.5b8.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ragaai_catalyst-2.1.5b8.dist-info/METADATA,sha256=OaiEW7uA1wnQO562QbKGgtlZuue1PTGTjK9-AW5gkLQ,12764
+ragaai_catalyst-2.1.5b8.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ragaai_catalyst-2.1.5b8.dist-info/top_level.txt,sha256=HpgsdRgEJMk8nqrU6qdCYk3di7MJkDL0B19lkc7dLfM,16
+ragaai_catalyst-2.1.5b8.dist-info/RECORD,,