mb-rag 1.1.38.tar.gz → 1.1.40.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mb-rag might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mb_rag
-Version: 1.1.38
+Version: 1.1.40
 Summary: RAG function file
 Author: ['Malav Bateriwala']
 Requires-Python: >=3.8
@@ -292,7 +292,7 @@ class ModelFactory:
         except Exception as e:
             raise ValueError(f"Error with pydantic_model: {e}")
         if images:
-            res = self._model_invoke_images(images=images,prompt=query,pydantic_model=pydantic_model)
+            res = self._model_invoke_images(images=images,prompt=query,pydantic_model=pydantic_model,get_content_only=get_content_only)
         else:
             res = self.model.invoke(query)
         if get_content_only:
@@ -306,7 +306,7 @@ class ModelFactory:
         with open(image, "rb") as f:
             return base64.b64encode(f.read()).decode('utf-8')

-    def _model_invoke_images(self,images: list, prompt: str,pydantic_model = None):
+    def _model_invoke_images(self,images: list, prompt: str,pydantic_model = None,get_content_only: bool = True) -> str:
         """
         Function to invoke the model with images
         Args:
@@ -328,11 +328,27 @@ class ModelFactory:
             print("Continuing without structured output")
         message= HumanMessage(content=prompt_new,)
         response = self.model.invoke([message])
-        try:
-            return response.content
-        except Exception:
+
+        if get_content_only:
+            try:
+                return response.content
+            except Exception:
+                print("Failed to get content from response. Returning response object")
+                return response
+        else:
             return response

+    def _get_llm_metadata(self):
+        """
+        Returns Basic metadata about the LLM
+        """
+        print("Model Name: ", self.model)
+        print("Model Temperature: ", self.temperature)
+        print("Model Max Tokens: ", self.max_output_tokens)
+        print("Model Top P: ", self.top_p)
+        print("Model Top K: ", self.top_k)
+        print("Model Input Schema:",self.input_schema)
+
 class ConversationModel:
     """
     A class to handle conversation with AI models
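
The changes above thread the existing get_content_only flag through _model_invoke_images, so image calls behave like text calls: the string content is returned by default, and the full response object is returned when the flag is False. The release also adds a _get_llm_metadata helper that prints the model's basic settings. A rough usage sketch of the new behavior follows; the import path and constructor arguments are assumptions, only the method names and parameters shown in the diff come from the package:

# Hypothetical sketch -- import path and constructor arguments are assumed;
# _model_invoke_images, get_content_only and _get_llm_metadata come from this diff.
from mb_rag import ModelFactory                      # assumed import path

factory = ModelFactory(model_name="gpt-4o")          # assumed constructor signature

# Default (get_content_only=True): returns the string content of the response.
text = factory._model_invoke_images(images=["photo.jpg"], prompt="Describe this image")

# get_content_only=False: returns the raw response object for further inspection.
raw = factory._model_invoke_images(images=["photo.jpg"], prompt="Describe this image",
                                   get_content_only=False)

# New helper that prints model name, temperature, max tokens, top_p, top_k and input schema.
factory._get_llm_metadata()
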
@@ -454,7 +454,8 @@ class embedding_generator:

     def load_retriever(self, embeddings_folder_path: str,
                        search_type: List[str] = ["similarity_score_threshold"],
-                       search_params: List[Dict] = [{"k": 3, "score_threshold": 0.9}]):
+                       search_params: List[Dict] = [{"k": 3, "score_threshold": 0.9}],
+                       collection_name: str = 'test'):
         """
         Load retriever with search configuration.

@@ -462,6 +463,7 @@ class embedding_generator:
             embeddings_folder_path (str): Path to embeddings folder
             search_type (List[str]): List of search types
             search_params (List[Dict]): List of search parameters
+            collection_name (str): Name of the collection. Default: 'test'

         Returns:
             Union[Any, List[Any]]: Single retriever or list of retrievers
@@ -475,7 +477,7 @@ class embedding_generator:
             )
             ```
         """
-        db = self.load_embeddings(embeddings_folder_path)
+        db = self.load_embeddings(embeddings_folder_path, collection_name)
         if db is not None:
             if self.vector_store_type == 'chroma':
                 if len(search_type) != len(search_params):
@@ -511,7 +513,7 @@ class embedding_generator:
             chunk_overlap (int): Overlap between chunks
         """
         if self.vector_store_type == 'chroma':
-            db = self.load_embeddings(embeddings_folder_path)
+            db = self.load_embeddings(embeddings_folder_path, collection_name)
             if db is not None:
                 docs = self.tokenize(data, text_splitter_type, chunk_size, chunk_overlap)
                 db.add_documents(docs)
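
Both load_retriever and the data-append path now pass a collection_name through to load_embeddings, which matters when a Chroma store holds more than one collection; it defaults to 'test'. A rough sketch of the updated call, assuming an already-configured embedding_generator; the import path and constructor arguments are assumptions, only load_retriever and its parameters come from the diff:

# Hypothetical sketch -- import path and constructor arguments are assumed;
# load_retriever and its parameters (including collection_name) come from this diff.
from mb_rag import embedding_generator                # assumed import path

gen = embedding_generator(vector_store_type="chroma") # assumed constructor signature

retriever = gen.load_retriever(
    embeddings_folder_path="./embeddings",
    search_type=["similarity_score_threshold"],
    search_params=[{"k": 3, "score_threshold": 0.9}],
    collection_name="my_docs",                        # new in 1.1.40; defaults to 'test'
)
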
@@ -1,5 +1,5 @@
 MAJOR_VERSION = 1
 MINOR_VERSION = 1
-PATCH_VERSION = 38
+PATCH_VERSION = 40
 version = '{}.{}.{}'.format(MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION)
 __all__ = ['MAJOR_VERSION', 'MINOR_VERSION', 'PATCH_VERSION', 'version']
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mb_rag
-Version: 1.1.38
+Version: 1.1.40
 Summary: RAG function file
 Author: ['Malav Bateriwala']
 Requires-Python: >=3.8
7 files without changes