mb-rag 1.1.26.tar.gz → 1.1.29.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mb-rag might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mb_rag
-Version: 1.1.26
+Version: 1.1.29
 Summary: RAG function file
 Author: ['Malav Bateriwala']
 Requires-Python: >=3.8
@@ -146,10 +146,11 @@ class ModelFactory:
         if not check_package("langchain_ollama"):
             raise ImportError("Langchain Community package not found. Please install it using: pip install langchain_ollama")
 
-        from langchain_ollama import ChatOllama
+        from langchain_ollama import OllamaLLM
+
         print(f"Current Ollama serve model is {os.system('ollama ps')}")
         kwargs["model"] = model_name
-        return ChatOllama(**kwargs)
+        return OllamaLLM(**kwargs)
 
     @classmethod
     def create_groq(cls, model_name: str = "llama-3.3-70b-versatile", **kwargs) -> Any:
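For context, the swap from ChatOllama to OllamaLLM changes what callers of the factory get back from invoke(). A minimal sketch of the difference, assuming langchain-ollama is installed, a local Ollama server is running, and a model such as "llama3" has been pulled (the model name is illustrative and not taken from the package):

# Illustrative only: shows the API difference behind the ChatOllama -> OllamaLLM swap.
# Assumes langchain-ollama is installed and an Ollama server is running with "llama3" pulled.
from langchain_ollama import ChatOllama, OllamaLLM

chat = ChatOllama(model="llama3")      # chat model: invoke() returns an AIMessage
print(chat.invoke("Hello").content)

llm = OllamaLLM(model="llama3")        # completion-style LLM: invoke() returns a plain str
print(llm.invoke("Hello"))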
@@ -315,21 +316,26 @@ class ModelFactory:
             str: Output from the model
         """
         base64_images = [self._image_to_base64(image) for image in images]
-        image_prompt_create = [{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_images[i]}"}} for i in range(len(images))]
-        prompt_new = [{"type": "text", "text": prompt},
-                      *image_prompt_create,]
-        if pydantic_model is not None:
-            try:
-                self.model = self.model.with_structured_output(pydantic_model)
-            except Exception as e:
-                print(f"Error with pydantic_model: {e}")
-                print("Continuing without structured output")
-        message= HumanMessage(content=prompt_new,)
-        response = self.model.invoke([message])
-        try:
+        if self.model_name=='ollama':
+            ollama_model = self.model.bind(images=[base64_images])
+            response = ollama_model.invoke([HumanMessage(content=prompt)])
             return response.content
-        except Exception:
-            return response
+        else:
+            image_prompt_create = [{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_images[i]}"}} for i in range(len(images))]
+            prompt_new = [{"type": "text", "text": prompt},
+                          *image_prompt_create,]
+            if pydantic_model is not None:
+                try:
+                    self.model = self.model.with_structured_output(pydantic_model)
+                except Exception as e:
+                    print(f"Error with pydantic_model: {e}")
+                    print("Continuing without structured output")
+            message= HumanMessage(content=prompt_new,)
+            response = self.model.invoke([message])
+            try:
+                return response.content
+            except Exception:
+                return response
 
 class ConversationModel:
     """
@@ -1,5 +1,5 @@
 MAJOR_VERSION = 1
 MINOR_VERSION = 1
-PATCH_VERSION = 26
+PATCH_VERSION = 29
 version = '{}.{}.{}'.format(MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION)
 __all__ = ['MAJOR_VERSION', 'MINOR_VERSION', 'PATCH_VERSION', 'version']
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mb_rag
-Version: 1.1.26
+Version: 1.1.29
 Summary: RAG function file
 Author: ['Malav Bateriwala']
 Requires-Python: >=3.8
7 files without changes.