mb-rag 1.1.20.tar.gz → 1.1.21.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mb-rag might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: mb_rag
-Version: 1.1.20
+Version: 1.1.21
 Summary: RAG function file
 Author: ['Malav Bateriwala']
 Requires-Python: >=3.8
@@ -70,6 +70,7 @@ class ModelFactory:
             'groq': self.create_groq,
             'deepseek': self.create_deepseek,
             'qwen' : self.create_qwen,
+            'hugging_face': self.create_hugging_face
         }

         model_data = creators.get(model_type)
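The new 'hugging_face' entry plugs into the factory's string-keyed dispatch table: creator methods are looked up by model type and called with the remaining keyword arguments. A minimal sketch of that pattern, using illustrative names (only the creators-dict lookup itself appears in this diff):

# Sketch of the dispatch pattern visible in the hunk above.
# ModelFactorySketch and create_model are illustrative names, not
# the package's actual entry point.
from typing import Any, Callable, Dict

class ModelFactorySketch:
    def create_hugging_face(self, **kwargs) -> Any:
        return f"hugging_face model with {kwargs}"  # stand-in for the real creator

    def create_model(self, model_type: str, **kwargs) -> Any:
        creators: Dict[str, Callable[..., Any]] = {
            'hugging_face': self.create_hugging_face,  # new in 1.1.21
        }
        creator = creators.get(model_type)
        if creator is None:
            raise ValueError(f"Unknown model type: {model_type!r}")
        return creator(**kwargs)

print(ModelFactorySketch().create_model('hugging_face', temperature=0.7))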
@@ -201,6 +202,55 @@ class ModelFactory:
         kwargs["model"] = model_name
         return ChatTongyi(streaming=True,**kwargs)

+    @classmethod
+    def create_hugging_face(cls, model_name: str = "Qwen/Qwen2.5-VL-7B-Instruct",
+                            model_function: str = "image-text-to-text", **kwargs) -> Any:
+        """
+        Create and load a Hugging Face model.
+        Args:
+            model_name (str): Name of the model
+            model_function (str): Pipeline task name (currently unused; the task is hardcoded to "text-generation" below)
+            **kwargs: Additional arguments forwarded to AutoModelForCausalLM.from_pretrained
+        Returns:
+            HuggingFacePipeline: LangChain wrapper around the transformers pipeline
+        """
+        if not check_package("transformers"):
+            raise ImportError("Transformers package not found. Please install it using: pip install transformers")
+        if not check_package("langchain_huggingface"):
+            raise ImportError("langchain_huggingface package not found. Please install it using: pip install langchain_huggingface")
+        if not check_package("torch"):
+            raise ImportError("Torch package not found. Please install it using: pip install torch")
+
+        from langchain_huggingface import HuggingFacePipeline
+        from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+        import torch
+
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+
+        # Pop generation parameters so the remaining kwargs can be passed
+        # straight through to from_pretrained.
+        temperature = kwargs.pop("temperature", 0.7)
+        max_length = kwargs.pop("max_length", 1024)
+
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name,
+            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+            device_map=device,
+            **kwargs
+        )
+
+        # Create the text-generation pipeline. The model is already placed by
+        # device_map above, so no device argument is passed here (passing both
+        # raises an error in recent transformers releases).
+        pipe = pipeline(
+            "text-generation",
+            model=model,
+            tokenizer=tokenizer,
+            max_length=max_length,
+            temperature=temperature
+        )
+
+        # Wrap the pipeline for LangChain and return it
+        return HuggingFacePipeline(pipeline=pipe)
+
     def _reset_model(self):
         """Reset the model"""
         self.model = self.model.reset()
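Because create_hugging_face is a classmethod that returns a HuggingFacePipeline, it can be called without instantiating the factory and then invoked directly as a LangChain runnable. A usage sketch, with the import path assumed (it is not visible in this diff) and a small model substituted for the heavyweight default:

# Usage sketch; the module path for ModelFactory is assumed, and "gpt2"
# is substituted for the default model to keep the example lightweight.
from mb_rag.chatbot.basic import ModelFactory  # import path assumed

llm = ModelFactory.create_hugging_face(
    model_name="gpt2",
    temperature=0.7,
    max_length=256,
)

# HuggingFacePipeline implements LangChain's Runnable interface, so
# invoke() returns the generated text as a plain string.
print(llm.invoke("Explain retrieval-augmented generation in one sentence."))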
@@ -1,5 +1,5 @@
 MAJOR_VERSION = 1
 MINOR_VERSION = 1
-PATCH_VERSION = 20
+PATCH_VERSION = 21
 version = '{}.{}.{}'.format(MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION)
 __all__ = ['MAJOR_VERSION', 'MINOR_VERSION', 'PATCH_VERSION', 'version']
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: mb_rag
-Version: 1.1.20
+Version: 1.1.21
 Summary: RAG function file
 Author: ['Malav Bateriwala']
 Requires-Python: >=3.8
7 files without changes