mb-rag 1.1.22__tar.gz → 1.1.23__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mb-rag might be problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: mb_rag
-Version: 1.1.22
+Version: 1.1.23
 Summary: RAG function file
 Author: ['Malav Bateriwala']
 Requires-Python: >=3.8
@@ -209,7 +209,7 @@ class ModelFactory:
         Create and load hugging face model.
         Args:
             model_name (str): Name of the model
-            model_function (str): model function
+            model_function (str): model function. Default is image-text-to-text.
             device (str): Device to use. Default is cpu
             **kwargs: Additional arguments
         Returns:
@@ -223,7 +223,7 @@ class ModelFactory:
             raise ImportError("Torch package not found. Please install it using: pip install torch")

         from langchain_huggingface import HuggingFacePipeline
-        from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+        from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AutoModelForImageTextToText
         import torch

         device = torch.device(device) if torch.cuda.is_available() else torch.device("cpu")
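AutoModelForImageTextToText is transformers' auto class for vision-language (image-plus-text in, text out) checkpoints. It exists only in relatively recent transformers releases, so this import appears to raise the package's effective minimum transformers version; no corresponding dependency-pin change is visible in this diff.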
@@ -232,13 +232,21 @@ class ModelFactory:
         max_length = kwargs.pop("max_length", 1024)

         tokenizer = AutoTokenizer.from_pretrained(model_name,trust_remote_code=True)
-        model = AutoModelForCausalLM.from_pretrained(
-            model_name,
-            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
-            device_map=device,
-            trust_remote_code=True,
-            **kwargs
-        )
+        if model_function == "image-text-to-text":
+            model = AutoModelForImageTextToText.from_pretrained(
+                model_name,
+                torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+                device_map=device,
+                trust_remote_code=True,
+                **kwargs
+            )
+        else:
+            model = AutoModelForCausalLM.from_pretrained(
+                model_name,
+                torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+                device_map=device,
+                trust_remote_code=True,
+                **kwargs)

         # Create pipeline
         pipe = pipeline(
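Read together with the new import above, this hunk makes the factory dispatch on the new model_function argument. Below is a minimal standalone sketch of that dispatch under stated assumptions: load_hf_model is a hypothetical name (the actual ModelFactory method is not named in this diff), and the behavior mirrors the added branch.

import torch
from transformers import (
    AutoModelForCausalLM,
    AutoModelForImageTextToText,
    AutoTokenizer,
)

def load_hf_model(model_name, model_function="image-text-to-text",
                  device="cpu", **kwargs):
    # Mirror the factory's CPU fallback when CUDA is unavailable.
    device = torch.device(device) if torch.cuda.is_available() else torch.device("cpu")
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    # Vision-language checkpoints load via AutoModelForImageTextToText;
    # everything else falls back to AutoModelForCausalLM, as in the diff.
    auto_cls = (AutoModelForImageTextToText
                if model_function == "image-text-to-text"
                else AutoModelForCausalLM)
    model = auto_cls.from_pretrained(
        model_name,
        # device.type sidesteps comparing a torch.device to a raw string.
        torch_dtype=torch.float16 if device.type == "cuda" else torch.float32,
        device_map=device,  # requires accelerate to be installed
        trust_remote_code=True,
        **kwargs,
    )
    return tokenizer, model

One caveat the diff does not address: many image-text-to-text checkpoints ship an AutoProcessor rather than a plain tokenizer, so the tokenizer-based path may need further work for some models.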
@@ -246,8 +254,7 @@ class ModelFactory:
             model=model,
             tokenizer=tokenizer,
             max_length=max_length,
-            temperature=temperature,
-            device=device
+            temperature=temperature
         )

         # Create and return LangChain HuggingFacePipeline
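Dropping device=device here is likely a compatibility fix: once a model has been placed with device_map at load time it is managed by accelerate, and recent transformers versions refuse a separate device argument on pipeline() for such models. A hedged usage sketch, continuing from the hypothetical load_hf_model above; the task string is an assumption, since the pipeline's first argument sits outside this hunk's context lines:

from transformers import pipeline

tokenizer, model = load_hf_model("gpt2", model_function="text-generation")
pipe = pipeline(
    "text-generation",  # assumed task, not visible in the diff
    model=model,
    tokenizer=tokenizer,
    max_length=1024,
    temperature=0.7,
    # no device= here: placement was already handled by device_map
)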
@@ -1,5 +1,5 @@
 MAJOR_VERSION = 1
 MINOR_VERSION = 1
-PATCH_VERSION = 22
+PATCH_VERSION = 23
 version = '{}.{}.{}'.format(MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION)
 __all__ = ['MAJOR_VERSION', 'MINOR_VERSION', 'PATCH_VERSION', 'version']
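With PATCH_VERSION bumped to 23, the format call above evaluates to '1.1.23', matching the Version field updated in both PKG-INFO hunks:

version = '{}.{}.{}'.format(1, 1, 23)
assert version == '1.1.23'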
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: mb_rag
-Version: 1.1.22
+Version: 1.1.23
 Summary: RAG function file
 Author: ['Malav Bateriwala']
 Requires-Python: >=3.8
7 files without changes