mb-rag 1.1.19__tar.gz → 1.1.21__tar.gz
This diff compares the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release.
This version of mb-rag might be problematic.
- {mb_rag-1.1.19 → mb_rag-1.1.21}/PKG-INFO +1 -1
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag/chatbot/basic.py +52 -1
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag/version.py +1 -1
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag.egg-info/PKG-INFO +1 -1
- {mb_rag-1.1.19 → mb_rag-1.1.21}/README.md +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag/__init__.py +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag/chatbot/__init__.py +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag/chatbot/chains.py +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag/chatbot/prompts.py +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag/rag/__init__.py +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag/rag/embeddings.py +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag/utils/__init__.py +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag/utils/bounding_box.py +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag/utils/extra.py +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag.egg-info/SOURCES.txt +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag.egg-info/dependency_links.txt +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag.egg-info/requires.txt +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/mb_rag.egg-info/top_level.txt +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/pyproject.toml +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/setup.cfg +0 -0
- {mb_rag-1.1.19 → mb_rag-1.1.21}/setup.py +0 -0
mb_rag/chatbot/basic.py

```diff
@@ -12,7 +12,8 @@ __all__ = [
     'ChatbotBase',
     'ModelFactory',
     'ConversationModel',
-    'IPythonStreamHandler'
+    'IPythonStreamHandler',
+    'AgentFactory'
 ]
 
 class ChatbotBase:
```
```diff
@@ -69,6 +70,7 @@ class ModelFactory:
             'groq': self.create_groq,
             'deepseek': self.create_deepseek,
             'qwen' : self.create_qwen,
+            'hugging_face': self.create_hugging_face
         }
 
         model_data = creators.get(model_type)
```
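This hunk registers the new backend in `ModelFactory`'s `creators` mapping, which resolves a `model_type` string to the matching factory method. A minimal sketch of that dispatch pattern follows; only the dictionary keys and the `creators.get(model_type)` lookup appear in the diff, so the `load_model` wrapper and its error handling are assumptions, not mb_rag's actual API:

```python
# Hypothetical sketch of the registry dispatch around creators.get(model_type).
# The dict keys and the .get() lookup are from the diff; load_model is invented.
def load_model(factory, model_type: str, **kwargs):
    creators = {
        'groq': factory.create_groq,
        'deepseek': factory.create_deepseek,
        'qwen': factory.create_qwen,
        'hugging_face': factory.create_hugging_face,  # new in 1.1.21
    }
    model_data = creators.get(model_type)
    if model_data is None:  # assumed handling for unknown types
        raise ValueError(f"Unsupported model_type: {model_type!r}")
    return model_data(**kwargs)
```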
```diff
@@ -200,6 +202,55 @@ class ModelFactory:
         kwargs["model"] = model_name
         return ChatTongyi(streaming=True,**kwargs)
 
+    @classmethod
+    def create_hugging_face(cls, model_name: str = "Qwen/Qwen2.5-VL-7B-Instruct",model_function: str = "image-text-to-text",
+                            **kwargs) -> Any:
+        """
+        Create and load hugging face model.
+        Args:
+            model_name (str): Name of the model
+            model_function (str): model function
+            **kwargs: Additional arguments
+        Returns:
+            ChatHuggingFace: Chatbot model
+        """
+        if not check_package("transformers"):
+            raise ImportError("Transformers package not found. Please install it using: pip install transformers")
+        if not check_package("langchain_huggingface"):
+            raise ImportError("Transformers package not found. Please install it using: pip install langchain_huggingface")
+        if not check_package("torch"):
+            raise ImportError("Torch package not found. Please install it using: pip install torch")
+
+        from langchain_huggingface import HuggingFacePipeline
+        from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+        import torch
+
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+
+        temperature = kwargs.pop("temperature", 0.7)
+        max_length = kwargs.pop("max_length", 1024)
+
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name,
+            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+            device_map=device,
+            **kwargs
+        )
+
+        # Create pipeline
+        pipe = pipeline(
+            "text-generation",
+            model=model,
+            tokenizer=tokenizer,
+            max_length=max_length,
+            temperature=temperature,
+            device=device
+        )
+
+        # Create and return LangChain HuggingFacePipeline
+        return HuggingFacePipeline(pipeline=pipe)
+
     def _reset_model(self):
         """Reset the model"""
         self.model = self.model.reset()
```
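The new method returns a LangChain `HuggingFacePipeline` wrapping a `transformers` text-generation pipeline (the docstring's `ChatHuggingFace` mention notwithstanding), so the result can be used as a LangChain Runnable. A minimal usage sketch, assuming `transformers`, `torch`, and `langchain_huggingface` are installed; the prompt and generation kwargs are illustrative:

```python
from mb_rag.chatbot.basic import ModelFactory

# create_hugging_face is a classmethod, so no ModelFactory instance is needed.
# The first call downloads the model weights from the Hugging Face Hub.
llm = ModelFactory.create_hugging_face(
    model_name="Qwen/Qwen2.5-VL-7B-Instruct",  # the method's default
    temperature=0.7,   # popped from kwargs before from_pretrained
    max_length=1024,   # forwarded to the transformers pipeline
)

# HuggingFacePipeline implements the Runnable interface, so invoke() works.
print(llm.invoke("Summarize retrieval-augmented generation in one sentence."))
```

Note that `model_function` is accepted by the signature but not used anywhere in the body shown in this diff.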