mb-rag 1.1.61__tar.gz → 1.1.63__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mb-rag might be problematic.
- {mb_rag-1.1.61 → mb_rag-1.1.63}/PKG-INFO +1 -1
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag/basic.py +2 -0
- mb_rag-1.1.63/mb_rag/prompts_bank.py +65 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag/version.py +1 -1
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag.egg-info/PKG-INFO +1 -1
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag.egg-info/SOURCES.txt +1 -1
- mb_rag-1.1.61/mb_rag/chatbot/prompts.py +0 -59
- {mb_rag-1.1.61 → mb_rag-1.1.63}/README.md +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag/__init__.py +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag/chatbot/__init__.py +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag/chatbot/chains.py +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag/chatbot/conversation.py +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag/rag/__init__.py +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag/rag/embeddings.py +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag/utils/__init__.py +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag/utils/all_data_extract.py +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag/utils/bounding_box.py +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag/utils/document_extract.py +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag/utils/extra.py +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag/utils/pdf_extract.py +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag.egg-info/dependency_links.txt +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag.egg-info/requires.txt +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag.egg-info/top_level.txt +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/pyproject.toml +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/setup.cfg +0 -0
- {mb_rag-1.1.61 → mb_rag-1.1.63}/setup.py +0 -0
{mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag/basic.py
@@ -357,6 +357,8 @@ class ModelFactory:
         Returns:
             str: Output from the model
         """
+        if not isinstance(images, list):
+            images = [images]
         base64_images = [self._image_to_base64(image) for image in images]
         image_prompt_create = [{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_images[i]}"}} for i in range(len(images))]
         prompt_new = [{"type": "text", "text": prompt}, *image_prompt_create]
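The two added lines let the image-prompt method accept either a single image or a list of images before the frames are base64-encoded. A minimal standalone sketch of the same normalization pattern, with a hypothetical _image_to_base64 helper standing in for ModelFactory._image_to_base64 (whose body is not shown in this hunk):

import base64

def _image_to_base64(path: str) -> str:
    # Stand-in for ModelFactory._image_to_base64: read a file and base64-encode it.
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")

def build_image_prompt(prompt: str, images):
    # Same normalization as the added lines: a single path is wrapped into a list.
    if not isinstance(images, list):
        images = [images]
    base64_images = [_image_to_base64(image) for image in images]
    image_parts = [
        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{b64}"}}
        for b64 in base64_images
    ]
    return [{"type": "text", "text": prompt}, *image_parts]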
mb_rag-1.1.63/mb_rag/prompts_bank.py
@@ -0,0 +1,65 @@
+from langchain_core.prompts.chat import ChatPromptTemplate
+
+__all__ = ["PromptManager"]
+
+class PromptManager:
+    """
+    Central class for storing and invoking prompt templates.
+
+    Example:
+        pm = PromptManager()
+        prompt_text = pm.render_prompt("greeting")
+        print(prompt_text)
+
+        pm = PromptManager()
+        prompt_text = pm.render_prompt("todo_task", {"task": "Plan a deep learning project for image recognition"})
+        print(prompt_text)
+    """
+
+    def __init__(self):
+        self.templates = {
+            "coding_python": """You are a Python developer.
+Human: {question}
+Assistant:""",
+
+            "greeting": """You are a friendly assistant.
+Human: Hello!
+Assistant: Hi there! How can I assist you today?""",
+
+            "goodbye": """You are a friendly assistant.
+Human: Goodbye!
+Assistant: Goodbye! Have a great day!""",
+
+            "todo_task": """You are a helpful assistant.
+Human: Please create a to-do list for the following task: {task}
+Assistant:""",
+
+            "map_function": "*map(lambda x: image_url, baseframes_list)"
+        }
+
+    def get_template(self, name: str) -> str:
+        """
+        Get a prompt template by name.
+        Args:
+            name (str): The key name of the prompt.
+        Returns:
+            str: The prompt template string.
+        """
+        template = self.templates.get(name)
+        if not template:
+            raise ValueError(f"Prompt '{name}' not found. Available prompts: {list(self.templates.keys())}")
+        return template
+
+    def render_prompt(self, name: str, context: dict = None) -> str:
+        """
+        Fill and return a rendered prompt string.
+        Args:
+            name (str): The key name of the prompt.
+            context (dict): Variables to fill into the template.
+        Returns:
+            str: The final rendered prompt text.
+        """
+        template = self.get_template(name)
+        chat_prompt = ChatPromptTemplate.from_template(template)
+        rendered = chat_prompt.invoke(context or {})
+        return rendered.to_string()
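For context, a short usage sketch of the new PromptManager, based on the docstring examples in the file itself (assumes mb-rag 1.1.63 and langchain_core are installed; the error-handling branch is an illustration, not part of the package):

from mb_rag.prompts_bank import PromptManager

pm = PromptManager()

# Static template with no placeholders.
print(pm.render_prompt("greeting"))

# Template with a {task} placeholder filled from the context dict.
print(pm.render_prompt("todo_task", {"task": "Plan a deep learning project for image recognition"}))

# Unknown names raise ValueError listing the available template keys.
try:
    pm.render_prompt("does_not_exist")
except ValueError as err:
    print(err)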
{mb_rag-1.1.61 → mb_rag-1.1.63}/mb_rag.egg-info/SOURCES.txt
@@ -3,6 +3,7 @@ pyproject.toml
 setup.py
 mb_rag/__init__.py
 mb_rag/basic.py
+mb_rag/prompts_bank.py
 mb_rag/version.py
 mb_rag.egg-info/PKG-INFO
 mb_rag.egg-info/SOURCES.txt
@@ -12,7 +13,6 @@ mb_rag.egg-info/top_level.txt
 mb_rag/chatbot/__init__.py
 mb_rag/chatbot/chains.py
 mb_rag/chatbot/conversation.py
-mb_rag/chatbot/prompts.py
 mb_rag/rag/__init__.py
 mb_rag/rag/embeddings.py
 mb_rag/utils/__init__.py
mb_rag-1.1.61/mb_rag/chatbot/prompts.py
@@ -1,59 +0,0 @@
-## file for storing basic prompts template
-from langchain.prompts import ChatPromptTemplate
-
-__all__ = ["prompts", "invoke_prompt"]
-
-class prompts:
-    """
-    Class to get different prompts example for chatbot and templates
-    """
-
-    def get_code_prompts(self):
-        """
-        Get code prompts
-        Returns:
-            str: Code prompt
-        """
-        list_code_prompts = {'coding_python ': """You are a Python developer.
-        Human: {}"""}
-
-    def get_text_prompts(self):
-        """
-        Get text prompts
-        Returns:
-            str: Text prompt
-        """
-        list_text_prompts = {
-            'multiple_placeholders': """You are a helpful assistant.
-            Human: Tell me a more about {adjective1} and its relation to {adjective2}.
-            Assistant:"""
-        }
-
-    def get_image_prompts(self):
-        """
-        Get image prompts
-        Returns:
-            str: Image prompt
-        """
-        list_image_prompts = {'map_function': "*map(lambda x: image_url, baseframes_list)"} # for passing multiple images from a video or a list of images
-
-    def get_assistant_prompts(self):
-        """
-        Get assistant prompts
-        Returns:
-            str: Assistant prompt
-        """
-        list_assistant_prompts = {}
-
-def invoke_prompt(template: str, input_dict : dict = None):
-    """
-    Invoke a prompt
-    Args:
-        template (str): Template for the prompt
-        input_dict (dict): Input dictionary for the prompt
-    Returns:
-        str: Prompt
-    """
-    prompt_multiple = ChatPromptTemplate.from_template(template)
-    prompt = prompt_multiple.invoke(input_dict)
-    return prompt
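The removed invoke_prompt helper took a raw template string plus an input dict; the closest equivalent in 1.1.63 appears to be PromptManager.render_prompt, which looks templates up by name. A rough migration sketch (the example question and variable names are illustrative, not from the package):

# Before (mb-rag <= 1.1.61): build and invoke an ad-hoc template.
# from mb_rag.chatbot.prompts import invoke_prompt
# prompt = invoke_prompt("You are a Python developer.\nHuman: {question}\nAssistant:",
#                        {"question": "Write a function that reverses a string."})

# After (mb-rag 1.1.63): render a named template from the prompt bank.
from mb_rag.prompts_bank import PromptManager

pm = PromptManager()
text = pm.render_prompt("coding_python", {"question": "Write a function that reverses a string."})
print(text)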
The remaining 19 files listed above are unchanged between 1.1.61 and 1.1.63.