mb-rag 1.1.58__tar.gz → 1.1.66__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {mb_rag-1.1.58 → mb_rag-1.1.66}/PKG-INFO +1 -1
  2. {mb_rag-1.1.58 → mb_rag-1.1.66}/README.md +3 -0
  3. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/basic.py +6 -2
  4. mb_rag-1.1.66/mb_rag/prompts_bank.py +77 -0
  5. mb_rag-1.1.66/mb_rag/utils/llm_wrapper.py +31 -0
  6. mb_rag-1.1.66/mb_rag/utils/viewer.py +8 -0
  7. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/version.py +1 -1
  8. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag.egg-info/PKG-INFO +1 -1
  9. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag.egg-info/SOURCES.txt +4 -2
  10. mb_rag-1.1.58/mb_rag/chatbot/prompts.py +0 -59
  11. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/__init__.py +0 -0
  12. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/chatbot/__init__.py +0 -0
  13. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/chatbot/chains.py +0 -0
  14. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/chatbot/conversation.py +0 -0
  15. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/rag/__init__.py +0 -0
  16. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/rag/embeddings.py +0 -0
  17. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/utils/__init__.py +0 -0
  18. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/utils/all_data_extract.py +0 -0
  19. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/utils/bounding_box.py +0 -0
  20. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/utils/document_extract.py +0 -0
  21. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/utils/extra.py +0 -0
  22. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/utils/pdf_extract.py +0 -0
  23. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag.egg-info/dependency_links.txt +0 -0
  24. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag.egg-info/requires.txt +0 -0
  25. {mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag.egg-info/top_level.txt +0 -0
  26. {mb_rag-1.1.58 → mb_rag-1.1.66}/pyproject.toml +0 -0
  27. {mb_rag-1.1.58 → mb_rag-1.1.66}/setup.cfg +0 -0
  28. {mb_rag-1.1.58 → mb_rag-1.1.66}/setup.py +0 -0
{mb_rag-1.1.58 → mb_rag-1.1.66}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mb_rag
-Version: 1.1.58
+Version: 1.1.66
 Summary: RAG function file
 Author: ['Malav Bateriwala']
 Requires-Python: >=3.8
{mb_rag-1.1.58 → mb_rag-1.1.66}/README.md
@@ -73,6 +73,9 @@ ollama_model = ModelFactory(
 )
 response = ollama_model.invoke_query("What is the meaning of life?")
 
+## Running in threads
+response = model.invoke_query_threads(query_list=['q1','q2'],input_data=[[images_data],[images_data]],n_workers=4)
+
 
 ## check example_conversation.ipynb for more details
 
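The README addition above introduces the threaded entry point. As a rough illustration of what such a call amounts to (a stdlib sketch of the apparent behavior, not the package's actual internals, which this diff does not show):

```python
from concurrent.futures import ThreadPoolExecutor

def invoke_many(model, query_list, n_workers=4):
    # Fan the queries out over a thread pool; each worker issues one
    # independent single-query call on the model.
    with ThreadPoolExecutor(max_workers=n_workers) as pool:
        return list(pool.map(model.invoke_query, query_list))

# responses = invoke_many(ollama_model, ["q1", "q2"], n_workers=4)
```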
{mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/basic.py
@@ -2,6 +2,7 @@
 
 import os
 from langchain_core.messages import HumanMessage
+import torch
 from mb_rag.utils.extra import check_package
 import base64
 from .utils.extra import check_package
{mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/basic.py
@@ -41,7 +42,7 @@ class ModelFactory:
 
         self.model_type = model_type
         self.model_name = model_name
-        model_data = creators.get(model_type)
+        model_data = creators[model_type] if model_type in creators else None
         if not model_data:
             raise ValueError(f"Unsupported model type: {model_type}")
 
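For context, this is a plain registry lookup: the new conditional expression behaves identically to the old `creators.get(model_type)`, yielding `None` for unknown types so the `ValueError` fires. A standalone sketch with hypothetical creator entries:

```python
# Hypothetical registry mirroring the pattern in this hunk; the real
# creators dict maps each model type to a constructor callable.
creators = {
    "openai": lambda name: f"<openai:{name}>",
    "ollama": lambda name: f"<ollama:{name}>",
}

model_type = "anthropic"
model_data = creators[model_type] if model_type in creators else None
if not model_data:
    raise ValueError(f"Unsupported model type: {model_type}")
```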
{mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/basic.py
@@ -195,7 +196,7 @@ class ModelFactory:
         from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AutoModelForImageTextToText,AutoProcessor
         import torch
 
-        device = torch.device(device) if torch.cuda.is_available() else torch.device("cpu")
+        device = torch.device("cuda" if torch.cuda.is_available() and device == "cuda" else "cpu")
 
         temperature = kwargs.pop("temperature", 0.7)
         max_length = kwargs.pop("max_length", 1024)
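The rewritten device line honors a requested "cuda" device only when CUDA is actually available, and pins everything else to CPU. A minimal standalone sketch of that guard:

```python
import torch

def resolve_device(requested: str) -> torch.device:
    # Honor "cuda" only when a GPU is actually usable; otherwise fall
    # back to CPU, matching the guard in the hunk above.
    use_cuda = requested == "cuda" and torch.cuda.is_available()
    return torch.device("cuda" if use_cuda else "cpu")

print(resolve_device("cuda"))  # cuda on a GPU machine, cpu elsewhere
```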
{mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/basic.py
@@ -301,6 +302,7 @@ class ModelFactory:
 
         df = pd.DataFrame(query_list, columns=["query"])
         df["response"] = None
+        df["input_data"] = None if input_data is None else input_data
 
         structured_model = None
         if pydantic_model is not None:
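This hunk stages per-query inputs alongside the queries in the results table. A hedged sketch of the bookkeeping with hypothetical data (column names follow the hunk):

```python
import pandas as pd

query_list = ["q1", "q2"]
input_data = [["img_a.png"], ["img_b.png"]]  # hypothetical per-query images

df = pd.DataFrame(query_list, columns=["query"])
df["response"] = None                    # filled in later by the workers
# One cell per row; each cell holds that query's own list of inputs.
df["input_data"] = None if input_data is None else input_data
print(df)
```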
{mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/basic.py
@@ -355,6 +357,8 @@ class ModelFactory:
         Returns:
             str: Output from the model
         """
+        if not isinstance(images, list):
+            images = [images]
         base64_images = [self._image_to_base64(image) for image in images]
         image_prompt_create = [{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_images[i]}"}} for i in range(len(images))]
         prompt_new = [{"type": "text", "text": prompt}, *image_prompt_create]
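The new `isinstance` guard lets callers pass a single image rather than a list. A self-contained approximation of the surrounding message assembly (`image_to_base64` below is an assumed stand-in for the package's `_image_to_base64` helper):

```python
import base64

def image_to_base64(path: str) -> str:
    # Assumed equivalent of the package's _image_to_base64 helper.
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")

def build_image_prompt(prompt: str, images):
    if not isinstance(images, list):
        images = [images]  # normalize a single image to a one-element list
    parts = [
        {"type": "image_url",
         "image_url": {"url": f"data:image/jpeg;base64,{image_to_base64(p)}"}}
        for p in images
    ]
    return [{"type": "text", "text": prompt}, *parts]
```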
mb_rag-1.1.66/mb_rag/prompts_bank.py
@@ -0,0 +1,77 @@
+from langchain_core.prompts.chat import ChatPromptTemplate
+
+__all__ = ["PromptManager"]
+
+class PromptManager:
+    """
+    Central class for storing and invoking prompt templates.
+
+    Example:
+        pm = PromptManager()
+        prompt_text = pm.render_prompt("greeting")
+        print(prompt_text)
+
+        pm = PromptManager()
+        prompt_text = pm.render_prompt("todo_task", {"task": "Plan a deep learning project for image recognition"})
+        print(prompt_text)
+    """
+
+    def __init__(self):
+        self.templates = {
+            "coding_python": """You are a Python developer.
+            Human: {question}
+            Assistant:""",
+
+            "greeting": """You are a friendly assistant.
+            Human: Hello!
+            Assistant: Hi there! How can I assist you today?""",
+
+            "goodbye": """You are a friendly assistant.
+            Human: Goodbye!
+            Assistant: Goodbye! Have a great day!""",
+
+            "todo_task": """You are a helpful assistant.
+            Human: Please create a to-do list for the following task: {task}
+            Assistant:""",
+
+            "map_function": "*map(lambda x: image_url, baseframes_list)",
+
+            "SQL_AGENT_SYS_PROMPT": """You are an expert SQL agent. Your task is to generate and execute SQL queries based on user requests.
+            RULES:
+            - THINK step by step before answering.
+            - Use the provided database schema to inform your queries.
+            - When you need to retrieve data, generate a SQL query and execute it using the provided tools.
+            - Read-only mode: Do not attempt to modify the database.
+            - NO INSERT/UPDATE/DELETE/ALTER/DROP/CREATE/REPLACE/TRUNCATE statements allowed.
+            - LIMIT your results to 10 rows. Unless specified otherwise.
+            - If you encounter an error while executing a query, analyze the error message and adjust your query accordingly.
+            - Prefer using explicit column names instead of SELECT * for better performance.
+            - Always ensure your SQL syntax is correct. """
+        }
+
+    def get_template(self, name: str) -> str:
+        """
+        Get a prompt template by name.
+        Args:
+            name (str): The key name of the prompt.
+        Returns:
+            str: The prompt template string.
+        """
+        template = self.templates.get(name)
+        if not template:
+            raise ValueError(f"Prompt '{name}' not found. Available prompts: {list(self.templates.keys())}")
+        return template
+
+    def render_prompt(self, name: str, context: dict = None) -> str:
+        """
+        Fill and return a rendered prompt string.
+        Args:
+            name (str): The key name of the prompt.
+            context (dict): Variables to fill into the template.
+        Returns:
+            str: The final rendered prompt text.
+        """
+        template = self.get_template(name)
+        chat_prompt = ChatPromptTemplate.from_template(template)
+        rendered = chat_prompt.invoke(context or {})
+        return rendered.to_string()
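Usage mirrors the class's own docstring; a quick check of the new module (import path assumed from the file list above):

```python
from mb_rag.prompts_bank import PromptManager

pm = PromptManager()
print(pm.render_prompt("greeting"))
print(pm.render_prompt("todo_task", {"task": "Plan a deep learning project"}))
# Unknown names raise ValueError listing the available template keys.
```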
mb_rag-1.1.66/mb_rag/utils/llm_wrapper.py
@@ -0,0 +1,31 @@
+## simple llm wrapper to replace invoke with invoke_query/own model query
+
+__all__ = ["LLMWrapper"]
+
+class LLMWrapper:
+    """A simple wrapper for the language model to standardize the invoke method.
+    """
+
+    def __init__(self, llm):
+        self.llm = llm
+
+    def __getattr__(self, name):
+        """Get all attributes from llm module. (invoke_query, invoke_query_threads, etc.)"""
+        return getattr(self.llm, name)
+
+    def invoke(self, use_threads=False,**kwargs) -> str:
+        """
+        Invoke the language model with a list of messages.
+        Using invoke_query method of the underlying model.
+        Check ModelFactory for more details.
+
+        Args:
+            use_threads (bool): Whether to use threading for invocation. Defaults to False.
+            **kwargs: Keyword arguments for the model invocation.
+
+        Returns:
+            str: The generated response.
+        """
+        if use_threads:
+            return self.llm.invoke_query_threads(**kwargs)
+        return self.llm.invoke_query(**kwargs)
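A minimal sketch of how the wrapper might be used so callers get one uniform `invoke()`; the keyword names forwarded to `invoke_query`/`invoke_query_threads` below are assumptions based on the README example, not confirmed signatures:

```python
from mb_rag.utils.llm_wrapper import LLMWrapper

llm = LLMWrapper(ollama_model)  # any object exposing invoke_query(...)

# Single call goes through invoke_query; kwargs pass straight through.
single = llm.invoke(query="What is RAG?")  # kwarg name assumed

# Threaded fan-out goes through invoke_query_threads.
batch = llm.invoke(use_threads=True, query_list=["q1", "q2"], n_workers=4)
```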
mb_rag-1.1.66/mb_rag/utils/viewer.py
@@ -0,0 +1,8 @@
+from IPython.display import Image, display
+
+__all__ = ["display_graph_png"]
+
+
+def display_graph_png(graph):
+    """Display a graph using IPython's display capabilities."""
+    display(Image(graph.get_graph().draw_mermaid_png()))
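The helper targets any object exposing LangGraph's `get_graph().draw_mermaid_png()` chain and renders the PNG inline in a notebook:

```python
from mb_rag.utils.viewer import display_graph_png

# `compiled_graph` is a hypothetical compiled LangGraph graph; anything
# with a get_graph().draw_mermaid_png() chain will work.
# display_graph_png(compiled_graph)
```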
{mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag/version.py
@@ -1,5 +1,5 @@
 MAJOR_VERSION = 1
 MINOR_VERSION = 1
-PATCH_VERSION = 58
+PATCH_VERSION = 66
 version = '{}.{}.{}'.format(MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION)
 __all__ = ['MAJOR_VERSION', 'MINOR_VERSION', 'PATCH_VERSION', 'version']
{mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mb_rag
-Version: 1.1.58
+Version: 1.1.66
 Summary: RAG function file
 Author: ['Malav Bateriwala']
 Requires-Python: >=3.8
{mb_rag-1.1.58 → mb_rag-1.1.66}/mb_rag.egg-info/SOURCES.txt
@@ -3,6 +3,7 @@ pyproject.toml
 setup.py
 mb_rag/__init__.py
 mb_rag/basic.py
+mb_rag/prompts_bank.py
 mb_rag/version.py
 mb_rag.egg-info/PKG-INFO
 mb_rag.egg-info/SOURCES.txt
@@ -12,7 +13,6 @@ mb_rag.egg-info/top_level.txt
 mb_rag/chatbot/__init__.py
 mb_rag/chatbot/chains.py
 mb_rag/chatbot/conversation.py
-mb_rag/chatbot/prompts.py
 mb_rag/rag/__init__.py
 mb_rag/rag/embeddings.py
 mb_rag/utils/__init__.py
@@ -20,4 +20,6 @@ mb_rag/utils/all_data_extract.py
 mb_rag/utils/bounding_box.py
 mb_rag/utils/document_extract.py
 mb_rag/utils/extra.py
-mb_rag/utils/pdf_extract.py
+mb_rag/utils/llm_wrapper.py
+mb_rag/utils/pdf_extract.py
+mb_rag/utils/viewer.py
mb_rag-1.1.58/mb_rag/chatbot/prompts.py
@@ -1,59 +0,0 @@
-## file for storing basic prompts template
-from langchain.prompts import ChatPromptTemplate
-
-__all__ = ["prompts", "invoke_prompt"]
-
-class prompts:
-    """
-    Class to get different prompts example for chatbot and templates
-    """
-
-    def get_code_prompts(self):
-        """
-        Get code prompts
-        Returns:
-            str: Code prompt
-        """
-        list_code_prompts = {'coding_python ': """You are a Python developer.
-        Human: {}"""}
-
-    def get_text_prompts(self):
-        """
-        Get text prompts
-        Returns:
-            str: Text prompt
-        """
-        list_text_prompts = {
-            'multiple_placeholders': """You are a helpful assistant.
-            Human: Tell me a more about {adjective1} and its relation to {adjective2}.
-            Assistant:"""
-        }
-
-    def get_image_prompts(self):
-        """
-        Get image prompts
-        Returns:
-            str: Image prompt
-        """
-        list_image_prompts = {'map_function': "*map(lambda x: image_url, baseframes_list)"} # for passing multiple images from a video or a list of images
-
-    def get_assistant_prompts(self):
-        """
-        Get assistant prompts
-        Returns:
-            str: Assistant prompt
-        """
-        list_assistant_prompts = {}
-
-def invoke_prompt(template: str, input_dict : dict = None):
-    """
-    Invoke a prompt
-    Args:
-        template (str): Template for the prompt
-        input_dict (dict): Input dictionary for the prompt
-    Returns:
-        str: Prompt
-    """
-    prompt_multiple = ChatPromptTemplate.from_template(template)
-    prompt = prompt_multiple.invoke(input_dict)
-    return prompt