ws-bom-robot-app 0.0.32__py3-none-any.whl → 0.0.34__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29) hide show
  1. ws_bom_robot_app/config.py +10 -1
  2. ws_bom_robot_app/llm/agent_description.py +123 -124
  3. ws_bom_robot_app/llm/agent_handler.py +180 -167
  4. ws_bom_robot_app/llm/agent_lcel.py +54 -64
  5. ws_bom_robot_app/llm/api.py +33 -21
  6. ws_bom_robot_app/llm/defaut_prompt.py +15 -9
  7. ws_bom_robot_app/llm/main.py +109 -102
  8. ws_bom_robot_app/llm/models/api.py +55 -7
  9. ws_bom_robot_app/llm/models/kb.py +11 -2
  10. ws_bom_robot_app/llm/providers/__init__.py +0 -0
  11. ws_bom_robot_app/llm/providers/llm_manager.py +174 -0
  12. ws_bom_robot_app/llm/settings.py +4 -4
  13. ws_bom_robot_app/llm/tools/models/main.py +5 -3
  14. ws_bom_robot_app/llm/tools/tool_builder.py +23 -19
  15. ws_bom_robot_app/llm/tools/tool_manager.py +133 -101
  16. ws_bom_robot_app/llm/tools/utils.py +25 -25
  17. ws_bom_robot_app/llm/utils/agent_utils.py +17 -16
  18. ws_bom_robot_app/llm/utils/download.py +79 -79
  19. ws_bom_robot_app/llm/utils/print.py +29 -29
  20. ws_bom_robot_app/llm/vector_store/generator.py +137 -137
  21. ws_bom_robot_app/llm/vector_store/loader/base.py +6 -5
  22. ws_bom_robot_app/llm/vector_store/loader/docling.py +27 -6
  23. ws_bom_robot_app/llm/vector_store/loader/json_loader.py +25 -25
  24. ws_bom_robot_app/main.py +7 -2
  25. {ws_bom_robot_app-0.0.32.dist-info → ws_bom_robot_app-0.0.34.dist-info}/METADATA +25 -12
  26. {ws_bom_robot_app-0.0.32.dist-info → ws_bom_robot_app-0.0.34.dist-info}/RECORD +28 -27
  27. ws_bom_robot_app/llm/utils/faiss_helper.py +0 -127
  28. {ws_bom_robot_app-0.0.32.dist-info → ws_bom_robot_app-0.0.34.dist-info}/WHEEL +0 -0
  29. {ws_bom_robot_app-0.0.32.dist-info → ws_bom_robot_app-0.0.34.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,174 @@
1
+ from typing import Any, Optional
2
+ from langchain_core.embeddings import Embeddings
3
+ from langchain_core.language_models import BaseChatModel
4
+ from pydantic import BaseModel, ConfigDict, Field
5
+
6
class LlmConfig(BaseModel):
    # Provider chat-completions API key (required).
    api_key: str
    # Separate key for the embeddings backend; providers without native
    # embeddings (DeepSeek, Anthropic) fall back to OpenAI with this key.
    embedding_api_key: Optional[str] = None
    # Provider-specific model identifier; may be None until selected.
    model: Optional[str] = None
    # Sampling temperature, validated by pydantic to the range [0.0, 2.0].
    temperature: Optional[float] = Field(0.7, ge=0.0, le=2.0)
11
+
12
# Define an abstract LLM interface
class LlmInterface:
    """Base class every provider implements; holds the shared LlmConfig.

    The defaults (embeddings, scratchpad formatter, output parser) follow the
    OpenAI conventions, so OpenAI-compatible providers only need to override
    get_llm and get_models.
    """

    def __init__(self, config: LlmConfig):
        self.config = config

    def get_llm(self) -> BaseChatModel:
        """Return the provider's chat model. Subclasses must override."""
        raise NotImplementedError

    def get_embeddings(self) -> Embeddings:
        """Default embeddings backend (OpenAI, text-embedding-3-small).

        No api_key is passed here; presumably the OPENAI_API_KEY environment
        variable is expected — TODO confirm.
        """
        from langchain_openai import OpenAIEmbeddings
        return OpenAIEmbeddings(model="text-embedding-3-small")

    def get_models(self) -> list:
        """Return the provider's available models. Subclasses must override."""
        raise NotImplementedError

    def get_formatter(self, intermadiate_steps):
        """Format agent intermediate steps for the scratchpad (OpenAI tools style).

        NOTE(review): "intermadiate" is a typo, kept so keyword callers of the
        public interface keep working.
        """
        from langchain.agents.format_scratchpad.openai_tools import format_to_openai_tool_messages
        return format_to_openai_tool_messages(intermediate_steps=intermadiate_steps)

    def get_parser(self):
        """Output parser matching the default formatter (OpenAI tools style)."""
        from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
        return OpenAIToolsAgentOutputParser()
34
+
35
class OpenAI(LlmInterface):
    """OpenAI provider: chat, embeddings, and model listing."""

    def get_llm(self):
        """Build a streaming ChatOpenAI instance from the config."""
        from langchain_openai import ChatOpenAI
        chat = ChatOpenAI(api_key=self.config.api_key, model=self.config.model)
        # o1/o3 reasoning models reject a custom temperature, so only set it
        # for other models. Guard against model being None (it is Optional in
        # LlmConfig): calling .startswith on None would raise AttributeError.
        model_name = self.config.model or ""
        if not any(model_name.startswith(prefix) for prefix in ["o1", "o3"]):
            chat.temperature = self.config.temperature
        chat.streaming = True
        return chat

    def get_embeddings(self):
        """OpenAI embeddings using the provider's own api key."""
        from langchain_openai import OpenAIEmbeddings
        return OpenAIEmbeddings(
            api_key=self.config.api_key,
            model="text-embedding-3-small")

    def get_models(self):
        """List the models available to this api key via the OpenAI API."""
        import openai
        openai.api_key = self.config.api_key
        response = openai.models.list()
        return response.data
55
+
56
class DeepSeek(LlmInterface):
    """DeepSeek provider, served through its OpenAI-compatible endpoint."""

    def get_llm(self):
        """Streaming chat model pointed at the DeepSeek base URL."""
        from langchain_openai import ChatOpenAI
        options = dict(
            api_key=self.config.api_key,
            model=self.config.model,
            base_url="https://api.deepseek.com/v1",
            max_tokens=8192,
            temperature=self.config.temperature,
            streaming=True,
        )
        return ChatOpenAI(**options)

    def get_embeddings(self):
        """Embeddings are delegated to OpenAI with the dedicated embedding key."""
        from langchain_openai import OpenAIEmbeddings
        return OpenAIEmbeddings(
            api_key=self.config.embedding_api_key,
            model="text-embedding-3-small")

    def get_models(self):
        """Fixed model catalogue (no dynamic listing performed here)."""
        return [{"id": "deepseek-chat"}, {"id": "deepseek-reasoner"}]
79
+
80
class Google(LlmInterface):
    """Google Generative AI (Gemini) provider."""

    def get_llm(self):
        """Gemini chat model with streaming enabled."""
        from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
        return ChatGoogleGenerativeAI(
            name="chat",
            api_key=self.config.api_key,
            model=self.config.model,
            temperature=self.config.temperature,
            disable_streaming=False
        )

    def get_embeddings(self):
        """Google embeddings using the same api key as the chat model."""
        from langchain_google_genai.embeddings import GoogleGenerativeAIEmbeddings
        return GoogleGenerativeAIEmbeddings(
            google_api_key=self.config.api_key,
            model="models/text-embedding-004")

    def get_models(self):
        """List available models, keeping only the Gemini family."""
        import google.generativeai as genai
        genai.configure(api_key=self.config.api_key)
        gemini_models = []
        for model in genai.list_models():
            if "gemini" not in model.name.lower():
                continue
            gemini_models.append({
                "id": model.name,
                "name": model.display_name,
                "description": model.description,
                "input_token_limit": model.input_token_limit,
                "output_token_limit": model.output_token_limit
            })
        return gemini_models
108
+
109
class Gvertex(LlmInterface):
    """Google Vertex AI provider; credentials come from the ambient GCP setup
    (no api key is passed explicitly)."""

    def get_llm(self):
        from langchain_google_vertexai import ChatVertexAI
        return ChatVertexAI(
            model=self.config.model,
            temperature=self.config.temperature,
        )

    def get_embeddings(self):
        from langchain_google_vertexai import VertexAIEmbeddings
        return VertexAIEmbeddings(model_name="text-embedding-004")

    def get_models(self):
        """List deployed Vertex models, with a static fallback when empty.

        See https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#united-states
        for available models.
        """
        from google.cloud import aiplatform
        aiplatform.init()
        fallback = [
            {"id": "gemini-2.0-flash-001"},
            {"id": "gemini-1.5-pro-001"},
            {"id": "gemini-1.5-pro-002"}
        ]
        return aiplatform.Model.list() or fallback
129
+
130
class Anthropic(LlmInterface):
    """Anthropic (Claude) provider; embeddings are delegated to OpenAI."""

    def get_llm(self):
        """Streaming Claude chat model (usage metadata streaming disabled)."""
        from langchain_anthropic import ChatAnthropic
        return ChatAnthropic(
            api_key=self.config.api_key,
            model=self.config.model,
            temperature=self.config.temperature,
            streaming=True,
            stream_usage=False
        )

    def get_embeddings(self):
        # Anthropic exposes no embeddings API here; reuse OpenAI embeddings
        # with the dedicated embedding key. (A VoyageAI-based alternative was
        # sketched previously and left disabled.)
        from langchain_openai import OpenAIEmbeddings
        return OpenAIEmbeddings(
            api_key=self.config.embedding_api_key,
            model="text-embedding-3-small")

    def get_models(self):
        """List the models available to this api key via the Anthropic API."""
        import anthropic
        client = anthropic.Client(api_key=self.config.api_key)
        return client.models.list().data

    def get_formatter(self, intermadiate_steps):
        # Anthropic uses the generic tools scratchpad format instead of the
        # OpenAI-specific default. NOTE: parameter name keeps the base class's
        # spelling ("intermadiate") for keyword-call compatibility.
        from langchain.agents.format_scratchpad.tools import format_to_tool_messages
        return format_to_tool_messages(intermediate_steps=intermadiate_steps)

    def get_parser(self):
        """Output parser matching the generic tools formatter."""
        from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
        return ToolsAgentOutputParser()
164
+
165
class LlmManager:
    """Static registry mapping provider keys to their LlmInterface classes."""

    # class variables (static)
    # The values are the classes themselves (callers instantiate them with an
    # LlmConfig), so the correct annotation is type[LlmInterface], not
    # LlmInterface instances.
    _list: dict[str, type[LlmInterface]] = {
        "anthropic": Anthropic,
        "deepseek": DeepSeek,
        "google": Google,
        "gvertex": Gvertex,
        "openai": OpenAI,
    }
@@ -1,4 +1,4 @@
1
- def init():
2
- """Initialize the chat history list as a global var"""
3
- global chat_history
4
- chat_history = []
1
def init():
    """Initialize the chat history list as a global var"""
    # module-level mutable state: re-calling init() resets the history
    global chat_history
    chat_history = []
@@ -1,7 +1,9 @@
1
1
  from pydantic import BaseModel, Field
2
2
 
3
# Empty args schema for tools that take no parameters (no docstring on
# purpose: it would surface as the schema description sent to the LLM).
class NoopInput(BaseModel):
    pass
5
# Args schema for the document_retriever tool; the Field description is
# surfaced to the LLM in the tool-call schema.
class DocumentRetrieverInput(BaseModel):
    query: str = Field(description="The search query string")
3
7
# Args schema for the image_generator tool; Field descriptions are surfaced
# to the LLM in the tool-call schema.
class ImageGeneratorInput(BaseModel):
    query: str = Field(description="description of the image to generate.")
    language: str = Field(description="Language of the query. Default is 'it'", default="it")
@@ -1,19 +1,23 @@
1
- from asyncio import Queue
2
- from langchain.tools import StructuredTool
3
- from ws_bom_robot_app.llm.models.api import LlmAppTool
4
- from ws_bom_robot_app.llm.tools.tool_manager import ToolManager
5
-
6
- def get_structured_tools(tools: list[LlmAppTool], api_key:str, callbacks:list, queue: Queue) -> list[StructuredTool]:
7
- _structured_tools :list[StructuredTool] = []
8
- for tool in [tool for tool in tools if tool.is_active]:
9
- if _tool_config := ToolManager._list.get(tool.function_name):
10
- _tool_instance = ToolManager(tool, api_key, callbacks, queue)
11
- _structured_tool = StructuredTool.from_function(
12
- coroutine=_tool_instance.get_coroutine(),
13
- name=tool.function_id,
14
- description=tool.function_description,
15
- args_schema=_tool_config.model
16
- )
17
- _structured_tool.tags = [tool.function_id]
18
- _structured_tools.append(_structured_tool)
19
- return _structured_tools
1
+ from asyncio import Queue
2
+ from langchain.tools import Tool, StructuredTool
3
+ from ws_bom_robot_app.llm.models.api import LlmAppTool
4
+ from ws_bom_robot_app.llm.tools.tool_manager import ToolManager
5
+ from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
6
+
7
def get_structured_tools(llm: LlmInterface, tools: list[LlmAppTool], callbacks: list, queue: Queue) -> list[StructuredTool]:
    """Build a StructuredTool for every active app tool registered in ToolManager.

    Tools whose function_name is not in ToolManager._list are skipped silently.
    The tool name and tag fall back to function_name when function_id is empty.
    """
    structured: list[StructuredTool] = []
    for app_tool in tools:
        if not app_tool.is_active:
            continue
        tool_config = ToolManager._list.get(app_tool.function_name)
        if tool_config is None:
            continue
        manager = ToolManager(llm, app_tool, callbacks, queue)
        label = app_tool.function_id if app_tool.function_id else app_tool.function_name
        tool = StructuredTool.from_function(
            coroutine=manager.get_coroutine(),
            name=label,
            description=app_tool.function_description,
            args_schema=tool_config.model,
        )
        tool.tags = [label]
        structured.append(tool)
    return structured
@@ -1,101 +1,133 @@
1
- from asyncio import Queue
2
- from typing import Optional, Type, Callable
3
- from ws_bom_robot_app.llm.models.api import LlmAppTool
4
- from ws_bom_robot_app.llm.utils.faiss_helper import FaissHelper
5
- from ws_bom_robot_app.llm.tools.utils import getRandomWaitingMessage, translate_text
6
- from ws_bom_robot_app.llm.tools.models.main import ImageGeneratorInput
7
- from pydantic import BaseModel, ConfigDict
8
- from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
9
-
10
- class ToolConfig(BaseModel):
11
- function: Callable
12
- model: Optional[Type[BaseModel]] = None
13
- model_config = ConfigDict(
14
- arbitrary_types_allowed=True
15
- )
16
-
17
- class ToolManager:
18
- """
19
- ToolManager is responsible for managing various tools used in the application.
20
-
21
- Attributes:
22
- app_tool (LlmAppTool): The application tool configuration.
23
- api_key (str): The API key for accessing external services.
24
- callbacks (list): A list of callback functions to be executed.
25
-
26
- Methods:
27
- document_retriever(query: str): Asynchronously retrieves documents based on the query.
28
- image_generator(query: str, language: str = "it"): Asynchronously generates an image based on the query.
29
- get_coroutine(): Retrieves the coroutine function based on the tool configuration.
30
- """
31
-
32
- def __init__(
33
- self,
34
- app_tool: LlmAppTool,
35
- api_key: str,
36
- callbacks: list,
37
- queue: Optional[Queue] = None
38
- ):
39
- self.app_tool = app_tool
40
- self.api_key = api_key
41
- self.callbacks = callbacks
42
- self.queue = queue
43
-
44
-
45
- #region functions
46
- async def document_retriever(self, query: str):
47
- if (
48
- self.app_tool.type == "function" and self.app_tool.vector_db
49
- #and self.settings.get("dataSource") == "knowledgebase"
50
- ):
51
- search_type = "similarity"
52
- search_kwargs = {"k": 4}
53
- if self.app_tool.search_settings:
54
- search_settings = self.app_tool.search_settings # type: ignore
55
- if search_settings.search_type == "similarityScoreThreshold":
56
- search_type = "similarity_score_threshold"
57
- search_kwargs = {
58
- "score_threshold": search_settings.score_threshold_id if search_settings.score_threshold_id else 0.5,
59
- "k": search_settings.search_k if search_settings.search_k else 100
60
- }
61
- elif search_settings.search_type == "mmr":
62
- search_type = "mmr"
63
- search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
64
- elif search_settings.search_type == "default":
65
- search_type = "similarity"
66
- search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
67
- else:
68
- search_type = "mixed"
69
- search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
70
- if self.queue:
71
- await self.queue.put(getRandomWaitingMessage(self.app_tool.waiting_message, traduction=False))
72
- return await FaissHelper.invoke(self.app_tool.vector_db, self.api_key, query, search_type, search_kwargs)
73
- return []
74
- #raise ValueError(f"Invalid configuration for {self.settings.name} tool of type {self.settings.type}. Must be a function or vector db not found.")
75
-
76
- async def image_generator(self, query: str, language: str = "it"):
77
- model = self.app_tool.model or "dall-e-3"
78
- random_waiting_message = getRandomWaitingMessage(self.app_tool.waiting_message, traduction=False)
79
- if not language:
80
- language = "it"
81
- await translate_text(
82
- self.api_key, language, random_waiting_message, self.callbacks
83
- )
84
- try:
85
- image_url = DallEAPIWrapper(api_key=self.api_key, model=model).run(query) # type: ignore
86
- return image_url
87
- except Exception as e:
88
- return f"Error: {str(e)}"
89
-
90
- #endregion
91
-
92
- #class variables (static)
93
- _list: dict[str,ToolConfig] = {
94
- "document_retriever": ToolConfig(function=document_retriever),
95
- "image_generator": ToolConfig(function=image_generator, model=ImageGeneratorInput),
96
- }
97
-
98
- #instance methods
99
- def get_coroutine(self):
100
- tool_cfg = self._list.get(self.app_tool.function_name)
101
- return getattr(self, tool_cfg.function.__name__) # type: ignore
1
+ from asyncio import Queue
2
+ from typing import Optional, Type, Callable
3
+ from ws_bom_robot_app.llm.models.api import LlmAppTool
4
+ from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
5
+ from ws_bom_robot_app.llm.vector_store.db.manager import VectorDbManager
6
+ from ws_bom_robot_app.llm.tools.utils import getRandomWaitingMessage, translate_text
7
+ from ws_bom_robot_app.llm.tools.models.main import NoopInput,DocumentRetrieverInput,ImageGeneratorInput
8
+ from pydantic import BaseModel, ConfigDict
9
+
10
+ class ToolConfig(BaseModel):
11
+ function: Callable
12
+ model: Optional[Type[BaseModel]] = NoopInput
13
+ model_config = ConfigDict(
14
+ arbitrary_types_allowed=True
15
+ )
16
+
17
+ class ToolManager:
18
+ """
19
+ ToolManager is responsible for managing various tools used in the application.
20
+
21
+ Attributes:
22
+ app_tool (LlmAppTool): The application tool configuration.
23
+ api_key (str): The API key for accessing external services.
24
+ callbacks (list): A list of callback functions to be executed.
25
+
26
+ Methods:
27
+ document_retriever(query: str): Asynchronously retrieves documents based on the query.
28
+ image_generator(query: str, language: str = "it"): Asynchronously generates an image based on the query.
29
+ get_coroutine(): Retrieves the coroutine function based on the tool configuration.
30
+ """
31
+
32
+ def __init__(
33
+ self,
34
+ llm: LlmInterface,
35
+ app_tool: LlmAppTool,
36
+ callbacks: list,
37
+ queue: Optional[Queue] = None
38
+ ):
39
+ self.llm = llm
40
+ self.app_tool = app_tool
41
+ self.callbacks = callbacks
42
+ self.queue = queue
43
+
44
+
45
+ #region functions
46
+ async def document_retriever(self, query: str) -> list:
47
+ """
48
+ Asynchronously retrieves documents based on the provided query using the specified search settings.
49
+
50
+ Args:
51
+ query (str): The search query string.
52
+
53
+ Returns:
54
+ list: A list of retrieved documents based on the search criteria.
55
+
56
+ Raises:
57
+ ValueError: If the configuration for the tool is invalid or the vector database is not found.
58
+
59
+ Notes:
60
+ - The function supports different search types such as "similarity", "similarity_score_threshold", "mmr", and "mixed".
61
+ - The search settings can be customized through the `app_tool.search_settings` attribute.
62
+ - If a queue is provided, a waiting message is put into the queue before invoking the search.
63
+ """
64
+ if (
65
+ self.app_tool.type == "function" and self.app_tool.vector_db
66
+ #and self.settings.get("dataSource") == "knowledgebase"
67
+ ):
68
+ search_type = "similarity"
69
+ search_kwargs = {"k": 4}
70
+ if self.app_tool.search_settings:
71
+ search_settings = self.app_tool.search_settings # type: ignore
72
+ if search_settings.search_type == "similarityScoreThreshold":
73
+ search_type = "similarity_score_threshold"
74
+ search_kwargs = {
75
+ "score_threshold": search_settings.score_threshold_id if search_settings.score_threshold_id else 0.5,
76
+ "k": search_settings.search_k if search_settings.search_k else 100
77
+ }
78
+ elif search_settings.search_type == "mmr":
79
+ search_type = "mmr"
80
+ search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
81
+ elif search_settings.search_type == "default":
82
+ search_type = "similarity"
83
+ search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
84
+ else:
85
+ search_type = "mixed"
86
+ search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
87
+ if self.queue:
88
+ await self.queue.put(getRandomWaitingMessage(self.app_tool.waiting_message, traduction=False))
89
+
90
+ return await VectorDbManager.get_strategy(self.app_tool.vector_type).invoke(
91
+ self.llm.get_embeddings(),
92
+ self.app_tool.vector_db,
93
+ query,
94
+ search_type,
95
+ search_kwargs,
96
+ app_tool=self.app_tool,
97
+ llm=self.llm.get_llm()
98
+ )
99
+ return []
100
+ #raise ValueError(f"Invalid configuration for {self.settings.name} tool of type {self.settings.type}. Must be a function or vector db not found.")
101
+
102
+ async def image_generator(self, query: str, language: str = "it"):
103
+ """
104
+ Asynchronously generates an image based on the query.
105
+ set OPENAI_API_KEY in your environment variables
106
+ """
107
+ from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
108
+ model = self.app_tool.model or "dall-e-3"
109
+ random_waiting_message = getRandomWaitingMessage(self.app_tool.waiting_message, traduction=False)
110
+ if not language:
111
+ language = "it"
112
+ await translate_text(
113
+ self.llm, language, random_waiting_message, self.callbacks
114
+ )
115
+ try:
116
+ #set os.environ.get("OPENAI_API_KEY")!
117
+ image_url = DallEAPIWrapper(model=model).run(query) # type: ignore
118
+ return image_url
119
+ except Exception as e:
120
+ return f"Error: {str(e)}"
121
+
122
+ #endregion
123
+
124
+ #class variables (static)
125
+ _list: dict[str,ToolConfig] = {
126
+ "document_retriever": ToolConfig(function=document_retriever, model=DocumentRetrieverInput),
127
+ "image_generator": ToolConfig(function=image_generator, model=ImageGeneratorInput),
128
+ }
129
+
130
+ #instance methods
131
+ def get_coroutine(self):
132
+ tool_cfg = self._list.get(self.app_tool.function_name)
133
+ return getattr(self, tool_cfg.function.__name__) # type: ignore
@@ -1,25 +1,25 @@
1
- import random, os
2
- from langchain_openai import ChatOpenAI
3
- from langchain_core.prompts import PromptTemplate
4
- from ws_bom_robot_app.llm.utils.print import printString
5
-
6
- def __print_output(data: str) -> str:
7
- return printString(data) if os.environ.get("AGENT_HANDLER_FORMATTED") == str(True) else f"{data} "
8
-
9
- def getRandomWaitingMessage(waiting_messages: str, traduction: bool = True) -> str:
10
- if not waiting_messages: return ""
11
- messages = [msg.strip() for msg in waiting_messages.split(";") if msg.strip()]
12
- if not messages: return ""
13
- chosen_message = random.choice(messages) + "\n"
14
- if not traduction:
15
- return __print_output(chosen_message)
16
- return chosen_message
17
-
18
- async def translate_text(api_key, language, text: str, callbacks: list) -> str:
19
- if language == "it":
20
- return __print_output(text)
21
- llm = ChatOpenAI(api_key=api_key, model="gpt-3.5-turbo-0125", streaming=True)
22
- sys_message = """Il tuo compito è di tradurre il testo_da_tradure nella seguente lingua: \n\n lingua: {language}\n\n testo_da_tradure: {testo_da_tradure} \n\nTraduci il testo_da_tradure nella lingua {language} senza aggiungere altro:"""
23
- prompt = PromptTemplate.from_template(sys_message)
24
- chain = prompt | llm
25
- await chain.ainvoke({"language":language, "testo_da_tradure": text}, {"callbacks": callbacks})
1
+ import random, os
2
+ from langchain_openai import ChatOpenAI
3
+ from langchain_core.prompts import PromptTemplate
4
+ from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
5
+ from ws_bom_robot_app.llm.utils.print import printString
6
+
7
def __print_output(data: str) -> str:
    """Return *data* via printString when AGENT_HANDLER_FORMATTED is "True",
    otherwise return it with a trailing space."""
    if os.environ.get("AGENT_HANDLER_FORMATTED") == str(True):
        return printString(data)
    return f"{data} "
9
+
10
def getRandomWaitingMessage(waiting_messages: str, traduction: bool = True) -> str:
    """Pick one message at random from a ';'-separated list.

    Returns "" when the list is empty or blank. The chosen message gets a
    trailing newline; when traduction is False it is additionally routed
    through the module's output formatter.
    """
    if not waiting_messages:
        return ""
    candidates = []
    for part in waiting_messages.split(";"):
        part = part.strip()
        if part:
            candidates.append(part)
    if not candidates:
        return ""
    picked = random.choice(candidates) + "\n"
    return picked if traduction else __print_output(picked)
18
+
19
async def translate_text(llm: LlmInterface, language, text: str, callbacks: list) -> str:
    """Stream a translation of *text* into *language* through the llm's chat model.

    Italian ("it") text is short-circuited through the output formatter.
    NOTE(review): the non-"it" path streams via callbacks and returns None
    despite the -> str annotation — behavior kept as-is.
    """
    if language == "it":
        return __print_output(text)
    sys_message = """Il tuo compito è di tradurre il testo_da_tradurre nella seguente lingua: \n\n lingua: {language}\n\n testo_da_tradurre: {testo_da_tradurre} \n\nTraduci il testo_da_tradurre nella lingua {language} senza aggiungere altro:"""
    chain = PromptTemplate.from_template(sys_message) | llm.get_llm()
    await chain.ainvoke({"language":language, "testo_da_tradurre": text}, {"callbacks": callbacks})
@@ -1,16 +1,17 @@
1
- import os
2
- from ws_bom_robot_app.llm.models.api import LlmRules
3
- from ws_bom_robot_app.llm.utils.print import HiddenPrints
4
- from ws_bom_robot_app.llm.utils.faiss_helper import FaissHelper
5
-
6
- async def get_rules(rules: LlmRules, api_key:str, input: str) -> str:
7
- with HiddenPrints():
8
- if any([input=="",rules is None,rules and rules.vector_db == "",rules and not os.path.exists(rules.vector_db)]):
9
- return ""
10
- rules_prompt = ""
11
- rules_doc = await FaissHelper.invoke(rules.vector_db,api_key,input,search_type="similarity_score_threshold", search_kwargs={"score_threshold": rules.threshold}) #type: ignore
12
- if len(rules_doc) > 0:
13
- rules_prompt = "\nFollow this rules: \n RULES: \n"
14
- for rule_doc in rules_doc:
15
- rules_prompt += "- " + rule_doc.page_content + "\n"
16
- return rules_prompt
1
+ import os
2
+ from langchain_core.embeddings import Embeddings
3
+ from ws_bom_robot_app.llm.models.api import LlmRules
4
+ from ws_bom_robot_app.llm.utils.print import HiddenPrints
5
+ from ws_bom_robot_app.llm.vector_store.db.manager import VectorDbManager
6
+
7
async def get_rules(embeddings: Embeddings, rules: LlmRules, input: str) -> str:
    """Build a RULES prompt section from rule documents matching *input*.

    Runs a similarity_score_threshold search against the rules vector db and
    formats each hit as a "- <content>" line. Returns "" when the input is
    empty, rules are missing/unconfigured, or nothing matches.
    """
    with HiddenPrints():
        # eager checks kept as a single any([...]) to preserve original semantics
        if any([input == "", rules is None, rules and rules.vector_db == "", rules and not os.path.exists(rules.vector_db)]):
            return ""
        rules_doc = await VectorDbManager.get_strategy(rules.vector_type).invoke(
            embeddings,
            rules.vector_db,
            input,
            search_type="similarity_score_threshold",
            search_kwargs={"score_threshold": rules.threshold},
        )  # type: ignore
        if not rules_doc:
            return ""
        body = "".join("- " + doc.page_content + "\n" for doc in rules_doc)
        return "\nFollow this rules: \n RULES: \n" + body