ws-bom-robot-app 0.0.60__py3-none-any.whl → 0.0.62__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. ws_bom_robot_app/config.py +2 -3
  2. ws_bom_robot_app/cron_manager.py +2 -2
  3. ws_bom_robot_app/llm/agent_description.py +123 -123
  4. ws_bom_robot_app/llm/agent_handler.py +177 -177
  5. ws_bom_robot_app/llm/agent_lcel.py +45 -46
  6. ws_bom_robot_app/llm/api.py +12 -0
  7. ws_bom_robot_app/llm/defaut_prompt.py +15 -15
  8. ws_bom_robot_app/llm/feedbacks/feedback_manager.py +66 -74
  9. ws_bom_robot_app/llm/main.py +134 -134
  10. ws_bom_robot_app/llm/models/api.py +6 -0
  11. ws_bom_robot_app/llm/models/feedback.py +30 -30
  12. ws_bom_robot_app/llm/nebuly_handler.py +182 -173
  13. ws_bom_robot_app/llm/settings.py +4 -4
  14. ws_bom_robot_app/llm/tools/models/main.py +4 -0
  15. ws_bom_robot_app/llm/tools/tool_builder.py +65 -23
  16. ws_bom_robot_app/llm/tools/tool_manager.py +312 -228
  17. ws_bom_robot_app/llm/tools/utils.py +41 -41
  18. ws_bom_robot_app/llm/utils/agent.py +34 -34
  19. ws_bom_robot_app/llm/utils/cms.py +77 -0
  20. ws_bom_robot_app/llm/utils/download.py +79 -79
  21. ws_bom_robot_app/llm/utils/print.py +29 -29
  22. ws_bom_robot_app/llm/vector_store/generator.py +137 -137
  23. ws_bom_robot_app/llm/vector_store/loader/json_loader.py +25 -25
  24. ws_bom_robot_app/task_manager.py +3 -1
  25. ws_bom_robot_app/util.py +59 -20
  26. {ws_bom_robot_app-0.0.60.dist-info → ws_bom_robot_app-0.0.62.dist-info}/METADATA +17 -17
  27. {ws_bom_robot_app-0.0.60.dist-info → ws_bom_robot_app-0.0.62.dist-info}/RECORD +29 -28
  28. {ws_bom_robot_app-0.0.60.dist-info → ws_bom_robot_app-0.0.62.dist-info}/WHEEL +0 -0
  29. {ws_bom_robot_app-0.0.60.dist-info → ws_bom_robot_app-0.0.62.dist-info}/top_level.txt +0 -0
@@ -1,228 +1,312 @@
1
- from asyncio import Queue
2
- from typing import Optional, Type, Callable
3
- from ws_bom_robot_app.llm.models.api import LlmAppTool
4
- from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
5
- from ws_bom_robot_app.llm.vector_store.db.manager import VectorDbManager
6
- from ws_bom_robot_app.llm.tools.utils import getRandomWaitingMessage, translate_text
7
- from ws_bom_robot_app.llm.tools.models.main import NoopInput,DocumentRetrieverInput,ImageGeneratorInput,LlmChainInput,SearchOnlineInput
8
- from pydantic import BaseModel, ConfigDict
9
-
10
- class ToolConfig(BaseModel):
11
- function: Callable
12
- model: Optional[Type[BaseModel]] = NoopInput
13
- model_config = ConfigDict(
14
- arbitrary_types_allowed=True
15
- )
16
-
17
- class ToolManager:
18
- """
19
- ToolManager is responsible for managing various tools used in the application.
20
-
21
- Attributes:
22
- app_tool (LlmAppTool): The application tool configuration.
23
- api_key (str): The API key for accessing external services.
24
- callbacks (list): A list of callback functions to be executed.
25
-
26
- Methods:
27
- document_retriever(query: str): Asynchronously retrieves documents based on the query.
28
- image_generator(query: str, language: str = "it"): Asynchronously generates an image based on the query.
29
- get_coroutine(): Retrieves the coroutine function based on the tool configuration.
30
- """
31
-
32
- def __init__(
33
- self,
34
- llm: LlmInterface,
35
- app_tool: LlmAppTool,
36
- callbacks: list,
37
- queue: Optional[Queue] = None
38
- ):
39
- self.llm = llm
40
- self.app_tool = app_tool
41
- self.callbacks = callbacks
42
- self.queue = queue
43
-
44
- async def __extract_documents(self, query: str, app_tool: LlmAppTool):
45
- search_type = "similarity"
46
- search_kwargs = {"k": 4}
47
- if app_tool.search_settings:
48
- search_settings = app_tool.search_settings # type: ignore
49
- if search_settings.search_type == "similarityScoreThreshold":
50
- search_type = "similarity_score_threshold"
51
- search_kwargs = {
52
- "score_threshold": search_settings.score_threshold_id if search_settings.score_threshold_id else 0.5,
53
- "k": search_settings.search_k if search_settings.search_k else 100
54
- }
55
- elif search_settings.search_type == "mmr":
56
- search_type = "mmr"
57
- search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
58
- elif search_settings.search_type == "default":
59
- search_type = "similarity"
60
- search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
61
- else:
62
- search_type = "mixed"
63
- search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
64
- if self.queue:
65
- await self.queue.put(getRandomWaitingMessage(app_tool.waiting_message, traduction=False))
66
-
67
- return await VectorDbManager.get_strategy(app_tool.vector_type).invoke(
68
- self.llm.get_embeddings(),
69
- app_tool.vector_db,
70
- query,
71
- search_type,
72
- search_kwargs,
73
- app_tool=app_tool,
74
- llm=self.llm.get_llm(),
75
- source=app_tool.function_id,
76
- )
77
-
78
- #region functions
79
- async def document_retriever(self, query: str) -> list:
80
- """
81
- Asynchronously retrieves documents based on the provided query using the specified search settings.
82
-
83
- Args:
84
- query (str): The search query string.
85
-
86
- Returns:
87
- list: A list of retrieved documents based on the search criteria.
88
-
89
- Raises:
90
- ValueError: If the configuration for the tool is invalid or the vector database is not found.
91
-
92
- Notes:
93
- - The function supports different search types such as "similarity", "similarity_score_threshold", "mmr", and "mixed".
94
- - The search settings can be customized through the `app_tool.search_settings` attribute.
95
- - If a queue is provided, a waiting message is put into the queue before invoking the search.
96
- """
97
- if (
98
- self.app_tool.type == "function" and self.app_tool.vector_db
99
- #and self.settings.get("dataSource") == "knowledgebase"
100
- ):
101
- return await self.__extract_documents(query, self.app_tool)
102
-
103
- async def image_generator(self, query: str, language: str = "it"):
104
- """
105
- Asynchronously generates an image based on the query.
106
- set OPENAI_API_KEY in your environment variables
107
- """
108
- from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
109
- model = self.app_tool.model or "dall-e-3"
110
- random_waiting_message = getRandomWaitingMessage(self.app_tool.waiting_message, traduction=False)
111
- if not language:
112
- language = "it"
113
- await translate_text(
114
- self.llm, language, random_waiting_message, self.callbacks
115
- )
116
- try:
117
- #set os.environ.get("OPENAI_API_KEY")!
118
- image_url = DallEAPIWrapper(model=model).run(query) # type: ignore
119
- return image_url
120
- except Exception as e:
121
- return f"Error: {str(e)}"
122
-
123
- async def llm_chain(self, input: str):
124
- if self.app_tool.type == "llmChain":
125
- from langchain_core.prompts import ChatPromptTemplate
126
- from langchain_core.output_parsers import StrOutputParser
127
- system_message = self.app_tool.llm_chain_settings.prompt
128
- context = []
129
- if self.app_tool.data_source == "knowledgebase":
130
- context = await self.__extract_documents(input, self.app_tool)
131
- if len(context) > 0:
132
- for doc in context:
133
- system_message += f"\n\nContext:\n{doc.metadata.get("source", "")}: {doc.page_content}"
134
- prompt = ChatPromptTemplate.from_messages(
135
- [ ("system", system_message),
136
- ("user", "{input}")],
137
- )
138
- model = self.app_tool.llm_chain_settings.model
139
- self.llm.config.model = model
140
- llm = self.llm.get_llm()
141
- chain = prompt | llm | StrOutputParser()
142
- result = await chain.ainvoke({"input": input})
143
- return result
144
-
145
-
146
- async def search_online(self, query: str):
147
- from ws_bom_robot_app.llm.tools.utils import fetch_page, extract_content_with_trafilatura
148
- from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
149
- import aiohttp, asyncio, ast
150
- # Wrapper DuckDuckGo
151
- search = DuckDuckGoSearchAPIWrapper(max_results=10)
152
- try:
153
- raw_results = search.results(query, max_results=10)
154
- except Exception as e:
155
- print(f"[!] Errore ricerca: {e}")
156
- urls = [r["link"] for r in raw_results]
157
- async with aiohttp.ClientSession() as session:
158
- tasks = [fetch_page(session, url) for url in urls]
159
- responses = await asyncio.gather(*tasks)
160
- final_results = []
161
- for item in responses:
162
- url = item["url"]
163
- html = item["html"]
164
- if html:
165
- content = await extract_content_with_trafilatura(html)
166
- if content:
167
- final_results.append({"url": url, "content": content})
168
- else:
169
- final_results.append({"url": url, "content": "No content found"})
170
- else:
171
- final_results.append({"url": url, "content": "Page not found"})
172
- return final_results
173
-
174
- async def search_online_google(self, query: str):
175
- from langchain_google_community import GoogleSearchAPIWrapper
176
- from ws_bom_robot_app.llm.tools.utils import fetch_page, extract_content_with_trafilatura
177
- import aiohttp, asyncio
178
- secrets = {}
179
- for d in self.app_tool.secrets:
180
- secrets[d.get("secretId")] = d.get("secretValue")
181
- search_type = secrets.get("searchType")
182
- if search_type:
183
- search_kwargs = {"searchType" : search_type}
184
- search = GoogleSearchAPIWrapper(
185
- google_api_key=secrets.get("GOOGLE_API_KEY"),
186
- google_cse_id=secrets.get("GOOGLE_CSE_ID"),
187
- )
188
- if search_type:
189
- raw_results = search.results(query=query,
190
- num_results=secrets.get("num_results", 5),
191
- search_params=search_kwargs)
192
- return raw_results
193
- raw_results = search.results(
194
- query=query,
195
- num_results=secrets.get("num_results", 5)
196
- )
197
- urls = [r["link"] for r in raw_results]
198
- async with aiohttp.ClientSession() as session:
199
- tasks = [fetch_page(session, url) for url in urls]
200
- responses = await asyncio.gather(*tasks)
201
- final_results = []
202
- for item in responses:
203
- url = item["url"]
204
- html = item["html"]
205
- if html:
206
- content = await extract_content_with_trafilatura(html)
207
- if content:
208
- final_results.append({"url": url, "content": content, "type": "web"})
209
- else:
210
- final_results.append({"url": url, "content": "No content found", "type": "web"})
211
- else:
212
- final_results.append({"url": url, "content": "Page not found", "type": "web"})
213
- return final_results
214
- #endregion
215
-
216
- #class variables (static)
217
- _list: dict[str,ToolConfig] = {
218
- "document_retriever": ToolConfig(function=document_retriever, model=DocumentRetrieverInput),
219
- "image_generator": ToolConfig(function=image_generator, model=ImageGeneratorInput),
220
- "llm_chain": ToolConfig(function=llm_chain, model=LlmChainInput),
221
- "search_online": ToolConfig(function=search_online, model=SearchOnlineInput),
222
- "search_online_google": ToolConfig(function=search_online_google, model=SearchOnlineInput),
223
- }
224
-
225
- #instance methods
226
- def get_coroutine(self):
227
- tool_cfg = self._list.get(self.app_tool.function_name)
228
- return getattr(self, tool_cfg.function.__name__) # type: ignore
1
+ from asyncio import Queue
2
+ import aiohttp
3
+ from typing import Optional, Type, Callable
4
+ from ws_bom_robot_app.config import config
5
+ from ws_bom_robot_app.llm.models.api import LlmApp,LlmAppTool
6
+ from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
7
+ from ws_bom_robot_app.llm.utils.cms import CmsApp, get_app_by_id
8
+ from ws_bom_robot_app.llm.vector_store.db.manager import VectorDbManager
9
+ from ws_bom_robot_app.llm.tools.utils import getRandomWaitingMessage, translate_text
10
+ from ws_bom_robot_app.llm.tools.models.main import NoopInput,DocumentRetrieverInput,ImageGeneratorInput,LlmChainInput,SearchOnlineInput,EmailSenderInput
11
+ from pydantic import BaseModel, ConfigDict
12
+
13
class ToolConfig(BaseModel):
    """Registry entry binding a tool coroutine to its pydantic argument schema."""
    # Callable values cannot be validated natively by pydantic, so allow them.
    model_config = ConfigDict(arbitrary_types_allowed=True)
    function: Callable
    model: Optional[Type[BaseModel]] = NoopInput
19
+
20
class ToolManager:
    """
    Registry and dispatcher for the chat agent's tool implementations.

    Attributes:
        llm (LlmInterface): provider wrapper used for chat, embeddings and translations.
        app_tool (LlmAppTool): the CMS-side tool configuration being served.
        callbacks (list): callback handlers forwarded to LLM invocations.
        queue (Optional[Queue]): when set, receives streaming "waiting" messages.

    Methods:
        document_retriever(query): retrieve documents from the tool's vector store.
        image_generator(query, language="it"): generate an image via DALL-E.
        llm_chain(input): run a standalone prompt/LLM chain.
        search_online(query) / search_online_google(query): web search + page extraction.
        send_email(email_subject, body, to_email): send mail using SMTP secrets.
        proxy_app_chat(query): forward the query to another CMS-hosted app.
        get_coroutine(): resolve the coroutine configured for this tool.
    """
34
+
35
def __init__(
    self,
    llm: LlmInterface,
    app_tool: LlmAppTool,
    callbacks: list,
    queue: Optional[Queue] = None
):
    """Store collaborators; no I/O or validation is performed at construction time.

    Args:
        llm: LLM provider wrapper (chat model + embeddings factory).
        app_tool: the tool configuration coming from the CMS.
        callbacks: callback handlers forwarded to downstream LLM calls.
        queue: optional asyncio queue for streaming progress messages to the client.
    """
    self.llm = llm
    self.app_tool = app_tool
    self.callbacks = callbacks
    self.queue = queue
46
+
47
async def __extract_documents(self, query: str, app_tool: LlmAppTool):
    """Resolve the search strategy from the tool's settings and query its vector store.

    Defaults to plain similarity search with k=4 when no search settings exist;
    unknown search types fall back to the "mixed" strategy.
    """
    strategy, strategy_kwargs = "similarity", {"k": 4}
    settings = app_tool.search_settings  # type: ignore
    if settings:
        top_k = settings.search_k or 4
        if settings.search_type == "similarityScoreThreshold":
            strategy = "similarity_score_threshold"
            strategy_kwargs = {
                "score_threshold": settings.score_threshold_id or 0.5,
                "k": settings.search_k or 100,
            }
        elif settings.search_type == "mmr":
            strategy, strategy_kwargs = "mmr", {"k": top_k}
        elif settings.search_type == "default":
            strategy, strategy_kwargs = "similarity", {"k": top_k}
        else:
            strategy, strategy_kwargs = "mixed", {"k": top_k}
    if self.queue:
        # surface a progress message to the streaming client while the search runs
        await self.queue.put(getRandomWaitingMessage(app_tool.waiting_message, traduction=False))
    return await VectorDbManager.get_strategy(app_tool.vector_type).invoke(
        self.llm.get_embeddings(),
        app_tool.vector_db,
        query,
        strategy,
        strategy_kwargs,
        app_tool=app_tool,
        llm=self.llm.get_llm(),
        source=app_tool.function_id,
    )
80
+
81
+ #region functions
82
async def document_retriever(self, query: str) -> list | None:
    """
    Asynchronously retrieves documents based on the provided query using the specified search settings.

    Args:
        query (str): The search query string.

    Returns:
        list | None: retrieved documents, or None when the tool is not a
        vector-backed "function" tool (annotation widened to match the code).

    Raises:
        ValueError: If the configuration for the tool is invalid or the vector database is not found.

    Notes:
        - The function supports different search types such as "similarity", "similarity_score_threshold", "mmr", and "mixed".
        - The search settings can be customized through the `app_tool.search_settings` attribute.
        - If a queue is provided, a waiting message is put into the queue before invoking the search.
    """
    if (
        self.app_tool.type == "function" and self.app_tool.vector_db
        #and self.settings.get("dataSource") == "knowledgebase"
    ):
        return await self.__extract_documents(query, self.app_tool)
    # falls through to an implicit None when the guard above is not met
105
+
106
async def image_generator(self, query: str, language: str = "it"):
    """Generate an image with DALL-E and return its URL, or an error string.

    Requires OPENAI_API_KEY in the environment. A translated waiting message is
    streamed to the client before the (blocking) image generation call.
    """
    from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
    model_name = self.app_tool.model or "dall-e-3"
    waiting = getRandomWaitingMessage(self.app_tool.waiting_message, traduction=False)
    target_language = language or "it"
    await translate_text(self.llm, target_language, waiting, self.callbacks)
    try:
        # DallEAPIWrapper reads OPENAI_API_KEY from the environment
        return DallEAPIWrapper(model=model_name).run(query)  # type: ignore
    except Exception as e:
        return f"Error: {str(e)}"
125
+
126
async def llm_chain(self, input: str):
    """Run a one-shot prompt -> LLM -> parser chain for tools of type "llmChain".

    Optionally grounds the system prompt with documents retrieved from the
    knowledgebase, and parses the answer as JSON when the tool's output
    structure requests it.

    Args:
        input (str): the user message passed to the chain.

    Returns:
        The parsed chain output (str, or dict for JSON output), or None when
        the tool type is not "llmChain".
    """
    if self.app_tool.type != "llmChain":
        return None
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.output_parsers import StrOutputParser, JsonOutputParser
    from pydantic import create_model
    system_message = self.app_tool.llm_chain_settings.prompt
    if self.app_tool.data_source == "knowledgebase":
        # ground the prompt with retrieved context documents
        for doc in await self.__extract_documents(input, self.app_tool):
            # fix: single quotes inside the f-string — nested double quotes
            # are a SyntaxError on Python < 3.12
            system_message += f"\n\nContext:\n{doc.metadata.get('source', '')}: {doc.page_content}"
    # NOTE(review): retrieved document text may contain '{'/'}' which
    # ChatPromptTemplate treats as template variables — confirm upstream content is safe
    output_structure = self.app_tool.llm_chain_settings.outputStructure
    is_json_output = output_structure.get("outputType") == "json"
    if is_json_output:
        output_format = output_structure.get("outputFormat", {})
        # build an ad-hoc pydantic model mirroring the configured output fields
        json_schema = create_model('json_schema', **{k: (type(v), ...) for k, v in output_format.items()})
        output_parser = JsonOutputParser(pydantic_object=json_schema)
        # fix: the original applied .strip() to the literal, which removed the
        # intended "\n\n" separator before concatenation
        system_message += "\n\nFormat instructions:\n{format_instructions}"
    else:
        output_parser = StrOutputParser()
    prompt = ChatPromptTemplate.from_messages(
        [("system", system_message),
         ("user", "{input}")],
    )
    if is_json_output:
        # inject the parser's format instructions as a pre-bound variable
        prompt = prompt.partial(format_instructions=output_parser.get_format_instructions())
    self.llm.config.model = self.app_tool.llm_chain_settings.model
    llm = self.llm.get_llm()
    llm.tags = ["llm_chain"]  # tag for downstream callback filtering
    chain = prompt | llm | output_parser
    return await chain.ainvoke({"input": input})
167
+
168
async def proxy_app_chat(self, query: str) -> str | None:
    """Forward *query* as a single user message to another CMS-hosted app.

    The target app is resolved from the 'appId' tool secret; its credentials
    are used to call the CMS message endpoint.

    Args:
        query (str): the user message to forward.

    Returns:
        str: the raw response body of the proxied chat call.

    Raises:
        ValueError: when 'appId' is missing, the app cannot be found, or the
            endpoint answers with a non-200 status.
    """
    secrets = self.app_tool.secrets_to_dict()
    app_id = secrets.get("appId")
    if not app_id:
        raise ValueError("Tool configuration is invalid. 'appId' is required.")
    app: CmsApp = await get_app_by_id(app_id)
    if not app:
        raise ValueError(f"App with id {app_id} not found.")
    url = f"{config.robot_cms_host}/api/llm/message?locale=en&raw=true"
    auth = config.robot_cms_auth
    headers = {"Authorization": auth} if auth else {}
    payload = {
        "appKey": app.credentials.app_key,
        "apiKey": app.credentials.api_key,
        "messages": [
            {
                "role": "user",
                "content": query
            }
        ]
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json=payload, headers=headers) as response:
            if response.status == 200:
                return await response.text()
            raise ValueError(f"Error fetching chat response: {response.status}")
    # fix: removed the unreachable trailing `return None` — both branches above exit
196
+
197
async def proxy_app_tool(self) -> None:
    """Placeholder tool: intentionally performs no work and yields no result."""
    return None
199
+
200
async def _fetch_urls(self, urls: list[str]) -> list[dict]:
    """Fetch every URL concurrently and extract its readable text.

    Returns one {"url", "content"} dict per fetch: extracted text when
    available, "No content found" when extraction yields nothing, and
    "Page not found" when the page could not be retrieved. Fetch-level
    exceptions are silently skipped.
    """
    import aiohttp, asyncio
    from ws_bom_robot_app.llm.tools.utils import fetch_page, extract_content_with_trafilatura
    if not urls:
        return []
    async with aiohttp.ClientSession() as session:
        pages = await asyncio.gather(
            *(fetch_page(session, u) for u in urls),
            return_exceptions=True,
        )
    results: list[dict] = []
    for page in pages:
        if isinstance(page, Exception):
            continue  # drop failed fetches entirely
        page_url, html = page["url"], page["html"]
        if not html:
            results.append({"url": page_url, "content": "Page not found"})
            continue
        text = await extract_content_with_trafilatura(html)
        results.append({"url": page_url, "content": text if text else "No content found"})
    return results
223
+
224
async def search_online(self, query: str) -> list[dict]:
    """Search DuckDuckGo for *query* and return extracted page contents.

    Args:
        query (str): the search query.

    Returns:
        list[dict]: {"url", "content"} entries from fetched result pages;
        empty when the search itself fails.
    """
    from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
    # Wrapper DuckDuckGo
    search = DuckDuckGoSearchAPIWrapper(max_results=10)
    try:
        raw_results = search.results(query, max_results=10)
    except Exception as e:
        # fix: the original fell through with raw_results unbound (NameError
        # on the next line); treat a failed search as "no results"
        print(f"[!] Errore ricerca: {e}")
        return []
    urls = [r["link"] for r in raw_results]
    return await self._fetch_urls(urls)
234
+
235
async def search_online_google(self, query: str) -> list[dict]:
    """Search via Google Custom Search and return extracted page contents.

    When a 'searchType' secret is configured (e.g. image search), the raw API
    result payload is returned without fetching the result pages.
    """
    from langchain_google_community import GoogleSearchAPIWrapper
    secrets = self.app_tool.secrets_to_dict()
    engine = GoogleSearchAPIWrapper(
        google_api_key=secrets.get("GOOGLE_API_KEY"),
        google_cse_id=secrets.get("GOOGLE_CSE_ID"),
    )
    limit = secrets.get("num_results", 5)
    search_type = secrets.get("searchType")
    if search_type:
        # typed searches return the raw API payload untouched
        return engine.results(
            query=query,
            num_results=limit,
            search_params={"searchType": search_type},
        )
    hits = engine.results(query=query, num_results=limit)
    return await self._fetch_urls([hit["link"] for hit in hits])
256
+
257
async def send_email(self, email_subject: str, body: str, to_email: str):
    """Send a plain-text email via SMTP using credentials from the tool secrets.

    Args:
        email_subject: subject line; required.
        body: plain-text message body.
        to_email: recipient address; required.

    Returns:
        str: a human-readable status message (success or failure reason).
    """
    import smtplib
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    creds = {item["secretId"]: item["secretValue"] for item in self.app_tool.secrets}
    # Email configuration from secrets; missing values surface as send failures.
    server_host = creds.get("smtp_server")
    server_port = creds.get("smtp_port")
    user = creds.get("smtp_user")
    password = creds.get("smtp_password")
    sender = creds.get("from_email")
    # guard clauses: recipient and subject are mandatory
    if not to_email:
        return "No recipient email provided"
    if not email_subject:
        return "No email object provided"
    # assemble the message
    message = MIMEMultipart()
    message['From'] = sender
    message['To'] = to_email
    message['Subject'] = email_subject
    message.attach(MIMEText(body, 'plain'))
    try:
        with smtplib.SMTP(server_host, server_port) as server:
            # TLS + authentication only when a password is configured
            if password:
                server.starttls()
                server.login(user, password)
            server.send_message(message)
    except Exception as e:
        return f"Failed to send email: {str(e)}"
    return "Email sent successfully"
293
+
294
+ #endregion
295
+
296
+ #class variables (static)
297
# Static registry mapping CMS function names to their implementation + argument schema.
# NOTE(review): keys are derived from method __name__ and must match the CMS
# "function_name" values — confirm against the CMS tool definitions.
_list: dict[str,ToolConfig] = {
    f"{document_retriever.__name__}": ToolConfig(function=document_retriever, model=DocumentRetrieverInput),
    f"{image_generator.__name__}": ToolConfig(function=image_generator, model=ImageGeneratorInput),
    f"{llm_chain.__name__}": ToolConfig(function=llm_chain, model=LlmChainInput),
    f"{search_online.__name__}": ToolConfig(function=search_online, model=SearchOnlineInput),
    f"{search_online_google.__name__}": ToolConfig(function=search_online_google, model=SearchOnlineInput),
    f"{send_email.__name__}": ToolConfig(function=send_email, model=EmailSenderInput),
    # proxy_app_chat reuses the single-query input schema of the retriever
    f"{proxy_app_chat.__name__}": ToolConfig(function=proxy_app_chat, model=DocumentRetrieverInput),
    f"{proxy_app_tool.__name__}": ToolConfig(function=proxy_app_tool, model=NoopInput),
}
308
+
309
+ #instance methods
310
def get_coroutine(self):
    """Resolve the bound coroutine implementing the configured tool function.

    Returns:
        The instance method registered under ``app_tool.function_name``.

    Raises:
        ValueError: when the function name is not in the tool registry.
    """
    tool_cfg = self._list.get(self.app_tool.function_name)
    if tool_cfg is None:
        # fix: previously fell through to an opaque AttributeError on None
        raise ValueError(f"Unknown tool function: {self.app_tool.function_name!r}")
    return getattr(self, tool_cfg.function.__name__)
@@ -1,41 +1,41 @@
1
- import random, os
2
- from langchain_openai import ChatOpenAI
3
- from langchain_core.prompts import PromptTemplate
4
- from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
5
- from ws_bom_robot_app.llm.utils.print import print_string
6
-
7
- def __print_output(data: str) -> str:
8
- return print_string(data) if os.environ.get("AGENT_HANDLER_FORMATTED") == str(True) else f"{data} "
9
-
10
- def getRandomWaitingMessage(waiting_messages: str, traduction: bool = True) -> str:
11
- if not waiting_messages: return ""
12
- messages = [msg.strip() for msg in waiting_messages.split(";") if msg.strip()]
13
- if not messages: return ""
14
- chosen_message = random.choice(messages) + "\n"
15
- if not traduction:
16
- return __print_output(chosen_message)
17
- return chosen_message
18
-
19
- async def translate_text(llm: LlmInterface, language, text: str, callbacks: list) -> str:
20
- if language == "it":
21
- return __print_output(text)
22
- sys_message = """Il tuo compito è di tradurre il testo_da_tradurre nella seguente lingua: \n\n lingua: {language}\n\n testo_da_tradurre: {testo_da_tradurre} \n\nTraduci il testo_da_tradurre nella lingua {language} senza aggiungere altro:"""
23
- prompt = PromptTemplate.from_template(sys_message)
24
- chain = prompt | llm.get_llm()
25
- await chain.ainvoke({"language":language, "testo_da_tradurre": text}, {"callbacks": callbacks})
26
-
27
- async def fetch_page(session, url):
28
- try:
29
- async with session.get(url, timeout=10, ssl=False) as response:
30
- if response.status == 200:
31
- text = await response.text()
32
- return {"url": url, "html": text}
33
- else:
34
- return {"url": url, "html": None}
35
- except Exception as e:
36
- return {"url": url, "html": None}
37
-
38
- async def extract_content_with_trafilatura(html):
39
- """Estrae solo il testo principale usando trafilatura"""
40
- import trafilatura
41
- return trafilatura.extract(html)
1
+ import random, os
2
+ from langchain_openai import ChatOpenAI
3
+ from langchain_core.prompts import PromptTemplate
4
+ from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
5
+ from ws_bom_robot_app.llm.utils.print import print_string
6
+
7
def __print_output(data: str) -> str:
    # Route output through print_string formatting only when the env var is the
    # literal string "True" (str(True)); otherwise return the text plus a
    # trailing space for stream separation.
    return print_string(data) if os.environ.get("AGENT_HANDLER_FORMATTED") == str(True) else f"{data} "
9
+
10
def getRandomWaitingMessage(waiting_messages: str, traduction: bool = True) -> str:
    """Pick one message from a ';'-separated list, newline-terminated.

    Returns "" when the list is empty or blank. With traduction=False the
    chosen message is passed through __print_output formatting first.
    """
    candidates = [part.strip() for part in (waiting_messages or "").split(";") if part.strip()]
    if not candidates:
        return ""
    chosen = f"{random.choice(candidates)}\n"
    return chosen if traduction else __print_output(chosen)
18
+
19
async def translate_text(llm: LlmInterface, language, text: str, callbacks: list) -> str | None:
    """Emit *text* translated into *language* through the LLM callbacks.

    For Italian ("it") no LLM call is made and the formatted text is returned
    directly; for any other language the translation is streamed via the
    callbacks and the function returns None (annotation widened to match).
    """
    if language == "it":
        return __print_output(text)
    # Italian-language prompt instructing the model to translate verbatim,
    # without adding anything else.
    sys_message = """Il tuo compito è di tradurre il testo_da_tradurre nella seguente lingua: \n\n lingua: {language}\n\n testo_da_tradurre: {testo_da_tradurre} \n\nTraduci il testo_da_tradurre nella lingua {language} senza aggiungere altro:"""
    prompt = PromptTemplate.from_template(sys_message)
    chain = prompt | llm.get_llm()
    # result intentionally discarded: output reaches the client via callbacks
    await chain.ainvoke({"language":language, "testo_da_tradurre": text}, {"callbacks": callbacks})
26
+
27
async def fetch_page(session, url):
    """GET *url* with a 10-second timeout (SSL verification disabled).

    Returns {"url": url, "html": body} on HTTP 200, and
    {"url": url, "html": None} on any other status or on any exception.
    """
    try:
        async with session.get(url, timeout=10, ssl=False) as response:
            if response.status != 200:
                return {"url": url, "html": None}
            body = await response.text()
            return {"url": url, "html": body}
    except Exception:
        # network/timeout/decoding failures all collapse to "no html"
        return {"url": url, "html": None}
37
+
38
async def extract_content_with_trafilatura(html):
    """Extract only the main readable text from *html* using trafilatura."""
    import trafilatura
    return trafilatura.extract(html)