ws-bom-robot-app 0.0.90__py3-none-any.whl → 0.0.92__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ws_bom_robot_app/llm/agent_description.py +123 -123
- ws_bom_robot_app/llm/agent_handler.py +174 -174
- ws_bom_robot_app/llm/agent_lcel.py +50 -50
- ws_bom_robot_app/llm/defaut_prompt.py +15 -15
- ws_bom_robot_app/llm/feedbacks/feedback_manager.py +66 -66
- ws_bom_robot_app/llm/main.py +158 -158
- ws_bom_robot_app/llm/models/feedback.py +30 -30
- ws_bom_robot_app/llm/nebuly_handler.py +185 -185
- ws_bom_robot_app/llm/tools/tool_builder.py +68 -68
- ws_bom_robot_app/llm/tools/tool_manager.py +332 -332
- ws_bom_robot_app/llm/tools/utils.py +41 -41
- ws_bom_robot_app/llm/utils/agent.py +34 -34
- ws_bom_robot_app/llm/utils/cms.py +114 -114
- ws_bom_robot_app/llm/utils/download.py +183 -183
- ws_bom_robot_app/llm/utils/print.py +29 -29
- ws_bom_robot_app/llm/vector_store/generator.py +137 -137
- ws_bom_robot_app/llm/vector_store/integration/shopify.py +143 -143
- ws_bom_robot_app/llm/vector_store/integration/thron.py +236 -236
- ws_bom_robot_app/llm/vector_store/loader/json_loader.py +25 -25
- {ws_bom_robot_app-0.0.90.dist-info → ws_bom_robot_app-0.0.92.dist-info}/METADATA +2 -2
- {ws_bom_robot_app-0.0.90.dist-info → ws_bom_robot_app-0.0.92.dist-info}/RECORD +23 -23
- {ws_bom_robot_app-0.0.90.dist-info → ws_bom_robot_app-0.0.92.dist-info}/WHEEL +0 -0
- {ws_bom_robot_app-0.0.90.dist-info → ws_bom_robot_app-0.0.92.dist-info}/top_level.txt +0 -0
|
@@ -1,332 +1,332 @@
|
|
|
1
|
-
from asyncio import Queue
|
|
2
|
-
import aiohttp
|
|
3
|
-
from typing import Optional, Type, Callable
|
|
4
|
-
from ws_bom_robot_app.config import config
|
|
5
|
-
from ws_bom_robot_app.llm.models.api import LlmApp,LlmAppTool
|
|
6
|
-
from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
|
|
7
|
-
from ws_bom_robot_app.llm.utils.cms import CmsApp, get_app_by_id
|
|
8
|
-
from ws_bom_robot_app.llm.vector_store.db.manager import VectorDbManager
|
|
9
|
-
from ws_bom_robot_app.llm.tools.utils import getRandomWaitingMessage, translate_text
|
|
10
|
-
from ws_bom_robot_app.llm.tools.models.main import NoopInput,DocumentRetrieverInput,ImageGeneratorInput,LlmChainInput,SearchOnlineInput,EmailSenderInput
|
|
11
|
-
from pydantic import BaseModel, ConfigDict
|
|
12
|
-
|
|
13
|
-
class ToolConfig(BaseModel):
|
|
14
|
-
function: Callable
|
|
15
|
-
model: Optional[Type[BaseModel]] = NoopInput
|
|
16
|
-
model_config = ConfigDict(
|
|
17
|
-
arbitrary_types_allowed=True
|
|
18
|
-
)
|
|
19
|
-
|
|
20
|
-
class ToolManager:
|
|
21
|
-
"""
|
|
22
|
-
ToolManager is responsible for managing various tools used in the application.
|
|
23
|
-
|
|
24
|
-
Attributes:
|
|
25
|
-
app_tool (LlmAppTool): The application tool configuration.
|
|
26
|
-
api_key (str): The API key for accessing external services.
|
|
27
|
-
callbacks (list): A list of callback functions to be executed.
|
|
28
|
-
|
|
29
|
-
Methods:
|
|
30
|
-
document_retriever(query: str): Asynchronously retrieves documents based on the query.
|
|
31
|
-
image_generator(query: str, language: str = "it"): Asynchronously generates an image based on the query.
|
|
32
|
-
get_coroutine(): Retrieves the coroutine function based on the tool configuration.
|
|
33
|
-
"""
|
|
34
|
-
|
|
35
|
-
def __init__(
|
|
36
|
-
self,
|
|
37
|
-
llm: LlmInterface,
|
|
38
|
-
app_tool: LlmAppTool,
|
|
39
|
-
callbacks: list,
|
|
40
|
-
queue: Optional[Queue] = None
|
|
41
|
-
):
|
|
42
|
-
self.llm = llm
|
|
43
|
-
self.app_tool = app_tool
|
|
44
|
-
self.callbacks = callbacks
|
|
45
|
-
self.queue = queue
|
|
46
|
-
|
|
47
|
-
async def __extract_documents(self, query: str, app_tool: LlmAppTool):
|
|
48
|
-
search_type = "similarity"
|
|
49
|
-
search_kwargs = {"k": 4}
|
|
50
|
-
if app_tool.search_settings:
|
|
51
|
-
search_settings = app_tool.search_settings # type: ignore
|
|
52
|
-
if search_settings.search_type == "similarityScoreThreshold":
|
|
53
|
-
search_type = "similarity_score_threshold"
|
|
54
|
-
search_kwargs = {
|
|
55
|
-
"score_threshold": search_settings.score_threshold_id if search_settings.score_threshold_id else 0.5,
|
|
56
|
-
"k": search_settings.search_k if search_settings.search_k else 100
|
|
57
|
-
}
|
|
58
|
-
elif search_settings.search_type == "mmr":
|
|
59
|
-
search_type = "mmr"
|
|
60
|
-
search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
|
|
61
|
-
elif search_settings.search_type == "default":
|
|
62
|
-
search_type = "similarity"
|
|
63
|
-
search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
|
|
64
|
-
else:
|
|
65
|
-
search_type = "mixed"
|
|
66
|
-
search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
|
|
67
|
-
if self.queue:
|
|
68
|
-
await self.queue.put(getRandomWaitingMessage(app_tool.waiting_message, traduction=False))
|
|
69
|
-
|
|
70
|
-
return await VectorDbManager.get_strategy(app_tool.vector_type).invoke(
|
|
71
|
-
self.llm.get_embeddings(),
|
|
72
|
-
app_tool.vector_db,
|
|
73
|
-
query,
|
|
74
|
-
search_type,
|
|
75
|
-
search_kwargs,
|
|
76
|
-
app_tool=app_tool,
|
|
77
|
-
llm=self.llm.get_llm(),
|
|
78
|
-
source=app_tool.function_id,
|
|
79
|
-
)
|
|
80
|
-
|
|
81
|
-
#region functions
|
|
82
|
-
async def document_retriever(self, query: str) -> list:
|
|
83
|
-
"""
|
|
84
|
-
Asynchronously retrieves documents based on the provided query using the specified search settings.
|
|
85
|
-
|
|
86
|
-
Args:
|
|
87
|
-
query (str): The search query string.
|
|
88
|
-
|
|
89
|
-
Returns:
|
|
90
|
-
list: A list of retrieved documents based on the search criteria.
|
|
91
|
-
|
|
92
|
-
Raises:
|
|
93
|
-
ValueError: If the configuration for the tool is invalid or the vector database is not found.
|
|
94
|
-
|
|
95
|
-
Notes:
|
|
96
|
-
- The function supports different search types such as "similarity", "similarity_score_threshold", "mmr", and "mixed".
|
|
97
|
-
- The search settings can be customized through the `app_tool.search_settings` attribute.
|
|
98
|
-
- If a queue is provided, a waiting message is put into the queue before invoking the search.
|
|
99
|
-
"""
|
|
100
|
-
if (
|
|
101
|
-
self.app_tool.type == "function" and self.app_tool.vector_db
|
|
102
|
-
#and self.settings.get("dataSource") == "knowledgebase"
|
|
103
|
-
):
|
|
104
|
-
return await self.__extract_documents(query, self.app_tool)
|
|
105
|
-
|
|
106
|
-
async def image_generator(self, query: str, language: str = "it"):
|
|
107
|
-
"""
|
|
108
|
-
Asynchronously generates an image based on the query.
|
|
109
|
-
set OPENAI_API_KEY in your environment variables
|
|
110
|
-
"""
|
|
111
|
-
from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
|
|
112
|
-
model = self.app_tool.model or "dall-e-3"
|
|
113
|
-
random_waiting_message = getRandomWaitingMessage(self.app_tool.waiting_message, traduction=False)
|
|
114
|
-
if not language:
|
|
115
|
-
language = "it"
|
|
116
|
-
await translate_text(
|
|
117
|
-
self.llm, language, random_waiting_message, self.callbacks
|
|
118
|
-
)
|
|
119
|
-
try:
|
|
120
|
-
#set os.environ.get("OPENAI_API_KEY")!
|
|
121
|
-
image_url = DallEAPIWrapper(model=model).run(query) # type: ignore
|
|
122
|
-
return image_url
|
|
123
|
-
except Exception as e:
|
|
124
|
-
return f"Error: {str(e)}"
|
|
125
|
-
|
|
126
|
-
async def llm_chain(self, input: str):
|
|
127
|
-
if self.app_tool.type == "llmChain":
|
|
128
|
-
from langchain_core.prompts import ChatPromptTemplate
|
|
129
|
-
from langchain_core.output_parsers import StrOutputParser, JsonOutputParser
|
|
130
|
-
from pydantic import create_model
|
|
131
|
-
system_message = self.app_tool.llm_chain_settings.prompt.format(
|
|
132
|
-
thread_id = self.app_tool.thread_id if self.app_tool.thread_id else "no-thread-id",
|
|
133
|
-
)
|
|
134
|
-
context = []
|
|
135
|
-
if self.app_tool.data_source == "knowledgebase":
|
|
136
|
-
context = await self.__extract_documents(input, self.app_tool)
|
|
137
|
-
if len(context) > 0:
|
|
138
|
-
for doc in context:
|
|
139
|
-
system_message += f"\n\nContext:\n{doc.metadata.get("source", "")}: {doc.page_content}"
|
|
140
|
-
# Determine output parser and format based on output type
|
|
141
|
-
output_type = self.app_tool.llm_chain_settings.outputStructure.get("outputType")
|
|
142
|
-
is_json_output = output_type == "json"
|
|
143
|
-
|
|
144
|
-
if is_json_output:
|
|
145
|
-
output_format = self.app_tool.llm_chain_settings.outputStructure.get("outputFormat", {})
|
|
146
|
-
json_schema = create_model('json_schema', **{k: (type(v), ...) for k, v in output_format.items()})
|
|
147
|
-
output_parser = JsonOutputParser(pydantic_object=json_schema)
|
|
148
|
-
system_message += "\n\nFormat instructions:\n{format_instructions}".strip()
|
|
149
|
-
else:
|
|
150
|
-
output_parser = StrOutputParser()
|
|
151
|
-
# Create prompt template with or without format instructions
|
|
152
|
-
base_messages = [
|
|
153
|
-
("system", system_message),
|
|
154
|
-
("user", "{input}")
|
|
155
|
-
]
|
|
156
|
-
if is_json_output:
|
|
157
|
-
prompt = ChatPromptTemplate.from_messages(base_messages).partial(
|
|
158
|
-
format_instructions=output_parser.get_format_instructions()
|
|
159
|
-
)
|
|
160
|
-
else:
|
|
161
|
-
prompt = ChatPromptTemplate.from_messages(base_messages)
|
|
162
|
-
model = self.app_tool.llm_chain_settings.model
|
|
163
|
-
self.llm.config.model = model
|
|
164
|
-
llm = self.llm.get_llm()
|
|
165
|
-
llm.tags = ["llm_chain"]
|
|
166
|
-
chain = prompt | llm | output_parser
|
|
167
|
-
result = await chain.ainvoke({"input": input})
|
|
168
|
-
return result
|
|
169
|
-
|
|
170
|
-
async def proxy_app_chat(self, query: str) -> str | None:
|
|
171
|
-
from ws_bom_robot_app.llm.models.api import LlmMessage
|
|
172
|
-
secrets = self.app_tool.secrets_to_dict()
|
|
173
|
-
app_id = secrets.get("appId")
|
|
174
|
-
if not app_id:
|
|
175
|
-
raise ValueError("Tool configuration is invalid. 'appId' is required.")
|
|
176
|
-
app: CmsApp = await get_app_by_id(app_id)
|
|
177
|
-
if not app:
|
|
178
|
-
raise ValueError(f"App with id {app_id} not found.")
|
|
179
|
-
# message
|
|
180
|
-
app.rq.messages.append(LlmMessage(role="user", content=query))
|
|
181
|
-
# tracing
|
|
182
|
-
if str(secrets.get("disable_tracing", False)).lower() in ['1','true','yes']:
|
|
183
|
-
app.rq.lang_chain_tracing = False
|
|
184
|
-
app.rq.lang_chain_project = ''
|
|
185
|
-
app.rq.secrets['nebulyApiKey'] = ''
|
|
186
|
-
# http: for debugging purposes
|
|
187
|
-
if str(secrets.get("use_http", False)).lower() in ['1','true','yes']:
|
|
188
|
-
import base64
|
|
189
|
-
url = f"http://localhost:{config.runtime_options().tcp_port}/api/llm/stream/raw"
|
|
190
|
-
auth = f"Basic {base64.b64encode((config.robot_user + ':' + config.robot_password).encode('utf-8')).decode('utf-8')}"
|
|
191
|
-
headers = {"Authorization": auth} if auth else {}
|
|
192
|
-
async with aiohttp.ClientSession() as session:
|
|
193
|
-
_data = app.rq.model_dump(mode='json',by_alias=True,exclude_unset=True,exclude_none=True, exclude_defaults=True)
|
|
194
|
-
async with session.post(url, json=_data, headers=headers) as response:
|
|
195
|
-
if response.status == 200:
|
|
196
|
-
return await response.text()
|
|
197
|
-
else:
|
|
198
|
-
raise ValueError(f"Error fetching chat response: {response.status}")
|
|
199
|
-
return None
|
|
200
|
-
else: # default
|
|
201
|
-
try:
|
|
202
|
-
from ws_bom_robot_app.llm.main import stream
|
|
203
|
-
chunks = []
|
|
204
|
-
async for chunk in stream(rq=app.rq, ctx=None, formatted=False):
|
|
205
|
-
chunks.append(chunk)
|
|
206
|
-
rs = ''.join(chunks) if chunks else None
|
|
207
|
-
return rs
|
|
208
|
-
except Exception as e:
|
|
209
|
-
print(f"[!] Error in proxy_app_chat: {e}")
|
|
210
|
-
return None
|
|
211
|
-
|
|
212
|
-
async def proxy_app_tool(self) -> None:
|
|
213
|
-
return None
|
|
214
|
-
|
|
215
|
-
async def _fetch_urls(self, urls: list[str]) -> list[dict]:
|
|
216
|
-
import aiohttp, asyncio
|
|
217
|
-
from ws_bom_robot_app.llm.tools.utils import fetch_page, extract_content_with_trafilatura
|
|
218
|
-
if not urls:
|
|
219
|
-
return []
|
|
220
|
-
async with aiohttp.ClientSession() as session:
|
|
221
|
-
tasks = [fetch_page(session, url) for url in urls]
|
|
222
|
-
responses = await asyncio.gather(*tasks, return_exceptions=True)
|
|
223
|
-
final_results = []
|
|
224
|
-
for item in responses:
|
|
225
|
-
if isinstance(item, Exception):
|
|
226
|
-
continue
|
|
227
|
-
url = item["url"]
|
|
228
|
-
html = item["html"]
|
|
229
|
-
if html:
|
|
230
|
-
content = await extract_content_with_trafilatura(html)
|
|
231
|
-
if content:
|
|
232
|
-
final_results.append({"url": url, "content": content})
|
|
233
|
-
else:
|
|
234
|
-
final_results.append({"url": url, "content": "No content found"})
|
|
235
|
-
else:
|
|
236
|
-
final_results.append({"url": url, "content": "Page not found"})
|
|
237
|
-
return final_results
|
|
238
|
-
|
|
239
|
-
async def search_online(self, query: str) -> list[dict]:
|
|
240
|
-
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
|
|
241
|
-
# Wrapper DuckDuckGo
|
|
242
|
-
search = DuckDuckGoSearchAPIWrapper(max_results=10)
|
|
243
|
-
try:
|
|
244
|
-
raw_results = search.results(query, max_results=10)
|
|
245
|
-
except Exception as e:
|
|
246
|
-
return f"[!] Errore ricerca: {e}"
|
|
247
|
-
urls = [r["link"] for r in raw_results]
|
|
248
|
-
return await self._fetch_urls(urls)
|
|
249
|
-
|
|
250
|
-
async def search_online_google(self, query: str) -> list[dict]:
|
|
251
|
-
from langchain_google_community import GoogleSearchAPIWrapper
|
|
252
|
-
secrets = self.app_tool.secrets_to_dict()
|
|
253
|
-
search_type = secrets.get("searchType")
|
|
254
|
-
if search_type:
|
|
255
|
-
search_kwargs = {"searchType" : search_type}
|
|
256
|
-
search = GoogleSearchAPIWrapper(
|
|
257
|
-
google_api_key=secrets.get("GOOGLE_API_KEY"),
|
|
258
|
-
google_cse_id=secrets.get("GOOGLE_CSE_ID"),
|
|
259
|
-
)
|
|
260
|
-
if search_type:
|
|
261
|
-
raw_results = search.results(query=query,
|
|
262
|
-
num_results=secrets.get("num_results", 5),
|
|
263
|
-
search_params=search_kwargs)
|
|
264
|
-
return raw_results
|
|
265
|
-
raw_results = search.results(
|
|
266
|
-
query=query,
|
|
267
|
-
num_results=secrets.get("num_results", 5)
|
|
268
|
-
)
|
|
269
|
-
urls = [r["link"] for r in raw_results]
|
|
270
|
-
return await self._fetch_urls(urls)
|
|
271
|
-
|
|
272
|
-
async def send_email(self, email_subject: str, body: str, to_email:str):
|
|
273
|
-
import smtplib
|
|
274
|
-
from email.mime.multipart import MIMEMultipart
|
|
275
|
-
from email.mime.text import MIMEText
|
|
276
|
-
secrets = self.app_tool.secrets
|
|
277
|
-
secrets = {item["secretId"]: item["secretValue"] for item in secrets}
|
|
278
|
-
import urllib.parse as urlparse
|
|
279
|
-
url_preview = secrets.get("url_preview", "")
|
|
280
|
-
if url_preview and url_preview != "":
|
|
281
|
-
message_tread = "Puoi visualizzare la chat su questo indirizzo: " + urlparse.urljoin(url_preview, f"?llmThreadId={self.app_tool.thread_id}")
|
|
282
|
-
body = body.replace("##url_preview##", message_tread)
|
|
283
|
-
# Email configuration
|
|
284
|
-
smtp_server = secrets.get("smtp_server")
|
|
285
|
-
smtp_port = secrets.get("smtp_port")
|
|
286
|
-
smtp_user = secrets.get("smtp_user")
|
|
287
|
-
smtp_password = secrets.get("smtp_password")
|
|
288
|
-
from_email = secrets.get("from_email")
|
|
289
|
-
if not to_email or to_email == "":
|
|
290
|
-
return "No recipient email provided"
|
|
291
|
-
if not email_subject or email_subject == "":
|
|
292
|
-
return "No email object provided"
|
|
293
|
-
# Create the email content
|
|
294
|
-
msg = MIMEMultipart()
|
|
295
|
-
msg['From'] = from_email
|
|
296
|
-
msg['To'] = to_email
|
|
297
|
-
msg['Subject'] = email_subject
|
|
298
|
-
|
|
299
|
-
# Create the email body
|
|
300
|
-
msg.attach(MIMEText(body, 'plain'))
|
|
301
|
-
|
|
302
|
-
# Send the email
|
|
303
|
-
try:
|
|
304
|
-
with smtplib.SMTP(smtp_server, smtp_port) as server:
|
|
305
|
-
# Use authentication and SSL only if password is provided
|
|
306
|
-
if smtp_password:
|
|
307
|
-
server.starttls()
|
|
308
|
-
server.login(smtp_user, smtp_password)
|
|
309
|
-
server.send_message(msg)
|
|
310
|
-
except Exception as e:
|
|
311
|
-
return f"Failed to send email: {str(e)}"
|
|
312
|
-
return "Email sent successfully"
|
|
313
|
-
|
|
314
|
-
#endregion
|
|
315
|
-
|
|
316
|
-
#class variables (static)
|
|
317
|
-
_list: dict[str,ToolConfig] = {
|
|
318
|
-
f"{document_retriever.__name__}": ToolConfig(function=document_retriever, model=DocumentRetrieverInput),
|
|
319
|
-
f"{image_generator.__name__}": ToolConfig(function=image_generator, model=ImageGeneratorInput),
|
|
320
|
-
f"{llm_chain.__name__}": ToolConfig(function=llm_chain, model=LlmChainInput),
|
|
321
|
-
f"{search_online.__name__}": ToolConfig(function=search_online, model=SearchOnlineInput),
|
|
322
|
-
f"{search_online_google.__name__}": ToolConfig(function=search_online_google, model=SearchOnlineInput),
|
|
323
|
-
f"{send_email.__name__}": ToolConfig(function=send_email, model=EmailSenderInput),
|
|
324
|
-
f"{proxy_app_chat.__name__}": ToolConfig(function=proxy_app_chat, model=DocumentRetrieverInput),
|
|
325
|
-
f"{proxy_app_tool.__name__}": ToolConfig(function=proxy_app_tool, model=NoopInput),
|
|
326
|
-
|
|
327
|
-
}
|
|
328
|
-
|
|
329
|
-
#instance methods
|
|
330
|
-
def get_coroutine(self):
|
|
331
|
-
tool_cfg = self._list.get(self.app_tool.function_name)
|
|
332
|
-
return getattr(self, tool_cfg.function.__name__) # type: ignore
|
|
1
|
+
from asyncio import Queue
|
|
2
|
+
import aiohttp
|
|
3
|
+
from typing import Optional, Type, Callable
|
|
4
|
+
from ws_bom_robot_app.config import config
|
|
5
|
+
from ws_bom_robot_app.llm.models.api import LlmApp,LlmAppTool
|
|
6
|
+
from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
|
|
7
|
+
from ws_bom_robot_app.llm.utils.cms import CmsApp, get_app_by_id
|
|
8
|
+
from ws_bom_robot_app.llm.vector_store.db.manager import VectorDbManager
|
|
9
|
+
from ws_bom_robot_app.llm.tools.utils import getRandomWaitingMessage, translate_text
|
|
10
|
+
from ws_bom_robot_app.llm.tools.models.main import NoopInput,DocumentRetrieverInput,ImageGeneratorInput,LlmChainInput,SearchOnlineInput,EmailSenderInput
|
|
11
|
+
from pydantic import BaseModel, ConfigDict
|
|
12
|
+
|
|
13
|
+
class ToolConfig(BaseModel):
    """Registry entry binding a tool coroutine to its argument schema."""
    # Unbound ToolManager coroutine implementing the tool.
    function: Callable
    # Pydantic model describing the tool's input arguments.
    model: Optional[Type[BaseModel]] = NoopInput
    # Callable is not a pydantic-native type, so arbitrary types must be allowed.
    model_config = ConfigDict(
        arbitrary_types_allowed=True
    )
|
|
19
|
+
|
|
20
|
+
class ToolManager:
    """
    ToolManager is responsible for managing various tools used in the application.

    Attributes:
        llm (LlmInterface): Provider wrapper supplying the chat model and embeddings.
        app_tool (LlmAppTool): The application tool configuration.
        callbacks (list): A list of callback functions to be executed.
        queue (Optional[Queue]): Optional asyncio queue used to push waiting
            messages to the client while long operations run.

    Methods:
        document_retriever(query: str): Asynchronously retrieves documents based on the query.
        image_generator(query: str, language: str = "it"): Asynchronously generates an image based on the query.
        get_coroutine(): Retrieves the coroutine function based on the tool configuration.
    """

    def __init__(
        self,
        llm: LlmInterface,
        app_tool: LlmAppTool,
        callbacks: list,
        queue: Optional[Queue] = None
    ):
        # Provider abstraction: supplies both the chat model and the embeddings.
        self.llm = llm
        # Per-tool configuration (type, secrets, search settings, ...).
        self.app_tool = app_tool
        self.callbacks = callbacks
        # When present, waiting messages are streamed to the client through it.
        self.queue = queue
|
|
46
|
+
|
|
47
|
+
async def __extract_documents(self, query: str, app_tool: LlmAppTool):
|
|
48
|
+
search_type = "similarity"
|
|
49
|
+
search_kwargs = {"k": 4}
|
|
50
|
+
if app_tool.search_settings:
|
|
51
|
+
search_settings = app_tool.search_settings # type: ignore
|
|
52
|
+
if search_settings.search_type == "similarityScoreThreshold":
|
|
53
|
+
search_type = "similarity_score_threshold"
|
|
54
|
+
search_kwargs = {
|
|
55
|
+
"score_threshold": search_settings.score_threshold_id if search_settings.score_threshold_id else 0.5,
|
|
56
|
+
"k": search_settings.search_k if search_settings.search_k else 100
|
|
57
|
+
}
|
|
58
|
+
elif search_settings.search_type == "mmr":
|
|
59
|
+
search_type = "mmr"
|
|
60
|
+
search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
|
|
61
|
+
elif search_settings.search_type == "default":
|
|
62
|
+
search_type = "similarity"
|
|
63
|
+
search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
|
|
64
|
+
else:
|
|
65
|
+
search_type = "mixed"
|
|
66
|
+
search_kwargs = {"k": search_settings.search_k if search_settings.search_k else 4}
|
|
67
|
+
if self.queue:
|
|
68
|
+
await self.queue.put(getRandomWaitingMessage(app_tool.waiting_message, traduction=False))
|
|
69
|
+
|
|
70
|
+
return await VectorDbManager.get_strategy(app_tool.vector_type).invoke(
|
|
71
|
+
self.llm.get_embeddings(),
|
|
72
|
+
app_tool.vector_db,
|
|
73
|
+
query,
|
|
74
|
+
search_type,
|
|
75
|
+
search_kwargs,
|
|
76
|
+
app_tool=app_tool,
|
|
77
|
+
llm=self.llm.get_llm(),
|
|
78
|
+
source=app_tool.function_id,
|
|
79
|
+
)
|
|
80
|
+
|
|
81
|
+
#region functions
|
|
82
|
+
async def document_retriever(self, query: str) -> list:
|
|
83
|
+
"""
|
|
84
|
+
Asynchronously retrieves documents based on the provided query using the specified search settings.
|
|
85
|
+
|
|
86
|
+
Args:
|
|
87
|
+
query (str): The search query string.
|
|
88
|
+
|
|
89
|
+
Returns:
|
|
90
|
+
list: A list of retrieved documents based on the search criteria.
|
|
91
|
+
|
|
92
|
+
Raises:
|
|
93
|
+
ValueError: If the configuration for the tool is invalid or the vector database is not found.
|
|
94
|
+
|
|
95
|
+
Notes:
|
|
96
|
+
- The function supports different search types such as "similarity", "similarity_score_threshold", "mmr", and "mixed".
|
|
97
|
+
- The search settings can be customized through the `app_tool.search_settings` attribute.
|
|
98
|
+
- If a queue is provided, a waiting message is put into the queue before invoking the search.
|
|
99
|
+
"""
|
|
100
|
+
if (
|
|
101
|
+
self.app_tool.type == "function" and self.app_tool.vector_db
|
|
102
|
+
#and self.settings.get("dataSource") == "knowledgebase"
|
|
103
|
+
):
|
|
104
|
+
return await self.__extract_documents(query, self.app_tool)
|
|
105
|
+
|
|
106
|
+
async def image_generator(self, query: str, language: str = "it"):
|
|
107
|
+
"""
|
|
108
|
+
Asynchronously generates an image based on the query.
|
|
109
|
+
set OPENAI_API_KEY in your environment variables
|
|
110
|
+
"""
|
|
111
|
+
from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
|
|
112
|
+
model = self.app_tool.model or "dall-e-3"
|
|
113
|
+
random_waiting_message = getRandomWaitingMessage(self.app_tool.waiting_message, traduction=False)
|
|
114
|
+
if not language:
|
|
115
|
+
language = "it"
|
|
116
|
+
await translate_text(
|
|
117
|
+
self.llm, language, random_waiting_message, self.callbacks
|
|
118
|
+
)
|
|
119
|
+
try:
|
|
120
|
+
#set os.environ.get("OPENAI_API_KEY")!
|
|
121
|
+
image_url = DallEAPIWrapper(model=model).run(query) # type: ignore
|
|
122
|
+
return image_url
|
|
123
|
+
except Exception as e:
|
|
124
|
+
return f"Error: {str(e)}"
|
|
125
|
+
|
|
126
|
+
async def llm_chain(self, input: str):
|
|
127
|
+
if self.app_tool.type == "llmChain":
|
|
128
|
+
from langchain_core.prompts import ChatPromptTemplate
|
|
129
|
+
from langchain_core.output_parsers import StrOutputParser, JsonOutputParser
|
|
130
|
+
from pydantic import create_model
|
|
131
|
+
system_message = self.app_tool.llm_chain_settings.prompt.format(
|
|
132
|
+
thread_id = self.app_tool.thread_id if self.app_tool.thread_id else "no-thread-id",
|
|
133
|
+
)
|
|
134
|
+
context = []
|
|
135
|
+
if self.app_tool.data_source == "knowledgebase":
|
|
136
|
+
context = await self.__extract_documents(input, self.app_tool)
|
|
137
|
+
if len(context) > 0:
|
|
138
|
+
for doc in context:
|
|
139
|
+
system_message += f"\n\nContext:\n{doc.metadata.get("source", "")}: {doc.page_content}"
|
|
140
|
+
# Determine output parser and format based on output type
|
|
141
|
+
output_type = self.app_tool.llm_chain_settings.outputStructure.get("outputType")
|
|
142
|
+
is_json_output = output_type == "json"
|
|
143
|
+
|
|
144
|
+
if is_json_output:
|
|
145
|
+
output_format = self.app_tool.llm_chain_settings.outputStructure.get("outputFormat", {})
|
|
146
|
+
json_schema = create_model('json_schema', **{k: (type(v), ...) for k, v in output_format.items()})
|
|
147
|
+
output_parser = JsonOutputParser(pydantic_object=json_schema)
|
|
148
|
+
system_message += "\n\nFormat instructions:\n{format_instructions}".strip()
|
|
149
|
+
else:
|
|
150
|
+
output_parser = StrOutputParser()
|
|
151
|
+
# Create prompt template with or without format instructions
|
|
152
|
+
base_messages = [
|
|
153
|
+
("system", system_message),
|
|
154
|
+
("user", "{input}")
|
|
155
|
+
]
|
|
156
|
+
if is_json_output:
|
|
157
|
+
prompt = ChatPromptTemplate.from_messages(base_messages).partial(
|
|
158
|
+
format_instructions=output_parser.get_format_instructions()
|
|
159
|
+
)
|
|
160
|
+
else:
|
|
161
|
+
prompt = ChatPromptTemplate.from_messages(base_messages)
|
|
162
|
+
model = self.app_tool.llm_chain_settings.model
|
|
163
|
+
self.llm.config.model = model
|
|
164
|
+
llm = self.llm.get_llm()
|
|
165
|
+
llm.tags = ["llm_chain"]
|
|
166
|
+
chain = prompt | llm | output_parser
|
|
167
|
+
result = await chain.ainvoke({"input": input})
|
|
168
|
+
return result
|
|
169
|
+
|
|
170
|
+
    async def proxy_app_chat(self, query: str) -> str | None:
        """Forward `query` to another CMS-configured app and return its full reply.

        The target app id is read from the tool secrets ("appId"). Tracing and
        the Nebuly key can be disabled via the "disable_tracing" secret;
        "use_http" routes the call through the local HTTP endpoint (debugging).

        Raises:
            ValueError: when "appId" is missing, the app cannot be found, or
                the HTTP round-trip returns a non-200 status.
        """
        from ws_bom_robot_app.llm.models.api import LlmMessage
        secrets = self.app_tool.secrets_to_dict()
        app_id = secrets.get("appId")
        if not app_id:
            raise ValueError("Tool configuration is invalid. 'appId' is required.")
        app: CmsApp = await get_app_by_id(app_id)
        if not app:
            raise ValueError(f"App with id {app_id} not found.")
        # message: append the user's query to the proxied app's conversation
        app.rq.messages.append(LlmMessage(role="user", content=query))
        # tracing: optionally strip all tracing config before forwarding
        if str(secrets.get("disable_tracing", False)).lower() in ['1','true','yes']:
            app.rq.lang_chain_tracing = False
            app.rq.lang_chain_project = ''
            app.rq.secrets['nebulyApiKey'] = ''
        # http: for debugging purposes
        if str(secrets.get("use_http", False)).lower() in ['1','true','yes']:
            import base64
            url = f"http://localhost:{config.runtime_options().tcp_port}/api/llm/stream/raw"
            # Basic auth against the local robot endpoint.
            auth = f"Basic {base64.b64encode((config.robot_user + ':' + config.robot_password).encode('utf-8')).decode('utf-8')}"
            headers = {"Authorization": auth} if auth else {}
            async with aiohttp.ClientSession() as session:
                _data = app.rq.model_dump(mode='json',by_alias=True,exclude_unset=True,exclude_none=True, exclude_defaults=True)
                async with session.post(url, json=_data, headers=headers) as response:
                    if response.status == 200:
                        return await response.text()
                    else:
                        raise ValueError(f"Error fetching chat response: {response.status}")
            return None
        else: # default
            try:
                from ws_bom_robot_app.llm.main import stream
                chunks = []
                # Collect the streamed chunks into a single reply string.
                async for chunk in stream(rq=app.rq, ctx=None, formatted=False):
                    chunks.append(chunk)
                rs = ''.join(chunks) if chunks else None
                return rs
            except Exception as e:
                # Best-effort: log and return None rather than failing the agent run.
                print(f"[!] Error in proxy_app_chat: {e}")
                return None
|
|
211
|
+
|
|
212
|
+
async def proxy_app_tool(self) -> None:
|
|
213
|
+
return None
|
|
214
|
+
|
|
215
|
+
async def _fetch_urls(self, urls: list[str]) -> list[dict]:
|
|
216
|
+
import aiohttp, asyncio
|
|
217
|
+
from ws_bom_robot_app.llm.tools.utils import fetch_page, extract_content_with_trafilatura
|
|
218
|
+
if not urls:
|
|
219
|
+
return []
|
|
220
|
+
async with aiohttp.ClientSession() as session:
|
|
221
|
+
tasks = [fetch_page(session, url) for url in urls]
|
|
222
|
+
responses = await asyncio.gather(*tasks, return_exceptions=True)
|
|
223
|
+
final_results = []
|
|
224
|
+
for item in responses:
|
|
225
|
+
if isinstance(item, Exception):
|
|
226
|
+
continue
|
|
227
|
+
url = item["url"]
|
|
228
|
+
html = item["html"]
|
|
229
|
+
if html:
|
|
230
|
+
content = await extract_content_with_trafilatura(html)
|
|
231
|
+
if content:
|
|
232
|
+
final_results.append({"url": url, "content": content})
|
|
233
|
+
else:
|
|
234
|
+
final_results.append({"url": url, "content": "No content found"})
|
|
235
|
+
else:
|
|
236
|
+
final_results.append({"url": url, "content": "Page not found"})
|
|
237
|
+
return final_results
|
|
238
|
+
|
|
239
|
+
async def search_online(self, query: str) -> list[dict]:
|
|
240
|
+
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
|
|
241
|
+
# Wrapper DuckDuckGo
|
|
242
|
+
search = DuckDuckGoSearchAPIWrapper(max_results=10)
|
|
243
|
+
try:
|
|
244
|
+
raw_results = search.results(query, max_results=10)
|
|
245
|
+
except Exception as e:
|
|
246
|
+
return f"[!] Errore ricerca: {e}"
|
|
247
|
+
urls = [r["link"] for r in raw_results]
|
|
248
|
+
return await self._fetch_urls(urls)
|
|
249
|
+
|
|
250
|
+
async def search_online_google(self, query: str) -> list[dict]:
|
|
251
|
+
from langchain_google_community import GoogleSearchAPIWrapper
|
|
252
|
+
secrets = self.app_tool.secrets_to_dict()
|
|
253
|
+
search_type = secrets.get("searchType")
|
|
254
|
+
if search_type:
|
|
255
|
+
search_kwargs = {"searchType" : search_type}
|
|
256
|
+
search = GoogleSearchAPIWrapper(
|
|
257
|
+
google_api_key=secrets.get("GOOGLE_API_KEY"),
|
|
258
|
+
google_cse_id=secrets.get("GOOGLE_CSE_ID"),
|
|
259
|
+
)
|
|
260
|
+
if search_type:
|
|
261
|
+
raw_results = search.results(query=query,
|
|
262
|
+
num_results=secrets.get("num_results", 5),
|
|
263
|
+
search_params=search_kwargs)
|
|
264
|
+
return raw_results
|
|
265
|
+
raw_results = search.results(
|
|
266
|
+
query=query,
|
|
267
|
+
num_results=secrets.get("num_results", 5)
|
|
268
|
+
)
|
|
269
|
+
urls = [r["link"] for r in raw_results]
|
|
270
|
+
return await self._fetch_urls(urls)
|
|
271
|
+
|
|
272
|
+
    async def send_email(self, email_subject: str, body: str, to_email:str):
        """Send `body` to `to_email` via SMTP using credentials from the tool secrets.

        Returns a human-readable status string (success or the failure reason)
        instead of raising, so the agent can report the outcome to the user.
        The "##url_preview##" placeholder in `body`, if present, is replaced
        with a link to the current chat thread.
        """
        import smtplib
        from email.mime.multipart import MIMEMultipart
        from email.mime.text import MIMEText
        secrets = self.app_tool.secrets
        # Flatten the CMS secret entries into a plain {id: value} dict.
        secrets = {item["secretId"]: item["secretValue"] for item in secrets}
        import urllib.parse as urlparse
        url_preview = secrets.get("url_preview", "")
        if url_preview and url_preview != "":
            # Replace the placeholder with a link to the chat thread (message text is user-facing Italian).
            message_tread = "Puoi visualizzare la chat su questo indirizzo: " + urlparse.urljoin(url_preview, f"?llmThreadId={self.app_tool.thread_id}")
            body = body.replace("##url_preview##", message_tread)
        # Email configuration
        smtp_server = secrets.get("smtp_server")
        smtp_port = secrets.get("smtp_port")
        smtp_user = secrets.get("smtp_user")
        smtp_password = secrets.get("smtp_password")
        from_email = secrets.get("from_email")
        if not to_email or to_email == "":
            return "No recipient email provided"
        if not email_subject or email_subject == "":
            return "No email object provided"
        # Create the email content
        msg = MIMEMultipart()
        msg['From'] = from_email
        msg['To'] = to_email
        msg['Subject'] = email_subject

        # Create the email body
        msg.attach(MIMEText(body, 'plain'))

        # Send the email
        try:
            with smtplib.SMTP(smtp_server, smtp_port) as server:
                # Use authentication and SSL only if password is provided
                if smtp_password:
                    server.starttls()
                    server.login(smtp_user, smtp_password)
                server.send_message(msg)
        except Exception as e:
            return f"Failed to send email: {str(e)}"
        return "Email sent successfully"
|
|
313
|
+
|
|
314
|
+
#endregion
|
|
315
|
+
|
|
316
|
+
#class variables (static)
|
|
317
|
+
_list: dict[str,ToolConfig] = {
|
|
318
|
+
f"{document_retriever.__name__}": ToolConfig(function=document_retriever, model=DocumentRetrieverInput),
|
|
319
|
+
f"{image_generator.__name__}": ToolConfig(function=image_generator, model=ImageGeneratorInput),
|
|
320
|
+
f"{llm_chain.__name__}": ToolConfig(function=llm_chain, model=LlmChainInput),
|
|
321
|
+
f"{search_online.__name__}": ToolConfig(function=search_online, model=SearchOnlineInput),
|
|
322
|
+
f"{search_online_google.__name__}": ToolConfig(function=search_online_google, model=SearchOnlineInput),
|
|
323
|
+
f"{send_email.__name__}": ToolConfig(function=send_email, model=EmailSenderInput),
|
|
324
|
+
f"{proxy_app_chat.__name__}": ToolConfig(function=proxy_app_chat, model=DocumentRetrieverInput),
|
|
325
|
+
f"{proxy_app_tool.__name__}": ToolConfig(function=proxy_app_tool, model=NoopInput),
|
|
326
|
+
|
|
327
|
+
}
|
|
328
|
+
|
|
329
|
+
#instance methods
|
|
330
|
+
def get_coroutine(self):
|
|
331
|
+
tool_cfg = self._list.get(self.app_tool.function_name)
|
|
332
|
+
return getattr(self, tool_cfg.function.__name__) # type: ignore
|