ws-bom-robot-app 0.0.59__py3-none-any.whl → 0.0.61__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -60,7 +60,7 @@ class AgentHandler(AsyncCallbackHandler):
60
60
  tags: Optional[List[str]] = None,
61
61
  **kwargs: Any,
62
62
  ) -> None:
63
- if token:
63
+ if token and "llm_chain" not in tags:
64
64
  token = _parse_token(self.llm,token)
65
65
  if token:
66
66
  self.stream_buffer += token # append new data to pending buffer
@@ -29,6 +29,7 @@ class LlmAppToolChainSettings(BaseModel):
29
29
  provider: Optional[str] = "openai"
30
30
  model: Optional[str] = None
31
31
  temperature: Optional[float] = 0
32
+ outputStructure: Optional[dict] = None
32
33
 
33
34
  class LlmAppToolDbSettings(BaseModel):
34
35
  connection_string: Optional[str] = Field(None, validation_alias=AliasChoices("connectionString","connection_string"))
@@ -29,11 +29,9 @@ class NebulyHandler(AsyncCallbackHandler):
29
29
  input_tokens=0,
30
30
  output_tokens=0,
31
31
  )
32
- self.retrieval_trace = NebulyRetrievalTrace(
33
- source=None,
34
- input="",
35
- outputs=[],
36
- )
32
+ self.__response_with_rag: str = "false" # Flag indicating whether the AI invoked any tool (used as a proxy for retrieval/RAG usage)
33
+ self.__retrieval_query: str = ""
34
+ self.retrieval_traces: list[NebulyRetrievalTrace] = []
37
35
 
38
36
  async def on_chat_model_start(self, serialized, messages, *, run_id, parent_run_id = None, tags = None, metadata = None, **kwargs):
39
37
  # Initialize the interaction with the input message
@@ -57,13 +55,22 @@ class NebulyHandler(AsyncCallbackHandler):
57
55
  self.llm_trace.output_tokens = usage_metadata.get("output_tokens", 0)
58
56
 
59
57
  async def on_retriever_start(self, serialized, query, *, run_id, parent_run_id = None, tags = None, metadata = None, **kwargs):
60
- if metadata.get("source"):
61
- self.retrieval_trace.input = query
62
- self.retrieval_trace.source = metadata.get("source", "retriever")
58
+ self.__retrieval_query = query
59
+
63
60
 
64
61
  async def on_retriever_end(self, documents, *, run_id, parent_run_id = None, tags = None, **kwargs):
65
62
  # pass the document source because of the large amount of data in the document content
66
- self.retrieval_trace.outputs.extend([ doc.metadata.get("source", "content unavailable") for doc in documents])
63
+ for doc in documents:
64
+ self.retrieval_traces.append(
65
+ NebulyRetrievalTrace(
66
+ source=doc.metadata.get("source", "content unavailable"),
67
+ input=self.__retrieval_query,
68
+ outputs=[doc.metadata.get("source", "content unavailable")]
69
+ )
70
+ )
71
+
72
+ async def on_tool_start(self, serialized, input_str, *, run_id, parent_run_id = None, tags = None, metadata = None, inputs = None, **kwargs):
73
+ self.__response_with_rag = "true" # Set the flag to true when a tool starts
67
74
 
68
75
  async def on_agent_finish(self, finish, *, run_id, parent_run_id = None, tags = None, **kwargs):
69
76
  # Interaction
@@ -126,14 +133,16 @@ class NebulyHandler(AsyncCallbackHandler):
126
133
 
127
134
  def __prepare_payload(self):
128
135
  self.interaction.time_end = datetime.now().astimezone().isoformat()
136
+ self.interaction.tags["response_with_rag"] = self.__response_with_rag
129
137
  payload = {
130
138
  "interaction": self.interaction.__dict__,
131
139
  "traces": [
132
140
  self.llm_trace.__dict__,
133
141
  ]
134
142
  }
135
- if self.retrieval_trace.source:
136
- payload["traces"].append(self.retrieval_trace.__dict__)
143
+ for trace in self.retrieval_traces:
144
+ if trace.source:
145
+ payload["traces"].append(trace.__dict__)
137
146
  return payload
138
147
 
139
148
  def __parse_multimodal_input(self, input: list[dict]) -> str:
@@ -11,3 +11,7 @@ class LlmChainInput(BaseModel):
11
11
  input: str = Field(description="Input to the LLM chain")
12
12
  class SearchOnlineInput(BaseModel):
13
13
  query: str = Field(description="The search query string")
14
+ class EmailSenderInput(BaseModel):
15
+ email_subject: str = Field(description="The subject of the email to send")
16
+ body: str = Field(description="The body of the email to send")
17
+ to_email: str = Field(description="The recipient email address")
@@ -4,8 +4,11 @@ from ws_bom_robot_app.llm.models.api import LlmAppTool
4
4
  from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
5
5
  from ws_bom_robot_app.llm.vector_store.db.manager import VectorDbManager
6
6
  from ws_bom_robot_app.llm.tools.utils import getRandomWaitingMessage, translate_text
7
- from ws_bom_robot_app.llm.tools.models.main import NoopInput,DocumentRetrieverInput,ImageGeneratorInput,LlmChainInput,SearchOnlineInput
7
+ from ws_bom_robot_app.llm.tools.models.main import NoopInput,DocumentRetrieverInput,ImageGeneratorInput,LlmChainInput,SearchOnlineInput,EmailSenderInput
8
8
  from pydantic import BaseModel, ConfigDict
9
+ import smtplib
10
+ from email.mime.multipart import MIMEMultipart
11
+ from email.mime.text import MIMEText
9
12
 
10
13
  class ToolConfig(BaseModel):
11
14
  function: Callable
@@ -123,7 +126,8 @@ class ToolManager:
123
126
  async def llm_chain(self, input: str):
124
127
  if self.app_tool.type == "llmChain":
125
128
  from langchain_core.prompts import ChatPromptTemplate
126
- from langchain_core.output_parsers import StrOutputParser
129
+ from langchain_core.output_parsers import StrOutputParser, JsonOutputParser
130
+ from pydantic import create_model
127
131
  system_message = self.app_tool.llm_chain_settings.prompt
128
132
  context = []
129
133
  if self.app_tool.data_source == "knowledgebase":
@@ -131,14 +135,33 @@ class ToolManager:
131
135
  if len(context) > 0:
132
136
  for doc in context:
133
137
  system_message += f"\n\nContext:\n{doc.metadata.get("source", "")}: {doc.page_content}"
134
- prompt = ChatPromptTemplate.from_messages(
135
- [ ("system", system_message),
136
- ("user", "{input}")],
137
- )
138
+ # Determine output parser and format based on output type
139
+ output_type = self.app_tool.llm_chain_settings.outputStructure.get("outputType")
140
+ is_json_output = output_type == "json"
141
+
142
+ if is_json_output:
143
+ output_format = self.app_tool.llm_chain_settings.outputStructure.get("outputFormat", {})
144
+ json_schema = create_model('json_schema', **{k: (type(v), ...) for k, v in output_format.items()})
145
+ output_parser = JsonOutputParser(pydantic_object=json_schema)
146
+ system_message += "\n\nFormat instructions:\n{format_instructions}".strip()
147
+ else:
148
+ output_parser = StrOutputParser()
149
+ # Create prompt template with or without format instructions
150
+ base_messages = [
151
+ ("system", system_message),
152
+ ("user", "{input}")
153
+ ]
154
+ if is_json_output:
155
+ prompt = ChatPromptTemplate.from_messages(base_messages).partial(
156
+ format_instructions=output_parser.get_format_instructions()
157
+ )
158
+ else:
159
+ prompt = ChatPromptTemplate.from_messages(base_messages)
138
160
  model = self.app_tool.llm_chain_settings.model
139
161
  self.llm.config.model = model
140
162
  llm = self.llm.get_llm()
141
- chain = prompt | llm | StrOutputParser()
163
+ llm.tags = ["llm_chain"]
164
+ chain = prompt | llm | output_parser
142
165
  result = await chain.ainvoke({"input": input})
143
166
  return result
144
167
 
@@ -171,6 +194,81 @@ class ToolManager:
171
194
  final_results.append({"url": url, "content": "Page not found"})
172
195
  return final_results
173
196
 
197
+ async def search_online_google(self, query: str):
198
+ from langchain_google_community import GoogleSearchAPIWrapper
199
+ from ws_bom_robot_app.llm.tools.utils import fetch_page, extract_content_with_trafilatura
200
+ import aiohttp, asyncio
201
+ secrets = {}
202
+ for d in self.app_tool.secrets:
203
+ secrets[d.get("secretId")] = d.get("secretValue")
204
+ search_type = secrets.get("searchType")
205
+ if search_type:
206
+ search_kwargs = {"searchType" : search_type}
207
+ search = GoogleSearchAPIWrapper(
208
+ google_api_key=secrets.get("GOOGLE_API_KEY"),
209
+ google_cse_id=secrets.get("GOOGLE_CSE_ID"),
210
+ )
211
+ if search_type:
212
+ raw_results = search.results(query=query,
213
+ num_results=secrets.get("num_results", 5),
214
+ search_params=search_kwargs)
215
+ return raw_results
216
+ raw_results = search.results(
217
+ query=query,
218
+ num_results=secrets.get("num_results", 5)
219
+ )
220
+ urls = [r["link"] for r in raw_results]
221
+ async with aiohttp.ClientSession() as session:
222
+ tasks = [fetch_page(session, url) for url in urls]
223
+ responses = await asyncio.gather(*tasks)
224
+ final_results = []
225
+ for item in responses:
226
+ url = item["url"]
227
+ html = item["html"]
228
+ if html:
229
+ content = await extract_content_with_trafilatura(html)
230
+ if content:
231
+ final_results.append({"url": url, "content": content, "type": "web"})
232
+ else:
233
+ final_results.append({"url": url, "content": "No content found", "type": "web"})
234
+ else:
235
+ final_results.append({"url": url, "content": "Page not found", "type": "web"})
236
+ return final_results
237
+
238
+
239
+ async def send_email(self, email_subject: str, body: str, to_email:str):
240
+ secrets = self.app_tool.secrets
241
+ secrets = {item["secretId"]: item["secretValue"] for item in secrets}
242
+ # Email configuration
243
+ smtp_server = secrets.get("smtp_server")
244
+ smtp_port = secrets.get("smtp_port")
245
+ smtp_user = secrets.get("smtp_user")
246
+ smtp_password = secrets.get("smtp_password")
247
+ from_email = secrets.get("from_email")
248
+ if not to_email or to_email == "":
249
+ return "No recipient email provided"
250
+ if not email_subject or email_subject == "":
251
+ return "No email object provided"
252
+ # Create the email content
253
+ msg = MIMEMultipart()
254
+ msg['From'] = from_email
255
+ msg['To'] = to_email
256
+ msg['Subject'] = email_subject
257
+
258
+ # Create the email body
259
+ msg.attach(MIMEText(body, 'plain'))
260
+
261
+ # Send the email
262
+ try:
263
+ with smtplib.SMTP(smtp_server, smtp_port) as server:
264
+ # Use authentication and SSL only if password is provided
265
+ if smtp_password:
266
+ server.starttls()
267
+ server.login(smtp_user, smtp_password)
268
+ server.send_message(msg)
269
+ except Exception as e:
270
+ return f"Failed to send email: {str(e)}"
271
+ return "Email sent successfully"
174
272
 
175
273
  #endregion
176
274
 
@@ -180,6 +278,8 @@ class ToolManager:
180
278
  "image_generator": ToolConfig(function=image_generator, model=ImageGeneratorInput),
181
279
  "llm_chain": ToolConfig(function=llm_chain, model=LlmChainInput),
182
280
  "search_online": ToolConfig(function=search_online, model=SearchOnlineInput),
281
+ "search_online_google": ToolConfig(function=search_online_google, model=SearchOnlineInput),
282
+ "send_email": ToolConfig(function=send_email, model=EmailSenderInput),
183
283
  }
184
284
 
185
285
  #instance methods
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ws_bom_robot_app
3
- Version: 0.0.59
3
+ Version: 0.0.61
4
4
  Summary: A FastAPI application serving ws bom/robot/llm platform ai.
5
5
  Home-page: https://github.com/websolutespa/bom
6
6
  Author: Websolute Spa
@@ -50,6 +50,7 @@ Requires-Dist: unstructured-ingest[slack]
50
50
  Requires-Dist: html5lib==1.1
51
51
  Requires-Dist: markdownify==0.14.1
52
52
  Requires-Dist: duckduckgo-search==8.0.4
53
+ Requires-Dist: langchain_google_community==2.0.7
53
54
  Dynamic: author
54
55
  Dynamic: author-email
55
56
  Dynamic: classifier
@@ -8,17 +8,17 @@ ws_bom_robot_app/util.py,sha256=b49ItlZgh2Wzw-6K8k5Wa44eVgjQ0JmWQwJnEaQBVGw,3502
8
8
  ws_bom_robot_app/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
9
  ws_bom_robot_app/llm/agent_context.py,sha256=uatHJ8wcRly6h0S762BgfzDMpmcwCHwNzwo37aWjeE0,1305
10
10
  ws_bom_robot_app/llm/agent_description.py,sha256=5IP0qFSJvaE3zjGS7f0W1DuiegP0RHXRMBoDC5pCofA,4779
11
- ws_bom_robot_app/llm/agent_handler.py,sha256=4HYP8wbdtJhRi3bk6PvJ3cRDZyLYWt3Ow5tnHpkEg1o,7738
11
+ ws_bom_robot_app/llm/agent_handler.py,sha256=PzdDpBnfUdqxKuMpHcVYgVf0hxIFOJwdxT9YIyFGdYY,7766
12
12
  ws_bom_robot_app/llm/agent_lcel.py,sha256=8d10b43BXqE4rfXE5uh8YGT67o1bw0q0l7QXFT6wPKA,2320
13
13
  ws_bom_robot_app/llm/api.py,sha256=1nzQ7g2n_DlX6Ixo5ecS10UvyyKJ42qZQ6aD8-EI7BE,4709
14
14
  ws_bom_robot_app/llm/defaut_prompt.py,sha256=D9dn8yPveu0bVwGM1wQWLYftmBs5O76o0R_caLLll8w,1121
15
15
  ws_bom_robot_app/llm/main.py,sha256=UK33yI_0zDCdM5zKe9h7c_qzM41PIANvRFCxjGlAzlI,5140
16
- ws_bom_robot_app/llm/nebuly_handler.py,sha256=1HaBeBNzEhyTsgz9v-15Tt7oAc6UBGtqB_DBujpFIcw,7534
16
+ ws_bom_robot_app/llm/nebuly_handler.py,sha256=w895twhPgtRUH_jZz1pbX4W2leq8A3O_9gUwp_ridoY,8033
17
17
  ws_bom_robot_app/llm/settings.py,sha256=DCLaGZwxlw0xE46LpfUgin_FHD8_XJIthCgI6r2UDlM,121
18
18
  ws_bom_robot_app/llm/feedbacks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
19
19
  ws_bom_robot_app/llm/feedbacks/feedback_manager.py,sha256=bnP0FEJTyrzT0YzqCVE73EC07Eu_4FLxVu3Cy-5Si0o,3211
20
20
  ws_bom_robot_app/llm/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
21
- ws_bom_robot_app/llm/models/api.py,sha256=266f5jc1ikfX9HnK5Ms4NxXowNRfxFEH8GPlQcDvN5Y,10709
21
+ ws_bom_robot_app/llm/models/api.py,sha256=DPhL_207RRN4qwPlKxbLrARnsYVAkdMYYUV7MkbN7Rk,10751
22
22
  ws_bom_robot_app/llm/models/base.py,sha256=1TqxuTK3rjJEALn7lvgoen_1ba3R2brAgGx6EDTtDZo,152
23
23
  ws_bom_robot_app/llm/models/feedback.py,sha256=pYNQGxNOBgeAAfdJLI95l7ePLBI5tVdsgnyjp5oMOQU,1722
24
24
  ws_bom_robot_app/llm/models/kb.py,sha256=oVSw6_dmNxikAHrPqcfxDXz9M0ezLIYuxpgvzfs_Now,9514
@@ -26,10 +26,10 @@ ws_bom_robot_app/llm/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5
26
26
  ws_bom_robot_app/llm/providers/llm_manager.py,sha256=zIkxgTLYQCcup2Ixf4eWap4mNinuJH2YmkjLjZGDyJM,8371
27
27
  ws_bom_robot_app/llm/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
28
28
  ws_bom_robot_app/llm/tools/tool_builder.py,sha256=p_Q32_-OSydcxzj69PgPIuiny816zYv5dVsCHSY0ELc,1188
29
- ws_bom_robot_app/llm/tools/tool_manager.py,sha256=adWvaSIur5Ez2gGsuTFqNprZZlQP6ZZj5WknzJjtQ0c,8355
29
+ ws_bom_robot_app/llm/tools/tool_manager.py,sha256=I5HPQov-9ELSiNDhxMsm9-zOqZ77J_E5c6IDOXX_CFk,12935
30
30
  ws_bom_robot_app/llm/tools/utils.py,sha256=Ba7ScFZPVJ3ke8KLO8ik1wyR2f_zC99Bikqx0OGnKoI,1924
31
31
  ws_bom_robot_app/llm/tools/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
32
- ws_bom_robot_app/llm/tools/models/main.py,sha256=pBQNWPd1OZgZ2xkOnUOawNbujQ5oJXLdyuAex1afLWc,579
32
+ ws_bom_robot_app/llm/tools/models/main.py,sha256=1hICqHs-KS2heenkH7b2eH0N2GrPaaNGBrn64cl_A40,827
33
33
  ws_bom_robot_app/llm/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
34
34
  ws_bom_robot_app/llm/utils/agent.py,sha256=ISF9faaD5tBi-8sbgQpgfqWT1JIVcgv_lRhyaNAkI2Q,1445
35
35
  ws_bom_robot_app/llm/utils/chunker.py,sha256=N7570xBYlObneg-fsvDhPAJ-Pv8C8OaYZOBK6q7LmMI,607
@@ -65,7 +65,7 @@ ws_bom_robot_app/llm/vector_store/loader/__init__.py,sha256=47DEQpj8HBSa-_TImW-5
65
65
  ws_bom_robot_app/llm/vector_store/loader/base.py,sha256=L_ugekNuAq0N9O-24wtlHSNHkqSeD-KsJrfGt_FX9Oc,5340
66
66
  ws_bom_robot_app/llm/vector_store/loader/docling.py,sha256=yP0zgXLeFAlByaYuj-6cYariuknckrFds0dxdRcnVz8,3456
67
67
  ws_bom_robot_app/llm/vector_store/loader/json_loader.py,sha256=LDppW0ZATo4_1hh-KlsAM3TLawBvwBxva_a7k5Oz1sc,858
68
- ws_bom_robot_app-0.0.59.dist-info/METADATA,sha256=hTSpZWSUDrn0IY9jsYvUCGocnzK9kbrEiRINEwiDVe0,8406
69
- ws_bom_robot_app-0.0.59.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
70
- ws_bom_robot_app-0.0.59.dist-info/top_level.txt,sha256=Yl0akyHVbynsBX_N7wx3H3ZTkcMLjYyLJs5zBMDAKcM,17
71
- ws_bom_robot_app-0.0.59.dist-info/RECORD,,
68
+ ws_bom_robot_app-0.0.61.dist-info/METADATA,sha256=9Ph9kKWlaMASpyHCpha4k40H2clLDe2KbmUSq2D1uLw,8456
69
+ ws_bom_robot_app-0.0.61.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
70
+ ws_bom_robot_app-0.0.61.dist-info/top_level.txt,sha256=Yl0akyHVbynsBX_N7wx3H3ZTkcMLjYyLJs5zBMDAKcM,17
71
+ ws_bom_robot_app-0.0.61.dist-info/RECORD,,