ws-bom-robot-app 0.0.98__py3-none-any.whl → 0.0.99__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. ws_bom_robot_app/llm/agent_description.py +123 -123
  2. ws_bom_robot_app/llm/agent_handler.py +176 -176
  3. ws_bom_robot_app/llm/agent_lcel.py +50 -50
  4. ws_bom_robot_app/llm/defaut_prompt.py +15 -15
  5. ws_bom_robot_app/llm/feedbacks/feedback_manager.py +66 -66
  6. ws_bom_robot_app/llm/main.py +158 -158
  7. ws_bom_robot_app/llm/models/feedback.py +30 -30
  8. ws_bom_robot_app/llm/nebuly_handler.py +185 -185
  9. ws_bom_robot_app/llm/tools/tool_builder.py +68 -68
  10. ws_bom_robot_app/llm/tools/tool_manager.py +332 -332
  11. ws_bom_robot_app/llm/tools/utils.py +41 -41
  12. ws_bom_robot_app/llm/utils/agent.py +34 -34
  13. ws_bom_robot_app/llm/utils/cms.py +114 -114
  14. ws_bom_robot_app/llm/utils/download.py +183 -183
  15. ws_bom_robot_app/llm/utils/print.py +29 -29
  16. ws_bom_robot_app/llm/vector_store/db/qdrant.py +28 -14
  17. ws_bom_robot_app/llm/vector_store/generator.py +137 -137
  18. ws_bom_robot_app/llm/vector_store/integration/base.py +1 -0
  19. ws_bom_robot_app/llm/vector_store/integration/shopify.py +143 -143
  20. ws_bom_robot_app/llm/vector_store/integration/thron.py +236 -236
  21. ws_bom_robot_app/llm/vector_store/loader/base.py +3 -2
  22. ws_bom_robot_app/llm/vector_store/loader/docling.py +32 -12
  23. ws_bom_robot_app/llm/vector_store/loader/json_loader.py +25 -25
  24. {ws_bom_robot_app-0.0.98.dist-info → ws_bom_robot_app-0.0.99.dist-info}/METADATA +364 -333
  25. {ws_bom_robot_app-0.0.98.dist-info → ws_bom_robot_app-0.0.99.dist-info}/RECORD +27 -27
  26. {ws_bom_robot_app-0.0.98.dist-info → ws_bom_robot_app-0.0.99.dist-info}/WHEEL +0 -0
  27. {ws_bom_robot_app-0.0.98.dist-info → ws_bom_robot_app-0.0.99.dist-info}/top_level.txt +0 -0
ws_bom_robot_app/llm/nebuly_handler.py
@@ -1,185 +1,185 @@
- from typing import Union
- from ws_bom_robot_app.llm.models.api import NebulyInteraction, NebulyLLMTrace, NebulyRetrievalTrace
- from datetime import datetime, timezone
- from langchain_core.callbacks.base import AsyncCallbackHandler
- from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
- from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
-
- class NebulyHandler(AsyncCallbackHandler):
-   def __init__(self, llm_model: str | None, threadId: str = None, chat_history: list[BaseMessage] = [], url: str = None, api_key: str = None, user_id: str | None = None):
-     super().__init__()
-     self.__started: bool = False
-     self.__url: str = url
-     self.__api_key: str = api_key
-     self.chat_history = chat_history
-     self.interaction = NebulyInteraction(
-       conversation_id=threadId,
-       input="",
-       output="",
-       time_start="",
-       time_end="",
-       end_user= user_id if user_id and user_id != "" else threadId,
-       tags={"model": llm_model},
-     )
-     self.llm_trace = NebulyLLMTrace(
-       model=llm_model,
-       messages=[],
-       output="",
-       input_tokens=0,
-       output_tokens=0,
-     )
-     self.__response_with_rag: str = "false" # Flag to check if the AI used some retrieval tools
-     self.__retrieval_query: str = ""
-     self.retrieval_traces: list[NebulyRetrievalTrace] = []
-
-   async def on_chat_model_start(self, serialized, messages, *, run_id, parent_run_id = None, tags = None, metadata = None, **kwargs):
-     # Initialize the interaction with the input message
-     if not self.__started:
-       message_list = self.__flat_messages(messages)
-       if isinstance(message_list[-1], HumanMessage):
-         if isinstance(message_list[-1].content, list):
-           self.interaction.input = self.__parse_multimodal_input(message_list[-1].content)
-         else:
-           self.interaction.input = message_list[-1].content
-         self.interaction.tags["generated"] = self.__is_message_generated(message_list)
-       else:
-         raise ValueError("Last message is not a HumanMessage")
-       self.interaction.time_start = datetime.now().astimezone().isoformat()
-       self.__started = True
-
-   async def on_llm_end(self, response, *, run_id, parent_run_id = None, tags = None, **kwargs):
-     generation: Union[ChatGenerationChunk, GenerationChunk] = response.generations[0]
-     usage_metadata: dict = generation[0].message.usage_metadata
-     self.llm_trace.input_tokens = usage_metadata.get("input_tokens", 0)
-     self.llm_trace.output_tokens = usage_metadata.get("output_tokens", 0)
-
-   async def on_retriever_start(self, serialized, query, *, run_id, parent_run_id = None, tags = None, metadata = None, **kwargs):
-     self.__retrieval_query = query
-
-
-   async def on_retriever_end(self, documents, *, run_id, parent_run_id = None, tags = None, **kwargs):
-     # pass the document source because of the large amount of data in the document content
-     for doc in documents:
-       self.retrieval_traces.append(
-         NebulyRetrievalTrace(
-           source=doc.metadata.get("source", "content unavailable"),
-           input=self.__retrieval_query,
-           outputs=[doc.metadata.get("source", "content unavailable")]
-         )
-       )
-
-   async def on_tool_start(self, serialized, input_str, *, run_id, parent_run_id = None, tags = None, metadata = None, inputs = None, **kwargs):
-     self.__response_with_rag = "true" # Set the flag to true when the retriever starts
-
-   async def on_agent_finish(self, finish, *, run_id, parent_run_id = None, tags = None, **kwargs):
-     # Interaction
-     self.interaction.output = finish.return_values["output"]
-     # Trace
-     self.llm_trace.output = finish.return_values["output"]
-     message_history = self._convert_to_json_format(self.chat_history)
-     self.llm_trace.messages = self.__parse_multimodal_history(message_history)
-     await self.__send_interaction()
-
-   def __flat_messages(self, messages: list[list[BaseMessage]], to_json: bool = False) -> list[BaseMessage]:
-     """
-     Maps the messages to the format expected by the LLM.
-     Flattens the nested list structure of messages.
-     """
-     # Flatten the nested list structure
-     flattened_messages = []
-     for message_list in messages:
-       flattened_messages.extend(message_list)
-     # Store JSON format in LLM trace
-     if to_json:
-       return self._convert_to_json_format(flattened_messages)
-     return flattened_messages
-
-   def _convert_to_json_format(self, messages: list[BaseMessage]) -> list[dict]:
-     """Converts BaseMessage objects to JSON format with role and content."""
-     result = []
-     for message in messages:
-       if isinstance(message, HumanMessage):
-         role = "user"
-       elif isinstance(message, AIMessage):
-         role = "assistant"
-       else:
-         role = "system"
-
-       result.append({
-         "role": role,
-         "content": message.content
-       })
-     return result
-
-   async def __send_interaction(self):
-     # Send the interaction to the server
-     from urllib.parse import urljoin
-     import requests
-
-     payload = self.__prepare_payload()
-     endpoint = urljoin(self.__url, "event-ingestion/api/v2/events/trace_interaction")
-     # Prepare headers with authentication
-     headers = {"Content-Type": "application/json"}
-     if self.__api_key:
-       headers["Authorization"] = f"Bearer {self.__api_key}"
-     response = requests.post(
-       url=endpoint,
-       json=payload,
-       headers=headers
-     )
-     if response.status_code != 200:
-       print(f"Failed to send interaction: {response.status_code} {response.text}")
-
-   def __prepare_payload(self):
-     self.interaction.time_end = datetime.now().astimezone().isoformat()
-     self.interaction.tags["response_with_rag"] = self.__response_with_rag
-     payload = {
-       "interaction": self.interaction.__dict__,
-       "traces": [
-         self.llm_trace.__dict__,
-       ]
-     }
-     for trace in self.retrieval_traces:
-       if trace.source:
-         payload["traces"].append(trace.__dict__)
-     return payload
-
-   def __parse_multimodal_input(self, input: list[dict]) -> str:
-     """Parse multimodal input and return a string representation."""
-     type_mapping = {
-       "text": lambda item: item.get("text", ""),
-       "image": lambda _: " <image>",
-       "image_url": lambda _: " <image>",
-       "file": lambda _: " <file>",
-       "media": lambda _: " <file>",
-       "document": lambda _: " <file>",
-     }
-
-     return "".join(
-       type_mapping.get(item.get("type", ""), lambda item: f" <{item.get('type', '')}>")
-       (item) for item in input
-     )
-
-   def __parse_multimodal_history(self, messages: list[dict]) -> list[dict]:
-     # Parse the multimodal history and return a list of dictionaries
-     parsed_history = []
-     for message in messages:
-       if isinstance(message["content"], list):
-         parsed_content = self.__parse_multimodal_input(message["content"])
-       else:
-         parsed_content = message["content"]
-       parsed_history.append({
-         "role": message["role"],
-         "content": parsed_content
-       })
-     return parsed_history
-
-   def __is_message_generated(self, messages: list[BaseMessage]) -> bool:
-     # Check if the last message is generated by the model
-     if len(messages) == 0:
-       return False
-     last_user_message = f'<div class="llm__pill">{messages[-1].content}</div>'
-     last_ai_message = messages[-2].content
-     if last_user_message in last_ai_message:
-       return "true"
-     return "false"
+ from typing import Union
+ from ws_bom_robot_app.llm.models.api import NebulyInteraction, NebulyLLMTrace, NebulyRetrievalTrace
+ from datetime import datetime, timezone
+ from langchain_core.callbacks.base import AsyncCallbackHandler
+ from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
+ from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
+
+ class NebulyHandler(AsyncCallbackHandler):
+   def __init__(self, llm_model: str | None, threadId: str = None, chat_history: list[BaseMessage] = [], url: str = None, api_key: str = None, user_id: str | None = None):
+     super().__init__()
+     self.__started: bool = False
+     self.__url: str = url
+     self.__api_key: str = api_key
+     self.chat_history = chat_history
+     self.interaction = NebulyInteraction(
+       conversation_id=threadId,
+       input="",
+       output="",
+       time_start="",
+       time_end="",
+       end_user= user_id if user_id and user_id != "" else threadId,
+       tags={"model": llm_model},
+     )
+     self.llm_trace = NebulyLLMTrace(
+       model=llm_model,
+       messages=[],
+       output="",
+       input_tokens=0,
+       output_tokens=0,
+     )
+     self.__response_with_rag: str = "false" # Flag to check if the AI used some retrieval tools
+     self.__retrieval_query: str = ""
+     self.retrieval_traces: list[NebulyRetrievalTrace] = []
+
+   async def on_chat_model_start(self, serialized, messages, *, run_id, parent_run_id = None, tags = None, metadata = None, **kwargs):
+     # Initialize the interaction with the input message
+     if not self.__started:
+       message_list = self.__flat_messages(messages)
+       if isinstance(message_list[-1], HumanMessage):
+         if isinstance(message_list[-1].content, list):
+           self.interaction.input = self.__parse_multimodal_input(message_list[-1].content)
+         else:
+           self.interaction.input = message_list[-1].content
+         self.interaction.tags["generated"] = self.__is_message_generated(message_list)
+       else:
+         raise ValueError("Last message is not a HumanMessage")
+       self.interaction.time_start = datetime.now().astimezone().isoformat()
+       self.__started = True
+
+   async def on_llm_end(self, response, *, run_id, parent_run_id = None, tags = None, **kwargs):
+     generation: Union[ChatGenerationChunk, GenerationChunk] = response.generations[0]
+     usage_metadata: dict = generation[0].message.usage_metadata
+     self.llm_trace.input_tokens = usage_metadata.get("input_tokens", 0)
+     self.llm_trace.output_tokens = usage_metadata.get("output_tokens", 0)
+
+   async def on_retriever_start(self, serialized, query, *, run_id, parent_run_id = None, tags = None, metadata = None, **kwargs):
+     self.__retrieval_query = query
+
+
+   async def on_retriever_end(self, documents, *, run_id, parent_run_id = None, tags = None, **kwargs):
+     # pass the document source because of the large amount of data in the document content
+     for doc in documents:
+       self.retrieval_traces.append(
+         NebulyRetrievalTrace(
+           source=doc.metadata.get("source", "content unavailable"),
+           input=self.__retrieval_query,
+           outputs=[doc.metadata.get("source", "content unavailable")]
+         )
+       )
+
+   async def on_tool_start(self, serialized, input_str, *, run_id, parent_run_id = None, tags = None, metadata = None, inputs = None, **kwargs):
+     self.__response_with_rag = "true" # Set the flag to true when the retriever starts
+
+   async def on_agent_finish(self, finish, *, run_id, parent_run_id = None, tags = None, **kwargs):
+     # Interaction
+     self.interaction.output = finish.return_values["output"]
+     # Trace
+     self.llm_trace.output = finish.return_values["output"]
+     message_history = self._convert_to_json_format(self.chat_history)
+     self.llm_trace.messages = self.__parse_multimodal_history(message_history)
+     await self.__send_interaction()
+
+   def __flat_messages(self, messages: list[list[BaseMessage]], to_json: bool = False) -> list[BaseMessage]:
+     """
+     Maps the messages to the format expected by the LLM.
+     Flattens the nested list structure of messages.
+     """
+     # Flatten the nested list structure
+     flattened_messages = []
+     for message_list in messages:
+       flattened_messages.extend(message_list)
+     # Store JSON format in LLM trace
+     if to_json:
+       return self._convert_to_json_format(flattened_messages)
+     return flattened_messages
+
+   def _convert_to_json_format(self, messages: list[BaseMessage]) -> list[dict]:
+     """Converts BaseMessage objects to JSON format with role and content."""
+     result = []
+     for message in messages:
+       if isinstance(message, HumanMessage):
+         role = "user"
+       elif isinstance(message, AIMessage):
+         role = "assistant"
+       else:
+         role = "system"
+
+       result.append({
+         "role": role,
+         "content": message.content
+       })
+     return result
+
+   async def __send_interaction(self):
+     # Send the interaction to the server
+     from urllib.parse import urljoin
+     import requests
+
+     payload = self.__prepare_payload()
+     endpoint = urljoin(self.__url, "event-ingestion/api/v2/events/trace_interaction")
+     # Prepare headers with authentication
+     headers = {"Content-Type": "application/json"}
+     if self.__api_key:
+       headers["Authorization"] = f"Bearer {self.__api_key}"
+     response = requests.post(
+       url=endpoint,
+       json=payload,
+       headers=headers
+     )
+     if response.status_code != 200:
+       print(f"Failed to send interaction: {response.status_code} {response.text}")
+
+   def __prepare_payload(self):
+     self.interaction.time_end = datetime.now().astimezone().isoformat()
+     self.interaction.tags["response_with_rag"] = self.__response_with_rag
+     payload = {
+       "interaction": self.interaction.__dict__,
+       "traces": [
+         self.llm_trace.__dict__,
+       ]
+     }
+     for trace in self.retrieval_traces:
+       if trace.source:
+         payload["traces"].append(trace.__dict__)
+     return payload
+
+   def __parse_multimodal_input(self, input: list[dict]) -> str:
+     """Parse multimodal input and return a string representation."""
+     type_mapping = {
+       "text": lambda item: item.get("text", ""),
+       "image": lambda _: " <image>",
+       "image_url": lambda _: " <image>",
+       "file": lambda _: " <file>",
+       "media": lambda _: " <file>",
+       "document": lambda _: " <file>",
+     }
+
+     return "".join(
+       type_mapping.get(item.get("type", ""), lambda item: f" <{item.get('type', '')}>")
+       (item) for item in input
+     )
+
+   def __parse_multimodal_history(self, messages: list[dict]) -> list[dict]:
+     # Parse the multimodal history and return a list of dictionaries
+     parsed_history = []
+     for message in messages:
+       if isinstance(message["content"], list):
+         parsed_content = self.__parse_multimodal_input(message["content"])
+       else:
+         parsed_content = message["content"]
+       parsed_history.append({
+         "role": message["role"],
+         "content": parsed_content
+       })
+     return parsed_history
+
+   def __is_message_generated(self, messages: list[BaseMessage]) -> bool:
+     # Check if the last message is generated by the model
+     if len(messages) == 0:
+       return False
+     last_user_message = f'<div class="llm__pill">{messages[-1].content}</div>'
+     last_ai_message = messages[-2].content
+     if last_user_message in last_ai_message:
+       return "true"
+     return "false"
ws_bom_robot_app/llm/tools/tool_builder.py
@@ -1,68 +1,68 @@
- import asyncio
- from asyncio import Queue
- from langchain.tools import StructuredTool
- from ws_bom_robot_app.llm.models.api import LlmAppTool
- from ws_bom_robot_app.llm.tools.tool_manager import ToolManager
- from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
-
- async def __process_proxy_tool(proxy_tool: LlmAppTool) -> LlmAppTool | None:
-   import os
-   from ws_bom_robot_app.llm.utils.cms import CmsApp, get_app_by_id
-   from ws_bom_robot_app.config import config
-   try:
-     secrets = proxy_tool.secrets_to_dict()
-     app_id = secrets.get("appId")
-     if not app_id:
-       raise ValueError("Tool configuration is invalid. 'appId' is required.")
-     app: CmsApp = await get_app_by_id(app_id)
-     if not app:
-       raise ValueError(f"App with id {app_id} not found.")
-     tool_id = secrets.get("toolId")
-     tool = next((t for t in app.rq.app_tools if app.rq.app_tools and t.id == tool_id), None)
-     if not tool:
-       raise ValueError(f"Tool with function_id {tool_id} not found in app {app.name}.")
-     #override derived tool with proxy tool props
-     tool.name = proxy_tool.name if proxy_tool.name else tool.name
-     tool.description = proxy_tool.description if proxy_tool.description else tool.description
-     tool.function_id = proxy_tool.function_id if proxy_tool.function_id else tool.function_id
-     tool.function_description = proxy_tool.function_description if proxy_tool.function_description else tool.function_description
-     #normalize vector_db
-     if tool.vector_db:
-       tool.vector_db = os.path.join(
-         os.path.join(config.robot_data_folder,config.robot_data_db_folder,config.robot_data_db_folder_store),
-         os.path.splitext(os.path.basename(tool.vector_db))[0]) if tool.vector_db else None
-     return tool
-   except Exception as e:
-     print(f"[!] Error in proxy_app_tool: {e}")
-     return None
-
- def get_structured_tools(llm: LlmInterface, tools: list[LlmAppTool], callbacks:list, queue: Queue) -> list[StructuredTool]:
-   _structured_tools :list[StructuredTool] = []
-   for tool in [tool for tool in tools if tool.is_active]:
-     if tool.function_name == "proxy_app_tool":
-       # override the tool
-       loop = asyncio.get_event_loop()
-       if loop.is_running():
-         import nest_asyncio
-         nest_asyncio.apply()
-       processed_tool = loop.run_until_complete(__process_proxy_tool(tool))
-       if processed_tool is None:
-         continue
-       tool = processed_tool
-     if _tool_config := ToolManager._list.get(tool.function_name):
-       _tool_instance = ToolManager(llm, tool, callbacks, queue)
-       _structured_tool = StructuredTool.from_function(
-         coroutine=_tool_instance.get_coroutine(),
-         name=tool.function_id if tool.function_id else tool.function_name,
-         description=tool.function_description,
-         args_schema=_tool_config.model
-         #infer_schema=True,
-         #parse_docstring=True,
-         #error_on_invalid_docstring=True
-       )
-       _structured_tool.tags = [tool.function_id if tool.function_id else tool.function_name]
-       secrets = tool.secrets_to_dict()
-       if secrets and secrets.get("stream") == "true":
-         _structured_tool.tags.append("stream")
-       _structured_tools.append(_structured_tool)
-   return _structured_tools
+ import asyncio
+ from asyncio import Queue
+ from langchain.tools import StructuredTool
+ from ws_bom_robot_app.llm.models.api import LlmAppTool
+ from ws_bom_robot_app.llm.tools.tool_manager import ToolManager
+ from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
+
+ async def __process_proxy_tool(proxy_tool: LlmAppTool) -> LlmAppTool | None:
+   import os
+   from ws_bom_robot_app.llm.utils.cms import CmsApp, get_app_by_id
+   from ws_bom_robot_app.config import config
+   try:
+     secrets = proxy_tool.secrets_to_dict()
+     app_id = secrets.get("appId")
+     if not app_id:
+       raise ValueError("Tool configuration is invalid. 'appId' is required.")
+     app: CmsApp = await get_app_by_id(app_id)
+     if not app:
+       raise ValueError(f"App with id {app_id} not found.")
+     tool_id = secrets.get("toolId")
+     tool = next((t for t in app.rq.app_tools if app.rq.app_tools and t.id == tool_id), None)
+     if not tool:
+       raise ValueError(f"Tool with function_id {tool_id} not found in app {app.name}.")
+     #override derived tool with proxy tool props
+     tool.name = proxy_tool.name if proxy_tool.name else tool.name
+     tool.description = proxy_tool.description if proxy_tool.description else tool.description
+     tool.function_id = proxy_tool.function_id if proxy_tool.function_id else tool.function_id
+     tool.function_description = proxy_tool.function_description if proxy_tool.function_description else tool.function_description
+     #normalize vector_db
+     if tool.vector_db:
+       tool.vector_db = os.path.join(
+         os.path.join(config.robot_data_folder,config.robot_data_db_folder,config.robot_data_db_folder_store),
+         os.path.splitext(os.path.basename(tool.vector_db))[0]) if tool.vector_db else None
+     return tool
+   except Exception as e:
+     print(f"[!] Error in proxy_app_tool: {e}")
+     return None
+
+ def get_structured_tools(llm: LlmInterface, tools: list[LlmAppTool], callbacks:list, queue: Queue) -> list[StructuredTool]:
+   _structured_tools :list[StructuredTool] = []
+   for tool in [tool for tool in tools if tool.is_active]:
+     if tool.function_name == "proxy_app_tool":
+       # override the tool
+       loop = asyncio.get_event_loop()
+       if loop.is_running():
+         import nest_asyncio
+         nest_asyncio.apply()
+       processed_tool = loop.run_until_complete(__process_proxy_tool(tool))
+       if processed_tool is None:
+         continue
+       tool = processed_tool
+     if _tool_config := ToolManager._list.get(tool.function_name):
+       _tool_instance = ToolManager(llm, tool, callbacks, queue)
+       _structured_tool = StructuredTool.from_function(
+         coroutine=_tool_instance.get_coroutine(),
+         name=tool.function_id if tool.function_id else tool.function_name,
+         description=tool.function_description,
+         args_schema=_tool_config.model
+         #infer_schema=True,
+         #parse_docstring=True,
+         #error_on_invalid_docstring=True
+       )
+       _structured_tool.tags = [tool.function_id if tool.function_id else tool.function_name]
+       secrets = tool.secrets_to_dict()
+       if secrets and secrets.get("stream") == "true":
+         _structured_tool.tags.append("stream")
+       _structured_tools.append(_structured_tool)
+   return _structured_tools
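
A short, illustrative sketch of how get_structured_tools is typically called when assembling an agent (not part of the diff): the llm, app_tools and callbacks values are assumed to come from the surrounding request handling, and the queue is simply the asyncio.Queue shared with each ToolManager instance.

  from asyncio import Queue
  from ws_bom_robot_app.llm.tools.tool_builder import get_structured_tools

  def build_tools(llm, app_tools, callbacks):
    # Queue shared with every ToolManager instance created for the active tools.
    queue = Queue()
    # Inactive tools are skipped; "proxy_app_tool" entries are first resolved against the
    # referenced CMS app, then each tool known to ToolManager._list becomes a StructuredTool.
    tools = get_structured_tools(llm, app_tools, callbacks, queue)
    return tools, queue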