ws-bom-robot-app 0.0.69__py3-none-any.whl → 0.0.71__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,181 +1,181 @@
- from typing import Union
- from ws_bom_robot_app.llm.models.api import NebulyInteraction, NebulyLLMTrace, NebulyRetrievalTrace
- from datetime import datetime, timezone
- from langchain_core.callbacks.base import AsyncCallbackHandler
- import ws_bom_robot_app.llm.settings as settings
- from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
- from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
-
- class NebulyHandler(AsyncCallbackHandler):
- def __init__(self, llm_model: str | None, threadId: str = None, url: str = None, api_key: str = None, user_id: str | None = None):
- super().__init__()
- self.__started: bool = False
- self.__url: str = url
- self.__api_key: str = api_key
- self.interaction = NebulyInteraction(
- conversation_id=threadId,
- input="",
- output="",
- time_start="",
- time_end="",
- end_user= user_id if user_id and user_id != "" else threadId,
- tags={"model": llm_model},
- )
- self.llm_trace = NebulyLLMTrace(
- model=llm_model,
- messages=[],
- output="",
- input_tokens=0,
- output_tokens=0,
- )
- self.__response_with_rag: str = "false" # Flag to check if the AI used some retrieval tools
- self.__retrieval_query: str = ""
- self.retrieval_traces: list[NebulyRetrievalTrace] = []
-
- async def on_chat_model_start(self, serialized, messages, *, run_id, parent_run_id = None, tags = None, metadata = None, **kwargs):
- # Initialize the interaction with the input message
- if not self.__started:
- message_list = self.__flat_messages(messages)
- if isinstance(message_list[-1], HumanMessage):
- if isinstance(message_list[-1].content, list):
- self.interaction.input = self.__parse_multimodal_input(message_list[-1].content)
- else:
- self.interaction.input = message_list[-1].content
- self.interaction.tags["generated"] = self.__is_message_generated(message_list)
- else:
- raise ValueError("Last message is not a HumanMessage")
- self.interaction.time_start = datetime.now().astimezone().isoformat()
- self.__started = True
-
- async def on_llm_end(self, response, *, run_id, parent_run_id = None, tags = None, **kwargs):
- generation: Union[ChatGenerationChunk, GenerationChunk] = response.generations[0]
- usage_metadata: dict = generation[0].message.usage_metadata
- self.llm_trace.input_tokens = usage_metadata.get("input_tokens", 0)
- self.llm_trace.output_tokens = usage_metadata.get("output_tokens", 0)
-
- async def on_retriever_start(self, serialized, query, *, run_id, parent_run_id = None, tags = None, metadata = None, **kwargs):
- self.__retrieval_query = query
-
-
- async def on_retriever_end(self, documents, *, run_id, parent_run_id = None, tags = None, **kwargs):
- # pass the document source because of the large amount of data in the document content
- for doc in documents:
- self.retrieval_traces.append(
- NebulyRetrievalTrace(
- source=doc.metadata.get("source", "content unavailable"),
- input=self.__retrieval_query,
- outputs=[doc.metadata.get("source", "content unavailable")]
- )
- )
-
- async def on_tool_start(self, serialized, input_str, *, run_id, parent_run_id = None, tags = None, metadata = None, inputs = None, **kwargs):
- self.__response_with_rag = "true" # Set the flag to true when the retriever starts
-
- async def on_agent_finish(self, finish, *, run_id, parent_run_id = None, tags = None, **kwargs):
- # Interaction
- self.interaction.output = finish.return_values["output"]
- # Trace
- self.llm_trace.output = finish.return_values["output"]
- message_history = self._convert_to_json_format(settings.chat_history)[:-1]
- self.llm_trace.messages = self.__parse_multimodal_history(message_history)
- await self.__send_interaction()
-
- def __flat_messages(self, messages: list[list[BaseMessage]], to_json: bool = False) -> list[BaseMessage]:
- """
- Maps the messages to the format expected by the LLM.
- Flattens the nested list structure of messages.
- """
- # Flatten the nested list structure
- flattened_messages = []
- for message_list in messages:
- flattened_messages.extend(message_list)
- # Store JSON format in LLM trace
- if to_json:
- return self._convert_to_json_format(flattened_messages)
- return flattened_messages
-
- def _convert_to_json_format(self, messages: list[BaseMessage]) -> list[dict]:
- """Converts BaseMessage objects to JSON format with role and content."""
- result = []
- for message in messages:
- if isinstance(message, HumanMessage):
- role = "user"
- elif isinstance(message, AIMessage):
- role = "assistant"
- else:
- role = "system"
-
- result.append({
- "role": role,
- "content": message.content
- })
- return result
-
- async def __send_interaction(self):
- # Send the interaction to the server
- from urllib.parse import urljoin
- import requests
-
- payload = self.__prepare_payload()
- endpoint = urljoin(self.__url, "event-ingestion/api/v2/events/trace_interaction")
- # Prepare headers with authentication
- headers = {"Content-Type": "application/json"}
- if self.__api_key:
- headers["Authorization"] = f"Bearer {self.__api_key}"
- response = requests.post(
- url=endpoint,
- json=payload,
- headers=headers
- )
- if response.status_code != 200:
- print(f"Failed to send interaction: {response.status_code} {response.text}")
-
- def __prepare_payload(self):
- self.interaction.time_end = datetime.now().astimezone().isoformat()
- self.interaction.tags["response_with_rag"] = self.__response_with_rag
- payload = {
- "interaction": self.interaction.__dict__,
- "traces": [
- self.llm_trace.__dict__,
- ]
- }
- for trace in self.retrieval_traces:
- if trace.source:
- payload["traces"].append(trace.__dict__)
- return payload
-
- def __parse_multimodal_input(self, input: list[dict]) -> str:
- # Parse the multimodal input and return a string representation
- # This is a placeholder implementation, you can customize it as needed
- parsed_input = ""
- for item in input:
- if item.get("type") == "text":
- parsed_input += item.get("text", "")
- elif item.get("type") == "image_url":
- parsed_input += " <image>"
- print(parsed_input)
- return parsed_input
-
- def __parse_multimodal_history(self, messages: list[dict]) -> list[dict]:
- # Parse the multimodal history and return a list of dictionaries
- parsed_history = []
- for message in messages:
- if isinstance(message["content"], list):
- parsed_content = self.__parse_multimodal_input(message["content"])
- else:
- parsed_content = message["content"]
- parsed_history.append({
- "role": message["role"],
- "content": parsed_content
- })
- return parsed_history
-
- def __is_message_generated(self, messages: list[BaseMessage]) -> bool:
- # Check if the last message is generated by the model
- if len(messages) == 0:
- return False
- last_user_message = f'<div class="llm__pill">{messages[-1].content}</div>'
- last_ai_message = messages[-2].content
- if last_user_message in last_ai_message:
- return "true"
- return "false"
+ from typing import Union
+ from ws_bom_robot_app.llm.models.api import NebulyInteraction, NebulyLLMTrace, NebulyRetrievalTrace
+ from datetime import datetime, timezone
+ from langchain_core.callbacks.base import AsyncCallbackHandler
+ import ws_bom_robot_app.llm.settings as settings
+ from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
+ from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
+
+ class NebulyHandler(AsyncCallbackHandler):
+ def __init__(self, llm_model: str | None, threadId: str = None, url: str = None, api_key: str = None, user_id: str | None = None):
+ super().__init__()
+ self.__started: bool = False
+ self.__url: str = url
+ self.__api_key: str = api_key
+ self.interaction = NebulyInteraction(
+ conversation_id=threadId,
+ input="",
+ output="",
+ time_start="",
+ time_end="",
+ end_user= user_id if user_id and user_id != "" else threadId,
+ tags={"model": llm_model},
+ )
+ self.llm_trace = NebulyLLMTrace(
+ model=llm_model,
+ messages=[],
+ output="",
+ input_tokens=0,
+ output_tokens=0,
+ )
+ self.__response_with_rag: str = "false" # Flag to check if the AI used some retrieval tools
+ self.__retrieval_query: str = ""
+ self.retrieval_traces: list[NebulyRetrievalTrace] = []
+
+ async def on_chat_model_start(self, serialized, messages, *, run_id, parent_run_id = None, tags = None, metadata = None, **kwargs):
+ # Initialize the interaction with the input message
+ if not self.__started:
+ message_list = self.__flat_messages(messages)
+ if isinstance(message_list[-1], HumanMessage):
+ if isinstance(message_list[-1].content, list):
+ self.interaction.input = self.__parse_multimodal_input(message_list[-1].content)
+ else:
+ self.interaction.input = message_list[-1].content
+ self.interaction.tags["generated"] = self.__is_message_generated(message_list)
+ else:
+ raise ValueError("Last message is not a HumanMessage")
+ self.interaction.time_start = datetime.now().astimezone().isoformat()
+ self.__started = True
+
+ async def on_llm_end(self, response, *, run_id, parent_run_id = None, tags = None, **kwargs):
+ generation: Union[ChatGenerationChunk, GenerationChunk] = response.generations[0]
+ usage_metadata: dict = generation[0].message.usage_metadata
+ self.llm_trace.input_tokens = usage_metadata.get("input_tokens", 0)
+ self.llm_trace.output_tokens = usage_metadata.get("output_tokens", 0)
+
+ async def on_retriever_start(self, serialized, query, *, run_id, parent_run_id = None, tags = None, metadata = None, **kwargs):
+ self.__retrieval_query = query
+
+
+ async def on_retriever_end(self, documents, *, run_id, parent_run_id = None, tags = None, **kwargs):
+ # pass the document source because of the large amount of data in the document content
+ for doc in documents:
+ self.retrieval_traces.append(
+ NebulyRetrievalTrace(
+ source=doc.metadata.get("source", "content unavailable"),
+ input=self.__retrieval_query,
+ outputs=[doc.metadata.get("source", "content unavailable")]
+ )
+ )
+
+ async def on_tool_start(self, serialized, input_str, *, run_id, parent_run_id = None, tags = None, metadata = None, inputs = None, **kwargs):
+ self.__response_with_rag = "true" # Set the flag to true when the retriever starts
+
+ async def on_agent_finish(self, finish, *, run_id, parent_run_id = None, tags = None, **kwargs):
+ # Interaction
+ self.interaction.output = finish.return_values["output"]
+ # Trace
+ self.llm_trace.output = finish.return_values["output"]
+ message_history = self._convert_to_json_format(settings.chat_history)[:-1]
+ self.llm_trace.messages = self.__parse_multimodal_history(message_history)
+ await self.__send_interaction()
+
+ def __flat_messages(self, messages: list[list[BaseMessage]], to_json: bool = False) -> list[BaseMessage]:
+ """
+ Maps the messages to the format expected by the LLM.
+ Flattens the nested list structure of messages.
+ """
+ # Flatten the nested list structure
+ flattened_messages = []
+ for message_list in messages:
+ flattened_messages.extend(message_list)
+ # Store JSON format in LLM trace
+ if to_json:
+ return self._convert_to_json_format(flattened_messages)
+ return flattened_messages
+
+ def _convert_to_json_format(self, messages: list[BaseMessage]) -> list[dict]:
+ """Converts BaseMessage objects to JSON format with role and content."""
+ result = []
+ for message in messages:
+ if isinstance(message, HumanMessage):
+ role = "user"
+ elif isinstance(message, AIMessage):
+ role = "assistant"
+ else:
+ role = "system"
+
+ result.append({
+ "role": role,
+ "content": message.content
+ })
+ return result
+
+ async def __send_interaction(self):
+ # Send the interaction to the server
+ from urllib.parse import urljoin
+ import requests
+
+ payload = self.__prepare_payload()
+ endpoint = urljoin(self.__url, "event-ingestion/api/v2/events/trace_interaction")
+ # Prepare headers with authentication
+ headers = {"Content-Type": "application/json"}
+ if self.__api_key:
+ headers["Authorization"] = f"Bearer {self.__api_key}"
+ response = requests.post(
+ url=endpoint,
+ json=payload,
+ headers=headers
+ )
+ if response.status_code != 200:
+ print(f"Failed to send interaction: {response.status_code} {response.text}")
+
+ def __prepare_payload(self):
+ self.interaction.time_end = datetime.now().astimezone().isoformat()
+ self.interaction.tags["response_with_rag"] = self.__response_with_rag
+ payload = {
+ "interaction": self.interaction.__dict__,
+ "traces": [
+ self.llm_trace.__dict__,
+ ]
+ }
+ for trace in self.retrieval_traces:
+ if trace.source:
+ payload["traces"].append(trace.__dict__)
+ return payload
+
+ def __parse_multimodal_input(self, input: list[dict]) -> str:
+ # Parse the multimodal input and return a string representation
+ # This is a placeholder implementation, you can customize it as needed
+ parsed_input = ""
+ for item in input:
+ if item.get("type") == "text":
+ parsed_input += item.get("text", "")
+ elif item.get("type") == "image_url":
+ parsed_input += " <image>"
+ print(parsed_input)
+ return parsed_input
+
+ def __parse_multimodal_history(self, messages: list[dict]) -> list[dict]:
+ # Parse the multimodal history and return a list of dictionaries
+ parsed_history = []
+ for message in messages:
+ if isinstance(message["content"], list):
+ parsed_content = self.__parse_multimodal_input(message["content"])
+ else:
+ parsed_content = message["content"]
+ parsed_history.append({
+ "role": message["role"],
+ "content": parsed_content
+ })
+ return parsed_history
+
+ def __is_message_generated(self, messages: list[BaseMessage]) -> bool:
+ # Check if the last message is generated by the model
+ if len(messages) == 0:
+ return False
+ last_user_message = f'<div class="llm__pill">{messages[-1].content}</div>'
+ last_ai_message = messages[-2].content
+ if last_user_message in last_ai_message:
+ return "true"
+ return "false"
@@ -107,9 +107,8 @@ class Google(LlmInterface):
  def get_llm(self):
  from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
  return ChatGoogleGenerativeAI(
- name="chat",
- api_key=self.config.api_key or os.getenv("GOOGLE_API_KEY"),
  model=self.config.model,
+ google_api_key=self.config.api_key or os.getenv("GOOGLE_API_KEY"),
  temperature=self.config.temperature,
  disable_streaming=False,
  )
@@ -117,8 +116,8 @@ class Google(LlmInterface):
  def get_embeddings(self):
  from langchain_google_genai.embeddings import GoogleGenerativeAIEmbeddings
  return GoogleGenerativeAIEmbeddings(
- google_api_key=self.config.api_key,
- model="models/text-embedding-005")
+ google_api_key=self.config.api_key or os.getenv("GOOGLE_API_KEY"),
+ model="models/text-embedding-004")

  def get_models(self):
  import google.generativeai as genai
@@ -140,22 +139,33 @@ class Gvertex(LlmInterface):
  temperature=self.config.temperature
  )
  def get_embeddings(self):
- from langchain_google_vertexai import VertexAIEmbeddings
- return VertexAIEmbeddings(model_name="text-embedding-005")
+ from langchain_google_vertexai.embeddings import VertexAIEmbeddings
+ embeddings = VertexAIEmbeddings(model_name="text-embedding-004")
+ #fix gemini-embedding-001 batch size
+ #embeddings.instance["max_batch_size"] = 1
+ #embeddings.instance["batch_size"] = 1
+ return embeddings
  def get_models(self):
- #from google.cloud import aiplatform
- #aiplatform.init()
- #models = aiplatform.Model.list()
+ _models = [
+ {"id":"gemini-2.5-pro"},
+ {"id":"gemini-2.5-flash"},
+ {"id":"gemini-2.0-flash"},
+ {"id":"gemini-2.0-flash-lite"}
+ ]
+ try:
+ from google.cloud import aiplatform
+ aiplatform.init()
+ _list = aiplatform.Model.list()
+ if _list:
+ _models = list([{"id": model.name} for model in _list])
  # removed due issue: https://github.com/langchain-ai/langchain-google/issues/733
  # Message type "google.cloud.aiplatform.v1beta1.GenerateContentResponse" has no field named "createTime" at "GenerateContentResponse". Available Fields(except extensions): "['candidates', 'modelVersion', 'promptFeedback', 'usageMetadata']"
-
- #see https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#united-states for available models
- return [
- {"id":"gemini-2.5-pro-preview-05-06"},
- {"id":"gemini-2.0-flash"},
- {"id":"gemini-2.0-flash-lite"},
- {"id":"gemini-1.5-pro-002"}
- ]
+ except Exception as e:
+ print(f"Error fetching models from Gvertex: {e}")
+ # fallback to hardcoded models
+ #see https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#united-states for available models
+ finally:
+ return _models

  class Groq(LlmInterface):
  def get_llm(self):
@@ -1,4 +1,4 @@
- def init():
- """Initialize the chat history list as a global var"""
- global chat_history
- chat_history = []
+ def init():
+ """Initialize the chat history list as a global var"""
+ global chat_history
+ chat_history = []
@@ -1,65 +1,65 @@
- import asyncio
- from asyncio import Queue
- from langchain.tools import StructuredTool
- from ws_bom_robot_app.llm.models.api import LlmAppTool
- from ws_bom_robot_app.llm.tools.tool_manager import ToolManager
- from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
-
- async def __process_proxy_tool(proxy_tool: LlmAppTool) -> LlmAppTool | None:
- import os
- from ws_bom_robot_app.llm.utils.cms import CmsApp, get_app_by_id
- from ws_bom_robot_app.config import config
- try:
- secrets = proxy_tool.secrets_to_dict()
- app_id = secrets.get("appId")
- if not app_id:
- raise ValueError("Tool configuration is invalid. 'appId' is required.")
- app: CmsApp = await get_app_by_id(app_id)
- if not app:
- raise ValueError(f"App with id {app_id} not found.")
- tool_id = secrets.get("toolId")
- tool = next((t for t in app.rq.app_tools if app.rq.app_tools and t.id == tool_id), None)
- if not tool:
- raise ValueError(f"Tool with function_id {tool_id} not found in app {app.name}.")
- #override derived tool with proxy tool props
- tool.name = proxy_tool.name if proxy_tool.name else tool.name
- tool.description = proxy_tool.description if proxy_tool.description else tool.description
- tool.function_id = proxy_tool.function_id if proxy_tool.function_id else tool.function_id
- tool.function_description = proxy_tool.function_description if proxy_tool.function_description else tool.function_description
- #normalize vector_db
- if tool.vector_db:
- tool.vector_db = os.path.join(
- os.path.join(config.robot_data_folder,config.robot_data_db_folder,config.robot_data_db_folder_store),
- os.path.splitext(os.path.basename(tool.vector_db))[0]) if tool.vector_db else None
- return tool
- except Exception as e:
- print(f"[!] Error in proxy_app_tool: {e}")
- return None
-
- def get_structured_tools(llm: LlmInterface, tools: list[LlmAppTool], callbacks:list, queue: Queue) -> list[StructuredTool]:
- _structured_tools :list[StructuredTool] = []
- for tool in [tool for tool in tools if tool.is_active]:
- if tool.function_name == "proxy_app_tool":
- # override the tool
- loop = asyncio.get_event_loop()
- if loop.is_running():
- import nest_asyncio
- nest_asyncio.apply()
- processed_tool = loop.run_until_complete(__process_proxy_tool(tool))
- if processed_tool is None:
- continue
- tool = processed_tool
- if _tool_config := ToolManager._list.get(tool.function_name):
- _tool_instance = ToolManager(llm, tool, callbacks, queue)
- _structured_tool = StructuredTool.from_function(
- coroutine=_tool_instance.get_coroutine(),
- name=tool.function_id if tool.function_id else tool.function_name,
- description=tool.function_description,
- args_schema=_tool_config.model
- #infer_schema=True,
- #parse_docstring=True,
- #error_on_invalid_docstring=True
- )
- _structured_tool.tags = [tool.function_id if tool.function_id else tool.function_name]
- _structured_tools.append(_structured_tool)
- return _structured_tools
+ import asyncio
+ from asyncio import Queue
+ from langchain.tools import StructuredTool
+ from ws_bom_robot_app.llm.models.api import LlmAppTool
+ from ws_bom_robot_app.llm.tools.tool_manager import ToolManager
+ from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
+
+ async def __process_proxy_tool(proxy_tool: LlmAppTool) -> LlmAppTool | None:
+ import os
+ from ws_bom_robot_app.llm.utils.cms import CmsApp, get_app_by_id
+ from ws_bom_robot_app.config import config
+ try:
+ secrets = proxy_tool.secrets_to_dict()
+ app_id = secrets.get("appId")
+ if not app_id:
+ raise ValueError("Tool configuration is invalid. 'appId' is required.")
+ app: CmsApp = await get_app_by_id(app_id)
+ if not app:
+ raise ValueError(f"App with id {app_id} not found.")
+ tool_id = secrets.get("toolId")
+ tool = next((t for t in app.rq.app_tools if app.rq.app_tools and t.id == tool_id), None)
+ if not tool:
+ raise ValueError(f"Tool with function_id {tool_id} not found in app {app.name}.")
+ #override derived tool with proxy tool props
+ tool.name = proxy_tool.name if proxy_tool.name else tool.name
+ tool.description = proxy_tool.description if proxy_tool.description else tool.description
+ tool.function_id = proxy_tool.function_id if proxy_tool.function_id else tool.function_id
+ tool.function_description = proxy_tool.function_description if proxy_tool.function_description else tool.function_description
+ #normalize vector_db
+ if tool.vector_db:
+ tool.vector_db = os.path.join(
+ os.path.join(config.robot_data_folder,config.robot_data_db_folder,config.robot_data_db_folder_store),
+ os.path.splitext(os.path.basename(tool.vector_db))[0]) if tool.vector_db else None
+ return tool
+ except Exception as e:
+ print(f"[!] Error in proxy_app_tool: {e}")
+ return None
+
+ def get_structured_tools(llm: LlmInterface, tools: list[LlmAppTool], callbacks:list, queue: Queue) -> list[StructuredTool]:
+ _structured_tools :list[StructuredTool] = []
+ for tool in [tool for tool in tools if tool.is_active]:
+ if tool.function_name == "proxy_app_tool":
+ # override the tool
+ loop = asyncio.get_event_loop()
+ if loop.is_running():
+ import nest_asyncio
+ nest_asyncio.apply()
+ processed_tool = loop.run_until_complete(__process_proxy_tool(tool))
+ if processed_tool is None:
+ continue
+ tool = processed_tool
+ if _tool_config := ToolManager._list.get(tool.function_name):
+ _tool_instance = ToolManager(llm, tool, callbacks, queue)
+ _structured_tool = StructuredTool.from_function(
+ coroutine=_tool_instance.get_coroutine(),
+ name=tool.function_id if tool.function_id else tool.function_name,
+ description=tool.function_description,
+ args_schema=_tool_config.model
+ #infer_schema=True,
+ #parse_docstring=True,
+ #error_on_invalid_docstring=True
+ )
+ _structured_tool.tags = [tool.function_id if tool.function_id else tool.function_name]
+ _structured_tools.append(_structured_tool)
+ return _structured_tools