langroid 0.58.2__py3-none-any.whl → 0.59.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langroid/agent/base.py +39 -17
- langroid/agent/base.py-e +2216 -0
- langroid/agent/callbacks/chainlit.py +2 -1
- langroid/agent/chat_agent.py +73 -55
- langroid/agent/chat_agent.py-e +2086 -0
- langroid/agent/chat_document.py +7 -7
- langroid/agent/chat_document.py-e +513 -0
- langroid/agent/openai_assistant.py +9 -9
- langroid/agent/openai_assistant.py-e +882 -0
- langroid/agent/special/arangodb/arangodb_agent.py +10 -18
- langroid/agent/special/arangodb/arangodb_agent.py-e +648 -0
- langroid/agent/special/arangodb/tools.py +3 -3
- langroid/agent/special/doc_chat_agent.py +16 -14
- langroid/agent/special/lance_rag/critic_agent.py +2 -2
- langroid/agent/special/lance_rag/query_planner_agent.py +4 -4
- langroid/agent/special/lance_tools.py +6 -5
- langroid/agent/special/lance_tools.py-e +61 -0
- langroid/agent/special/neo4j/neo4j_chat_agent.py +3 -7
- langroid/agent/special/neo4j/neo4j_chat_agent.py-e +430 -0
- langroid/agent/special/relevance_extractor_agent.py +1 -1
- langroid/agent/special/sql/sql_chat_agent.py +11 -3
- langroid/agent/task.py +9 -87
- langroid/agent/task.py-e +2418 -0
- langroid/agent/tool_message.py +33 -17
- langroid/agent/tool_message.py-e +400 -0
- langroid/agent/tools/file_tools.py +4 -2
- langroid/agent/tools/file_tools.py-e +234 -0
- langroid/agent/tools/mcp/fastmcp_client.py +19 -6
- langroid/agent/tools/mcp/fastmcp_client.py-e +584 -0
- langroid/agent/tools/orchestration.py +22 -17
- langroid/agent/tools/orchestration.py-e +301 -0
- langroid/agent/tools/recipient_tool.py +3 -3
- langroid/agent/tools/task_tool.py +22 -16
- langroid/agent/tools/task_tool.py-e +249 -0
- langroid/agent/xml_tool_message.py +90 -35
- langroid/agent/xml_tool_message.py-e +392 -0
- langroid/cachedb/base.py +1 -1
- langroid/embedding_models/base.py +2 -2
- langroid/embedding_models/models.py +3 -7
- langroid/embedding_models/models.py-e +563 -0
- langroid/exceptions.py +4 -1
- langroid/language_models/azure_openai.py +2 -2
- langroid/language_models/azure_openai.py-e +134 -0
- langroid/language_models/base.py +6 -4
- langroid/language_models/base.py-e +812 -0
- langroid/language_models/client_cache.py +64 -0
- langroid/language_models/config.py +2 -4
- langroid/language_models/config.py-e +18 -0
- langroid/language_models/model_info.py +9 -1
- langroid/language_models/model_info.py-e +483 -0
- langroid/language_models/openai_gpt.py +119 -20
- langroid/language_models/openai_gpt.py-e +2280 -0
- langroid/language_models/provider_params.py +3 -22
- langroid/language_models/provider_params.py-e +153 -0
- langroid/mytypes.py +11 -4
- langroid/mytypes.py-e +132 -0
- langroid/parsing/code_parser.py +1 -1
- langroid/parsing/file_attachment.py +1 -1
- langroid/parsing/file_attachment.py-e +246 -0
- langroid/parsing/md_parser.py +14 -4
- langroid/parsing/md_parser.py-e +574 -0
- langroid/parsing/parser.py +22 -7
- langroid/parsing/parser.py-e +410 -0
- langroid/parsing/repo_loader.py +3 -1
- langroid/parsing/repo_loader.py-e +812 -0
- langroid/parsing/search.py +1 -1
- langroid/parsing/url_loader.py +17 -51
- langroid/parsing/url_loader.py-e +683 -0
- langroid/parsing/urls.py +5 -4
- langroid/parsing/urls.py-e +279 -0
- langroid/prompts/prompts_config.py +1 -1
- langroid/pydantic_v1/__init__.py +45 -6
- langroid/pydantic_v1/__init__.py-e +36 -0
- langroid/pydantic_v1/main.py +11 -4
- langroid/pydantic_v1/main.py-e +11 -0
- langroid/utils/configuration.py +13 -11
- langroid/utils/configuration.py-e +141 -0
- langroid/utils/constants.py +1 -1
- langroid/utils/constants.py-e +32 -0
- langroid/utils/globals.py +21 -5
- langroid/utils/globals.py-e +49 -0
- langroid/utils/html_logger.py +2 -1
- langroid/utils/html_logger.py-e +825 -0
- langroid/utils/object_registry.py +1 -1
- langroid/utils/object_registry.py-e +66 -0
- langroid/utils/pydantic_utils.py +55 -28
- langroid/utils/pydantic_utils.py-e +602 -0
- langroid/utils/types.py +2 -2
- langroid/utils/types.py-e +113 -0
- langroid/vector_store/base.py +3 -3
- langroid/vector_store/lancedb.py +5 -5
- langroid/vector_store/lancedb.py-e +404 -0
- langroid/vector_store/meilisearch.py +2 -2
- langroid/vector_store/pineconedb.py +4 -4
- langroid/vector_store/pineconedb.py-e +427 -0
- langroid/vector_store/postgres.py +1 -1
- langroid/vector_store/qdrantdb.py +3 -3
- langroid/vector_store/weaviatedb.py +1 -1
- {langroid-0.58.2.dist-info → langroid-0.59.0b1.dist-info}/METADATA +3 -2
- langroid-0.59.0b1.dist-info/RECORD +181 -0
- langroid/agent/special/doc_chat_task.py +0 -0
- langroid/mcp/__init__.py +0 -1
- langroid/mcp/server/__init__.py +0 -1
- langroid-0.58.2.dist-info/RECORD +0 -145
- {langroid-0.58.2.dist-info → langroid-0.59.0b1.dist-info}/WHEEL +0 -0
- {langroid-0.58.2.dist-info → langroid-0.59.0b1.dist-info}/licenses/LICENSE +0 -0
--- /dev/null
+++ b/langroid/agent/tools/orchestration.py-e
@@ -0,0 +1,301 @@
+"""
+Various tools to for agents to be able to control flow of Task, e.g.
+termination, routing to another agent, etc.
+"""
+
+from typing import Any, List, Tuple
+
+from langroid.agent.chat_agent import ChatAgent
+from langroid.agent.chat_document import ChatDocument
+from langroid.agent.tool_message import ToolMessage
+from langroid.mytypes import Entity
+from pydantic import ConfigDict
+from langroid.utils.types import to_string
+
+
+class AgentDoneTool(ToolMessage):
+    """Tool for AGENT entity (i.e. agent_response or downstream tool handling fns) to
+    signal the current task is done."""
+
+    purpose: str = """
+    To signal the current task is done, along with an optional message <content>
+    of arbitrary type (default None) and an
+    optional list of <tools> (default empty list).
+    """
+    request: str = "agent_done_tool"
+    content: Any = None
+    tools: List[ToolMessage] = []
+    # only meant for agent_response or tool-handlers, not for LLM generation:
+    _allow_llm_use: bool = False
+
+    def response(self, agent: ChatAgent) -> ChatDocument:
+        content_str = "" if self.content is None else to_string(self.content)
+        return agent.create_agent_response(
+            content=content_str,
+            content_any=self.content,
+            tool_messages=[self] + self.tools,
+        )
+
+
+class DoneTool(ToolMessage):
+    """Tool for Agent Entity (i.e. agent_response) or LLM entity (i.e. llm_response) to
+    signal the current task is done, with some content as the result."""
+
+    purpose: str = """
+    To signal the current task is done, along with an optional message <content>
+    of arbitrary type (default None).
+    """
+    request: str = "done_tool"
+    content: str = ""
+
+    def response(self, agent: ChatAgent) -> ChatDocument:
+        return agent.create_agent_response(
+            content=self.content,
+            content_any=self.content,
+            tool_messages=[self],
+        )
+
+    @classmethod
+    def instructions(cls) -> str:
+        tool_name = cls.default_value("request")
+        return f"""
+        When you determine your task is finished,
+        use the tool `{tool_name}` to signal this,
+        along with any message or result, in the `content` field.
+        """
+
+
+class ResultTool(ToolMessage):
+    """Class to use as a wrapper for sending arbitrary results from an Agent's
+    agent_response or tool handlers, to:
+    (a) trigger completion of the current task (similar to (Agent)DoneTool), and
+    (b) be returned as the result of the current task, i.e. this tool would appear
+    in the resulting ChatDocument's `tool_messages` list.
+    See test_tool_handlers_and_results in test_tool_messages.py, and
+    examples/basic/tool-extract-short-example.py.
+
+    Note:
+    - when defining a tool handler or agent_response, you can directly return
+      ResultTool(field1 = val1, ...),
+      where the values can be arbitrary data structures, including nested
+      Pydantic objs, or you can define a subclass of ResultTool with the
+      fields you want to return.
+    - This is a special ToolMessage that is NOT meant to be used or handled
+      by an agent.
+    - AgentDoneTool is more restrictive in that you can only send a `content`
+      or `tools` in the result.
+    """
+
+    request: str = "result_tool"
+    purpose: str = "Ignored; Wrapper for a structured message"
+    id: str = ""  # placeholder for OpenAI-API tool_call_id
+
+    model_config = ConfigDict(
+        extra="allow",
+        arbitrary_types_allowed=False,
+        validate_default=True,
+        validate_assignment=True,
+        json_schema_extra={"exclude": {"purpose", "id", "strict"}},
+    )
+
+    def handle(self) -> AgentDoneTool:
+        return AgentDoneTool(tools=[self])
+
+
+class FinalResultTool(ToolMessage):
+    """Class to use as a wrapper for sending arbitrary results from an Agent's
+    agent_response or tool handlers, to:
+    (a) trigger completion of the current task as well as all parent tasks, and
+    (b) be returned as the final result of the root task, i.e. this tool would appear
+    in the final ChatDocument's `tool_messages` list.
+    See test_tool_handlers_and_results in test_tool_messages.py, and
+    examples/basic/chat-tool-function.py.
+
+    Note:
+    - when defining a tool handler or agent_response, you can directly return
+      FinalResultTool(field1 = val1, ...),
+      where the values can be arbitrary data structures, including nested
+      Pydantic objs, or you can define a subclass of FinalResultTool with the
+      fields you want to return.
+    - This is a special ToolMessage that is NOT meant to be used by an agent's
+      llm_response, but only by agent_response or tool handlers.
+    - A subclass of this tool can be defined, with specific fields, and
+      with _allow_llm_use = True, to allow the LLM to generate this tool,
+      and have the effect of terminating the current and all parent tasks,
+      with the tool appearing in the final ChatDocument's `tool_messages` list.
+      See examples/basic/multi-agent-return-result.py.
+    """
+
+    request: str = ""
+    purpose: str = "Ignored; Wrapper for a structured message"
+    id: str = ""  # placeholder for OpenAI-API tool_call_id
+    _allow_llm_use: bool = False
+
+    model_config = ConfigDict(
+        extra="allow",
+        arbitrary_types_allowed=False,
+        validate_default=True,
+        validate_assignment=True,
+        json_schema_extra={"exclude": {"purpose", "id", "strict"}},
+    )
+
+
+class PassTool(ToolMessage):
+    """Tool for "passing" on the received msg (ChatDocument),
+    so that an as-yet-unspecified agent can handle it.
+    Similar to ForwardTool, but without specifying the recipient agent.
+    """
+
+    purpose: str = """
+    To pass the current message so that other agents can handle it.
+    """
+    request: str = "pass_tool"
+
+    def response(self, agent: ChatAgent, chat_doc: ChatDocument) -> ChatDocument:
+        """When this tool is enabled for an Agent, this will result in a method
+        added to the Agent with signature:
+        `pass_tool(self, tool: PassTool, chat_doc: ChatDocument) -> ChatDocument:`
+        """
+        # if PassTool is in chat_doc, pass its parent, else pass chat_doc itself
+        doc = chat_doc
+        while True:
+            tools = agent.get_tool_messages(doc)
+            if not any(isinstance(t, type(self)) for t in tools):
+                break
+            if doc.parent is None:
+                break
+            doc = doc.parent
+        assert doc is not None, "PassTool: parent of chat_doc must not be None"
+        new_doc = ChatDocument.deepcopy(doc)
+        new_doc.metadata.sender = Entity.AGENT
+        return new_doc
+
+    @classmethod
+    def instructions(cls) -> str:
+        return """
+        Use the `pass_tool` to PASS the current message
+        so that another agent can handle it.
+        """
+
+
+class DonePassTool(PassTool):
+    """Tool to signal DONE, AND Pass incoming/current msg as result.
+    Similar to PassTool, except we append a DoneTool to the result tool_messages.
+    """
+
+    purpose: str = """
+    To signal the current task is done, with results set to the current/incoming msg.
+    """
+    request: str = "done_pass_tool"
+
+    def response(self, agent: ChatAgent, chat_doc: ChatDocument) -> ChatDocument:
+        # use PassTool to get the right ChatDocument to pass...
+        new_doc = PassTool.response(self, agent, chat_doc)
+        tools = agent.get_tool_messages(new_doc)
+        # ...then return an AgentDoneTool with content, tools from this ChatDocument
+        return AgentDoneTool(content=new_doc.content, tools=tools)  # type: ignore
+
+    @classmethod
+    def instructions(cls) -> str:
+        return """
+        When you determine your task is finished,
+        and want to pass the current message as the result of the task,
+        use the `done_pass_tool` to signal this.
+        """
+
+
+class ForwardTool(PassTool):
+    """Tool for forwarding the received msg (ChatDocument) to another agent or entity.
+    Similar to PassTool, but with a specified recipient agent.
+    """
+
+    purpose: str = """
+    To forward the current message to an <agent>, where <agent>
+    could be the name of an agent, or an entity such as "user", "llm".
+    """
+    request: str = "forward_tool"
+    agent: str
+
+    def response(self, agent: ChatAgent, chat_doc: ChatDocument) -> ChatDocument:
+        """When this tool is enabled for an Agent, this will result in a method
+        added to the Agent with signature:
+        `forward_tool(self, tool: ForwardTool, chat_doc: ChatDocument) -> ChatDocument:`
+        """
+        # if chat_doc contains ForwardTool, then we forward its parent ChatDocument;
+        # else forward chat_doc itself
+        new_doc = PassTool.response(self, agent, chat_doc)
+        new_doc.metadata.recipient = self.agent
+        return new_doc
+
+    @classmethod
+    def instructions(cls) -> str:
+        return """
+        If you need to forward the current message to another agent,
+        use the `forward_tool` to do so,
+        setting the `recipient` field to the name of the recipient agent.
+        """
+
+
+class SendTool(ToolMessage):
+    """Tool for agent or LLM to send content to a specified agent.
+    Similar to RecipientTool.
+    """
+
+    purpose: str = """
+    To send message <content> to agent specified in <to> field.
+    """
+    request: str = "send_tool"
+    to: str
+    content: str = ""
+
+    def response(self, agent: ChatAgent) -> ChatDocument:
+        return agent.create_agent_response(
+            self.content,
+            recipient=self.to,
+        )
+
+    @classmethod
+    def instructions(cls) -> str:
+        return """
+        If you need to send a message to another agent,
+        use the `send_tool` to do so, with these field values:
+        - `to` field = name of the recipient agent,
+        - `content` field = the message to send.
+        """
+
+    @classmethod
+    def examples(cls) -> List["ToolMessage" | Tuple[str, "ToolMessage"]]:
+        return [
+            cls(to="agent1", content="Hello, agent1!"),
+            (
+                """
+                I need to send the content 'Who built the Gemini model?',
+                to the 'Searcher' agent.
+                """,
+                cls(to="Searcher", content="Who built the Gemini model?"),
+            ),
+        ]
+
+
+class AgentSendTool(ToolMessage):
+    """Tool for Agent (i.e. agent_response) to send content or tool_messages
+    to a specified agent. Similar to SendTool except that AgentSendTool is only
+    usable by agent_response (or handler of another tool), to send content or
+    tools to another agent. SendTool does not allow sending tools.
+    """
+
+    purpose: str = """
+    To send message <content> and <tools> to agent specified in <to> field.
+    """
+    request: str = "agent_send_tool"
+    to: str
+    content: str = ""
+    tools: List[ToolMessage] = []
+    _allow_llm_use: bool = False
+
+    def response(self, agent: ChatAgent) -> ChatDocument:
+        return agent.create_agent_response(
+            self.content,
+            tool_messages=self.tools,
+            recipient=self.to,
+        )
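The docstrings above describe returning these orchestration tools directly from an agent's tool handlers. Below is a minimal sketch of that pattern; `LookupTool`, its `query` field, and `FinalAnswer` are illustrative names, not part of this package diff.

```python
from langroid.agent.tool_message import ToolMessage
from langroid.agent.tools.orchestration import AgentDoneTool, FinalResultTool


class LookupTool(ToolMessage):
    """Hypothetical tool, defined only to illustrate the pattern."""

    request: str = "lookup_tool"
    purpose: str = "To look up <query> and end the current task with the answer."
    query: str

    def handle(self) -> AgentDoneTool:
        # A tool handler can end the current task by returning an
        # orchestration tool; AgentDoneTool.content accepts any value.
        answer = {"query": self.query, "result": "..."}  # placeholder result
        return AgentDoneTool(content=answer)


class FinalAnswer(FinalResultTool):
    # A FinalResultTool subclass may carry arbitrary extra fields
    # (its model_config sets extra="allow"); returning one from a handler
    # ends the current task and all parent tasks.
    answer: str = ""
```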
--- a/langroid/agent/tools/recipient_tool.py
+++ b/langroid/agent/tools/recipient_tool.py
@@ -17,7 +17,7 @@ especially with weaker LLMs.
 
 """
 
-from typing import List, Type
+from typing import ClassVar, List, Type
 
 from rich import print
 
@@ -106,8 +106,8 @@ class RecipientTool(ToolMessage):
         only allows certain recipients, and possibly sets a default recipient."""
 
         class RecipientToolRestricted(cls):  # type: ignore
-            allowed_recipients = recipients
-            default_recipient = default
+            allowed_recipients: ClassVar[List[str]] = recipients
+            default_recipient: ClassVar[str] = default
 
         return RecipientToolRestricted
 
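The `ClassVar` annotations go along with this release's move of tool messages onto Pydantic v2 (note the `from pydantic import ConfigDict` imports elsewhere in the diff), where a plain, un-annotated class attribute on a model is generally rejected as a "non-annotated attribute" unless it is marked as a class variable. A rough illustration, with made-up recipient names:

```python
from typing import ClassVar, List

from pydantic import BaseModel


class RestrictedExample(BaseModel):
    # Without ClassVar, Pydantic v2 rejects these as "non-annotated attributes"
    # (or, if annotated without ClassVar, treats them as model fields).
    allowed_recipients: ClassVar[List[str]] = ["Searcher", "Critic"]
    default_recipient: ClassVar[str] = "Searcher"

    content: str = ""  # an ordinary model field, for contrast


print(RestrictedExample.allowed_recipients)  # ['Searcher', 'Critic']
```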
--- a/langroid/agent/tools/task_tool.py
+++ b/langroid/agent/tools/task_tool.py
@@ -6,13 +6,15 @@ TaskTool: A tool that allows agents to delegate a task to a sub-agent with
 import uuid
 from typing import List, Optional
 
+from pydantic import Field
+from pydantic.fields import ModelPrivateAttr
+
 import langroid.language_models as lm
 from langroid import ChatDocument
 from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
 from langroid.agent.task import Task
 from langroid.agent.tool_message import ToolMessage
 from langroid.agent.tools.orchestration import DoneTool
-from langroid.pydantic_v1 import Field
 
 
 class TaskTool(ToolMessage):
@@ -83,7 +85,7 @@ class TaskTool(ToolMessage):
         """,
     )
     # TODO: ensure valid model name
-    model: str = Field(
+    model: Optional[str] = Field(
         default=None,
         description="""
         Optional name of the LLM model to use for the sub-agent, e.g. 'gpt-4.1'
@@ -148,25 +150,29 @@ class TaskTool(ToolMessage):
         if self.tools == ["ALL"]:
             # Enable all tools from the parent agent:
             # This is the list of all tools KNOWN (whether usable or handle-able or not)
-            tool_classes = [
-                agent.llm_tools_map[t]
-                for t in agent.llm_tools_known
-                if t in agent.llm_tools_map
-                and t != self.request
-                and agent.llm_tools_map[t]._allow_llm_use
-                # Exclude the TaskTool itself!
-            ]
+            tool_classes = []
+            for t in agent.llm_tools_known:
+                if t in agent.llm_tools_map and t != self.request:
+                    tool_class = agent.llm_tools_map[t]
+                    allow_llm_use = tool_class._allow_llm_use
+                    if isinstance(allow_llm_use, ModelPrivateAttr):
+                        allow_llm_use = allow_llm_use.default
+                    if allow_llm_use:
+                        tool_classes.append(tool_class)
         elif self.tools == ["NONE"]:
             # No tools enabled
             tool_classes = []
         else:
             # Enable only specified tools
-            tool_classes = [
-                agent.llm_tools_map[tool_name]
-                for tool_name in self.tools
-                if tool_name in agent.llm_tools_map
-                and agent.llm_tools_map[tool_name]._allow_llm_use
-            ]
+            tool_classes = []
+            for tool_name in self.tools:
+                if tool_name in agent.llm_tools_map:
+                    tool_class = agent.llm_tools_map[tool_name]
+                    allow_llm_use = tool_class._allow_llm_use
+                    if isinstance(allow_llm_use, ModelPrivateAttr):
+                        allow_llm_use = allow_llm_use.default
+                    if allow_llm_use:
+                        tool_classes.append(tool_class)
 
         # always enable the DoneTool to signal task completion
         sub_agent.enable_message(tool_classes + [DoneTool], use=True, handle=True)
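The new `ModelPrivateAttr` check is needed because, under Pydantic v2, a private attribute such as `_allow_llm_use` is exposed on the model class as a `ModelPrivateAttr` descriptor rather than as its raw value, so class-level reads must be unwrapped via `.default`. A small sketch of the behavior the loop accounts for (the `SomeTool` model is illustrative):

```python
from pydantic import BaseModel
from pydantic.fields import ModelPrivateAttr


class SomeTool(BaseModel):
    _allow_llm_use: bool = False  # stored by Pydantic as a private attribute


attr = SomeTool._allow_llm_use  # class-level access returns a descriptor object
if isinstance(attr, ModelPrivateAttr):
    attr = attr.default  # unwrap to the underlying default value
print(attr)  # expected: False
```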
--- /dev/null
+++ b/langroid/agent/tools/task_tool.py-e
@@ -0,0 +1,249 @@
+"""
+TaskTool: A tool that allows agents to delegate a task to a sub-agent with
+specific tools enabled.
+"""
+
+import uuid
+from typing import List, Optional
+
+import langroid.language_models as lm
+from langroid import ChatDocument
+from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
+from langroid.agent.task import Task
+from langroid.agent.tool_message import ToolMessage
+from langroid.agent.tools.orchestration import DoneTool
+from pydantic import Field
+
+
+class TaskTool(ToolMessage):
+    """
+    Tool that spawns a sub-agent with specified tools to handle a task.
+
+    The sub-agent can be given a custom name for identification in logs.
+    If no name is provided, a random unique name starting with 'agent'
+    will be generated.
+    """
+
+    # TODO: setting up termination conditions of sub-task needs to be improved
+    request: str = "task_tool"
+    purpose: str = """
+    <HowToUse>
+    Use this tool to delegate a task to a sub-agent with specific tools enabled.
+    The sub-agent will be created with the specified tools and will run the task
+    non-interactively.
+    """
+
+    # Parameters for the agent tool
+
+    system_message: Optional[str] = Field(
+        ...,
+        description="""
+        Optional system message to configure the sub-agent's general behavior and
+        to specify the task and its context.
+        A good system message will have these components:
+        - Inform the sub-agent of its role, e.g. "You are a financial analyst."
+        - Clear spec of the task, with sufficient context for the sub-agent to
+          understand what it needs to do, since the sub-agent does
+          NOT have access to your conversation history!
+        - Any additional general context needed for the task, such as a
+          (part of a) document, or data items, etc.
+        - Specify when to use certain tools, e.g.
+          "You MUST use the 'stock_data' tool to extract stock information.
+        """,
+    )
+
+    prompt: str = Field(
+        ...,
+        description="""
+        The prompt to run the sub-agent with. This differs from the agent's
+        system message: Whereas the system message configures the sub-agent's
+        GENERAL role and goals, the `prompt` is the SPECIFIC input that the
+        sub-agent will process. In LLM terms, the system message is sent to the
+        LLM as the first message, with role = "system" or "developer", and
+        the prompt is sent as a message with role = "user".
+        EXAMPLE: system_message = "You are a financial analyst, when the
+                    user asks about the share-price of a company,
+                    you must use your tools to do the research, and
+                    return the final answer to the user."
+
+                 prompt = "What is the share-price of Apple Inc.?"
+        """,
+    )
+
+    tools: List[str] = Field(
+        ...,
+        description="""
+        A list of tool names to enable for the sub-agent.
+        This must be a list of strings referring to the names of tools
+        that are known to you.
+        If you want to enable all tools, or you do not have any preference
+        on what tools are enabled for the sub-agent, you can set
+        this field to a singleton list ['ALL']
+        To disable all tools, set it to a singleton list ['NONE']
+        """,
+    )
+    # TODO: ensure valid model name
+    model: str = Field(
+        default=None,
+        description="""
+        Optional name of the LLM model to use for the sub-agent, e.g. 'gpt-4.1'
+        If omitted, the sub-agent will use the same model as yours.
+        """,
+    )
+    max_iterations: Optional[int] = Field(
+        default=None,
+        description="Optional max iterations for the sub-agent to run the task",
+    )
+    agent_name: Optional[str] = Field(
+        default=None,
+        description="""
+        Optional name for the sub-agent. This will be used as the agent's name
+        in logs and for identification purposes. If not provided, a random unique
+        name starting with 'agent' will be generated.
+        """,
+    )
+
+    def _set_up_task(self, agent: ChatAgent) -> Task:
+        """
+        Helper method to set up a task for the sub-agent.
+
+        Args:
+            agent: The parent ChatAgent that is handling this tool
+        """
+        # Generate a random name if not provided
+        agent_name = self.agent_name or f"agent-{str(uuid.uuid4())[:8]}"
+
+        # Create chat agent config with system message if provided
+        # TODO: Maybe we just copy the parent agent's config and override chat_model?
+        # -- but what if parent agent has a MockLMConfig?
+        llm_config = lm.OpenAIGPTConfig(
+            chat_model=self.model or lm.OpenAIChatModel.GPT4_1_MINI,
+        )
+        config = ChatAgentConfig(
+            name=agent_name,
+            llm=llm_config,
+            handle_llm_no_tool=f"""
+            You forgot to use one of your TOOLs! Remember that you must either:
+            - use a tool, or a sequence of tools, to complete your task, OR
+            - if you are done with your task, use the `{DoneTool.name()}` tool
+            to return the result.
+
+            As a reminder, this was your task:
+            {self.prompt}
+            """,
+            system_message=f"""
+            {self.system_message}
+
+            When you are finished with your task, you MUST
+            use the TOOL `{DoneTool.name()}` to end the task
+            and return the result.
+            """,
+        )
+
+        # Create the sub-agent
+        sub_agent = ChatAgent(config)
+
+        # Enable the specified tools for the sub-agent
+        # Convert tool names to actual tool classes using parent agent's tools_map
+        if self.tools == ["ALL"]:
+            # Enable all tools from the parent agent:
+            # This is the list of all tools KNOWN (whether usable or handle-able or not)
+            tool_classes = [
+                agent.llm_tools_map[t]
+                for t in agent.llm_tools_known
+                if t in agent.llm_tools_map
+                and t != self.request
+                and agent.llm_tools_map[t]._allow_llm_use
+                # Exclude the TaskTool itself!
+            ]
+        elif self.tools == ["NONE"]:
+            # No tools enabled
+            tool_classes = []
+        else:
+            # Enable only specified tools
+            tool_classes = [
+                agent.llm_tools_map[tool_name]
+                for tool_name in self.tools
+                if tool_name in agent.llm_tools_map
+                and agent.llm_tools_map[tool_name]._allow_llm_use
+            ]
+
+        # always enable the DoneTool to signal task completion
+        sub_agent.enable_message(tool_classes + [DoneTool], use=True, handle=True)
+
+        # Create a non-interactive task
+        task = Task(sub_agent, interactive=False)
+
+        return task
+
+    def handle(
+        self, agent: ChatAgent, chat_doc: Optional[ChatDocument] = None
+    ) -> Optional[ChatDocument]:
+        """
+
+        Handle the TaskTool by creating a sub-agent with specified tools
+        and running the task non-interactively.
+
+        Args:
+            agent: The parent ChatAgent that is handling this tool
+            chat_doc: The ChatDocument containing this tool message
+        """
+
+        task = self._set_up_task(agent)
+
+        # Create a ChatDocument for the prompt with parent pointer
+        prompt_doc = None
+        if chat_doc is not None:
+            from langroid.agent.chat_document import ChatDocMetaData
+
+            prompt_doc = ChatDocument(
+                content=self.prompt,
+                metadata=ChatDocMetaData(
+                    parent_id=chat_doc.id(),
+                    agent_id=agent.id,
+                    sender=chat_doc.metadata.sender,
+                ),
+            )
+            # Set bidirectional parent-child relationship
+            chat_doc.metadata.child_id = prompt_doc.id()
+
+        # Run the task with the ChatDocument or string prompt
+        result = task.run(prompt_doc or self.prompt, turns=self.max_iterations or 10)
+        return result
+
+    async def handle_async(
+        self, agent: ChatAgent, chat_doc: Optional[ChatDocument] = None
+    ) -> Optional[ChatDocument]:
+        """
+        Async method to handle the TaskTool by creating a sub-agent with specified tools
+        and running the task non-interactively.
+
+        Args:
+            agent: The parent ChatAgent that is handling this tool
+            chat_doc: The ChatDocument containing this tool message
+        """
+        task = self._set_up_task(agent)
+
+        # Create a ChatDocument for the prompt with parent pointer
+        prompt_doc = None
+        if chat_doc is not None:
+            from langroid.agent.chat_document import ChatDocMetaData
+
+            prompt_doc = ChatDocument(
+                content=self.prompt,
+                metadata=ChatDocMetaData(
+                    parent_id=chat_doc.id(),
+                    agent_id=agent.id,
+                    sender=chat_doc.metadata.sender,
+                ),
+            )
+            # Set bidirectional parent-child relationship
+            chat_doc.metadata.child_id = prompt_doc.id()
+
+        # Run the task with the ChatDocument or string prompt
+        # TODO eventually allow the various task setup configs,
+        # including termination conditions
+        result = await task.run_async(
+            prompt_doc or self.prompt, turns=self.max_iterations or 10
+        )
+        return result
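For context, a minimal sketch of how a parent agent might expose `TaskTool` so its LLM can delegate work to a sub-agent; the agent name, model name, and prompt below are illustrative, not taken from this diff.

```python
import langroid as lr
import langroid.language_models as lm
from langroid.agent.tools.task_tool import TaskTool

parent = lr.ChatAgent(
    lr.ChatAgentConfig(
        name="Planner",
        llm=lm.OpenAIGPTConfig(chat_model="gpt-4.1"),  # illustrative model choice
    )
)
parent.enable_message(TaskTool)  # the LLM may now emit `task_tool` calls

task = lr.Task(parent, interactive=False)
# If the LLM decides to delegate, it generates a `task_tool` message with
# `system_message`, `prompt`, and `tools` fields; TaskTool.handle() then
# spawns a sub-agent and runs its task non-interactively.
result = task.run("Find the current share price of Apple and summarize it.")
```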