langroid 0.55.1__py3-none-any.whl → 0.56.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langroid/agent/chat_document.py +1 -1
- langroid/agent/done_sequence_parser.py +0 -2
- langroid/agent/tools/task_tool.py +174 -0
- langroid/mcp/__init__.py +1 -0
- langroid/mcp/server/__init__.py +1 -0
- {langroid-0.55.1.dist-info → langroid-0.56.1.dist-info}/METADATA +1 -1
- {langroid-0.55.1.dist-info → langroid-0.56.1.dist-info}/RECORD +9 -6
- {langroid-0.55.1.dist-info → langroid-0.56.1.dist-info}/WHEEL +0 -0
- {langroid-0.55.1.dist-info → langroid-0.56.1.dist-info}/licenses/LICENSE +0 -0
langroid/agent/chat_document.py
CHANGED
@@ -268,7 +268,7 @@ class ChatDocument(Document):
     @staticmethod
     def _clean_fn_call(fc: LLMFunctionCall | None) -> None:
         # Sometimes an OpenAI LLM (esp gpt-4o) may generate a function-call
-        # with
+        # with oddities:
         # (a) the `name` is set, as well as `arguments.request` is set,
         #     and in langroid we use the `request` value as the `name`.
         #     In this case we override the `name` with the `request` value.
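The comment above describes the normalization `_clean_fn_call` performs when an LLM emits a function-call with both `name` and `arguments.request` set. Purely as an illustration of that oddity (this is a minimal sketch with a hypothetical stand-in object, not the langroid implementation):

# Hedged sketch: FakeFnCall is a hypothetical stand-in for LLMFunctionCall.
from dataclasses import dataclass
from typing import Any, Dict, Optional

@dataclass
class FakeFnCall:
    name: Optional[str] = None
    arguments: Optional[Dict[str, Any]] = None

def clean_fn_call(fc: Optional[FakeFnCall]) -> None:
    # If the LLM set both `name` and `arguments["request"]`, treat the
    # `request` value as the real tool name and override `name` with it.
    if fc is None or not fc.arguments:
        return
    request = fc.arguments.get("request")
    if request:
        fc.name = request

fc = FakeFnCall(name="wrong_name", arguments={"request": "stock_data", "ticker": "AAPL"})
clean_fn_call(fc)
assert fc.name == "stock_data"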
langroid/agent/tools/task_tool.py
ADDED
@@ -0,0 +1,174 @@
+"""
+TaskTool: A tool that allows agents to delegate a task to a sub-agent with
+specific tools enabled.
+"""
+
+from typing import List, Optional
+
+import langroid.language_models as lm
+from langroid import ChatDocument
+from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
+from langroid.agent.task import Task
+from langroid.agent.tool_message import ToolMessage
+from langroid.agent.tools.orchestration import DoneTool
+from langroid.pydantic_v1 import Field
+
+
+class TaskTool(ToolMessage):
+    """
+    Tool that spawns a sub-agent with specified tools to handle a task.
+    """
+
+    request: str = "task_tool"
+    purpose: str = """
+        <HowToUse>
+        Use this tool to delegate a task to a sub-agent with specific tools enabled.
+        The sub-agent will be created with the specified tools and will run the task
+        non-interactively. Here is how to set the fields:
+
+        - `system_message`:
+
+
+        """
+
+    # Parameters for the agent tool
+
+    system_message: Optional[str] = Field(
+        ...,
+        description="""
+            Optional system message to configure the sub-agent's general behavior.
+            A good system message will have these components:
+            - Inform the sub-agent of its role, e.g. "You are a financial analyst."
+            - Clear spec of the task
+            - Any additional general context needed for the task, such as a
+              (part of a) document, or data items, etc.
+            - Specify when to use certain tools, e.g.
+              "You MUST use the 'stock_data' tool to extract stock information.
+            """,
+    )
+
+    prompt: str = Field(
+        ...,
+        description="""
+            The prompt to run the sub-agent with. This differs from the agent's
+            system message: Whereas the system message configures the sub-agent's
+            GENERAL role and goals, the `prompt` is the SPECIFIC input that the
+            sub-agent will process. In LLM terms, the system message is sent to the
+            LLM as the first message, with role = "system" or "developer", and
+            the prompt is sent as a message with role = "user".
+            EXAMPLE: system_message = "You are a financial analyst, when the
+                        user asks about the share-price of a company,
+                        you must use your tools to do the research, and
+                        return the final answer to the user."
+
+                     prompt = "What is the share-price of Apple Inc.?"
+            """,
+    )
+
+    tools: List[str] = Field(
+        ...,
+        description="""
+            A list of tool names to enable for the sub-agent.
+            This must be a list of strings referring to the names of tools
+            that are known to you.
+            If you want to enable all tools, you can set this field
+            to a singleton list containing 'ALL'
+            To disable all tools, set it to a singleton list containing 'NONE'
+            """,
+    )
+    model: str = Field(
+        default=None,
+        description="""
+            Optional name of the LLM model to use for the sub-agent, e.g. 'gpt-4.1'
+            If omitted, the sub-agent will use the same model as yours.
+            """,
+    )
+    max_iterations: Optional[int] = Field(
+        default=None,
+        description="Optional max iterations for the sub-agent to run the task",
+    )
+
+    def _set_up_task(self, agent: ChatAgent) -> Task:
+        """
+        Helper method to set up a task for the sub-agent.
+
+        Args:
+            agent: The parent ChatAgent that is handling this tool
+        """
+        # Create chat agent config with system message if provided
+        # TODO: Maybe we just copy the parent agent's config and override chat_model?
+        # -- but what if parent agent has a MockLMConfig?
+        llm_config = lm.OpenAIGPTConfig(
+            chat_model=self.model or "gpt-4.1-mini",  # Default model if not specified
+        )
+        config = ChatAgentConfig(
+            llm=llm_config,
+            system_message=f"""
+            {self.system_message}
+
+            When you are finished with your task, you MUST
+            use the TOOL `{DoneTool.name()}` to end the task
+            and return the result.
+            """,
+        )
+
+        # Create the sub-agent
+        sub_agent = ChatAgent(config)
+
+        # Enable the specified tools for the sub-agent
+        # Convert tool names to actual tool classes using parent agent's tools_map
+        if self.tools == ["ALL"]:
+            # Enable all tools from the parent agent:
+            # This is the list of all tools KNOWN (whether usable or handle-able or not)
+            tool_classes = [
+                agent.llm_tools_map[t]
+                for t in agent.llm_tools_known
+                if t in agent.llm_tools_map and t != self.request
+                # Exclude the TaskTool itself!
+            ]
+        elif self.tools == ["NONE"]:
+            # No tools enabled
+            tool_classes = []
+        else:
+            # Enable only specified tools
+            tool_classes = [
+                agent.llm_tools_map[tool_name]
+                for tool_name in self.tools
+                if tool_name in agent.llm_tools_map
+            ]
+
+        # always enable the DoneTool to signal task completion
+        sub_agent.enable_message(tool_classes + [DoneTool], use=True, handle=True)
+
+        # Create a non-interactive task
+        task = Task(sub_agent, interactive=False)
+
+        return task
+
+    def handle(self, agent: ChatAgent) -> Optional[ChatDocument]:
+        """
+
+        Handle the TaskTool by creating a sub-agent with specified tools
+        and running the task non-interactively.
+
+        Args:
+            agent: The parent ChatAgent that is handling this tool
+        """
+
+        task = self._set_up_task(agent)
+        # Run the task on the prompt, and return the result
+        result = task.run(self.prompt, turns=self.max_iterations or 10)
+        return result
+
+    async def handle_async(self, agent: ChatAgent) -> Optional[ChatDocument]:
+        """
+        Async method to handle the TaskTool by creating a sub-agent with specified tools
+        and running the task non-interactively.
+
+        Args:
+            agent: The parent ChatAgent that is handling this tool
+        """
+        task = self._set_up_task(agent)
+        # Run the task on the prompt, and return the result
+        result = await task.run_async(self.prompt, turns=self.max_iterations or 10)
+        return result
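The field descriptions above explain how a parent agent's LLM is meant to fill in `system_message`, `prompt`, and `tools` when delegating. For orientation, here is a hedged usage sketch that is not part of the package: it assumes a hypothetical StockPriceTool, an illustrative model name, and that a stateless ToolMessage may define `handle(self)` with no agent argument; all other calls (ChatAgentConfig, enable_message, Task.run with `turns`) appear in the diff above.

# Hedged usage sketch (not shipped with langroid): a parent agent that knows
# TaskTool plus a hypothetical StockPriceTool, so its LLM can delegate research
# to a sub-agent by emitting a `task_tool` message.
import langroid.language_models as lm
from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
from langroid.agent.task import Task
from langroid.agent.tool_message import ToolMessage
from langroid.agent.tools.task_tool import TaskTool

class StockPriceTool(ToolMessage):
    # Hypothetical example tool; any tool known to the parent agent can be
    # named in TaskTool's `tools` list (or 'ALL' / 'NONE').
    request: str = "stock_price"
    purpose: str = "Look up the latest share price of <company>."
    company: str

    def handle(self) -> str:
        # Canned answer for illustration; a real tool would query a data API.
        return f"Share price of {self.company}: 123.45 USD"

if __name__ == "__main__":
    parent = ChatAgent(
        ChatAgentConfig(
            llm=lm.OpenAIGPTConfig(chat_model="gpt-4.1-mini"),  # assumed model
            system_message="""
            You are an assistant. For research questions, use the `task_tool`
            to delegate the work to a sub-agent, enabling the `stock_price` tool.
            """,
        )
    )
    # Parent can both request and handle these tools.
    parent.enable_message([TaskTool, StockPriceTool], use=True, handle=True)

    # Non-interactive task; the parent's LLM is expected to emit a task_tool
    # call, whose handler spawns the sub-agent and returns its result.
    result = Task(parent, interactive=False).run(
        "What is the share price of Apple Inc.?", turns=6
    )
    print(result.content if result is not None else "no result")

Note the design implied by the handler: the TaskTool itself builds the sub-agent and its Task, so the parent controls delegation simply by which tools it enables and what it names in the `tools` field.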
langroid/mcp/__init__.py
ADDED
@@ -0,0 +1 @@
+"""MCP (Model Context Protocol) integration for Langroid."""
langroid/mcp/server/__init__.py
ADDED
@@ -0,0 +1 @@
+"""MCP server implementation for Langroid."""
{langroid-0.55.1.dist-info → langroid-0.56.1.dist-info}/RECORD
CHANGED
@@ -6,8 +6,8 @@ langroid/agent/__init__.py,sha256=ll0Cubd2DZ-fsCMl7e10hf9ZjFGKzphfBco396IKITY,78
 langroid/agent/base.py,sha256=a45iqoWet2W60h4wUIOyHDYlotgS0asqIjfbONT4fZQ,85706
 langroid/agent/batch.py,sha256=wpE9RqCNDVDhAXkCB7wEqfCIEAi6qKcrhaZ-Zr9T4C0,21375
 langroid/agent/chat_agent.py,sha256=2HIYzYxkrGkRIS97ioKfIqjaW3RbX89M39LjzBobBEY,88381
-langroid/agent/chat_document.py,sha256=
-langroid/agent/done_sequence_parser.py,sha256=
+langroid/agent/chat_document.py,sha256=0e6zYkqIorMIVbCsxOul9ziwAPPOWDsBsRV9E8ux-WI,18055
+langroid/agent/done_sequence_parser.py,sha256=oUPzQCkkAo-5qos3ndSV47Lre7O_LoGWwTybjE9sCwc,4381
 langroid/agent/openai_assistant.py,sha256=JkAcs02bIrgPNVvUWVR06VCthc5-ulla2QMBzux_q6o,34340
 langroid/agent/task.py,sha256=PbHVcyFgyxiMZXHOB5xr5t8qaeACYfrjNF_lZQc8d8Y,101308
 langroid/agent/tool_message.py,sha256=BhjP-_TfQ2tgxuY4Yo_JHLOwwt0mJ4BwjPnREvEY4vk,14744
@@ -54,6 +54,7 @@ langroid/agent/tools/recipient_tool.py,sha256=dr0yTxgNEIoxUYxH6TtaExC4G_8WdJ0xGo
 langroid/agent/tools/retrieval_tool.py,sha256=zcAV20PP_6VzSd-UE-IJcabaBseFL_QNz59Bnig8-lE,946
 langroid/agent/tools/rewind_tool.py,sha256=XAXL3BpNhCmBGYq_qi_sZfHJuIw7NY2jp4wnojJ7WRs,5606
 langroid/agent/tools/segment_extract_tool.py,sha256=__srZ_VGYLVOdPrITUM8S0HpmX4q7r5FHWMDdHdEv8w,1440
+langroid/agent/tools/task_tool.py,sha256=O-3-tALocGuwfxXikBzRWFenFKZWIlOoRVvnZ5mf7io,6514
 langroid/agent/tools/tavily_search_tool.py,sha256=soI-j0HdgVQLf09wRQScaEK4b5RpAX9C4cwOivRFWWI,1903
 langroid/agent/tools/mcp/__init__.py,sha256=DJNM0VeFnFS3pJKCyFGggT8JVjVu0rBzrGzasT1HaSM,387
 langroid/agent/tools/mcp/decorators.py,sha256=h7dterhsmvWJ8q4mp_OopmuG2DF71ty8cZwOyzdDZuk,1127
@@ -83,6 +84,8 @@ langroid/language_models/prompt_formatter/__init__.py,sha256=2-5cdE24XoFDhifOLl8
 langroid/language_models/prompt_formatter/base.py,sha256=eDS1sgRNZVnoajwV_ZIha6cba5Dt8xjgzdRbPITwx3Q,1221
 langroid/language_models/prompt_formatter/hf_formatter.py,sha256=PVJppmjRvD-2DF-XNC6mE05vTZ9wbu37SmXwZBQhad0,5055
 langroid/language_models/prompt_formatter/llama2_formatter.py,sha256=YdcO88qyBeuMENVIVvVqSYuEpvYSTndUe_jd6hVTko4,2899
+langroid/mcp/__init__.py,sha256=HGtfHjcLVruCFJavOBAmVpk5u2h5UwzgqxOiOYIjXj0,61
+langroid/mcp/server/__init__.py,sha256=HIju6Y3npEAC6lx8svT3q_vrMN1vT8fGUTzChBMIM4U,46
 langroid/parsing/__init__.py,sha256=2oUWJJAxIavq9Wtw5RGlkXLq3GF3zgXeVLLW4j7yeb8,1138
 langroid/parsing/agent_chats.py,sha256=sbZRV9ujdM5QXvvuHVjIi2ysYSYlap-uqfMMUKulrW0,1068
 langroid/parsing/code_parser.py,sha256=5ze0MBytrGGkU69pA_bJDjRm6QZz_QYfPcIwkagUa7U,3796
@@ -134,7 +137,7 @@ langroid/vector_store/pineconedb.py,sha256=otxXZNaBKb9f_H75HTaU3lMHiaR2NUp5MqwLZ
 langroid/vector_store/postgres.py,sha256=wHPtIi2qM4fhO4pMQr95pz1ZCe7dTb2hxl4VYspGZoA,16104
 langroid/vector_store/qdrantdb.py,sha256=O6dSBoDZ0jzfeVBd7LLvsXu083xs2fxXtPa9gGX3JX4,18443
 langroid/vector_store/weaviatedb.py,sha256=Yn8pg139gOy3zkaPfoTbMXEEBCiLiYa1MU5d_3UA1K4,11847
-langroid-0.
-langroid-0.
-langroid-0.
-langroid-0.
+langroid-0.56.1.dist-info/METADATA,sha256=Yz9pHxVllXS-Sihx0AqWVjuXom9AjDbc_2DsU0wCgks,65366
+langroid-0.56.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+langroid-0.56.1.dist-info/licenses/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.56.1.dist-info/RECORD,,
{langroid-0.55.1.dist-info → langroid-0.56.1.dist-info}/WHEEL
File without changes
{langroid-0.55.1.dist-info → langroid-0.56.1.dist-info}/licenses/LICENSE
File without changes