deepagents 0.0.12rc1-py3-none-any.whl → 0.0.12rc2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
deepagents/state.py DELETED
@@ -1,33 +0,0 @@
-from langchain.agents.middleware import AgentState
-from typing import NotRequired, Annotated
-from typing import Literal
-from typing_extensions import TypedDict
-
-
-class Todo(TypedDict):
-    """Todo to track."""
-
-    content: str
-    status: Literal["pending", "in_progress", "completed"]
-
-
-def file_reducer(l, r):
-    if l is None:
-        return r
-    elif r is None:
-        return l
-    else:
-        return {**l, **r}
-
-
-class DeepAgentState(AgentState):
-    todos: NotRequired[list[Todo]]
-    files: Annotated[NotRequired[dict[str, str]], file_reducer]
-
-
-class PlanningState(AgentState):
-    todos: NotRequired[list[Todo]]
-
-
-class FilesystemState(AgentState):
-    files: Annotated[NotRequired[dict[str, str]], file_reducer]
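
For orientation while reading the removal: the `files` channel in the state classes above used `file_reducer` as a right-biased dict merge, so an update's entries overwrite existing keys and a `None` on either side falls back to the other. A minimal standalone sketch of that behaviour (illustration only, not part of either package version):

# Right-biased merge: keys from the update (r) win over the existing state (l).
def file_reducer(l, r):
    if l is None:
        return r
    if r is None:
        return l
    return {**l, **r}

assert file_reducer({"notes.txt": "v1", "plan.md": "x"}, {"notes.txt": "v2"}) == {"notes.txt": "v2", "plan.md": "x"}
assert file_reducer(None, {"a.txt": "hi"}) == {"a.txt": "hi"}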
deepagents/tools.py DELETED
@@ -1,313 +0,0 @@
-from re import L
-from langchain_core.tools import tool, InjectedToolCallId
-from langchain_core.messages import ToolMessage
-from langgraph.types import Command
-from langgraph.runtime import get_runtime, Runtime
-from langchain.tools.tool_node import InjectedState
-from typing import Annotated, Any
-from deepagents.state import Todo, FilesystemState
-from deepagents.prompts import (
-    WRITE_TODOS_TOOL_DESCRIPTION,
-    LIST_FILES_TOOL_DESCRIPTION,
-    READ_FILE_TOOL_DESCRIPTION,
-    WRITE_FILE_TOOL_DESCRIPTION,
-    EDIT_FILE_TOOL_DESCRIPTION,
-    LIST_FILES_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT,
-    READ_FILE_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT,
-    WRITE_FILE_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT,
-    EDIT_FILE_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT,
-)
-
-def has_memories_prefix(file_path: str) -> bool:
-    return file_path.startswith("memories/")
-
-def append_memories_prefix(file_path: str) -> str:
-    return f"memories/{file_path}"
-
-def strip_memories_prefix(file_path: str) -> str:
-    return file_path.replace("memories/", "")
-
-def get_namespace(runtime: Runtime[Any]) -> tuple[str, str]:
-    namespace = ("filesystem")
-    if runtime.context is None:
-        return namespace
-    assistant_id = runtime.context.get("assistant_id")
-    if assistant_id is None:
-        return namespace
-    return (assistant_id, "filesystem")
-
-@tool(description=WRITE_TODOS_TOOL_DESCRIPTION)
-def write_todos(
-    todos: list[Todo], tool_call_id: Annotated[str, InjectedToolCallId]
-) -> Command:
-    return Command(
-        update={
-            "todos": todos,
-            "messages": [
-                ToolMessage(f"Updated todo list to {todos}", tool_call_id=tool_call_id)
-            ],
-        }
-    )
-
-def ls_tool_generator(has_longterm_memory: bool, custom_description: str = None) -> tool:
-    tool_description = LIST_FILES_TOOL_DESCRIPTION
-    if custom_description:
-        tool_description = custom_description
-    elif has_longterm_memory:
-        tool_description += LIST_FILES_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT
-
-    if has_longterm_memory:
-        # Tool with Long-term memory
-        @tool(description=tool_description)
-        def ls(state: Annotated[FilesystemState, InjectedState]) -> list[str]:
-            files = []
-            files.extend(list(state.get("files", {}).keys()))
-
-            runtime = get_runtime()
-            store = runtime.store
-            if store is None:
-                raise ValueError("Longterm memory is enabled, but no store is available")
-            namespace = get_namespace(runtime)
-            file_data_list = store.search(namespace)
-            memories_files = [append_memories_prefix(f.key) for f in file_data_list]
-            files.extend(memories_files)
-            return files
-    else:
-        # Tool without long-term memory
-        @tool(description=tool_description)
-        def ls(state: Annotated[FilesystemState, InjectedState]) -> list[str]:
-            files = list(state.get("files", {}).keys())
-            return files
-    return ls
-
-
-def read_file_tool_generator(has_longterm_memory: bool, custom_description: str = None) -> tool:
-    tool_description = READ_FILE_TOOL_DESCRIPTION
-    if custom_description:
-        tool_description = custom_description
-    elif has_longterm_memory:
-        tool_description += READ_FILE_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT
-
-    if has_longterm_memory:
-        # Tool with Long-term memory
-        @tool(description=tool_description)
-        def read_file(
-            file_path: str,
-            state: Annotated[FilesystemState, InjectedState],
-            offset: int = 0,
-            limit: int = 2000,
-        ) -> str:
-            if has_memories_prefix(file_path):
-                stripped_file_path = strip_memories_prefix(file_path)
-                runtime = get_runtime()
-                store = runtime.store
-                if store is None:
-                    raise ValueError("Longterm memory is enabled, but no store is available")
-                namespace = get_namespace(runtime)
-                item = store.get(namespace, stripped_file_path)
-                if item is None:
-                    return f"Error: File '{file_path}' not found"
-                content = item.value
-            else:
-                mock_filesystem = state.get("files", {})
-                if file_path not in mock_filesystem:
-                    return f"Error: File '{file_path}' not found"
-                content = mock_filesystem[file_path]
-            if not content or content.strip() == "":
-                return "System reminder: File exists but has empty contents"
-            lines = content.splitlines()
-            start_idx = offset
-            end_idx = min(start_idx + limit, len(lines))
-            if start_idx >= len(lines):
-                return f"Error: Line offset {offset} exceeds file length ({len(lines)} lines)"
-            result_lines = []
-            for i in range(start_idx, end_idx):
-                line_content = lines[i]
-                if len(line_content) > 2000:
-                    line_content = line_content[:2000]
-                line_number = i + 1
-                result_lines.append(f"{line_number:6d}\t{line_content}")
-
-            return "\n".join(result_lines)
-    else:
-        # Tool without long-term memory
-        @tool(description=tool_description)
-        def read_file(
-            file_path: str,
-            state: Annotated[FilesystemState, InjectedState],
-            offset: int = 0,
-            limit: int = 2000,
-        ) -> str:
-            mock_filesystem = state.get("files", {})
-            if file_path not in mock_filesystem:
-                return f"Error: File '{file_path}' not found"
-            content = mock_filesystem[file_path]
-            if not content or content.strip() == "":
-                return "System reminder: File exists but has empty contents"
-            lines = content.splitlines()
-            start_idx = offset
-            end_idx = min(start_idx + limit, len(lines))
-            if start_idx >= len(lines):
-                return f"Error: Line offset {offset} exceeds file length ({len(lines)} lines)"
-            result_lines = []
-            for i in range(start_idx, end_idx):
-                line_content = lines[i]
-                if len(line_content) > 2000:
-                    line_content = line_content[:2000]
-                line_number = i + 1
-                result_lines.append(f"{line_number:6d}\t{line_content}")
-
-            return "\n".join(result_lines)
-    return read_file
-
-
-def write_file_tool_generator(has_longterm_memory: bool, custom_description: str = None) -> tool:
-    tool_description = WRITE_FILE_TOOL_DESCRIPTION
-    if custom_description:
-        tool_description = custom_description
-    elif has_longterm_memory:
-        tool_description += WRITE_FILE_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT
-
-    if has_longterm_memory:
-        # Tool with Long-term memory
-        @tool(description=tool_description)
-        def write_file(file_path: str, content: str, state: Annotated[FilesystemState, InjectedState], tool_call_id: Annotated[str, InjectedToolCallId]) -> Command:
-            if has_memories_prefix(file_path):
-                stripped_file_path = strip_memories_prefix(file_path)
-                runtime = get_runtime()
-                store = runtime.store
-                if store is None:
-                    raise ValueError("Longterm memory is enabled, but no store is available")
-                namespace = get_namespace(runtime)
-                store.put(namespace, stripped_file_path, content)
-                return Command(
-                    update={
-                        "messages": [ToolMessage(f"Updated longterm memories file {file_path}", tool_call_id=tool_call_id)]
-                    }
-                )
-            else:
-                mock_filesystem = state.get("files", {})
-                mock_filesystem[file_path] = content
-                return Command(
-                    update={
-                        "files": mock_filesystem,
-                        "messages": [ToolMessage(f"Updated file {file_path}", tool_call_id=tool_call_id)]
-                    }
-                )
-    else:
-        # Tool without long-term memory
-        @tool(description=tool_description)
-        def write_file(file_path: str, content: str, state: Annotated[FilesystemState, InjectedState], tool_call_id: Annotated[str, InjectedToolCallId]) -> Command:
-            mock_filesystem = state.get("files", {})
-            mock_filesystem[file_path] = content
-            return Command(
-                update={
-                    "files": mock_filesystem,
-                    "messages": [ToolMessage(f"Updated file {file_path}", tool_call_id=tool_call_id)]
-                }
-            )
-    return write_file
-
-
-def edit_file_tool_generator(has_longterm_memory: bool, custom_description: str = None) -> tool:
-    tool_description = EDIT_FILE_TOOL_DESCRIPTION
-    if custom_description:
-        tool_description = custom_description
-    elif has_longterm_memory:
-        tool_description += EDIT_FILE_TOOL_DESCRIPTION_LONGTERM_SUPPLEMENT
-
-    if has_longterm_memory:
-        # Tool with Long-term memory
-        @tool(description=tool_description)
-        def edit_file(file_path: str, old_string: str, new_string: str, state: Annotated[FilesystemState, InjectedState], tool_call_id: Annotated[str, InjectedToolCallId], replace_all: bool = False) -> Command:
-            if has_memories_prefix(file_path):
-                stripped_file_path = strip_memories_prefix(file_path)
-                runtime = get_runtime()
-                store = runtime.store
-                if store is None:
-                    raise ValueError("Longterm memory is enabled, but no store is available")
-                namespace = get_namespace(runtime)
-                item = store.get(namespace, stripped_file_path)
-                if item is None:
-                    return f"Error: File '{file_path}' not found"
-                content = item.value
-                if old_string not in content:
-                    return f"Error: String not found in file: '{old_string}'"
-                if not replace_all:
-                    occurrences = content.count(old_string)
-                    if occurrences > 1:
-                        return f"Error: String '{old_string}' appears {occurrences} times in file. Use replace_all=True to replace all instances, or provide a more specific string with surrounding context."
-                    elif occurrences == 0:
-                        return f"Error: String not found in file: '{old_string}'"
-                new_content = content.replace(old_string, new_string)
-                replacement_count = content.count(old_string)
-                store.put(namespace, stripped_file_path, new_content)
-                return Command(
-                    update={
-                        "messages": [ToolMessage(f"Successfully replaced {replacement_count} instance(s) of the string in '{file_path}'", tool_call_id=tool_call_id)]
-                    }
-                )
-            else:
-                mock_filesystem = state.get("files", {})
-                if file_path not in mock_filesystem:
-                    return f"Error: File '{file_path}' not found"
-                content = mock_filesystem[file_path]
-                if old_string not in content:
-                    return f"Error: String not found in file: '{old_string}'"
-                if not replace_all:
-                    occurrences = content.count(old_string)
-                    if occurrences > 1:
-                        return f"Error: String '{old_string}' appears {occurrences} times in file. Use replace_all=True to replace all instances, or provide a more specific string with surrounding context."
-                    elif occurrences == 0:
-                        return f"Error: String not found in file: '{old_string}'"
-                new_content = content.replace(old_string, new_string)
-                replacement_count = content.count(old_string)
-                result_msg = f"Successfully replaced {replacement_count} instance(s) of the string in '{file_path}'"
-                mock_filesystem[file_path] = new_content
-                return Command(
-                    update={
-                        "files": mock_filesystem,
-                        "messages": [ToolMessage(result_msg, tool_call_id=tool_call_id)],
-                    }
-                )
-    else:
-        # Tool without long-term memory
-        @tool(description=tool_description)
-        def edit_file(file_path: str, old_string: str, new_string: str, state: Annotated[FilesystemState, InjectedState], tool_call_id: Annotated[str, InjectedToolCallId], replace_all: bool = False) -> Command:
-            mock_filesystem = state.get("files", {})
-            if file_path not in mock_filesystem:
-                return f"Error: File '{file_path}' not found"
-            content = mock_filesystem[file_path]
-            if old_string not in content:
-                return f"Error: String not found in file: '{old_string}'"
-            if not replace_all:
-                occurrences = content.count(old_string)
-                if occurrences > 1:
-                    return f"Error: String '{old_string}' appears {occurrences} times in file. Use replace_all=True to replace all instances, or provide a more specific string with surrounding context."
-                elif occurrences == 0:
-                    return f"Error: String not found in file: '{old_string}'"
-            new_content = content.replace(old_string, new_string)
-            replacement_count = content.count(old_string)
-            result_msg = f"Successfully replaced {replacement_count} instance(s) of the string in '{file_path}'"
-            mock_filesystem[file_path] = new_content
-            return Command(
-                update={
-                    "files": mock_filesystem,
-                    "messages": [ToolMessage(result_msg, tool_call_id=tool_call_id)],
-                }
-            )
-    return edit_file
-
-TOOL_GENERATORS = {
-    "ls": ls_tool_generator,
-    "read_file": read_file_tool_generator,
-    "write_file": write_file_tool_generator,
-    "edit_file": edit_file_tool_generator,
-}
-
-def get_filesystem_tools(has_longterm_memory: bool, custom_tool_descriptions: dict[str, str] = {}) -> list[tool]:
-    tools = []
-    for tool_name, tool_generator in TOOL_GENERATORS.items():
-        tool = tool_generator(has_longterm_memory, custom_tool_descriptions.get(tool_name, None))
-        tools.append(tool)
-    return tools
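
For context on what callers lose with this deletion: `get_filesystem_tools` above built one tool per entry in `TOOL_GENERATORS`, switching each implementation and description on `has_longterm_memory`. A hedged usage sketch against the removed 0.0.12rc1 module (hypothetical caller code, not the replacement API in 0.0.12rc2):

# Hypothetical usage of the deleted deepagents.tools module (0.0.12rc1 layout).
from deepagents.tools import get_filesystem_tools

tools = get_filesystem_tools(
    has_longterm_memory=False,
    # Keys are tool names; anything not listed keeps its default description.
    custom_tool_descriptions={"read_file": "Read a file from the agent's in-state filesystem."},
)
print([t.name for t in tools])  # expected: ['ls', 'read_file', 'write_file', 'edit_file']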
deepagents/types.py DELETED
@@ -1,21 +0,0 @@
-from typing import NotRequired, Union, Any
-from typing_extensions import TypedDict
-from langchain_core.language_models import LanguageModelLike
-from langchain.agents.middleware import AgentMiddleware
-from langchain_core.runnables import Runnable
-from langchain_core.tools import BaseTool
-
-class SubAgent(TypedDict):
-    name: str
-    description: str
-    prompt: str
-    tools: NotRequired[list[BaseTool]]
-    # Optional per-subagent model: can be either a model instance OR dict settings
-    model: NotRequired[Union[LanguageModelLike, dict[str, Any]]]
-    middleware: NotRequired[list[AgentMiddleware]]
-
-
-class CustomSubAgent(TypedDict):
-    name: str
-    description: str
-    graph: Runnable
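
The `SubAgent` and `CustomSubAgent` shapes above are plain TypedDicts, so callers pass them as literal dicts (the deleted tests below do exactly that). A minimal sketch of each shape with placeholder values, assumed purely for illustration:

# Hypothetical dicts matching the removed TypedDict fields.
from langchain_core.runnables import RunnableLambda

weather_subagent = {  # SubAgent: prompt-driven; "tools", "model", "middleware" are optional
    "name": "weather_agent",
    "description": "Use this agent to get the weather",
    "prompt": "You are a weather agent.",
}

custom_subagent = {  # CustomSubAgent: wraps a prebuilt runnable graph
    "name": "soccer_agent",
    "description": "Use this agent to get the latest soccer scores",
    "graph": RunnableLambda(lambda state: state),  # placeholder Runnable
}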
deepagents-0.0.12rc1.dist-info/RECORD DELETED
@@ -1,18 +0,0 @@
-deepagents/__init__.py,sha256=fA_91ByxPb3e8aPfci43zOXrWz8ylh_CFQALo7EUKi8,312
-deepagents/graph.py,sha256=1zK8l-kKwpyoAtNXyB0HfEFPEp5aF7G1dMN0xsm4q18,7163
-deepagents/middleware.py,sha256=Yovjyr3Zls3vNIeY7mawFk-9tAWpqCQbAN1vaegEk3I,8180
-deepagents/model.py,sha256=VyRIkdeXJH8HqLrudTKucHpBTtrwMFTQGRlXBj0kUyo,155
-deepagents/prompts.py,sha256=nnLAiPNY20-GNwhOzxe9Ia2CzcdrJqILSchPpPKNuZU,26228
-deepagents/state.py,sha256=8so3MgL-zRPYP8Ci_OuVg4wHrs5uAXCErKF1AjjCSt8,726
-deepagents/tools.py,sha256=lspab_EnKxVY4_SJP9sFJJBDks9EmHGXNcR4ebX8lHM,14567
-deepagents/types.py,sha256=5KBSUPlWOnv9It3SnJCMHrOtp9Y4_NQGtGCp69JsEjE,694
-deepagents-0.0.12rc1.dist-info/licenses/LICENSE,sha256=c__BaxUCK69leo2yEKynf8lWndu8iwYwge1CbyqAe-E,1071
-tests/test_deepagents.py,sha256=nTILGu2lsM908sq6MRy0PEXV0wuVMyvXS6Bm2NmEAm4,7655
-tests/test_filesystem.py,sha256=jrYkUqCDw2OHD1rpv3Vux1eKjT_d8CqM25g1KuvZBwk,9119
-tests/test_hitl.py,sha256=B16ZFiyaVSOcDLz7mh1RTaQZ93EMTKOPUY-IEslkcfM,2460
-tests/test_middleware.py,sha256=3HYmTx0Jw4XTNJjqLYeyGS_QZzcqkFuKfShtajIDhF4,2146
-tests/utils.py,sha256=B7Mc6VMzEkCmx8lORjdYNXp82tn-W3nwBkrusfJjAx0,2912
-deepagents-0.0.12rc1.dist-info/METADATA,sha256=W6_O9UQhpExMT9swmLRc4Nj55pI1Omegr0sPeCswGG8,17334
-deepagents-0.0.12rc1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-deepagents-0.0.12rc1.dist-info/top_level.txt,sha256=_w9VMQtG4YDNg5A5eAeUre7dF7x7hk9zRpT9zsFaukY,17
-deepagents-0.0.12rc1.dist-info/RECORD,,
tests/test_deepagents.py DELETED
@@ -1,136 +0,0 @@
-from deepagents.graph import create_deep_agent
-from langchain.agents import create_agent
-from tests.utils import assert_all_deepagent_qualities, SAMPLE_MODEL, sample_tool, get_weather, get_soccer_scores, SampleMiddlewareWithTools, SampleMiddlewareWithToolsAndState, WeatherToolMiddleware, ResearchMiddleware, ResearchMiddlewareWithTools, TOY_BASKETBALL_RESEARCH
-
-class TestDeepAgents:
-    def test_base_deep_agent(self):
-        agent = create_deep_agent()
-        assert_all_deepagent_qualities(agent)
-
-    def test_deep_agent_with_tool(self):
-        agent = create_deep_agent(tools=[sample_tool])
-        assert_all_deepagent_qualities(agent)
-        assert "sample_tool" in agent.nodes["tools"].bound._tools_by_name.keys()
-
-    def test_deep_agent_with_middleware_with_tool(self):
-        agent = create_deep_agent(middleware=[SampleMiddlewareWithTools()])
-        assert_all_deepagent_qualities(agent)
-        assert "sample_tool" in agent.nodes["tools"].bound._tools_by_name.keys()
-
-    def test_deep_agent_with_middleware_with_tool_and_state(self):
-        agent = create_deep_agent(middleware=[SampleMiddlewareWithToolsAndState()])
-        assert_all_deepagent_qualities(agent)
-        assert "sample_tool" in agent.nodes["tools"].bound._tools_by_name.keys()
-        assert "sample_input" in agent.stream_channels
-
-    def test_deep_agent_with_subagents(self):
-        subagents = [
-            {
-                "name": "weather_agent",
-                "description": "Use this agent to get the weather",
-                "prompt": "You are a weather agent.",
-                "tools": [get_weather],
-                "model": SAMPLE_MODEL,
-            }
-        ]
-        agent = create_deep_agent(tools=[sample_tool], subagents=subagents)
-        assert_all_deepagent_qualities(agent)
-        result = agent.invoke({"messages": [{"role": "user", "content": "What is the weather in Tokyo?"}]})
-        agent_messages = [msg for msg in result.get("messages", []) if msg.type == "ai"]
-        tool_calls = [tool_call for msg in agent_messages for tool_call in msg.tool_calls]
-        assert any([tool_call["name"] == "task" and tool_call["args"].get("subagent_type") == "weather_agent" for tool_call in tool_calls])
-
-    def test_deep_agent_with_subagents_gen_purpose(self):
-        subagents = [
-            {
-                "name": "weather_agent",
-                "description": "Use this agent to get the weather",
-                "prompt": "You are a weather agent.",
-                "tools": [get_weather],
-                "model": SAMPLE_MODEL,
-            }
-        ]
-        agent = create_deep_agent(tools=[sample_tool], subagents=subagents)
-        assert_all_deepagent_qualities(agent)
-        result = agent.invoke({"messages": [{"role": "user", "content": "Use the general purpose subagent to call the sample tool"}]})
-        agent_messages = [msg for msg in result.get("messages", []) if msg.type == "ai"]
-        tool_calls = [tool_call for msg in agent_messages for tool_call in msg.tool_calls]
-        assert any([tool_call["name"] == "task" and tool_call["args"].get("subagent_type") == "general-purpose" for tool_call in tool_calls])
-
-    def test_deep_agent_with_subagents_with_middleware(self):
-        subagents = [
-            {
-                "name": "weather_agent",
-                "description": "Use this agent to get the weather",
-                "prompt": "You are a weather agent.",
-                "tools": [],
-                "model": SAMPLE_MODEL,
-                "middleware": [WeatherToolMiddleware()],
-            }
-        ]
-        agent = create_deep_agent(tools=[sample_tool], subagents=subagents)
-        assert_all_deepagent_qualities(agent)
-        result = agent.invoke({"messages": [{"role": "user", "content": "What is the weather in Tokyo?"}]})
-        agent_messages = [msg for msg in result.get("messages", []) if msg.type == "ai"]
-        tool_calls = [tool_call for msg in agent_messages for tool_call in msg.tool_calls]
-        assert any([tool_call["name"] == "task" and tool_call["args"].get("subagent_type") == "weather_agent" for tool_call in tool_calls])
-
-    def test_deep_agent_with_custom_subagents(self):
-        subagents = [
-            {
-                "name": "weather_agent",
-                "description": "Use this agent to get the weather",
-                "prompt": "You are a weather agent.",
-                "tools": [get_weather],
-                "model": SAMPLE_MODEL,
-            },
-            {
-                "name": "soccer_agent",
-                "description": "Use this agent to get the latest soccer scores",
-                "graph": create_agent(
-                    model=SAMPLE_MODEL,
-                    tools=[get_soccer_scores],
-                    system_prompt="You are a soccer agent.",
-                )
-            }
-        ]
-        agent = create_deep_agent(tools=[sample_tool], subagents=subagents)
-        assert_all_deepagent_qualities(agent)
-        result = agent.invoke({"messages": [{"role": "user", "content": "Look up the weather in Tokyo, and the latest scores for Manchester City!"}]})
-        agent_messages = [msg for msg in result.get("messages", []) if msg.type == "ai"]
-        tool_calls = [tool_call for msg in agent_messages for tool_call in msg.tool_calls]
-        assert any([tool_call["name"] == "task" and tool_call["args"].get("subagent_type") == "weather_agent" for tool_call in tool_calls])
-        assert any([tool_call["name"] == "task" and tool_call["args"].get("subagent_type") == "soccer_agent" for tool_call in tool_calls])
-
-    def test_deep_agent_with_extended_state_and_subagents(self):
-        subagents = [
-            {
-                "name": "basketball_info_agent",
-                "description": "Use this agent to get surface level info on any basketball topic",
-                "prompt": "You are a basketball info agent.",
-                "middleware": [ResearchMiddlewareWithTools()],
-            }
-        ]
-        agent = create_deep_agent(tools=[sample_tool], subagents=subagents, middleware=[ResearchMiddleware()])
-        assert_all_deepagent_qualities(agent)
-        assert "research" in agent.stream_channels
-        result = agent.invoke({"messages": [{"role": "user", "content": "Get surface level info on lebron james"}]}, config={"recursion_limit": 100})
-        agent_messages = [msg for msg in result.get("messages", []) if msg.type == "ai"]
-        tool_calls = [tool_call for msg in agent_messages for tool_call in msg.tool_calls]
-        assert any([tool_call["name"] == "task" and tool_call["args"].get("subagent_type") == "basketball_info_agent" for tool_call in tool_calls])
-        assert TOY_BASKETBALL_RESEARCH in result["research"]
-
-    def test_deep_agent_with_subagents_no_tools(self):
-        subagents = [
-            {
-                "name": "basketball_info_agent",
-                "description": "Use this agent to get surface level info on any basketball topic",
-                "prompt": "You are a basketball info agent.",
-            }
-        ]
-        agent = create_deep_agent(tools=[sample_tool], subagents=subagents)
-        assert_all_deepagent_qualities(agent)
-        result = agent.invoke({"messages": [{"role": "user", "content": "Use the basketball info subagent to call the sample tool"}]}, config={"recursion_limit": 100})
-        agent_messages = [msg for msg in result.get("messages", []) if msg.type == "ai"]
-        tool_calls = [tool_call for msg in agent_messages for tool_call in msg.tool_calls]
-        assert any([tool_call["name"] == "task" and tool_call["args"].get("subagent_type") == "basketball_info_agent" for tool_call in tool_calls])