deepagents 0.0.6rc1__py3-none-any.whl → 0.0.6rc3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
deepagents/__init__.py CHANGED
@@ -1,9 +1,5 @@
  from deepagents.graph import create_deep_agent, async_create_deep_agent
- from deepagents.interrupt import ToolInterruptConfig
+ from deepagents.middleware import PlanningMiddleware, FilesystemMiddleware, SubAgentMiddleware
  from deepagents.state import DeepAgentState
- from deepagents.sub_agent import SubAgent
+ from deepagents.types import SubAgent, CustomSubAgent
  from deepagents.model import get_default_model
- from deepagents.builder import (
-     create_configurable_agent,
-     async_create_configurable_agent,
- )
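
For orientation, a minimal sketch (not part of the diff) of what the rc3 public surface looks like to a downstream import, based solely on the __init__.py lines above:

# Everything below is re-exported by deepagents/__init__.py in 0.0.6rc3.
from deepagents import (
    create_deep_agent,
    async_create_deep_agent,
    PlanningMiddleware,
    FilesystemMiddleware,
    SubAgentMiddleware,
    DeepAgentState,
    SubAgent,
    CustomSubAgent,
    get_default_model,
)
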
deepagents/graph.py CHANGED
@@ -1,119 +1,81 @@
- from deepagents.sub_agent import (
-     _create_task_tool,
-     _create_sync_task_tool,
-     SubAgent,
-     CustomSubAgent,
- )
- from deepagents.model import get_default_model
- from deepagents.tools import write_todos, write_file, read_file, ls, edit_file
- from deepagents.state import DeepAgentState
- from typing import Sequence, Union, Callable, Any, TypeVar, Type, Optional
- from langchain_core.tools import BaseTool, tool
+ from typing import Sequence, Union, Callable, Any, Type, Optional
+ from langchain_core.tools import BaseTool
  from langchain_core.language_models import LanguageModelLike
- from deepagents.interrupt import create_interrupt_hook, ToolInterruptConfig
  from langgraph.types import Checkpointer
- from langgraph.prebuilt import create_react_agent
+ from langchain.agents import create_agent
+ from langchain.agents.middleware import AgentMiddleware, SummarizationMiddleware, HumanInTheLoopMiddleware
+ from langchain.agents.middleware.human_in_the_loop import ToolConfig
+ from langchain.agents.middleware.prompt_caching import AnthropicPromptCachingMiddleware
+ from deepagents.middleware import PlanningMiddleware, FilesystemMiddleware, SubAgentMiddleware
  from deepagents.prompts import BASE_AGENT_PROMPT
+ from deepagents.model import get_default_model
+ from deepagents.types import SubAgent, CustomSubAgent
 
- StateSchema = TypeVar("StateSchema", bound=DeepAgentState)
- StateSchemaType = Type[StateSchema]
-
-
- def _agent_builder(
+ def agent_builder(
      tools: Sequence[Union[BaseTool, Callable, dict[str, Any]]],
      instructions: str,
+     middleware: Optional[list[AgentMiddleware]] = None,
+     tool_configs: Optional[dict[str, bool | ToolConfig]] = None,
      model: Optional[Union[str, LanguageModelLike]] = None,
-     subagents: list[SubAgent | CustomSubAgent] = None,
-     state_schema: Optional[StateSchemaType] = None,
-     builtin_tools: Optional[list[str]] = None,
-     interrupt_config: Optional[ToolInterruptConfig] = None,
-     config_schema: Optional[Type[Any]] = None,
+     subagents: Optional[list[SubAgent | CustomSubAgent]] = None,
+     context_schema: Optional[Type[Any]] = None,
      checkpointer: Optional[Checkpointer] = None,
-     post_model_hook: Optional[Callable] = None,
      is_async: bool = False,
  ):
-     prompt = instructions + BASE_AGENT_PROMPT
-
-     all_builtin_tools = [write_todos, write_file, read_file, ls, edit_file]
-
-     if builtin_tools is not None:
-         tools_by_name = {}
-         for tool_ in all_builtin_tools:
-             if not isinstance(tool_, BaseTool):
-                 tool_ = tool(tool_)
-             tools_by_name[tool_.name] = tool_
-         # Only include built-in tools whose names are in the specified list
-         built_in_tools = [tools_by_name[_tool] for _tool in builtin_tools]
-     else:
-         built_in_tools = all_builtin_tools
-
      if model is None:
          model = get_default_model()
-     state_schema = state_schema or DeepAgentState
 
-     # Should never be the case that both are specified
-     if post_model_hook and interrupt_config:
-         raise ValueError(
-             "Cannot specify both post_model_hook and interrupt_config together. "
-             "Use either interrupt_config for tool interrupts or post_model_hook for custom post-processing."
+     deepagent_middleware = [
+         PlanningMiddleware(),
+         FilesystemMiddleware(),
+         SubAgentMiddleware(
+             default_subagent_tools=tools,  # NOTE: These tools are piped to the general-purpose subagent.
+             subagents=subagents if subagents is not None else [],
+             model=model,
+             is_async=is_async,
+         ),
+         SummarizationMiddleware(
+             model=model,
+             max_tokens_before_summary=150000,
+             messages_to_keep=20,
          )
-     elif post_model_hook is not None:
-         selected_post_model_hook = post_model_hook
-     elif interrupt_config is not None:
-         selected_post_model_hook = create_interrupt_hook(interrupt_config)
-     else:
-         selected_post_model_hook = None
+     ]
+     # Add tool interrupt config if provided
+     if tool_configs is not None:
+         deepagent_middleware.append(HumanInTheLoopMiddleware(tool_configs=tool_configs))
 
-     if not is_async:
-         task_tool = _create_sync_task_tool(
-             list(tools) + built_in_tools,
-             instructions,
-             subagents or [],
-             model,
-             state_schema,
-             selected_post_model_hook,
-         )
-     else:
-         task_tool = _create_task_tool(
-             list(tools) + built_in_tools,
-             instructions,
-             subagents or [],
-             model,
-             state_schema,
-             selected_post_model_hook,
-         )
-     all_tools = built_in_tools + list(tools) + [task_tool]
+     # Add Anthropic prompt caching if the model is Anthropic
+     # TODO: Add this back when fixed
+     # if isinstance(model, ChatAnthropic):
+     #     deepagent_middleware.append(AnthropicPromptCachingMiddleware(ttl="5m"))
 
-     return create_react_agent(
+     if middleware is not None:
+         deepagent_middleware.extend(middleware)
+
+     return create_agent(
          model,
-         prompt=prompt,
-         tools=all_tools,
-         state_schema=state_schema,
-         post_model_hook=selected_post_model_hook,
-         config_schema=config_schema,
+         prompt=instructions + "\n\n" + BASE_AGENT_PROMPT,
+         tools=tools,
+         middleware=deepagent_middleware,
+         context_schema=context_schema,
          checkpointer=checkpointer,
      )
 
-
  def create_deep_agent(
-     tools: Sequence[Union[BaseTool, Callable, dict[str, Any]]],
-     instructions: str,
+     tools: Sequence[Union[BaseTool, Callable, dict[str, Any]]] = [],
+     instructions: str = "",
+     middleware: Optional[list[AgentMiddleware]] = None,
      model: Optional[Union[str, LanguageModelLike]] = None,
-     subagents: list[SubAgent | CustomSubAgent] = None,
-     state_schema: Optional[StateSchemaType] = None,
-     builtin_tools: Optional[list[str]] = None,
-     interrupt_config: Optional[ToolInterruptConfig] = None,
-     config_schema: Optional[Type[Any]] = None,
+     subagents: Optional[list[SubAgent | CustomSubAgent]] = None,
+     context_schema: Optional[Type[Any]] = None,
      checkpointer: Optional[Checkpointer] = None,
-     post_model_hook: Optional[Callable] = None,
+     tool_configs: Optional[dict[str, bool | ToolConfig]] = None,
  ):
      """Create a deep agent.
-
      This agent will by default have access to a tool to write todos (write_todos),
-     and then four file editing tools: write_file, ls, read_file, edit_file.
-
+     four file editing tools: write_file, ls, read_file, edit_file, and a tool to call subagents.
      Args:
-         tools: The additional tools the agent should have access to.
+         tools: The tools the agent should have access to.
          instructions: The additional instructions the agent should have. Will go in
              the system prompt.
          model: The model to use.
@@ -124,48 +86,38 @@ def create_deep_agent(
              - `prompt` (used as the system prompt in the subagent)
              - (optional) `tools`
              - (optional) `model` (either a LanguageModelLike instance or dict settings)
-         state_schema: The schema of the deep agent. Should subclass from DeepAgentState
-         builtin_tools: If not provided, all built-in tools are included. If provided,
-             only the specified built-in tools are included.
-         interrupt_config: Optional Dict[str, HumanInterruptConfig] mapping tool names to interrupt configs.
-         config_schema: The schema of the deep agent.
-         post_model_hook: Custom post model hook
+             - (optional) `middleware` (list of AgentMiddleware)
+         context_schema: The schema of the deep agent.
          checkpointer: Optional checkpointer for persisting agent state between runs.
+         tool_configs: Optional Dict[str, HumanInTheLoopConfig] mapping tool names to interrupt configs.
      """
-     return _agent_builder(
+     return agent_builder(
          tools=tools,
          instructions=instructions,
+         middleware=middleware,
          model=model,
          subagents=subagents,
-         state_schema=state_schema,
-         builtin_tools=builtin_tools,
-         interrupt_config=interrupt_config,
-         config_schema=config_schema,
+         context_schema=context_schema,
          checkpointer=checkpointer,
-         post_model_hook=post_model_hook,
+         tool_configs=tool_configs,
          is_async=False,
      )
 
-
  def async_create_deep_agent(
-     tools: Sequence[Union[BaseTool, Callable, dict[str, Any]]],
-     instructions: str,
+     tools: Sequence[Union[BaseTool, Callable, dict[str, Any]]] = [],
+     instructions: str = "",
+     middleware: Optional[list[AgentMiddleware]] = None,
      model: Optional[Union[str, LanguageModelLike]] = None,
-     subagents: list[SubAgent | CustomSubAgent] = None,
-     state_schema: Optional[StateSchemaType] = None,
-     builtin_tools: Optional[list[str]] = None,
-     interrupt_config: Optional[ToolInterruptConfig] = None,
-     config_schema: Optional[Type[Any]] = None,
+     subagents: Optional[list[SubAgent | CustomSubAgent]] = None,
+     context_schema: Optional[Type[Any]] = None,
      checkpointer: Optional[Checkpointer] = None,
-     post_model_hook: Optional[Callable] = None,
+     tool_configs: Optional[dict[str, bool | ToolConfig]] = None,
  ):
      """Create a deep agent.
-
      This agent will by default have access to a tool to write todos (write_todos),
-     and then four file editing tools: write_file, ls, read_file, edit_file.
-
+     four file editing tools: write_file, ls, read_file, edit_file, and a tool to call subagents.
      Args:
-         tools: The additional tools the agent should have access to.
+         tools: The tools the agent should have access to.
          instructions: The additional instructions the agent should have. Will go in
              the system prompt.
          model: The model to use.
@@ -176,24 +128,19 @@ def async_create_deep_agent(
              - `prompt` (used as the system prompt in the subagent)
              - (optional) `tools`
              - (optional) `model` (either a LanguageModelLike instance or dict settings)
-         state_schema: The schema of the deep agent. Should subclass from DeepAgentState
-         builtin_tools: If not provided, all built-in tools are included. If provided,
-             only the specified built-in tools are included.
-         interrupt_config: Optional Dict[str, HumanInterruptConfig] mapping tool names to interrupt configs.
-         config_schema: The schema of the deep agent.
-         post_model_hook: Custom post model hook
+             - (optional) `middleware` (list of AgentMiddleware)
+         context_schema: The schema of the deep agent.
          checkpointer: Optional checkpointer for persisting agent state between runs.
+         tool_configs: Optional Dict[str, HumanInTheLoopConfig] mapping tool names to interrupt configs.
      """
-     return _agent_builder(
+     return agent_builder(
          tools=tools,
          instructions=instructions,
+         middleware=middleware,
          model=model,
          subagents=subagents,
-         state_schema=state_schema,
-         builtin_tools=builtin_tools,
-         interrupt_config=interrupt_config,
-         config_schema=config_schema,
+         context_schema=context_schema,
          checkpointer=checkpointer,
-         post_model_hook=post_model_hook,
+         tool_configs=tool_configs,
          is_async=True,
-     )
+     )
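
To make the signature change concrete, here is a hedged usage sketch of the rc3 builder. The `get_weather` tool and the boolean interrupt flag are illustrative assumptions rather than package code; per the diff above, `tool_configs` takes over the role of the removed `interrupt_config`, and a `middleware` list replaces `post_model_hook`-style customization.

from deepagents import create_deep_agent

def get_weather(city: str) -> str:
    """Hypothetical example tool, not shipped with deepagents."""
    return f"It is always sunny in {city}."

agent = create_deep_agent(
    tools=[get_weather],
    instructions="You are a weather assistant.",
    # Assumption: True enables the default human-in-the-loop interrupt for this tool.
    tool_configs={"get_weather": True},
)
result = agent.invoke({"messages": [{"role": "user", "content": "Weather in Paris?"}]})
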
deepagents/middleware.py ADDED
@@ -0,0 +1,198 @@
+ """DeepAgents implemented as Middleware"""
+
+ from langchain.agents import create_agent
+ from langchain.agents.middleware import AgentMiddleware, AgentState, ModelRequest, SummarizationMiddleware
+ from langchain.agents.middleware.prompt_caching import AnthropicPromptCachingMiddleware
+ from langchain_core.tools import BaseTool, tool, InjectedToolCallId
+ from langchain_core.messages import ToolMessage
+ from langchain.chat_models import init_chat_model
+ from langgraph.types import Command
+ from langchain.agents.tool_node import InjectedState
+ from typing import Annotated, Optional
+ from deepagents.state import PlanningState, FilesystemState
+ from deepagents.tools import write_todos, ls, read_file, write_file, edit_file
+ from deepagents.prompts import WRITE_TODOS_SYSTEM_PROMPT, TASK_SYSTEM_PROMPT, FILESYSTEM_SYSTEM_PROMPT, TASK_TOOL_DESCRIPTION, BASE_AGENT_PROMPT
+ from deepagents.types import SubAgent, CustomSubAgent
+
+ ###########################
+ # Planning Middleware
+ ###########################
+
+ class PlanningMiddleware(AgentMiddleware):
+     state_schema = PlanningState
+     tools = [write_todos]
+
+     def modify_model_request(self, request: ModelRequest, agent_state: PlanningState) -> ModelRequest:
+         request.system_prompt = request.system_prompt + "\n\n" + WRITE_TODOS_SYSTEM_PROMPT
+         return request
+
+ ###########################
+ # Filesystem Middleware
+ ###########################
+
+ class FilesystemMiddleware(AgentMiddleware):
+     state_schema = FilesystemState
+     tools = [ls, read_file, write_file, edit_file]
+
+     def modify_model_request(self, request: ModelRequest, agent_state: FilesystemState) -> ModelRequest:
+         request.system_prompt = request.system_prompt + "\n\n" + FILESYSTEM_SYSTEM_PROMPT
+         return request
+
+ ###########################
+ # SubAgent Middleware
+ ###########################
+
+ class SubAgentMiddleware(AgentMiddleware):
+     def __init__(
+         self,
+         default_subagent_tools: list[BaseTool] = [],
+         subagents: list[SubAgent | CustomSubAgent] = [],
+         model=None,
+         is_async=False,
+     ) -> None:
+         super().__init__()
+         task_tool = create_task_tool(
+             default_subagent_tools=default_subagent_tools,
+             subagents=subagents,
+             model=model,
+             is_async=is_async,
+         )
+         self.tools = [task_tool]
+
+     def modify_model_request(self, request: ModelRequest, agent_state: AgentState) -> ModelRequest:
+         request.system_prompt = request.system_prompt + "\n\n" + TASK_SYSTEM_PROMPT
+         return request
+
+ def _get_agents(
+     default_subagent_tools: list[BaseTool],
+     subagents: list[SubAgent | CustomSubAgent],
+     model
+ ):
+     default_subagent_middleware = [
+         PlanningMiddleware(),
+         FilesystemMiddleware(),
+         # TODO: Add this back when fixed
+         # AnthropicPromptCachingMiddleware(ttl="5m"),
+         SummarizationMiddleware(
+             model=model,
+             max_tokens_before_summary=150000,
+             messages_to_keep=20,
+         ),
+     ]
+     agents = {
+         "general-purpose": create_agent(
+             model,
+             prompt=BASE_AGENT_PROMPT,
+             tools=default_subagent_tools,
+             checkpointer=False,
+             middleware=default_subagent_middleware
+         )
+     }
+     for _agent in subagents:
+         if "graph" in _agent:
+             agents[_agent["name"]] = _agent["graph"]
+             continue
+         if "tools" in _agent:
+             _tools = _agent["tools"]
+         else:
+             _tools = default_subagent_tools.copy()
+         # Resolve per-subagent model: can be instance or dict
+         if "model" in _agent:
+             agent_model = _agent["model"]
+             if isinstance(agent_model, dict):
+                 # Dictionary settings - create model from config
+                 sub_model = init_chat_model(**agent_model)
+             else:
+                 # Model instance - use directly
+                 sub_model = agent_model
+         else:
+             # Fallback to main model
+             sub_model = model
+         if "middleware" in _agent:
+             _middleware = [*default_subagent_middleware, *_agent["middleware"]]
+         else:
+             _middleware = default_subagent_middleware
+         agents[_agent["name"]] = create_agent(
+             sub_model,
+             prompt=_agent["prompt"],
+             tools=_tools,
+             middleware=_middleware,
+             checkpointer=False,
+         )
+     return agents
+
+
+ def _get_subagent_description(subagents: list[SubAgent | CustomSubAgent]):
+     return [f"- {_agent['name']}: {_agent['description']}" for _agent in subagents]
+
+
+ def create_task_tool(
+     default_subagent_tools: list[BaseTool],
+     subagents: list[SubAgent | CustomSubAgent],
+     model,
+     is_async: bool = False,
+ ):
+     agents = _get_agents(
+         default_subagent_tools, subagents, model
+     )
+     other_agents_string = _get_subagent_description(subagents)
+
+     if is_async:
+         @tool(
+             description=TASK_TOOL_DESCRIPTION.format(other_agents=other_agents_string)
+         )
+         async def task(
+             description: str,
+             subagent_type: str,
+             state: Annotated[AgentState, InjectedState],
+             tool_call_id: Annotated[str, InjectedToolCallId],
+         ):
+             if subagent_type not in agents:
+                 return f"Error: invoked agent of type {subagent_type}, the only allowed types are {[f'`{k}`' for k in agents]}"
+             sub_agent = agents[subagent_type]
+             state["messages"] = [{"role": "user", "content": description}]
+             result = await sub_agent.ainvoke(state)
+             state_update = {}
+             for k, v in result.items():
+                 if k not in ["todos", "messages"]:
+                     state_update[k] = v
+             return Command(
+                 update={
+                     **state_update,
+                     "messages": [
+                         ToolMessage(
+                             result["messages"][-1].content, tool_call_id=tool_call_id
+                         )
+                     ],
+                 }
+             )
+     else:
+         @tool(
+             description=TASK_TOOL_DESCRIPTION.format(other_agents=other_agents_string)
+         )
+         def task(
+             description: str,
+             subagent_type: str,
+             state: Annotated[AgentState, InjectedState],
+             tool_call_id: Annotated[str, InjectedToolCallId],
+         ):
+             if subagent_type not in agents:
+                 return f"Error: invoked agent of type {subagent_type}, the only allowed types are {[f'`{k}`' for k in agents]}"
+             sub_agent = agents[subagent_type]
+             state["messages"] = [{"role": "user", "content": description}]
+             result = sub_agent.invoke(state)
+             state_update = {}
+             for k, v in result.items():
+                 if k not in ["todos", "messages"]:
+                     state_update[k] = v
+             return Command(
+                 update={
+                     **state_update,
+                     "messages": [
+                         ToolMessage(
+                             result["messages"][-1].content, tool_call_id=tool_call_id
+                         )
+                     ],
+                 }
+             )
+     return task
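
As a hedged illustration of how the subagent registry above is exercised: the `critique` entry below is a made-up example, not package code. The main agent reaches it through the generated `task` tool with subagent_type="critique"; omitted keys fall back to the general-purpose defaults built in _get_agents().

from deepagents import create_deep_agent

critique_subagent = {
    "name": "critique",
    "description": "Reviews a draft and returns critical feedback.",
    "prompt": "You are a strict editor. Critique the draft you are given.",
    # "tools", "model", and "middleware" are optional; omitting them keeps the defaults.
}

agent = create_deep_agent(
    instructions="Write a short report, then have it critiqued.",
    subagents=[critique_subagent],
)
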
deepagents/prompts.py CHANGED
@@ -189,7 +189,8 @@ Using the todo list here is overkill and wastes time and tokens. These three too
  - Mark tasks complete IMMEDIATELY after finishing (don't batch completions)
  - Complete current tasks before starting new ones
  - Remove tasks that are no longer relevant from the list entirely
- - When you FIRST write this list, you should mark your first task (or tasks) as in_progress.
+ - IMPORTANT: When you write this todo list, you should mark your first task (or tasks) as in_progress immediately!
+ - IMPORTANT: Unless all tasks are completed, you should always have at least one task in_progress to show the user that you are working on something.
 
  3. **Task Completion Requirements**:
  - ONLY mark a task as completed when you have FULLY accomplished it
@@ -360,10 +361,7 @@ Usage:
  - The write_file tool will create the a new file.
  - Prefer to edit existing files over creating new ones when possible."""
 
-
- BASE_AGENT_PROMPT = """In order to complete the objective that the user asks ofyou, you have access to a number of standard tools.
-
- ## `write_todos`
+ WRITE_TODOS_SYSTEM_PROMPT = """## `write_todos`
 
  You have access to the `write_todos` tool to help you manage and plan complex objectives.
  Use this tool for complex objectives to ensure that you are tracking each necessary step and giving the user visibility into your progress.
@@ -373,7 +371,11 @@ It is critical that you mark todos as completed as soon as you are done with a s
  For simple objectives that only require a few steps, it is better to just complete the objective directly and NOT use this tool.
  Writing todos takes time and tokens, use it when it is helpful for managing complex many-step problems! But not for simple few-step requests.
 
- ## `task` (subagent spawner)
+ ## Important To-Do List Usage Notes to Remember
+ - The `write_todos` tool should never be called multiple times in parallel.
+ - Don't be afraid to revise the To-Do list as you go. New information may reveal new tasks that need to be done, or old tasks that are irrelevant."""
+
+ TASK_SYSTEM_PROMPT = """## `task` (subagent spawner)
 
  You have access to a `task` tool to launch short-lived subagents that handle isolated tasks. These agents are ephemeral — they live only for the duration of the task and return a single result.
 
@@ -396,17 +398,19 @@ When NOT to use the task tool:
  - If delegating does not reduce token usage, complexity, or context switching
  - If splitting would add latency without benefit
 
- ## Filesystem Tools `ls`, `read_file`, `write_file`, `edit_file`
+ ## Important Task Tool Usage Notes to Remember
+ - Whenever possible, parallelize the work that you do. This is true for both tool_calls, and for tasks. Whenever you have independent steps to complete - make tool_calls, or kick off tasks (subagents) in parallel to accomplish them faster. This saves time for the user, which is incredibly important.
+ - Remember to use the `task` tool to silo independent tasks within a multi-part objective.
+ - You should use the `task` tool whenever you have a complex task that will take multiple steps, and is independent from other tasks that the agent needs to complete. These agents are highly competent and efficient."""
+
+ FILESYSTEM_SYSTEM_PROMPT = """## Filesystem Tools `ls`, `read_file`, `write_file`, `edit_file`
 
  You have access to a local, private filesystem which you can interact with using these tools.
  - ls: list all files in the local filesystem
  - read_file: read a file from the local filesystem
  - write_file: write to a file in the local filesystem
- - edit_file: edit a file in the local filesystem
+ - edit_file: edit a file in the local filesystem"""
 
- # Important Usage Notes to Remember
- - Don't be afraid to revise the To-Do list as you go. New information may reveal new tasks that need to be done, or old tasks that are irrelevant.
- - Whenever possible, parallelize the work that you do. This is true for both tool_calls, and for tasks. Whenever you have independent steps to complete - make tool_calls, or kick off tasks (subagents) in parallel to accomplish them faster. This saves time for the user, which is incredibly important.
- - Remember to use the `task` tool to silo independent tasks within a multi-part objective.
- - You should use the `task` tool whenever you have a complex task that will take multiple steps, and is independent from other tasks that the agent needs to complete. These agents are highly competent and efficient.
+ BASE_AGENT_PROMPT = """
+ In order to complete the objective that the user asks of you, you have access to a number of standard tools.
  """
deepagents/state.py CHANGED
@@ -1,4 +1,4 @@
- from langgraph.prebuilt.chat_agent_executor import AgentState
+ from langchain.agents.middleware import AgentState
  from typing import NotRequired, Annotated
  from typing import Literal
  from typing_extensions import TypedDict
@@ -23,3 +23,11 @@ def file_reducer(l, r):
  class DeepAgentState(AgentState):
      todos: NotRequired[list[Todo]]
      files: Annotated[NotRequired[dict[str, str]], file_reducer]
+
+
+ class PlanningState(AgentState):
+     todos: NotRequired[list[Todo]]
+
+
+ class FilesystemState(AgentState):
+     files: Annotated[NotRequired[dict[str, str]], file_reducer]
deepagents/tools.py CHANGED
@@ -1,9 +1,9 @@
  from langchain_core.tools import tool, InjectedToolCallId
- from langgraph.types import Command
  from langchain_core.messages import ToolMessage
+ from langgraph.types import Command
+ from langchain.agents.tool_node import InjectedState
  from typing import Annotated, Union
- from langgraph.prebuilt import InjectedState
-
+ from deepagents.state import Todo, FilesystemState
  from deepagents.prompts import (
      WRITE_TODOS_TOOL_DESCRIPTION,
      LIST_FILES_TOOL_DESCRIPTION,
@@ -11,7 +11,6 @@ from deepagents.prompts import (
      WRITE_FILE_TOOL_DESCRIPTION,
      EDIT_FILE_TOOL_DESCRIPTION,
  )
- from deepagents.state import Todo, DeepAgentState
 
 
  @tool(description=WRITE_TODOS_TOOL_DESCRIPTION)
@@ -29,7 +28,7 @@ def write_todos(
 
 
  @tool(description=LIST_FILES_TOOL_DESCRIPTION)
- def ls(state: Annotated[DeepAgentState, InjectedState]) -> list[str]:
+ def ls(state: Annotated[FilesystemState, InjectedState]) -> list[str]:
      """List all files"""
      return list(state.get("files", {}).keys())
 
@@ -37,7 +36,7 @@ def ls(state: Annotated[DeepAgentState, InjectedState]) -> list[str]:
  @tool(description=READ_FILE_TOOL_DESCRIPTION)
  def read_file(
      file_path: str,
-     state: Annotated[DeepAgentState, InjectedState],
+     state: Annotated[FilesystemState, InjectedState],
      offset: int = 0,
      limit: int = 2000,
  ) -> str:
@@ -83,7 +82,7 @@ def read_file(
  def write_file(
      file_path: str,
      content: str,
-     state: Annotated[DeepAgentState, InjectedState],
+     state: Annotated[FilesystemState, InjectedState],
      tool_call_id: Annotated[str, InjectedToolCallId],
  ) -> Command:
      files = state.get("files", {})
@@ -103,7 +102,7 @@ def edit_file(
      file_path: str,
      old_string: str,
      new_string: str,
-     state: Annotated[DeepAgentState, InjectedState],
+     state: Annotated[FilesystemState, InjectedState],
      tool_call_id: Annotated[str, InjectedToolCallId],
      replace_all: bool = False,
  ) -> Union[Command, str]:
deepagents/types.py ADDED
@@ -0,0 +1,21 @@
+ from typing import NotRequired, Union, Any
+ from typing_extensions import TypedDict
+ from langchain_core.language_models import LanguageModelLike
+ from langchain.agents.middleware import AgentMiddleware
+ from langchain_core.runnables import Runnable
+ from langchain_core.tools import BaseTool
+
+ class SubAgent(TypedDict):
+     name: str
+     description: str
+     prompt: str
+     tools: NotRequired[list[BaseTool]]
+     # Optional per-subagent model: can be either a model instance OR dict settings
+     model: NotRequired[Union[LanguageModelLike, dict[str, Any]]]
+     middleware: NotRequired[list[AgentMiddleware]]
+
+
+ class CustomSubAgent(TypedDict):
+     name: str
+     description: str
+     graph: Runnable
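
A hedged sketch of the two TypedDicts in use; the grading runnable is a toy stand-in (an assumption, not package code). Per create_task_tool in middleware.py above, a CustomSubAgent graph only needs to accept the injected agent state and return a dict whose last message carries the result.

from langchain_core.messages import AIMessage
from langchain_core.runnables import RunnableLambda
from deepagents.types import SubAgent, CustomSubAgent

summarizer: SubAgent = {
    "name": "summarizer",
    "description": "Summarizes long documents.",
    "prompt": "Summarize the provided text in five bullet points.",
}

def _grade(state: dict) -> dict:
    # Toy stand-in for a compiled graph: returns a fixed grade message.
    return {"messages": [AIMessage("Grade: 8/10")]}

grader: CustomSubAgent = {
    "name": "grader",
    "description": "Grades answers using a custom runnable.",
    "graph": RunnableLambda(_grade),
}
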