deepagents 0.0.11rc1__py3-none-any.whl → 0.0.12rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
deepagents/__init__.py CHANGED
@@ -1,5 +1,7 @@
- from deepagents.graph import create_deep_agent, async_create_deep_agent
- from deepagents.middleware import PlanningMiddleware, FilesystemMiddleware, SubAgentMiddleware
- from deepagents.state import DeepAgentState
- from deepagents.types import SubAgent, CustomSubAgent
- from deepagents.model import get_default_model
+ """DeepAgents package."""
+
+ from deepagents.graph import create_deep_agent
+ from deepagents.middleware.filesystem import FilesystemMiddleware
+ from deepagents.middleware.subagents import CompiledSubAgent, SubAgent, SubAgentMiddleware
+
+ __all__ = ["CompiledSubAgent", "FilesystemMiddleware", "SubAgent", "SubAgentMiddleware", "create_deep_agent"]
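
The top-level package no longer re-exports async_create_deep_agent, DeepAgentState, PlanningMiddleware, get_default_model, or the old SubAgent/CustomSubAgent types; the public surface in 0.0.12rc2 is the five names in the new `__all__`. A minimal import sketch against that surface (the invoke() payload is an illustrative assumption in LangGraph style, not something defined in this diff):

from deepagents import (
    CompiledSubAgent,
    FilesystemMiddleware,
    SubAgent,
    SubAgentMiddleware,
    create_deep_agent,
)

# Build an agent with all defaults; the graph.py diff below shows what those defaults are.
agent = create_deep_agent()
# Assumed LangGraph-style invocation: compiled agents take a state dict with a "messages" key.
result = agent.invoke({"messages": [{"role": "user", "content": "Summarize README.md"}]})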
deepagents/graph.py CHANGED
@@ -1,142 +1,140 @@
- from typing import Sequence, Union, Callable, Any, Type, Optional
+ """Deepagents come with planning, filesystem, and subagents."""
+
+ from collections.abc import Callable, Sequence
+ from typing import Any
+
+ from langchain.agents import create_agent
+ from langchain.agents.middleware import HumanInTheLoopMiddleware, InterruptOnConfig, TodoListMiddleware
+ from langchain.agents.middleware.summarization import SummarizationMiddleware
+ from langchain.agents.middleware.types import AgentMiddleware
+ from langchain.agents.structured_output import ResponseFormat
+ from langchain_anthropic import ChatAnthropic
+ from langchain_core.language_models import BaseChatModel
  from langchain_core.tools import BaseTool
- from langchain_core.language_models import LanguageModelLike
+ from langgraph.cache.base import BaseCache
+ from langgraph.graph.state import CompiledStateGraph
+ from langgraph.store.base import BaseStore
  from langgraph.types import Checkpointer
- from langchain.agents import create_agent
- from langchain.agents.middleware import AgentMiddleware, SummarizationMiddleware, HumanInTheLoopMiddleware
- from langchain.agents.middleware.human_in_the_loop import ToolConfig
- from langchain.agents.middleware.prompt_caching import AnthropicPromptCachingMiddleware
- from deepagents.middleware import PlanningMiddleware, FilesystemMiddleware, SubAgentMiddleware
- from deepagents.prompts import BASE_AGENT_PROMPT
- from deepagents.model import get_default_model
- from deepagents.types import SubAgent, CustomSubAgent

- def agent_builder(
-     tools: Sequence[Union[BaseTool, Callable, dict[str, Any]]],
-     instructions: str,
-     middleware: Optional[list[AgentMiddleware]] = None,
-     tool_configs: Optional[dict[str, bool | ToolConfig]] = None,
-     model: Optional[Union[str, LanguageModelLike]] = None,
-     subagents: Optional[list[SubAgent | CustomSubAgent]] = None,
-     context_schema: Optional[Type[Any]] = None,
-     checkpointer: Optional[Checkpointer] = None,
-     is_async: bool = False,
- ):
-     if model is None:
-         model = get_default_model()
+ from deepagents.middleware.filesystem import FilesystemMiddleware
+ from deepagents.middleware.subagents import CompiledSubAgent, SubAgent, SubAgentMiddleware

-     deepagent_middleware = [
-         PlanningMiddleware(),
-         FilesystemMiddleware(),
-         SubAgentMiddleware(
-             default_subagent_tools=tools,  # NOTE: These tools are piped to the general-purpose subagent.
-             subagents=subagents if subagents is not None else [],
-             model=model,
-             is_async=is_async,
-         ),
-         SummarizationMiddleware(
-             model=model,
-             max_tokens_before_summary=120000,
-             messages_to_keep=20,
-         ),
-         AnthropicPromptCachingMiddleware(ttl="5m", unsupported_model_behavior="ignore")
-     ]
-     # Add tool interrupt config if provided
-     if tool_configs is not None:
-         deepagent_middleware.append(HumanInTheLoopMiddleware(interrupt_on=tool_configs))
+ BASE_AGENT_PROMPT = "In order to complete the objective that the user asks of you, you have access to a number of standard tools."

-     if middleware is not None:
-         deepagent_middleware.extend(middleware)

-     return create_agent(
-         model,
-         prompt=instructions + "\n\n" + BASE_AGENT_PROMPT,
-         tools=tools,
-         middleware=deepagent_middleware,
-         context_schema=context_schema,
-         checkpointer=checkpointer,
-     )
+ def get_default_model() -> ChatAnthropic:
+     """Get the default model for deep agents.

- def create_deep_agent(
-     tools: Sequence[Union[BaseTool, Callable, dict[str, Any]]] = [],
-     instructions: str = "",
-     middleware: Optional[list[AgentMiddleware]] = None,
-     model: Optional[Union[str, LanguageModelLike]] = None,
-     subagents: Optional[list[SubAgent | CustomSubAgent]] = None,
-     context_schema: Optional[Type[Any]] = None,
-     checkpointer: Optional[Checkpointer] = None,
-     tool_configs: Optional[dict[str, bool | ToolConfig]] = None,
- ):
-     """Create a deep agent.
-     This agent will by default have access to a tool to write todos (write_todos),
-     four file editing tools: write_file, ls, read_file, edit_file, and a tool to call subagents.
-     Args:
-         tools: The tools the agent should have access to.
-         instructions: The additional instructions the agent should have. Will go in
-             the system prompt.
-         model: The model to use.
-         subagents: The subagents to use. Each subagent should be a dictionary with the
-             following keys:
-             - `name`
-             - `description` (used by the main agent to decide whether to call the sub agent)
-             - `prompt` (used as the system prompt in the subagent)
-             - (optional) `tools`
-             - (optional) `model` (either a LanguageModelLike instance or dict settings)
-             - (optional) `middleware` (list of AgentMiddleware)
-         context_schema: The schema of the deep agent.
-         checkpointer: Optional checkpointer for persisting agent state between runs.
-         tool_configs: Optional Dict[str, HumanInTheLoopConfig] mapping tool names to interrupt configs.
+     Returns:
+         ChatAnthropic instance configured with Claude Sonnet 4.
      """
-     return agent_builder(
-         tools=tools,
-         instructions=instructions,
-         middleware=middleware,
-         model=model,
-         subagents=subagents,
-         context_schema=context_schema,
-         checkpointer=checkpointer,
-         tool_configs=tool_configs,
-         is_async=False,
+     return ChatAnthropic(
+         model_name="claude-sonnet-4-20250514",
+         max_tokens=64000,
      )

- def async_create_deep_agent(
-     tools: Sequence[Union[BaseTool, Callable, dict[str, Any]]] = [],
-     instructions: str = "",
-     middleware: Optional[list[AgentMiddleware]] = None,
-     model: Optional[Union[str, LanguageModelLike]] = None,
-     subagents: Optional[list[SubAgent | CustomSubAgent]] = None,
-     context_schema: Optional[Type[Any]] = None,
-     checkpointer: Optional[Checkpointer] = None,
-     tool_configs: Optional[dict[str, bool | ToolConfig]] = None,
- ):
+
+ def create_deep_agent(
+     model: str | BaseChatModel | None = None,
+     tools: Sequence[BaseTool | Callable | dict[str, Any]] | None = None,
+     *,
+     system_prompt: str | None = None,
+     middleware: Sequence[AgentMiddleware] = (),
+     subagents: list[SubAgent | CompiledSubAgent] | None = None,
+     response_format: ResponseFormat | None = None,
+     context_schema: type[Any] | None = None,
+     checkpointer: Checkpointer | None = None,
+     store: BaseStore | None = None,
+     use_longterm_memory: bool = False,
+     interrupt_on: dict[str, bool | InterruptOnConfig] | None = None,
+     debug: bool = False,
+     name: str | None = None,
+     cache: BaseCache | None = None,
+ ) -> CompiledStateGraph:
      """Create a deep agent.
+
      This agent will by default have access to a tool to write todos (write_todos),
-     four file editing tools: write_file, ls, read_file, edit_file, and a tool to call subagents.
+     four file editing tools: write_file, ls, read_file, edit_file, and a tool to call
+     subagents.
+
      Args:
          tools: The tools the agent should have access to.
-         instructions: The additional instructions the agent should have. Will go in
+         system_prompt: The additional instructions the agent should have. Will go in
              the system prompt.
+         middleware: Additional middleware to apply after standard middleware.
          model: The model to use.
          subagents: The subagents to use. Each subagent should be a dictionary with the
              following keys:
             - `name`
-             - `description` (used by the main agent to decide whether to call the sub agent)
+             - `description` (used by the main agent to decide whether to call the
+                 sub agent)
             - `prompt` (used as the system prompt in the subagent)
             - (optional) `tools`
-             - (optional) `model` (either a LanguageModelLike instance or dict settings)
+             - (optional) `model` (either a LanguageModelLike instance or dict
+                 settings)
             - (optional) `middleware` (list of AgentMiddleware)
+         response_format: A structured output response format to use for the agent.
          context_schema: The schema of the deep agent.
          checkpointer: Optional checkpointer for persisting agent state between runs.
-         tool_configs: Optional Dict[str, HumanInTheLoopConfig] mapping tool names to interrupt configs.
+         store: Optional store for persisting longterm memories.
+         use_longterm_memory: Whether to use longterm memory - you must provide a store
+             in order to use longterm memory.
+         interrupt_on: Optional Dict[str, bool | InterruptOnConfig] mapping tool names to
+             interrupt configs.
+         debug: Whether to enable debug mode. Passed through to create_agent.
+         name: The name of the agent. Passed through to create_agent.
+         cache: The cache to use for the agent. Passed through to create_agent.
+
+     Returns:
+         A configured deep agent.
      """
-     return agent_builder(
+     if model is None:
+         model = get_default_model()
+
+     deepagent_middleware = [
+         TodoListMiddleware(),
+         FilesystemMiddleware(
+             long_term_memory=use_longterm_memory,
+         ),
+         SubAgentMiddleware(
+             default_model=model,
+             default_tools=tools,
+             subagents=subagents if subagents is not None else [],
+             default_middleware=[
+                 TodoListMiddleware(),
+                 FilesystemMiddleware(
+                     long_term_memory=use_longterm_memory,
+                 ),
+                 SummarizationMiddleware(
+                     model=model,
+                     max_tokens_before_summary=120000,
+                     messages_to_keep=20,
+                 ),
+             ],
+             default_interrupt_on=interrupt_on,
+             general_purpose_agent=True,
+         ),
+         SummarizationMiddleware(
+             model=model,
+             max_tokens_before_summary=120000,
+             messages_to_keep=20,
+         ),
+     ]
+     if interrupt_on is not None:
+         deepagent_middleware.append(HumanInTheLoopMiddleware(interrupt_on=interrupt_on))
+     if middleware is not None:
+         deepagent_middleware.extend(middleware)
+
+     return create_agent(
+         model,
+         system_prompt=system_prompt + "\n\n" + BASE_AGENT_PROMPT if system_prompt else BASE_AGENT_PROMPT,
          tools=tools,
-         instructions=instructions,
-         middleware=middleware,
-         model=model,
-         subagents=subagents,
+         middleware=deepagent_middleware,
+         response_format=response_format,
          context_schema=context_schema,
          checkpointer=checkpointer,
-         tool_configs=tool_configs,
-         is_async=True,
-     )
+         store=store,
+         debug=debug,
+         name=name,
+         cache=cache,
+     )
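
The single create_deep_agent entry point replaces the old agent_builder helper and async_create_deep_agent: `instructions` is now `system_prompt`, `tool_configs` is now `interrupt_on`, and everything after `model` and `tools` is keyword-only. A sketch of a call against the new signature, using only parameters visible in the diff above; the tool function, prompt text, and interrupt choice are illustrative assumptions:

from langgraph.checkpoint.memory import MemorySaver  # any langgraph Checkpointer works; in-memory one used here for brevity

from deepagents import create_deep_agent

def fetch_page(url: str) -> str:
    """Hypothetical example tool; not part of the package."""
    return f"<html for {url}>"

agent = create_deep_agent(
    # model omitted -> get_default_model(), i.e. ChatAnthropic claude-sonnet-4-20250514
    tools=[fetch_page],
    system_prompt="You are a research assistant.",  # 0.0.11rc1 called this `instructions`
    interrupt_on={"write_file": True},              # 0.0.11rc1 called this `tool_configs`; adds HumanInTheLoopMiddleware
    use_longterm_memory=False,                      # setting this to True requires passing `store`
    checkpointer=MemorySaver(),                     # optional persistence between runs
)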
deepagents/middleware/__init__.py ADDED
@@ -0,0 +1,6 @@
+ """Middleware for the DeepAgent."""
+
+ from deepagents.middleware.filesystem import FilesystemMiddleware
+ from deepagents.middleware.subagents import CompiledSubAgent, SubAgent, SubAgentMiddleware
+
+ __all__ = ["CompiledSubAgent", "FilesystemMiddleware", "SubAgent", "SubAgentMiddleware"]
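
This new middleware package re-exports the same FilesystemMiddleware, SubAgentMiddleware, SubAgent, and CompiledSubAgent names that the top-level __init__.py exposes. Combined with the create_deep_agent docstring above (subagents are dicts with name, description, and prompt, plus optional tools, model, and middleware), a subagent can be sketched as below; the reviewer subagent, its prompt, and the critique tool are invented for illustration, and annotating the dict with SubAgent assumes SubAgent is a TypedDict-style spec:

from deepagents import create_deep_agent
from deepagents.middleware import SubAgent  # same name as the top-level re-export

def critique(draft: str) -> str:
    """Hypothetical tool given only to the subagent."""
    return "Looks reasonable; tighten the introduction."

reviewer: SubAgent = {
    "name": "reviewer",                                             # required
    "description": "Critiques drafts produced by the main agent.",  # the main agent uses this to decide when to delegate
    "prompt": "You review drafts and suggest concrete edits.",      # subagent system prompt
    "tools": [critique],                                            # optional
}

agent = create_deep_agent(subagents=[reviewer])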