deepagents 0.2.1rc2__tar.gz → 0.2.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27) hide show
  1. {deepagents-0.2.1rc2/src/deepagents.egg-info → deepagents-0.2.2}/PKG-INFO +1 -1
  2. {deepagents-0.2.1rc2 → deepagents-0.2.2}/pyproject.toml +6 -1
  3. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents/graph.py +2 -2
  4. deepagents-0.2.2/src/deepagents/middleware/__init__.py +13 -0
  5. deepagents-0.2.2/src/deepagents/middleware/agent_memory.py +222 -0
  6. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents/middleware/filesystem.py +7 -3
  7. deepagents-0.2.2/src/deepagents/middleware/resumable_shell.py +85 -0
  8. {deepagents-0.2.1rc2 → deepagents-0.2.2/src/deepagents.egg-info}/PKG-INFO +1 -1
  9. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents.egg-info/SOURCES.txt +2 -0
  10. deepagents-0.2.1rc2/src/deepagents/middleware/__init__.py +0 -6
  11. {deepagents-0.2.1rc2 → deepagents-0.2.2}/LICENSE +0 -0
  12. {deepagents-0.2.1rc2 → deepagents-0.2.2}/README.md +0 -0
  13. {deepagents-0.2.1rc2 → deepagents-0.2.2}/setup.cfg +0 -0
  14. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents/__init__.py +0 -0
  15. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents/backends/__init__.py +0 -0
  16. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents/backends/composite.py +0 -0
  17. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents/backends/filesystem.py +0 -0
  18. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents/backends/protocol.py +0 -0
  19. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents/backends/state.py +0 -0
  20. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents/backends/store.py +0 -0
  21. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents/backends/utils.py +0 -0
  22. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents/middleware/patch_tool_calls.py +0 -0
  23. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents/middleware/subagents.py +0 -0
  24. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents.egg-info/dependency_links.txt +0 -0
  25. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents.egg-info/requires.txt +0 -0
  26. {deepagents-0.2.1rc2 → deepagents-0.2.2}/src/deepagents.egg-info/top_level.txt +0 -0
  27. {deepagents-0.2.1rc2 → deepagents-0.2.2}/tests/test_middleware.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: deepagents
3
- Version: 0.2.1rc2
3
+ Version: 0.2.2
4
4
  Summary: General purpose 'deep agent' with sub-agent spawning, todo list capabilities, and mock file system. Built on LangGraph.
5
5
  License: MIT
6
6
  Requires-Python: <4.0,>=3.11
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "deepagents"
3
- version = "0.2.1rc2"
3
+ version = "0.2.2"
4
4
  description = "General purpose 'deep agent' with sub-agent spawning, todo list capabilities, and mock file system. Built on LangGraph."
5
5
  readme = "README.md"
6
6
  license = { text = "MIT" }
@@ -93,3 +93,8 @@ enable_error_code = ["deprecated"]
93
93
  # Optional: reduce strictness if needed
94
94
  disallow_any_generics = false
95
95
  warn_return_any = false
96
+
97
+ [tool.uv.workspace]
98
+ members = [
99
+ "libs/deepagents-cli",
100
+ ]
@@ -123,10 +123,10 @@ def create_deep_agent(
123
123
  AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore"),
124
124
  PatchToolCallsMiddleware(),
125
125
  ]
126
+ if middleware:
127
+ deepagent_middleware.extend(middleware)
126
128
  if interrupt_on is not None:
127
129
  deepagent_middleware.append(HumanInTheLoopMiddleware(interrupt_on=interrupt_on))
128
- if middleware is not None:
129
- deepagent_middleware.extend(middleware)
130
130
 
131
131
  return create_agent(
132
132
  model,
@@ -0,0 +1,13 @@
1
"""Middleware for the DeepAgent."""

from deepagents.middleware.agent_memory import AgentMemoryMiddleware
from deepagents.middleware.filesystem import FilesystemMiddleware
from deepagents.middleware.resumable_shell import ResumableShellToolMiddleware
from deepagents.middleware.subagents import CompiledSubAgent, SubAgent, SubAgentMiddleware

# NOTE(review): agent_memory.py ships in this package but was not re-exported
# here, unlike every sibling middleware module — exporting it for consistency.
__all__ = [
    "AgentMemoryMiddleware",
    "CompiledSubAgent",
    "FilesystemMiddleware",
    "ResumableShellToolMiddleware",
    "SubAgent",
    "SubAgentMiddleware",
]
@@ -0,0 +1,222 @@
1
+ """Middleware for loading agent-specific long-term memory into the system prompt."""
2
+
3
+ from collections.abc import Awaitable, Callable
4
+ from typing import TYPE_CHECKING, Any
5
+
6
+ if TYPE_CHECKING:
7
+ from langgraph.runtime import Runtime
8
+
9
+ from langchain.agents.middleware.types import (
10
+ AgentMiddleware,
11
+ AgentState,
12
+ ModelRequest,
13
+ ModelResponse,
14
+ )
15
+ from typing_extensions import NotRequired, TypedDict
16
+
17
+ from deepagents.backends.protocol import BackendProtocol
18
+
19
+
20
class AgentMemoryState(AgentState):
    """Agent state extended with a slot for cached long-term memory.

    ``agent_memory`` holds the raw text of the agent's memory file once it has
    been loaded by :class:`AgentMemoryMiddleware`; until the first
    ``before_agent`` hook runs it is absent (or ``None``).
    """

    # Raw contents of the persisted memory file, or None when not yet loaded.
    agent_memory: NotRequired[str | None]
25
+
26
+
27
# Path (within the configured backend) of the file holding the agent's
# long-term memory; read once per conversation by AgentMemoryMiddleware.
AGENT_MEMORY_FILE_PATH = "/agent.md"

# Long-term Memory Documentation
# Appended to every system prompt; `{memory_path}` is substituted with the
# middleware's configured memory path prefix at request time.
LONGTERM_MEMORY_SYSTEM_PROMPT = """

## Long-term Memory

You have access to a long-term memory system using the {memory_path} path prefix.
Files stored in {memory_path} persist across sessions and conversations.

Your system prompt is loaded from {memory_path}agent.md at startup. You can update your own instructions by editing this file.

**When to CHECK/READ memories (CRITICAL - do this FIRST):**
- **At the start of ANY new session**: Run `ls {memory_path}` to see what you know
- **BEFORE answering questions**: If asked "what do you know about X?" or "how do I do Y?", check `ls {memory_path}` for relevant files FIRST
- **When user asks you to do something**: Check if you have guides, examples, or patterns in {memory_path} before proceeding
- **When user references past work or conversations**: Search {memory_path} for related content
- **If you're unsure**: Check your memories rather than guessing or using only general knowledge

**Memory-first response pattern:**
1. User asks a question → Run `ls {memory_path}` to check for relevant files
2. If relevant files exist → Read them with `read_file {memory_path}[filename]`
3. Base your answer on saved knowledge (from memories) supplemented by general knowledge
4. If no relevant memories exist → Use general knowledge, then consider if this is worth saving

**When to update memories:**
- **IMMEDIATELY when the user describes your role or how you should behave** (e.g., "you are a web researcher", "you are an expert in X")
- **IMMEDIATELY when the user gives feedback on your work** - Before continuing, update memories to capture what was wrong and how to do it better
- When the user explicitly asks you to remember something
- When patterns or preferences emerge (coding styles, conventions, workflows)
- After significant work where context would help in future sessions

**Learning from feedback:**
- When user says something is better/worse, capture WHY and encode it as a pattern
- Each correction is a chance to improve permanently - don't just fix the immediate issue, update your instructions
- When user says "you should remember X" or "be careful about Y", treat this as HIGH PRIORITY - update memories IMMEDIATELY
- Look for the underlying principle behind corrections, not just the specific mistake
- If it's something you "should have remembered", identify where that instruction should live permanently

**What to store where:**
- **{memory_path}agent.md**: Update this to modify your core instructions and behavioral patterns
- **Other {memory_path} files**: Use for project-specific context, reference information, or structured notes
- If you create additional memory files, add references to them in {memory_path}agent.md so you remember to consult them

The portion of your system prompt that comes from {memory_path}agent.md is marked with `<agent_memory>` tags so you can identify what instructions come from your persistent memory.

Example: `ls {memory_path}` to see what memories you have
Example: `read_file '{memory_path}deep-agents-guide.md'` to recall saved knowledge
Example: `edit_file('{memory_path}agent.md', ...)` to update your instructions
Example: `write_file('{memory_path}project_context.md', ...)` for project-specific notes, then reference it in agent.md

Remember: To interact with the longterm filesystem, you must prefix the filename with the {memory_path} path."""


# Default wrapper for the loaded memory content when it is prepended to the
# system prompt; `{agent_memory}` is substituted with the file contents.
DEFAULT_MEMORY_SNIPPET = """<agent_memory>
{agent_memory}
</agent_memory>
"""
85
+
86
class AgentMemoryMiddleware(AgentMiddleware):
    """Middleware for loading agent-specific long-term memory.

    This middleware loads the agent's long-term memory from a file (agent.md)
    and injects it into the system prompt. The memory is loaded once at the
    start of the conversation and stored in state under ``agent_memory``; on
    every model call it is prepended to the system prompt (wrapped in the
    configured template) and the long-term memory usage instructions are
    appended.

    Args:
        backend: Backend to use for loading the agent memory file.
        memory_path: Path prefix under which long-term memory files live;
            substituted into the usage instructions shown to the model.
        system_prompt_template: Optional custom template for how to inject
            the agent memory into the system prompt. Use {agent_memory} as
            a placeholder. Defaults to a simple section header.

    Example:
        ```python
        from deepagents.middleware.agent_memory import AgentMemoryMiddleware
        from deepagents.memory.backends import FilesystemBackend
        from pathlib import Path

        # Set up backend pointing to agent's directory
        agent_dir = Path.home() / ".deepagents" / "my-agent"
        backend = FilesystemBackend(root_dir=agent_dir)

        # Create middleware
        middleware = AgentMemoryMiddleware(backend=backend, memory_path="/memories/")
        ```
    """

    state_schema = AgentMemoryState

    def __init__(
        self,
        *,
        backend: BackendProtocol,
        memory_path: str,
        system_prompt_template: str | None = None,
    ) -> None:
        """Initialize the agent memory middleware.

        Args:
            backend: Backend to use for loading the agent memory file.
            memory_path: Path prefix for long-term memory files, used when
                formatting the usage instructions.
            system_prompt_template: Optional custom template for injecting
                agent memory into system prompt.
        """
        self.backend = backend
        self.memory_path = memory_path
        self.system_prompt_template = system_prompt_template or DEFAULT_MEMORY_SNIPPET

    def _inject_memory(self, request: ModelRequest) -> None:
        """Rewrite ``request.system_prompt`` in place with the memory section.

        Prepends the formatted memory snippet and appends the long-term
        memory usage instructions. Shared by the sync and async model-call
        wrappers so the two code paths cannot drift apart.
        """
        # `or ""` also covers the case where the key exists but holds None
        # (e.g. the backend read returned nothing); `.get(..., "")` alone
        # would inject the literal string "None" into the prompt.
        agent_memory = request.state.get("agent_memory") or ""

        memory_section = self.system_prompt_template.format(agent_memory=agent_memory)
        if request.system_prompt:
            request.system_prompt = memory_section + "\n\n" + request.system_prompt
        else:
            request.system_prompt = memory_section
        request.system_prompt = (
            request.system_prompt
            + "\n\n"
            + LONGTERM_MEMORY_SYSTEM_PROMPT.format(memory_path=self.memory_path)
        )

    def before_agent(
        self,
        state: AgentMemoryState,
        runtime: "Runtime",
    ) -> dict[str, Any] | None:
        """Load agent memory from file before agent execution.

        Args:
            state: Current agent state.
            runtime: LangGraph runtime (unused).

        Returns:
            A state update with ``agent_memory`` populated, or None when the
            memory was already loaded (no update).
        """
        # Only load memory if it hasn't been loaded yet.
        if state.get("agent_memory") is None:
            file_data = self.backend.read(AGENT_MEMORY_FILE_PATH)
            return {"agent_memory": file_data}
        return None

    async def abefore_agent(
        self,
        state: AgentMemoryState,
        runtime: "Runtime",
    ) -> dict[str, Any] | None:
        """(async) Load agent memory from file before agent execution.

        Args:
            state: Current agent state.
            runtime: LangGraph runtime (unused).

        Returns:
            A state update with ``agent_memory`` populated, or None when the
            memory was already loaded (no update).
        """
        # Only load memory if it hasn't been loaded yet.
        # NOTE(review): the backend is read synchronously here as well; if the
        # backend protocol gains an async read, switch to it — TODO confirm.
        if state.get("agent_memory") is None:
            file_data = self.backend.read(AGENT_MEMORY_FILE_PATH)
            return {"agent_memory": file_data}
        return None

    def wrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], ModelResponse],
    ) -> ModelResponse:
        """Inject agent memory into the system prompt.

        Args:
            request: The model request being processed.
            handler: The handler function to call with the modified request.

        Returns:
            The model response from the handler.
        """
        self._inject_memory(request)
        return handler(request)

    async def awrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
    ) -> ModelResponse:
        """(async) Inject agent memory into the system prompt.

        Args:
            request: The model request being processed.
            handler: The handler function to call with the modified request.

        Returns:
            The model response from the handler.
        """
        self._inject_memory(request)
        return await handler(request)
@@ -35,7 +35,7 @@ EMPTY_CONTENT_WARNING = "System reminder: File exists but has empty contents"
35
35
  MAX_LINE_LENGTH = 2000
36
36
  LINE_NUMBER_WIDTH = 6
37
37
  DEFAULT_READ_OFFSET = 0
38
- DEFAULT_READ_LIMIT = 2000
38
+ DEFAULT_READ_LIMIT = 500
39
39
  BACKEND_TYPES = (
40
40
  BackendProtocol
41
41
  | BackendFactory
@@ -155,8 +155,12 @@ Assume this tool is able to read all files on the machine. If the User provides
155
155
 
156
156
  Usage:
157
157
  - The file_path parameter must be an absolute path, not a relative path
158
- - By default, it reads up to 2000 lines starting from the beginning of the file
159
- - You can optionally specify a line offset and limit (especially handy for long files), but it's recommended to read the whole file by not providing these parameters
158
+ - By default, it reads up to 500 lines starting from the beginning of the file
159
+ - **IMPORTANT for large files and codebase exploration**: Use pagination with offset and limit parameters to avoid context overflow
160
+ - First scan: read_file(path, limit=100) to see file structure
161
+ - Read more sections: read_file(path, offset=100, limit=200) for next 200 lines
162
+ - Only omit limit (read full file) when necessary for editing
163
+ - Specify offset and limit: read_file(path, offset=0, limit=100) reads first 100 lines
160
164
  - Any lines longer than 2000 characters will be truncated
161
165
  - Results are returned using cat -n format, with line numbers starting at 1
162
166
  - You have the capability to call multiple tools in a single response. It is always better to speculatively read multiple files as a batch that are potentially useful.
@@ -0,0 +1,85 @@
1
+ """Shell tool middleware that survives HITL pauses."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any, Awaitable, Callable, cast
6
+
7
+ from langchain.agents.middleware.shell_tool import (
8
+ ShellToolMiddleware,
9
+ _PersistentShellTool,
10
+ _SessionResources,
11
+ ShellToolState,
12
+ )
13
+ from langchain.agents.middleware.types import AgentState
14
+ from langchain_core.messages import ToolMessage
15
+ from langchain.tools.tool_node import ToolCallRequest
16
+ from langgraph.types import Command
17
+
18
+
19
+ class ResumableShellToolMiddleware(ShellToolMiddleware):
20
+ """Shell middleware that recreates session resources after human interrupts.
21
+
22
+ ``ShellToolMiddleware`` stores its session handle in middleware state using an
23
+ ``UntrackedValue``. When a run pauses for human approval, that attribute is not
24
+ checkpointed. Upon resuming, LangGraph restores the state without the shell
25
+ resources, so the next tool execution fails with
26
+ ``Shell session resources are unavailable``.
27
+
28
+ This subclass lazily recreates the shell session the first time a resumed run
29
+ touches the shell tool again and only performs shutdown when a session is
30
+ actually active. This keeps behaviour identical for uninterrupted runs while
31
+ allowing HITL pauses to succeed.
32
+ """
33
+
34
+ def wrap_tool_call(
35
+ self,
36
+ request: ToolCallRequest,
37
+ handler: Callable[[ToolCallRequest], ToolMessage | Command],
38
+ ) -> ToolMessage | Command:
39
+ if isinstance(request.tool, _PersistentShellTool):
40
+ resources = self._get_or_create_resources(request.state)
41
+ return self._run_shell_tool(
42
+ resources,
43
+ request.tool_call["args"],
44
+ tool_call_id=request.tool_call.get("id"),
45
+ )
46
+ return super().wrap_tool_call(request, handler)
47
+
48
+ async def awrap_tool_call(
49
+ self,
50
+ request: ToolCallRequest,
51
+ handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]],
52
+ ) -> ToolMessage | Command:
53
+ if isinstance(request.tool, _PersistentShellTool):
54
+ resources = self._get_or_create_resources(request.state)
55
+ return self._run_shell_tool(
56
+ resources,
57
+ request.tool_call["args"],
58
+ tool_call_id=request.tool_call.get("id"),
59
+ )
60
+ return await super().awrap_tool_call(request, handler)
61
+
62
+ def after_agent(self, state: ShellToolState, runtime) -> None: # type: ignore[override]
63
+ if self._has_resources(state):
64
+ super().after_agent(state, runtime)
65
+
66
+ async def aafter_agent(self, state: ShellToolState, runtime) -> None: # type: ignore[override]
67
+ if self._has_resources(state):
68
+ await super().aafter_agent(state, runtime)
69
+
70
+ @staticmethod
71
+ def _has_resources(state: AgentState) -> bool:
72
+ resources = state.get("shell_session_resources")
73
+ return isinstance(resources, _SessionResources)
74
+
75
+ def _get_or_create_resources(self, state: AgentState) -> _SessionResources:
76
+ resources = state.get("shell_session_resources")
77
+ if isinstance(resources, _SessionResources):
78
+ return resources
79
+
80
+ new_resources = self._create_resources()
81
+ cast(dict[str, Any], state)["shell_session_resources"] = new_resources
82
+ return new_resources
83
+
84
+
85
+ __all__ = ["ResumableShellToolMiddleware"]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: deepagents
3
- Version: 0.2.1rc2
3
+ Version: 0.2.2
4
4
  Summary: General purpose 'deep agent' with sub-agent spawning, todo list capabilities, and mock file system. Built on LangGraph.
5
5
  License: MIT
6
6
  Requires-Python: <4.0,>=3.11
@@ -16,7 +16,9 @@ src/deepagents/backends/state.py
16
16
  src/deepagents/backends/store.py
17
17
  src/deepagents/backends/utils.py
18
18
  src/deepagents/middleware/__init__.py
19
+ src/deepagents/middleware/agent_memory.py
19
20
  src/deepagents/middleware/filesystem.py
20
21
  src/deepagents/middleware/patch_tool_calls.py
22
+ src/deepagents/middleware/resumable_shell.py
21
23
  src/deepagents/middleware/subagents.py
22
24
  tests/test_middleware.py
@@ -1,6 +0,0 @@
1
- """Middleware for the DeepAgent."""
2
-
3
- from deepagents.middleware.filesystem import FilesystemMiddleware
4
- from deepagents.middleware.subagents import CompiledSubAgent, SubAgent, SubAgentMiddleware
5
-
6
- __all__ = ["CompiledSubAgent", "FilesystemMiddleware", "SubAgent", "SubAgentMiddleware"]
File without changes
File without changes
File without changes