deepagents 0.2.1rc1__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
deepagents/backends/utils.py CHANGED
@@ -37,6 +37,15 @@ class GrepMatch(TypedDict):
      text: str


+ def sanitize_tool_call_id(tool_call_id: str) -> str:
+     """Sanitize tool_call_id to prevent path traversal and separator issues.
+
+     Replaces dangerous characters (., /, \) with underscores.
+     """
+     sanitized = tool_call_id.replace(".", "_").replace("/", "_").replace("\\", "_")
+     return sanitized
+
+
  def format_content_with_line_numbers(
      content: str | list[str],
      start_line: int = 1,
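A quick sanity check of the new helper's behavior (illustrative only; the import path follows the filesystem.py hunk below, which pulls `sanitize_tool_call_id` from `deepagents.backends.utils`):

```python
from deepagents.backends.utils import sanitize_tool_call_id

# Every dot, forward slash, and backslash becomes an underscore, so a
# hostile tool_call_id can no longer escape /large_tool_results/.
assert sanitize_tool_call_id("a.b/c\\d") == "a_b_c_d"
assert sanitize_tool_call_id("../../etc/passwd") == "______etc_passwd"
```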
deepagents/graph.py CHANGED
@@ -123,10 +123,10 @@ def create_deep_agent(
          AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore"),
          PatchToolCallsMiddleware(),
      ]
+     if middleware:
+         deepagent_middleware.extend(middleware)
      if interrupt_on is not None:
          deepagent_middleware.append(HumanInTheLoopMiddleware(interrupt_on=interrupt_on))
-     if middleware is not None:
-         deepagent_middleware.extend(middleware)

      return create_agent(
          model,
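The reorder above is behavioral: user-supplied middleware is now added to `deepagent_middleware` before `HumanInTheLoopMiddleware` rather than after it (and the `if middleware:` guard also skips an empty list, not just `None`). A minimal sketch of how this surfaces through the public API; the model identifier and interrupt configuration here are hypothetical:

```python
from langchain.agents.middleware import AgentMiddleware
from deepagents import create_deep_agent  # assumed public import path

class AuditMiddleware(AgentMiddleware):
    """Hypothetical no-op middleware used only to illustrate ordering."""

agent = create_deep_agent(
    model="anthropic:claude-sonnet-4-5",  # hypothetical model identifier
    middleware=[AuditMiddleware()],       # in 0.2.2, registered before the HITL layer
    interrupt_on={"shell": True},         # hypothetical interrupt config
)
```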
deepagents/middleware/__init__.py CHANGED
@@ -1,6 +1,13 @@
  """Middleware for the DeepAgent."""

  from deepagents.middleware.filesystem import FilesystemMiddleware
+ from deepagents.middleware.resumable_shell import ResumableShellToolMiddleware
  from deepagents.middleware.subagents import CompiledSubAgent, SubAgent, SubAgentMiddleware

- __all__ = ["CompiledSubAgent", "FilesystemMiddleware", "SubAgent", "SubAgentMiddleware"]
+ __all__ = [
+     "CompiledSubAgent",
+     "FilesystemMiddleware",
+     "ResumableShellToolMiddleware",
+     "SubAgent",
+     "SubAgentMiddleware",
+ ]
deepagents/middleware/agent_memory.py ADDED
@@ -0,0 +1,222 @@
+ """Middleware for loading agent-specific long-term memory into the system prompt."""
+
+ from collections.abc import Awaitable, Callable
+ from typing import TYPE_CHECKING, Any
+
+ if TYPE_CHECKING:
+     from langgraph.runtime import Runtime
+
+ from langchain.agents.middleware.types import (
+     AgentMiddleware,
+     AgentState,
+     ModelRequest,
+     ModelResponse,
+ )
+ from typing_extensions import NotRequired, TypedDict
+
+ from deepagents.backends.protocol import BackendProtocol
+
+
+ class AgentMemoryState(AgentState):
+     """State for the agent memory middleware."""
+
+     agent_memory: NotRequired[str | None]
+     """Long-term memory content for the agent."""
+
+
+ AGENT_MEMORY_FILE_PATH = "/agent.md"
+
+ # Long-term Memory Documentation
+ LONGTERM_MEMORY_SYSTEM_PROMPT = """
+
+ ## Long-term Memory
+
+ You have access to a long-term memory system using the {memory_path} path prefix.
+ Files stored in {memory_path} persist across sessions and conversations.
+
+ Your system prompt is loaded from {memory_path}agent.md at startup. You can update your own instructions by editing this file.
+
+ **When to CHECK/READ memories (CRITICAL - do this FIRST):**
+ - **At the start of ANY new session**: Run `ls {memory_path}` to see what you know
+ - **BEFORE answering questions**: If asked "what do you know about X?" or "how do I do Y?", check `ls {memory_path}` for relevant files FIRST
+ - **When user asks you to do something**: Check if you have guides, examples, or patterns in {memory_path} before proceeding
+ - **When user references past work or conversations**: Search {memory_path} for related content
+ - **If you're unsure**: Check your memories rather than guessing or using only general knowledge
+
+ **Memory-first response pattern:**
+ 1. User asks a question → Run `ls {memory_path}` to check for relevant files
+ 2. If relevant files exist → Read them with `read_file {memory_path}[filename]`
+ 3. Base your answer on saved knowledge (from memories) supplemented by general knowledge
+ 4. If no relevant memories exist → Use general knowledge, then consider if this is worth saving
+
+ **When to update memories:**
+ - **IMMEDIATELY when the user describes your role or how you should behave** (e.g., "you are a web researcher", "you are an expert in X")
+ - **IMMEDIATELY when the user gives feedback on your work** - Before continuing, update memories to capture what was wrong and how to do it better
+ - When the user explicitly asks you to remember something
+ - When patterns or preferences emerge (coding styles, conventions, workflows)
+ - After significant work where context would help in future sessions
+
+ **Learning from feedback:**
+ - When user says something is better/worse, capture WHY and encode it as a pattern
+ - Each correction is a chance to improve permanently - don't just fix the immediate issue, update your instructions
+ - When user says "you should remember X" or "be careful about Y", treat this as HIGH PRIORITY - update memories IMMEDIATELY
+ - Look for the underlying principle behind corrections, not just the specific mistake
+ - If it's something you "should have remembered", identify where that instruction should live permanently
+
+ **What to store where:**
+ - **{memory_path}agent.md**: Update this to modify your core instructions and behavioral patterns
+ - **Other {memory_path} files**: Use for project-specific context, reference information, or structured notes
+ - If you create additional memory files, add references to them in {memory_path}agent.md so you remember to consult them
+
+ The portion of your system prompt that comes from {memory_path}agent.md is marked with `<agent_memory>` tags so you can identify what instructions come from your persistent memory.
+
+ Example: `ls {memory_path}` to see what memories you have
+ Example: `read_file '{memory_path}deep-agents-guide.md'` to recall saved knowledge
+ Example: `edit_file('{memory_path}agent.md', ...)` to update your instructions
+ Example: `write_file('{memory_path}project_context.md', ...)` for project-specific notes, then reference it in agent.md
+
+ Remember: To interact with the longterm filesystem, you must prefix the filename with the {memory_path} path."""
+
+
+ DEFAULT_MEMORY_SNIPPET = """<agent_memory>
+ {agent_memory}
+ </agent_memory>
+ """
+
+ class AgentMemoryMiddleware(AgentMiddleware):
+     """Middleware for loading agent-specific long-term memory.
+
+     This middleware loads the agent's long-term memory from a file (agent.md)
+     and injects it into the system prompt. The memory is loaded once at the
+     start of the conversation and stored in state.
+
+     Args:
+         backend: Backend to use for loading the agent memory file.
+         memory_path: Path prefix under which long-term memory files are addressed.
+         system_prompt_template: Optional custom template for how to inject
+             the agent memory into the system prompt. Use {agent_memory} as
+             a placeholder. Defaults to a simple section header.
+
+     Example:
+         ```python
+         from deepagents.middleware.agent_memory import AgentMemoryMiddleware
+         from deepagents.memory.backends import FilesystemBackend
+         from pathlib import Path
+
+         # Set up backend pointing to agent's directory
+         agent_dir = Path.home() / ".deepagents" / "my-agent"
+         backend = FilesystemBackend(root_dir=agent_dir)
+
+         # Create middleware (memory_path is a required keyword argument)
+         middleware = AgentMemoryMiddleware(backend=backend, memory_path="/memories/")
+         ```
+     """
+
+     state_schema = AgentMemoryState
+
+     def __init__(
+         self,
+         *,
+         backend: BackendProtocol,
+         memory_path: str,
+         system_prompt_template: str | None = None,
+     ) -> None:
+         """Initialize the agent memory middleware.
+
+         Args:
+             backend: Backend to use for loading the agent memory file.
+             memory_path: Path prefix under which long-term memory files are addressed.
+             system_prompt_template: Optional custom template for injecting
+                 agent memory into system prompt.
+         """
+         self.backend = backend
+         self.memory_path = memory_path
+         self.system_prompt_template = system_prompt_template or DEFAULT_MEMORY_SNIPPET
+
+     def before_agent(
+         self,
+         state: AgentMemoryState,
+         runtime,
+     ) -> AgentMemoryState:
+         """Load agent memory from file before agent execution.
+
+         Args:
+             state: Current agent state.
+             runtime: The LangGraph runtime for the current run.
+
+         Returns:
+             Updated state with agent_memory populated.
+         """
+         # Only load memory if it hasn't been loaded yet
+         if "agent_memory" not in state or state.get("agent_memory") is None:
+             file_data = self.backend.read(AGENT_MEMORY_FILE_PATH)
+             return {"agent_memory": file_data}
+
+     async def abefore_agent(
+         self,
+         state: AgentMemoryState,
+         runtime,
+     ) -> AgentMemoryState:
+         """(async) Load agent memory from file before agent execution.
+
+         Args:
+             state: Current agent state.
+             runtime: The LangGraph runtime for the current run.
+
+         Returns:
+             Updated state with agent_memory populated.
+         """
+         # Only load memory if it hasn't been loaded yet
+         if "agent_memory" not in state or state.get("agent_memory") is None:
+             file_data = self.backend.read(AGENT_MEMORY_FILE_PATH)
+             return {"agent_memory": file_data}
+
+     def wrap_model_call(
+         self,
+         request: ModelRequest,
+         handler: Callable[[ModelRequest], ModelResponse],
+     ) -> ModelResponse:
+         """Inject agent memory into the system prompt.
+
+         Args:
+             request: The model request being processed.
+             handler: The handler function to call with the modified request.
+
+         Returns:
+             The model response from the handler.
+         """
+         # Get agent memory from state
+         agent_memory = request.state.get("agent_memory", "")
+
+         memory_section = self.system_prompt_template.format(agent_memory=agent_memory)
+         if request.system_prompt:
+             request.system_prompt = memory_section + "\n\n" + request.system_prompt
+         else:
+             request.system_prompt = memory_section
+         request.system_prompt = request.system_prompt + "\n\n" + LONGTERM_MEMORY_SYSTEM_PROMPT.format(memory_path=self.memory_path)
+
+         return handler(request)
+
+     async def awrap_model_call(
+         self,
+         request: ModelRequest,
+         handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+     ) -> ModelResponse:
+         """(async) Inject agent memory into the system prompt.
+
+         Args:
+             request: The model request being processed.
+             handler: The handler function to call with the modified request.
+
+         Returns:
+             The model response from the handler.
+         """
+         # Get agent memory from state
+         agent_memory = request.state.get("agent_memory", "")
+
+         memory_section = self.system_prompt_template.format(agent_memory=agent_memory)
+         if request.system_prompt:
+             request.system_prompt = memory_section + "\n\n" + request.system_prompt
+         else:
+             request.system_prompt = memory_section
+         request.system_prompt = request.system_prompt + "\n\n" + LONGTERM_MEMORY_SYSTEM_PROMPT.format(memory_path=self.memory_path)
+
+         return await handler(request)
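A hedged wiring sketch for the new middleware. `FilesystemBackend` and its import path are taken from the class docstring above; the `create_deep_agent` import path, the model identifier, and the `/memories/` prefix are assumptions for illustration:

```python
from pathlib import Path

from deepagents import create_deep_agent  # assumed public import path
from deepagents.memory.backends import FilesystemBackend  # per the docstring example
from deepagents.middleware.agent_memory import AgentMemoryMiddleware

backend = FilesystemBackend(root_dir=Path.home() / ".deepagents" / "my-agent")
agent = create_deep_agent(
    model="anthropic:claude-sonnet-4-5",  # hypothetical model identifier
    middleware=[
        # memory_path is a required keyword-only argument; it becomes the
        # {memory_path} prefix inside LONGTERM_MEMORY_SYSTEM_PROMPT.
        AgentMemoryMiddleware(backend=backend, memory_path="/memories/"),
    ],
)
```

Note that `before_agent` reads `/agent.md` through the backend only when `agent_memory` is absent from state, so later turns in the same thread reuse the value loaded on the first turn.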
deepagents/middleware/filesystem.py CHANGED
@@ -24,18 +24,18 @@ from typing_extensions import TypedDict
  from deepagents.backends.protocol import BackendProtocol, BackendFactory, WriteResult, EditResult
  from deepagents.backends import StateBackend
  from deepagents.backends.utils import (
-     create_file_data,
      update_file_data,
      format_content_with_line_numbers,
      format_grep_matches,
      truncate_if_too_long,
+     sanitize_tool_call_id,
  )

  EMPTY_CONTENT_WARNING = "System reminder: File exists but has empty contents"
  MAX_LINE_LENGTH = 2000
  LINE_NUMBER_WIDTH = 6
  DEFAULT_READ_OFFSET = 0
- DEFAULT_READ_LIMIT = 2000
+ DEFAULT_READ_LIMIT = 500
  BACKEND_TYPES = (
      BackendProtocol
      | BackendFactory
@@ -155,8 +155,12 @@ Assume this tool is able to read all files on the machine. If the User provides

  Usage:
  - The file_path parameter must be an absolute path, not a relative path
- - By default, it reads up to 2000 lines starting from the beginning of the file
- - You can optionally specify a line offset and limit (especially handy for long files), but it's recommended to read the whole file by not providing these parameters
+ - By default, it reads up to 500 lines starting from the beginning of the file
+ - **IMPORTANT for large files and codebase exploration**: Use pagination with offset and limit parameters to avoid context overflow
+   - First scan: read_file(path, limit=100) to see file structure
+   - Read more sections: read_file(path, offset=100, limit=200) for the next 200 lines
+   - Only omit limit (read the full file) when necessary for editing
+   - Specify offset and limit: read_file(path, offset=0, limit=100) reads the first 100 lines
  - Any lines longer than 2000 characters will be truncated
  - Results are returned using cat -n format, with line numbers starting at 1
  - You have the capability to call multiple tools in a single response. It is always better to speculatively read multiple files as a batch that are potentially useful.
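The lower default pairs with the new pagination guidance: a 2000-line default read could flood the context window during codebase exploration, while 500 lines keeps single reads bounded. A sketch of the paginated pattern the description recommends; `read_file` here is whatever callable issues the tool call, passed in as a stand-in rather than a real Python API:

```python
def read_in_pages(read_file, path: str, page: int = 200):
    """Hypothetical driver showing only the offset/limit arithmetic.

    read_file is assumed to return an empty string once exhausted.
    """
    offset = 0
    while True:
        chunk = read_file(path, offset=offset, limit=page)
        if not chunk:
            break
        yield chunk
        offset += page  # the next page starts where the previous one ended
```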
@@ -227,6 +231,15 @@ All file paths must start with a /.


  def _get_backend(backend: BACKEND_TYPES, runtime: ToolRuntime) -> BackendProtocol:
+     """Get the resolved backend instance from backend or factory.
+
+     Args:
+         backend: Backend instance or factory function.
+         runtime: The tool runtime context.
+
+     Returns:
+         Resolved backend instance.
+     """
      if callable(backend):
          return backend(runtime)
      return backend
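`BACKEND_TYPES` admits either a ready backend or a factory that builds one per tool call, and `_get_backend` normalizes the two. A small self-contained sketch (the stub class is hypothetical; a real implementation would provide the `BackendProtocol` methods):

```python
class StubBackend:
    """Hypothetical stand-in for a BackendProtocol implementation."""

def make_backend(runtime) -> StubBackend:
    """A BackendFactory-shaped callable: invoked once per tool call."""
    return StubBackend()

# _get_backend(StubBackend(), runtime) returns the instance unchanged, while
# _get_backend(make_backend, runtime) first calls make_backend(runtime).
```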
@@ -532,6 +545,19 @@ class FilesystemMiddleware(AgentMiddleware):

          self.tools = _get_filesystem_tools(self.backend, custom_tool_descriptions)

+     def _get_backend(self, runtime: ToolRuntime) -> BackendProtocol:
+         """Get the resolved backend instance from backend or factory.
+
+         Args:
+             runtime: The tool runtime context.
+
+         Returns:
+             Resolved backend instance.
+         """
+         if callable(self.backend):
+             return self.backend(runtime)
+         return self.backend
+
      def wrap_model_call(
          self,
          request: ModelRequest,
@@ -568,54 +594,70 @@ class FilesystemMiddleware(AgentMiddleware):
          request.system_prompt = request.system_prompt + "\n\n" + self.system_prompt if request.system_prompt else self.system_prompt
          return await handler(request)

-     def _intercept_large_tool_result(self, tool_result: ToolMessage | Command) -> ToolMessage | Command:
+     def _process_large_message(
+         self,
+         message: ToolMessage,
+         resolved_backend: BackendProtocol,
+     ) -> tuple[ToolMessage, dict[str, FileData] | None]:
+         content = message.content
+         if not isinstance(content, str) or len(content) <= 4 * self.tool_token_limit_before_evict:
+             return message, None
+
+         sanitized_id = sanitize_tool_call_id(message.tool_call_id)
+         file_path = f"/large_tool_results/{sanitized_id}"
+         result = resolved_backend.write(file_path, content)
+         if result.error:
+             return message, None
+         content_sample = format_content_with_line_numbers(content.splitlines()[:10], start_line=1)
+         processed_message = ToolMessage(
+             TOO_LARGE_TOOL_MSG.format(
+                 tool_call_id=message.tool_call_id,
+                 file_path=file_path,
+                 content_sample=content_sample,
+             ),
+             tool_call_id=message.tool_call_id,
+         )
+         return processed_message, result.files_update
+
+     def _intercept_large_tool_result(self, tool_result: ToolMessage | Command, runtime: ToolRuntime) -> ToolMessage | Command:
          if isinstance(tool_result, ToolMessage) and isinstance(tool_result.content, str):
-             content = tool_result.content
-             if self.tool_token_limit_before_evict and len(content) > 4 * self.tool_token_limit_before_evict:
-                 file_path = f"/large_tool_results/{tool_result.tool_call_id}"
-                 file_data = create_file_data(content)
-                 state_update = {
-                     "messages": [
-                         ToolMessage(
-                             TOO_LARGE_TOOL_MSG.format(
-                                 tool_call_id=tool_result.tool_call_id,
-                                 file_path=file_path,
-                                 content_sample=format_content_with_line_numbers(file_data["content"][:10], start_line=1),
-                             ),
-                             tool_call_id=tool_result.tool_call_id,
-                         )
-                     ],
-                     "files": {file_path: file_data},
-                 }
-                 return Command(update=state_update)
+             if not (self.tool_token_limit_before_evict and
+                     len(tool_result.content) > 4 * self.tool_token_limit_before_evict):
+                 return tool_result
+             resolved_backend = self._get_backend(runtime)
+             processed_message, files_update = self._process_large_message(
+                 tool_result,
+                 resolved_backend,
+             )
+             return (Command(update={
+                 "files": files_update,
+                 "messages": [processed_message],
+             }) if files_update is not None else processed_message)
+
          elif isinstance(tool_result, Command):
              update = tool_result.update
              if update is None:
                  return tool_result
-             message_updates = update.get("messages", [])
-             file_updates = update.get("files", {})
-
-             edited_message_updates = []
-             for message in message_updates:
-                 if self.tool_token_limit_before_evict and isinstance(message, ToolMessage) and isinstance(message.content, str):
-                     content = message.content
-                     if len(content) > 4 * self.tool_token_limit_before_evict:
-                         file_path = f"/large_tool_results/{message.tool_call_id}"
-                         file_data = create_file_data(content)
-                         edited_message_updates.append(
-                             ToolMessage(
-                                 TOO_LARGE_TOOL_MSG.format(
-                                     tool_call_id=message.tool_call_id,
-                                     file_path=file_path,
-                                     content_sample=format_content_with_line_numbers(file_data["content"][:10], start_line=1),
-                                 ),
-                                 tool_call_id=message.tool_call_id,
-                             )
-                         )
-                         file_updates[file_path] = file_data
-                         continue
-                 edited_message_updates.append(message)
-             return Command(update={**update, "messages": edited_message_updates, "files": file_updates})
+             command_messages = update.get("messages", [])
+             accumulated_file_updates = dict(update.get("files", {}))
+             resolved_backend = self._get_backend(runtime)
+             processed_messages = []
+             for message in command_messages:
+                 if not (self.tool_token_limit_before_evict and
+                         isinstance(message, ToolMessage) and
+                         isinstance(message.content, str) and
+                         len(message.content) > 4 * self.tool_token_limit_before_evict):
+                     processed_messages.append(message)
+                     continue
+                 processed_message, files_update = self._process_large_message(
+                     message,
+                     resolved_backend,
+                 )
+                 processed_messages.append(processed_message)
+                 if files_update is not None:
+                     accumulated_file_updates.update(files_update)
+             return Command(update={**update, "messages": processed_messages, "files": accumulated_file_updates})
+
          return tool_result

      def wrap_tool_call(
  def wrap_tool_call(
@@ -636,7 +678,7 @@ class FilesystemMiddleware(AgentMiddleware):
636
678
  return handler(request)
637
679
 
638
680
  tool_result = handler(request)
639
- return self._intercept_large_tool_result(tool_result)
681
+ return self._intercept_large_tool_result(tool_result, request.runtime)
640
682
 
641
683
  async def awrap_tool_call(
642
684
  self,
@@ -656,4 +698,4 @@ class FilesystemMiddleware(AgentMiddleware):
              return await handler(request)

          tool_result = await handler(request)
-         return self._intercept_large_tool_result(tool_result)
+         return self._intercept_large_tool_result(tool_result, request.runtime)
deepagents/middleware/patch_tool_calls.py CHANGED
@@ -3,9 +3,9 @@
  from typing import Any

  from langchain.agents.middleware import AgentMiddleware, AgentState
- from langchain_core.messages import RemoveMessage, ToolMessage
- from langgraph.graph.message import REMOVE_ALL_MESSAGES
+ from langchain_core.messages import ToolMessage
  from langgraph.runtime import Runtime
+ from langgraph.types import Overwrite


  class PatchToolCallsMiddleware(AgentMiddleware):
@@ -41,4 +41,4 @@ class PatchToolCallsMiddleware(AgentMiddleware):
              )
          )

-         return {"messages": [RemoveMessage(id=REMOVE_ALL_MESSAGES), *patched_messages]}
+         return {"messages": Overwrite(patched_messages)}
deepagents/middleware/resumable_shell.py ADDED
@@ -0,0 +1,85 @@
+ """Shell tool middleware that survives HITL pauses."""
+
+ from __future__ import annotations
+
+ from typing import Any, Awaitable, Callable, cast
+
+ from langchain.agents.middleware.shell_tool import (
+     ShellToolMiddleware,
+     _PersistentShellTool,
+     _SessionResources,
+     ShellToolState,
+ )
+ from langchain.agents.middleware.types import AgentState
+ from langchain_core.messages import ToolMessage
+ from langchain.tools.tool_node import ToolCallRequest
+ from langgraph.types import Command
+
+
+ class ResumableShellToolMiddleware(ShellToolMiddleware):
+     """Shell middleware that recreates session resources after human interrupts.
+
+     ``ShellToolMiddleware`` stores its session handle in middleware state using an
+     ``UntrackedValue``. When a run pauses for human approval, that attribute is not
+     checkpointed. Upon resuming, LangGraph restores the state without the shell
+     resources, so the next tool execution fails with
+     ``Shell session resources are unavailable``.
+
+     This subclass lazily recreates the shell session the first time a resumed run
+     touches the shell tool again and only performs shutdown when a session is
+     actually active. This keeps behaviour identical for uninterrupted runs while
+     allowing HITL pauses to succeed.
+     """
+
+     def wrap_tool_call(
+         self,
+         request: ToolCallRequest,
+         handler: Callable[[ToolCallRequest], ToolMessage | Command],
+     ) -> ToolMessage | Command:
+         if isinstance(request.tool, _PersistentShellTool):
+             resources = self._get_or_create_resources(request.state)
+             return self._run_shell_tool(
+                 resources,
+                 request.tool_call["args"],
+                 tool_call_id=request.tool_call.get("id"),
+             )
+         return super().wrap_tool_call(request, handler)
+
+     async def awrap_tool_call(
+         self,
+         request: ToolCallRequest,
+         handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]],
+     ) -> ToolMessage | Command:
+         if isinstance(request.tool, _PersistentShellTool):
+             resources = self._get_or_create_resources(request.state)
+             return self._run_shell_tool(
+                 resources,
+                 request.tool_call["args"],
+                 tool_call_id=request.tool_call.get("id"),
+             )
+         return await super().awrap_tool_call(request, handler)
+
+     def after_agent(self, state: ShellToolState, runtime) -> None:  # type: ignore[override]
+         if self._has_resources(state):
+             super().after_agent(state, runtime)
+
+     async def aafter_agent(self, state: ShellToolState, runtime) -> None:  # type: ignore[override]
+         if self._has_resources(state):
+             await super().aafter_agent(state, runtime)
+
+     @staticmethod
+     def _has_resources(state: AgentState) -> bool:
+         resources = state.get("shell_session_resources")
+         return isinstance(resources, _SessionResources)
+
+     def _get_or_create_resources(self, state: AgentState) -> _SessionResources:
+         resources = state.get("shell_session_resources")
+         if isinstance(resources, _SessionResources):
+             return resources
+
+         new_resources = self._create_resources()
+         cast(dict[str, Any], state)["shell_session_resources"] = new_resources
+         return new_resources
+
+
+ __all__ = ["ResumableShellToolMiddleware"]
deepagents-0.2.1rc1.dist-info/METADATA → deepagents-0.2.2.dist-info/METADATA CHANGED
@@ -1,13 +1,13 @@
  Metadata-Version: 2.4
  Name: deepagents
- Version: 0.2.1rc1
+ Version: 0.2.2
  Summary: General purpose 'deep agent' with sub-agent spawning, todo list capabilities, and mock file system. Built on LangGraph.
  License: MIT
  Requires-Python: <4.0,>=3.11
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: langchain-anthropic<2.0.0,>=1.0.0
- Requires-Dist: langchain<2.0.0,>=1.0.0
+ Requires-Dist: langchain<2.0.0,>=1.0.2
  Requires-Dist: langchain-core<2.0.0,>=1.0.0
  Requires-Dist: wcmatch
  Provides-Extra: dev
deepagents-0.2.2.dist-info/RECORD ADDED
@@ -0,0 +1,20 @@
+ deepagents/__init__.py,sha256=9BVNn4lfF5N8l2KY8Ttxi82zO609I-fGqoSIF7DAxiU,342
+ deepagents/graph.py,sha256=xMCadqnXFtLj38npagyZIf3JqfB9Q6EJ8qMVmiY2eTw,6094
+ deepagents/backends/__init__.py,sha256=qb2dt2axTQsh8BGqW2EDyJq8GazC8_z87MQYo_VXhBw,457
+ deepagents/backends/composite.py,sha256=kyW0H346s1XIxNkRHRbDF9SSR-zeigeEFO8bizn5iFg,8555
+ deepagents/backends/filesystem.py,sha256=U9Tmf8BDTqKbw-gQjJkJOYG1nXQo9FhxyfzN97W9Z_8,18470
+ deepagents/backends/protocol.py,sha256=fwqJa_Ec6F4BoNYz0bcPHL_fiKksxw2RoyA6x5wr7dc,4181
+ deepagents/backends/state.py,sha256=BxMNm1kDpxtgzIzpuF78h1NuYh9VIpXqnUbbETGe4Y4,6584
+ deepagents/backends/store.py,sha256=VsPSj6ayABPjkKiN6CcvOGm7YCWKuWP_ltJWvFJ1nF0,13358
+ deepagents/backends/utils.py,sha256=vQDMFMjf7pmfKqprpTlF7851FWmswZnMdLj-cezTsBk,14432
+ deepagents/middleware/__init__.py,sha256=x7UHqGcrKlhzORNdChPvnUwa_PIJCbFUHY6zTKVfloI,418
+ deepagents/middleware/agent_memory.py,sha256=BRP8Dyuzl1ms4Eja-3nRHI3g2vNWfK8tUW6zBr2JJOc,9196
+ deepagents/middleware/filesystem.py,sha256=Zwpt6ILniHbNzfLWXrSGLbd__ZFkkO1xv0mGiRsNB7s,28144
+ deepagents/middleware/patch_tool_calls.py,sha256=PdNhxPaQqwnFkhEAZEE2kEzadTNAOO3_iJRA30WqpGE,1981
+ deepagents/middleware/resumable_shell.py,sha256=WbtjW81DItZgFiy5k4cSLAu5NTBHdOk2UEnFecuqlJU,3353
+ deepagents/middleware/subagents.py,sha256=JxXwZvi41pBKKMguKlyVqwjCoydnZboWEgJGkWOCIY8,23503
+ deepagents-0.2.2.dist-info/licenses/LICENSE,sha256=c__BaxUCK69leo2yEKynf8lWndu8iwYwge1CbyqAe-E,1071
+ deepagents-0.2.2.dist-info/METADATA,sha256=qCF7ZAsyD2cW8nqskeDWJ3DZxsp-V4ZF2RiVZ0vHtcY,18660
+ deepagents-0.2.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ deepagents-0.2.2.dist-info/top_level.txt,sha256=drAzchOzPNePwpb3_pbPuvLuayXkN7SNqeIKMBWJoAo,11
+ deepagents-0.2.2.dist-info/RECORD,,
deepagents-0.2.1rc1.dist-info/RECORD DELETED
@@ -1,18 +0,0 @@
- deepagents/__init__.py,sha256=9BVNn4lfF5N8l2KY8Ttxi82zO609I-fGqoSIF7DAxiU,342
- deepagents/graph.py,sha256=6hXBwvQDwbUtiF8Tgwf1jbvRHwJGLEJq89fy-DpVez0,6106
- deepagents/backends/__init__.py,sha256=qb2dt2axTQsh8BGqW2EDyJq8GazC8_z87MQYo_VXhBw,457
- deepagents/backends/composite.py,sha256=kyW0H346s1XIxNkRHRbDF9SSR-zeigeEFO8bizn5iFg,8555
- deepagents/backends/filesystem.py,sha256=U9Tmf8BDTqKbw-gQjJkJOYG1nXQo9FhxyfzN97W9Z_8,18470
- deepagents/backends/protocol.py,sha256=fwqJa_Ec6F4BoNYz0bcPHL_fiKksxw2RoyA6x5wr7dc,4181
- deepagents/backends/state.py,sha256=BxMNm1kDpxtgzIzpuF78h1NuYh9VIpXqnUbbETGe4Y4,6584
- deepagents/backends/store.py,sha256=VsPSj6ayABPjkKiN6CcvOGm7YCWKuWP_ltJWvFJ1nF0,13358
- deepagents/backends/utils.py,sha256=CeGzpz1VaaqUjiodwdfDTfHuUR1OUGBYLgPbpp6pWAw,14119
- deepagents/middleware/__init__.py,sha256=J7372TNGR27OU4C3uuQMryHHpXOBjFV_4aEZ_AoQ6n0,284
- deepagents/middleware/filesystem.py,sha256=IggpI5ENGy5ykB4HCtTq0l5mM618eUDaFiLYArTUi94,26897
- deepagents/middleware/patch_tool_calls.py,sha256=Cu8rUpt1GjrYgfMvZG6wOowvnmFeYTCauOJhlltNPmo,2045
- deepagents/middleware/subagents.py,sha256=JxXwZvi41pBKKMguKlyVqwjCoydnZboWEgJGkWOCIY8,23503
- deepagents-0.2.1rc1.dist-info/licenses/LICENSE,sha256=c__BaxUCK69leo2yEKynf8lWndu8iwYwge1CbyqAe-E,1071
- deepagents-0.2.1rc1.dist-info/METADATA,sha256=pyeoVgRcusjjJZq8uuZ50nLMt8yl1q1Vfl59kTmnz2s,18663
- deepagents-0.2.1rc1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- deepagents-0.2.1rc1.dist-info/top_level.txt,sha256=drAzchOzPNePwpb3_pbPuvLuayXkN7SNqeIKMBWJoAo,11
- deepagents-0.2.1rc1.dist-info/RECORD,,