deepagents 0.3.3__tar.gz → 0.3.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {deepagents-0.3.3 → deepagents-0.3.5}/PKG-INFO +5 -5
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/graph.py +34 -34
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/middleware/filesystem.py +76 -20
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/middleware/memory.py +70 -42
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents.egg-info/PKG-INFO +5 -5
- deepagents-0.3.5/deepagents.egg-info/requires.txt +5 -0
- deepagents-0.3.5/pyproject.toml +135 -0
- deepagents-0.3.3/deepagents.egg-info/requires.txt +0 -5
- deepagents-0.3.3/pyproject.toml +0 -100
- {deepagents-0.3.3 → deepagents-0.3.5}/README.md +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/__init__.py +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/backends/__init__.py +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/backends/composite.py +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/backends/filesystem.py +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/backends/protocol.py +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/backends/sandbox.py +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/backends/state.py +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/backends/store.py +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/backends/utils.py +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/middleware/__init__.py +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/middleware/patch_tool_calls.py +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/middleware/skills.py +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents/middleware/subagents.py +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents.egg-info/SOURCES.txt +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents.egg-info/dependency_links.txt +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/deepagents.egg-info/top_level.txt +0 -0
- {deepagents-0.3.3 → deepagents-0.3.5}/setup.cfg +0 -0
{deepagents-0.3.3 → deepagents-0.3.5}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: deepagents
-Version: 0.3.3
+Version: 0.3.5
 Summary: General purpose 'deep agent' with sub-agent spawning, todo list capabilities, and mock file system. Built on LangGraph.
 License: MIT
 Project-URL: Homepage, https://docs.langchain.com/oss/python/deepagents/overview
@@ -11,10 +11,10 @@ Project-URL: Slack, https://www.langchain.com/join-community
 Project-URL: Reddit, https://www.reddit.com/r/LangChain/
 Requires-Python: <4.0,>=3.11
 Description-Content-Type: text/markdown
-Requires-Dist: langchain-anthropic<2.0.0,>=1.2.0
-Requires-Dist: langchain-google-genai
-Requires-Dist: langchain<2.0.0,>=1.1.0
-Requires-Dist: langchain-core<2.0.0,>=1.2.5
+Requires-Dist: langchain-core<2.0.0,>=1.2.6
+Requires-Dist: langchain<2.0.0,>=1.2.3
+Requires-Dist: langchain-anthropic<2.0.0,>=1.3.1
+Requires-Dist: langchain-google-genai<5.0.0,>=4.1.3
 Requires-Dist: wcmatch
 
 # 🧠🤖Deep Agents
{deepagents-0.3.3 → deepagents-0.3.5}/deepagents/graph.py

@@ -33,7 +33,7 @@ def get_default_model() -> ChatAnthropic:
     """Get the default model for deep agents.
 
     Returns:
-        ChatAnthropic instance configured with Claude Sonnet 4.
+        `ChatAnthropic` instance configured with Claude Sonnet 4.5.
     """
     return ChatAnthropic(
         model_name="claude-sonnet-4-5-20250929",
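For reference, this is what the updated default-model factory constructs. A minimal sketch; only the `model_name` value is taken from the hunk above, while the import path and the reliance on an `ANTHROPIC_API_KEY` environment variable are assumptions:

    # Hedged sketch of the default model named in the hunk above (assumes ANTHROPIC_API_KEY is set).
    from langchain_anthropic import ChatAnthropic

    model = ChatAnthropic(model_name="claude-sonnet-4-5-20250929")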
@@ -62,51 +62,51 @@ def create_deep_agent(
 ) -> CompiledStateGraph:
     """Create a deep agent.
 
-    This agent will by default have access to a tool to write todos (write_todos),
-    seven file and execution tools: ls
+    This agent will by default have access to a tool to write todos (`write_todos`),
+    seven file and execution tools: `ls`, `read_file`, `write_file`, `edit_file`, `glob`, `grep`, `execute`,
     and a tool to call subagents.
 
-    The execute tool allows running shell commands if the backend implements SandboxBackendProtocol
-    For non-sandbox backends, the execute tool will return an error message.
+    The `execute` tool allows running shell commands if the backend implements `SandboxBackendProtocol`.
+    For non-sandbox backends, the `execute` tool will return an error message.
 
     Args:
-        model: The model to use. Defaults to
+        model: The model to use. Defaults to `claude-sonnet-4-5-20250929`.
         tools: The tools the agent should have access to.
         system_prompt: The additional instructions the agent should have. Will go in
             the system prompt.
         middleware: Additional middleware to apply after standard middleware.
-        subagents: The subagents to use.
-
-
-
-
-
-
-
-
-
-        skills: Optional list of skill source paths (e.g., ["/skills/user/", "/skills/project/"]).
+        subagents: The subagents to use.
+
+            Each subagent should be a `dict` with the following keys:
+
+            - `name`
+            - `description` (used by the main agent to decide whether to call the sub agent)
+            - `prompt` (used as the system prompt in the subagent)
+            - (optional) `tools`
+            - (optional) `model` (either a `LanguageModelLike` instance or `dict` settings)
+            - (optional) `middleware` (list of `AgentMiddleware`)
+        skills: Optional list of skill source paths (e.g., `["/skills/user/", "/skills/project/"]`).
+
         Paths must be specified using POSIX conventions (forward slashes) and are relative
-        to the backend's root. When using StateBackend (default), provide skill files via
-        `invoke(files={...})`. With FilesystemBackend
-        to the backend's root_dir
+            to the backend's root. When using `StateBackend` (default), provide skill files via
+            `invoke(files={...})`. With `FilesystemBackend`, skills are loaded from disk relative
+            to the backend's `root_dir`. Later sources override earlier ones for skills with the
         same name (last one wins).
-        memory: Optional list of memory file paths (AGENTS.md files) to load
-        (e.g., ["/memory/AGENTS.md"]). Display names
-
-        added into the system prompt.
+        memory: Optional list of memory file paths (`AGENTS.md` files) to load
+            (e.g., `["/memory/AGENTS.md"]`). Display names are automatically derived from paths.
+            Memory is loaded at agent startup and added into the system prompt.
         response_format: A structured output response format to use for the agent.
         context_schema: The schema of the deep agent.
-        checkpointer: Optional
-        store: Optional store for persistent storage (required if backend uses StoreBackend).
-        backend: Optional backend for file storage and execution.
-
-
-
-
-        debug: Whether to enable debug mode. Passed through to create_agent
-        name: The name of the agent. Passed through to create_agent
-        cache: The cache to use for the agent. Passed through to create_agent
+        checkpointer: Optional `Checkpointer` for persisting agent state between runs.
+        store: Optional store for persistent storage (required if backend uses `StoreBackend`).
+        backend: Optional backend for file storage and execution.
+
+            Pass either a `Backend` instance or a callable factory like `lambda rt: StateBackend(rt)`.
+            For execution support, use a backend that implements `SandboxBackendProtocol`.
+        interrupt_on: Mapping of tool names to interrupt configs.
+        debug: Whether to enable debug mode. Passed through to `create_agent`.
+        name: The name of the agent. Passed through to `create_agent`.
+        cache: The cache to use for the agent. Passed through to `create_agent`.
 
     Returns:
         A configured deep agent.
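To make the revised docstring concrete, here is a hedged usage sketch. The keyword arguments and the subagent dictionary keys come from the docstring above; the `from deepagents import create_deep_agent` import path and the LangGraph-style `invoke` payload are assumptions rather than part of this diff:

    from deepagents import create_deep_agent  # import path assumed from the package layout

    # Subagent dict keys per the docstring: name, description, prompt (+ optional tools/model/middleware).
    researcher = {
        "name": "researcher",
        "description": "Handles open-ended research questions delegated by the main agent.",
        "prompt": "You are a focused research assistant. Answer concisely.",
    }

    agent = create_deep_agent(
        system_prompt="You are a helpful deep agent.",
        subagents=[researcher],
    )

    # Typical LangGraph-style invocation (assumed payload shape).
    result = agent.invoke({"messages": [{"role": "user", "content": "Summarize what changed in 0.3.5."}]})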
{deepagents-0.3.3 → deepagents-0.3.5}/deepagents/middleware/filesystem.py

@@ -979,30 +979,91 @@ class FilesystemMiddleware(AgentMiddleware):
         message: ToolMessage,
         resolved_backend: BackendProtocol,
     ) -> tuple[ToolMessage, dict[str, FileData] | None]:
-        content
-
+        """Process a large ToolMessage by evicting its content to filesystem.
+
+        Args:
+            message: The ToolMessage with large content to evict.
+            resolved_backend: The filesystem backend to write the content to.
+
+        Returns:
+            A tuple of (processed_message, files_update):
+            - processed_message: New ToolMessage with truncated content and file reference
+            - files_update: Dict of file updates to apply to state, or None if eviction failed
+
+        Note:
+            The entire content is converted to string, written to /large_tool_results/{tool_call_id},
+            and replaced with a truncated preview plus file reference. The replacement is always
+            returned as a plain string for consistency, regardless of original content type.
+
+            ToolMessage supports multimodal content blocks (images, audio, etc.), but these are
+            uncommon in tool results. For simplicity, all content is stringified and evicted.
+            The model can recover by reading the offloaded file from the backend.
+        """
+        # Early exit if eviction not configured
+        if not self.tool_token_limit_before_evict:
             return message, None
 
+        # Convert content to string once for both size check and eviction
+        # Special case: single text block - extract text directly for readability
+        if (
+            isinstance(message.content, list)
+            and len(message.content) == 1
+            and isinstance(message.content[0], dict)
+            and message.content[0].get("type") == "text"
+            and "text" in message.content[0]
+        ):
+            content_str = str(message.content[0]["text"])
+        elif isinstance(message.content, str):
+            content_str = message.content
+        else:
+            # Multiple blocks or non-text content - stringify entire structure
+            content_str = str(message.content)
+
+        # Check if content exceeds eviction threshold
+        # Using 4 chars per token as a conservative approximation (actual ratio varies by content)
+        # This errs on the high side to avoid premature eviction of content that might fit
+        if len(content_str) <= 4 * self.tool_token_limit_before_evict:
+            return message, None
+
+        # Write content to filesystem
         sanitized_id = sanitize_tool_call_id(message.tool_call_id)
         file_path = f"/large_tool_results/{sanitized_id}"
-        result = resolved_backend.write(file_path,
+        result = resolved_backend.write(file_path, content_str)
         if result.error:
             return message, None
-
+
+        # Create truncated preview for the replacement message
+        content_sample = format_content_with_line_numbers([line[:1000] for line in content_str.splitlines()[:10]], start_line=1)
+        replacement_text = TOO_LARGE_TOOL_MSG.format(
+            tool_call_id=message.tool_call_id,
+            file_path=file_path,
+            content_sample=content_sample,
+        )
+
+        # Always return as plain string after eviction
         processed_message = ToolMessage(
-
-                tool_call_id=message.tool_call_id,
-                file_path=file_path,
-                content_sample=content_sample,
-            ),
+            content=replacement_text,
             tool_call_id=message.tool_call_id,
         )
         return processed_message, result.files_update
 
     def _intercept_large_tool_result(self, tool_result: ToolMessage | Command, runtime: ToolRuntime) -> ToolMessage | Command:
-
-
-
+        """Intercept and process large tool results before they're added to state.
+
+        Args:
+            tool_result: The tool result to potentially evict (ToolMessage or Command).
+            runtime: The tool runtime providing access to the filesystem backend.
+
+        Returns:
+            Either the original result (if small enough) or a Command with evicted
+            content written to filesystem and truncated message.
+
+        Note:
+            Handles both single ToolMessage results and Command objects containing
+            multiple messages. Large content is automatically offloaded to filesystem
+            to prevent context window overflow.
+        """
+        if isinstance(tool_result, ToolMessage):
             resolved_backend = self._get_backend(runtime)
             processed_message, files_update = self._process_large_message(
                 tool_result,
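The eviction logic added above hinges on a simple character-count heuristic: a tool result stays inline unless it exceeds roughly four characters per allowed token, in which case it is written to `/large_tool_results/{tool_call_id}` and replaced by a truncated preview. A standalone sketch of just the threshold check (the function name and the example limit are hypothetical):

    # Hypothetical standalone sketch of the threshold used by _process_large_message.
    def should_evict(content_str: str, tool_token_limit_before_evict: int | None) -> bool:
        if not tool_token_limit_before_evict:
            return False  # eviction not configured
        # ~4 characters per token, erring high so borderline content stays inline
        return len(content_str) > 4 * tool_token_limit_before_evict

    assert should_evict("x" * 8001, 2000)       # 8001 chars > 4 * 2000 tokens -> offload
    assert not should_evict("x" * 8000, 2000)   # exactly at the limit -> keep inline
    assert not should_evict("x" * 8001, None)   # limit unset -> never evict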
@@ -1028,14 +1089,10 @@ class FilesystemMiddleware(AgentMiddleware):
             resolved_backend = self._get_backend(runtime)
             processed_messages = []
             for message in command_messages:
-                if not (
-                    self.tool_token_limit_before_evict
-                    and isinstance(message, ToolMessage)
-                    and isinstance(message.content, str)
-                    and len(message.content) > 4 * self.tool_token_limit_before_evict
-                ):
+                if not isinstance(message, ToolMessage):
                     processed_messages.append(message)
                     continue
+
                 processed_message, files_update = self._process_large_message(
                     message,
                     resolved_backend,
@@ -1044,8 +1101,7 @@ class FilesystemMiddleware(AgentMiddleware):
                 if files_update is not None:
                     accumulated_file_updates.update(files_update)
             return Command(update={**update, "messages": processed_messages, "files": accumulated_file_updates})
-
-        return tool_result
+        raise AssertionError(f"Unreachable code reached in _intercept_large_tool_result: for tool_result of type {type(tool_result)}")
 
     def wrap_tool_call(
         self,
{deepagents-0.3.3 → deepagents-0.3.5}/deepagents/middleware/memory.py

@@ -89,21 +89,65 @@ class MemoryStateUpdate(TypedDict):
     memory_contents: dict[str, str]
 
 
-
-
-
-
-
-
-
-
-
-
-
--
--
--
--
+MEMORY_SYSTEM_PROMPT = """<agent_memory>
+{agent_memory}
+</agent_memory>
+
+<memory_guidelines>
+The above <agent_memory> was loaded in from files in your filesystem. As you learn from your interactions with the user, you can save new knowledge by calling the `edit_file` tool.
+
+**Learning from feedback:**
+- One of your MAIN PRIORITIES is to learn from your interactions with the user. These learnings can be implicit or explicit. This means that in the future, you will remember this important information.
+- When you need to remember something, updating memory must be your FIRST, IMMEDIATE action - before responding to the user, before calling other tools, before doing anything else. Just update memory immediately.
+- When user says something is better/worse, capture WHY and encode it as a pattern.
+- Each correction is a chance to improve permanently - don't just fix the immediate issue, update your instructions.
+- A great opportunity to update your memories is when the user interrupts a tool call and provides feedback. You should update your memories immediately before revising the tool call.
+- Look for the underlying principle behind corrections, not just the specific mistake.
+- The user might not explicitly ask you to remember something, but if they provide information that is useful for future use, you should update your memories immediately.
+
+**Asking for information:**
+- If you lack context to perform an action (e.g. send a Slack DM, requires a user ID/email) you should explicitly ask the user for this information.
+- It is preferred for you to ask for information, don't assume anything that you do not know!
+- When the user provides information that is useful for future use, you should update your memories immediately.
+
+**When to update memories:**
+- When the user explicitly asks you to remember something (e.g., "remember my email", "save this preference")
+- When the user describes your role or how you should behave (e.g., "you are a web researcher", "always do X")
+- When the user gives feedback on your work - capture what was wrong and how to improve
+- When the user provides information required for tool use (e.g., slack channel ID, email addresses)
+- When the user provides context useful for future tasks, such as how to use tools, or which actions to take in a particular situation
+- When you discover new patterns or preferences (coding styles, conventions, workflows)
+
+**When to NOT update memories:**
+- When the information is temporary or transient (e.g., "I'm running late", "I'm on my phone right now")
+- When the information is a one-time task request (e.g., "Find me a recipe", "What's 25 * 4?")
+- When the information is a simple question that doesn't reveal lasting preferences (e.g., "What day is it?", "Can you explain X?")
+- When the information is an acknowledgment or small talk (e.g., "Sounds good!", "Hello", "Thanks for that")
+- When the information is stale or irrelevant in future conversations
+- Never store API keys, access tokens, passwords, or any other credentials in any file, memory, or system prompt.
+- If the user asks where to put API keys or provides an API key, do NOT echo or save it.
+
+**Examples:**
+Example 1 (remembering user information):
+User: Can you connect to my google account?
+Agent: Sure, I'll connect to your google account, what's your google account email?
+User: john@example.com
+Agent: Let me save this to my memory.
+Tool Call: edit_file(...) -> remembers that the user's google account email is john@example.com
+
+Example 2 (remembering implicit user preferences):
+User: Can you write me an example for creating a deep agent in LangChain?
+Agent: Sure, I'll write you an example for creating a deep agent in LangChain <example code in Python>
+User: Can you do this in JavaScript
+Agent: Let me save this to my memory.
+Tool Call: edit_file(...) -> remembers that the user prefers to get LangChaincode examples in JavaScript
+Agent: Sure, here is the JavaScript example<example code in JavaScript>
+
+Example 3 (do not remember transient information):
+User: I'm going to play basketball tonight so I will be offline for a few hours.
+Agent: Okay I'll add a black to your calendar.
+Tool Call: create_calendar_event(...) -> just calls a tool, does not commit anything to memory, as it is transient information
+</memory_guidelines>
 """
 
 
@@ -137,7 +181,6 @@ class MemoryMiddleware(AgentMiddleware):
         """
         self._backend = backend
         self.sources = sources
-        self.system_prompt_template = MEMORY_SYSTEM_PROMPT
 
     def _get_backend(self, state: MemoryState, runtime: Runtime, config: RunnableConfig) -> BackendProtocol:
         """Resolve backend from instance or factory.
@@ -163,37 +206,28 @@ class MemoryMiddleware(AgentMiddleware):
             return self._backend(tool_runtime)
         return self._backend
 
-    def
-        """Format memory
-        if not self.sources:
-            return "**Memory Sources:** None configured"
-
-        lines = ["**Memory Sources:**"]
-        for path in self.sources:
-            lines.append(f"- `{path}`")
-        return "\n".join(lines)
-
-    def _format_memory_contents(self, contents: dict[str, str]) -> str:
-        """Format loaded memory contents for injection into prompt.
+    def _format_agent_memory(self, contents: dict[str, str]) -> str:
+        """Format memory with locations and contents paired together.
 
         Args:
             contents: Dict mapping source paths to content.
 
         Returns:
-            Formatted string with
+            Formatted string with location+content pairs wrapped in <agent_memory> tags.
         """
         if not contents:
-            return "(No memory loaded)"
+            return MEMORY_SYSTEM_PROMPT.format(agent_memory="(No memory loaded)")
 
         sections = []
         for path in self.sources:
             if contents.get(path):
-                sections.append(contents[path])
+                sections.append(f"{path}\n{contents[path]}")
 
         if not sections:
-            return "(No memory loaded)"
+            return MEMORY_SYSTEM_PROMPT.format(agent_memory="(No memory loaded)")
 
-
+        memory_body = "\n\n".join(sections)
+        return MEMORY_SYSTEM_PROMPT.format(agent_memory=memory_body)
 
     async def _load_memory_from_backend(
         self,
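A self-contained sketch of the formatting behavior that the new `_format_agent_memory` implements: each configured source path is paired with its loaded content, the pairs are joined with blank lines, and the result is substituted into the `<agent_memory>` template. The abbreviated `TEMPLATE` below stands in for the full `MEMORY_SYSTEM_PROMPT`, and the standalone function signature is hypothetical:

    # Abbreviated stand-in for MEMORY_SYSTEM_PROMPT (the real template also carries the guidelines).
    TEMPLATE = "<agent_memory>\n{agent_memory}\n</agent_memory>"

    def format_agent_memory(sources: list[str], contents: dict[str, str]) -> str:
        # Pair each source path with its content, skipping missing or empty entries.
        sections = [f"{path}\n{contents[path]}" for path in sources if contents.get(path)]
        if not sections:
            return TEMPLATE.format(agent_memory="(No memory loaded)")
        return TEMPLATE.format(agent_memory="\n\n".join(sections))

    print(format_agent_memory(["/memory/AGENTS.md"], {"/memory/AGENTS.md": "User prefers JavaScript examples."}))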
@@ -329,18 +363,12 @@ class MemoryMiddleware(AgentMiddleware):
             Modified request with memory injected into system prompt.
         """
         contents = request.state.get("memory_contents", {})
-
-        memory_contents = self._format_memory_contents(contents)
-
-        memory_section = self.system_prompt_template.format(
-            memory_locations=memory_locations,
-            memory_contents=memory_contents,
-        )
+        agent_memory = self._format_agent_memory(contents)
 
         if request.system_prompt:
-            system_prompt =
+            system_prompt = agent_memory + "\n\n" + request.system_prompt
         else:
-            system_prompt =
+            system_prompt = agent_memory
 
         return request.override(system_message=SystemMessage(system_prompt))
 
{deepagents-0.3.3 → deepagents-0.3.5}/deepagents.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: deepagents
-Version: 0.3.3
+Version: 0.3.5
 Summary: General purpose 'deep agent' with sub-agent spawning, todo list capabilities, and mock file system. Built on LangGraph.
 License: MIT
 Project-URL: Homepage, https://docs.langchain.com/oss/python/deepagents/overview
@@ -11,10 +11,10 @@ Project-URL: Slack, https://www.langchain.com/join-community
 Project-URL: Reddit, https://www.reddit.com/r/LangChain/
 Requires-Python: <4.0,>=3.11
 Description-Content-Type: text/markdown
-Requires-Dist: langchain-anthropic<2.0.0,>=1.2.0
-Requires-Dist: langchain-google-genai
-Requires-Dist: langchain<2.0.0,>=1.1.0
-Requires-Dist: langchain-core<2.0.0,>=1.2.5
+Requires-Dist: langchain-core<2.0.0,>=1.2.6
+Requires-Dist: langchain<2.0.0,>=1.2.3
+Requires-Dist: langchain-anthropic<2.0.0,>=1.3.1
+Requires-Dist: langchain-google-genai<5.0.0,>=4.1.3
 Requires-Dist: wcmatch
 
 # 🧠🤖Deep Agents
deepagents-0.3.5/pyproject.toml ADDED

@@ -0,0 +1,135 @@
+[project]
+name = "deepagents"
+version = "0.3.5"
+description = "General purpose 'deep agent' with sub-agent spawning, todo list capabilities, and mock file system. Built on LangGraph."
+readme = "README.md"
+license = { text = "MIT" }
+requires-python = ">=3.11,<4.0"
+dependencies = [
+    "langchain-core>=1.2.6,<2.0.0",
+    "langchain>=1.2.3,<2.0.0",
+    "langchain-anthropic>=1.3.1,<2.0.0",
+    "langchain-google-genai>=4.1.3,<5.0.0",
+    "wcmatch",
+]
+
+
+[project.urls]
+Homepage = "https://docs.langchain.com/oss/python/deepagents/overview"
+Documentation = "https://reference.langchain.com/python/deepagents/"
+Source = "https://github.com/langchain-ai/deepagents"
+Twitter = "https://x.com/LangChainAI"
+Slack = "https://www.langchain.com/join-community"
+Reddit = "https://www.reddit.com/r/LangChain/"
+
+
+[dependency-groups]
+test = [
+    "pytest",
+    "pytest-cov",
+    "pytest-xdist",
+    "ruff>=0.12.2,<0.13.0",
+    "mypy>=1.18.1,<1.19.0",
+    "pytest-asyncio>=1.3.0",
+]
+
+dev = [
+    "langchain-openai",
+    "twine",
+    "build",
+]
+
+[build-system]
+requires = ["setuptools>=73.0.0", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[tool.setuptools.package-data]
+"*" = ["py.typed", "*.md"]
+
+[tool.ruff]
+line-length = 150
+# Exclude any files that shouldn't be linted
+exclude = []
+
+[tool.ruff.format]
+docstring-code-format = true # Formats code blocks in docstrings
+
+[tool.ruff.lint]
+select = [
+    "ALL" # Enable all rules by default
+]
+ignore = [
+    "COM812", # Messes with the formatter
+    "ISC001", # Messes with the formatter
+    "PERF203", # Rarely useful
+    "SLF001", # Private member access
+    "PLC0415", # Imports should be at the top. Not always desirable
+    "PLR0913", # Too many arguments in function definition
+    "PLC0414", # Inconsistent with how type checkers expect to be notified of intentional re-exports
+    "C901", # Too complex
+]
+unfixable = ["B028"] # Rules that shouldn't be auto-fixed
+
+[tool.ruff.lint.pyupgrade]
+keep-runtime-typing = true
+
+[tool.ruff.lint.flake8-annotations]
+allow-star-arg-any = true
+
+[tool.ruff.lint.pydocstyle]
+convention = "google" # Google-style docstrings
+ignore-var-parameters = true
+
+[tool.ruff.lint.per-file-ignores]
+"tests/*" = [
+    "D1", # Skip documentation rules in tests
+    "S101", # Allow asserts in tests
+    "S311", # Allow pseudo-random generators in tests
+    # Add more test-specific ignores
+]
+
+"deepagents/backends/composite.py" = ["B007", "BLE001", "D102", "EM101", "FBT001", "FBT002", "PLW2901", "S110"]
+"deepagents/backends/filesystem.py" = ["BLE001", "D102", "D205", "D417", "DTZ006", "EM101", "EM102", "FBT001", "FBT002", "PLR0912", "S112", "TRY003"]
+"deepagents/backends/protocol.py" = ["B024", "B027", "FBT001", "FBT002"]
+"deepagents/backends/sandbox.py" = ["FBT001", "FBT002", "PLR2004"]
+"deepagents/backends/state.py" = ["ANN204", "D102", "D205", "EM101", "FBT001", "FBT002", "PERF401"]
+"deepagents/backends/store.py" = ["A002", "ANN204", "BLE001", "D102", "D205", "F821", "FBT001", "FBT002", "PERF401"]
+"deepagents/backends/utils.py" = ["D301", "E501", "EM101", "FBT001", "RET504", "RUF005", "TRY003"]
+"deepagents/middleware/filesystem.py" = ["EM102", "TRY003"]
+"deepagents/middleware/memory.py" = ["E501", "EM102", "G004", "PERF401", "SIM108", "T201", "TC002", "TC003", "TRY003"]
+"deepagents/middleware/skills.py" = ["EM101", "SIM108", "TC002", "TC003", "TRY003"]
+"tests/integration_tests/test_deepagents.py" = ["ANN201", "C419", "E731", "PLR2004", "SIM118", "TID252"]
+"tests/integration_tests/test_filesystem_middleware.py" = ["ANN001", "ANN201", "ANN202", "ARG002", "E731", "PLR2004", "SIM118", "T201", "TID252"]
+"tests/integration_tests/test_hitl.py" = ["ANN201", "C419", "E501", "PLR2004", "TID252"]
+"tests/integration_tests/test_subagent_middleware.py" = ["ANN001", "ANN201", "F841", "RUF012", "SIM118"]
+"tests/unit_tests/backends/test_composite_backend.py" = ["ANN001", "ANN201", "ANN202", "ARG001", "ARG002", "F841", "INP001", "PLR2004", "PT018"]
+"tests/unit_tests/backends/test_composite_backend_async.py" = ["ANN001", "ANN201", "ANN202", "ARG001", "ARG002", "F841", "INP001", "PLR2004", "PT018"]
+"tests/unit_tests/backends/test_filesystem_backend.py" = ["ANN201", "ARG005", "B007", "B011", "INP001", "PLR2004", "PT015", "PT018"]
+"tests/unit_tests/backends/test_filesystem_backend_async.py" = ["ANN201", "ARG005", "B007", "INP001", "PLR2004", "PT011", "PT018"]
+"tests/unit_tests/backends/test_state_backend.py" = ["ANN001", "ANN201", "INP001", "PLR2004", "PT018"]
+"tests/unit_tests/backends/test_state_backend_async.py" = ["ANN001", "ANN201", "INP001", "PLR2004", "PT018"]
+"tests/unit_tests/backends/test_store_backend.py" = ["ANN201", "INP001", "PLR2004", "PT018"]
+"tests/unit_tests/backends/test_store_backend_async.py" = ["ANN201", "INP001", "PLR2004", "PT018"]
+"tests/unit_tests/chat_model.py" = ["ARG002", "D301", "PLR0912", "RUF012"]
+"tests/unit_tests/middleware/test_memory_middleware.py" = ["F841", "PGH003", "PLR2004", "RUF001", "TC002"]
+"tests/unit_tests/middleware/test_memory_middleware_async.py" = ["F841", "PGH003", "PLR2004", "RUF001"]
+"tests/unit_tests/middleware/test_skills_middleware.py" = ["F841", "PGH003", "PLR2004", "TC002"]
+"tests/unit_tests/middleware/test_skills_middleware_async.py" = ["F841", "PGH003", "PLR2004"]
+"tests/unit_tests/middleware/test_validate_path.py" = ["ANN201"]
+"tests/unit_tests/test_end_to_end.py" = ["ARG002", "PLR2004"]
+"tests/unit_tests/test_middleware.py" = ["ANN001", "ANN201", "ANN202", "ARG002", "E731", "PLR2004", "SIM118", "T201"]
+"tests/unit_tests/test_middleware_async.py" = ["ANN001", "ANN201", "ANN202", "ARG002"]
+"tests/unit_tests/test_subagents.py" = ["PLR2004"]
+"tests/unit_tests/test_todo_middleware.py" = ["E501", "PLR2004"]
+"tests/utils.py" = ["ANN001", "ANN201", "RUF012", "SIM118"]
+
+[tool.mypy]
+strict = true
+ignore_missing_imports = true
+enable_error_code = ["deprecated"]
+# Optional: reduce strictness if needed
+disallow_any_generics = false
+warn_return_any = false
+
+[tool.pytest.ini_options]
+asyncio_mode = "auto"
deepagents-0.3.3/pyproject.toml DELETED

@@ -1,100 +0,0 @@
-[project]
-name = "deepagents"
-version = "0.3.3"
-description = "General purpose 'deep agent' with sub-agent spawning, todo list capabilities, and mock file system. Built on LangGraph."
-readme = "README.md"
-license = { text = "MIT" }
-requires-python = ">=3.11,<4.0"
-dependencies = [
-    "langchain-anthropic>=1.2.0,<2.0.0",
-    "langchain-google-genai",
-    "langchain>=1.1.0,<2.0.0",
-    "langchain-core>=1.2.5,<2.0.0",
-    "wcmatch",
-]
-
-
-[project.urls]
-Homepage = "https://docs.langchain.com/oss/python/deepagents/overview"
-Documentation = "https://reference.langchain.com/python/deepagents/"
-Source = "https://github.com/langchain-ai/deepagents"
-Twitter = "https://x.com/LangChainAI"
-Slack = "https://www.langchain.com/join-community"
-Reddit = "https://www.reddit.com/r/LangChain/"
-
-
-[dependency-groups]
-test = [
-    "pytest",
-    "pytest-cov",
-    "pytest-xdist",
-    "ruff>=0.12.2,<0.13.0",
-    "mypy>=1.18.1,<1.19.0",
-    "pytest-asyncio>=1.3.0",
-]
-
-dev = [
-    "langchain-openai",
-    "twine",
-    "build",
-]
-
-[build-system]
-requires = ["setuptools>=73.0.0", "wheel"]
-build-backend = "setuptools.build_meta"
-
-[tool.setuptools.package-data]
-"*" = ["py.typed", "*.md"]
-
-[tool.ruff]
-line-length = 150
-# Exclude any files that shouldn't be linted
-exclude = []
-
-[tool.ruff.format]
-docstring-code-format = true # Formats code blocks in docstrings
-
-[tool.ruff.lint]
-select = [
-    "ALL" # Enable all rules by default
-]
-ignore = [
-    "COM812", # Messes with the formatter
-    "ISC001", # Messes with the formatter
-    "PERF203", # Rarely useful
-    "SLF001", # Private member access
-    "PLC0415", # Imports should be at the top. Not always desirable
-    "PLR0913", # Too many arguments in function definition
-    "PLC0414", # Inconsistent with how type checkers expect to be notified of intentional re-exports
-    "C901", # Too complex
-]
-unfixable = ["B028"] # Rules that shouldn't be auto-fixed
-
-[tool.ruff.lint.pyupgrade]
-keep-runtime-typing = true
-
-[tool.ruff.lint.flake8-annotations]
-allow-star-arg-any = true
-
-[tool.ruff.lint.pydocstyle]
-convention = "google" # Google-style docstrings
-ignore-var-parameters = true
-
-[tool.ruff.lint.per-file-ignores]
-"tests/*" = [
-    "D1", # Skip documentation rules in tests
-    "S101", # Allow asserts in tests
-    "S311", # Allow pseudo-random generators in tests
-    # Add more test-specific ignores
-]
-
-[tool.mypy]
-strict = true
-ignore_missing_imports = true
-enable_error_code = ["deprecated"]
-# Optional: reduce strictness if needed
-disallow_any_generics = false
-warn_return_any = false
-
-[tool.pytest.ini_options]
-asyncio_mode = "auto"