deepagents-cli 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of deepagents-cli might be problematic. Click here for more details.

@@ -0,0 +1,44 @@
1
+ """Middleware to patch dangling tool calls in the messages history."""
2
+
3
+ from typing import Any
4
+
5
+ from langchain.agents.middleware import AgentMiddleware, AgentState
6
+ from langchain_core.messages import RemoveMessage, ToolMessage
7
+ from langgraph.graph.message import REMOVE_ALL_MESSAGES
8
+ from langgraph.runtime import Runtime
9
+
10
+
11
class PatchToolCallsMiddleware(AgentMiddleware):
    """Middleware to patch dangling tool calls in the messages history."""

    def before_agent(self, state: AgentState, runtime: Runtime[Any]) -> dict[str, Any] | None:  # noqa: ARG002
        """Before the agent runs, handle dangling tool calls from any AIMessage.

        A "dangling" tool call is one issued by an AIMessage that never
        received a matching ToolMessage (e.g. the run was interrupted). For
        each such call a synthetic cancellation ToolMessage is inserted
        directly after the AIMessage that issued it, so the model provider
        never sees an unanswered tool call.

        Returns:
            A state update replacing the whole history with the patched
            messages, or ``None`` when the history is empty or nothing
            needed patching.
        """
        messages = state["messages"]
        if not messages:
            return None

        # Collect every answered tool_call_id up front so each dangling-call
        # check is O(1) instead of rescanning the tail of the history.
        answered_ids = {m.tool_call_id for m in messages if m.type == "tool"}

        patched_messages = []
        patched_any = False
        for msg in messages:
            patched_messages.append(msg)
            if msg.type != "ai" or not msg.tool_calls:
                continue
            for tool_call in msg.tool_calls:
                if tool_call["id"] in answered_ids:
                    continue
                # Dangling tool call: synthesize the missing ToolMessage.
                patched_any = True
                tool_msg = (
                    f"Tool call {tool_call['name']} with id {tool_call['id']} was "
                    "cancelled - another message came in before it could be completed."
                )
                patched_messages.append(
                    ToolMessage(
                        content=tool_msg,
                        name=tool_call["name"],
                        tool_call_id=tool_call["id"],
                    )
                )

        if not patched_any:
            # Nothing dangling: avoid rewriting the entire history for no reason.
            return None

        return {"messages": [RemoveMessage(id=REMOVE_ALL_MESSAGES), *patched_messages]}
@@ -14,11 +14,6 @@ from langchain_core.runnables import Runnable
14
14
  from langchain_core.tools import StructuredTool
15
15
  from langgraph.types import Command
16
16
 
17
- try:
18
- from langchain_anthropic.middleware.prompt_caching import AnthropicPromptCachingMiddleware
19
- except ImportError:
20
- AnthropicPromptCachingMiddleware = None
21
-
22
17
 
23
18
  class SubAgent(TypedDict):
24
19
  """Specification for an agent.
@@ -267,7 +262,7 @@ def _get_subagents(
267
262
 
268
263
  subagent_model = agent_.get("model", default_model)
269
264
 
270
- _middleware = [*default_subagent_middleware, *agent_["middleware"]] if "middleware" in agent_ else default_subagent_middleware
265
+ _middleware = [*default_subagent_middleware, *agent_["middleware"]] if "middleware" in agent_ else [*default_subagent_middleware]
271
266
 
272
267
  interrupt_on = agent_.get("interrupt_on", default_interrupt_on)
273
268
  if interrupt_on:
@@ -353,6 +348,9 @@ def _create_task_tool(
353
348
  ) -> str | Command:
354
349
  subagent, subagent_state = _validate_and_prepare_state(subagent_type, description, runtime)
355
350
  result = subagent.invoke(subagent_state)
351
+ if not runtime.tool_call_id:
352
+ value_error_msg = "Tool call ID is required for subagent invocation"
353
+ raise ValueError(value_error_msg)
356
354
  return _return_command_with_state_update(result, runtime.tool_call_id)
357
355
 
358
356
  async def atask(
@@ -362,6 +360,9 @@ def _create_task_tool(
362
360
  ) -> str | Command:
363
361
  subagent, subagent_state = _validate_and_prepare_state(subagent_type, description, runtime)
364
362
  result = await subagent.ainvoke(subagent_state)
363
+ if not runtime.tool_call_id:
364
+ value_error_msg = "Tool call ID is required for subagent invocation"
365
+ raise ValueError(value_error_msg)
365
366
  return _return_command_with_state_update(result, runtime.tool_call_id)
366
367
 
367
368
  return StructuredTool.from_function(
@@ -0,0 +1,289 @@
1
+ #!/usr/bin/env python3
2
+ """Minimalist prompt-toolkit TUI for DeepAgents sandbox chat.
3
+
4
+ Beautiful emerald green theme with streaming responses.
5
+
6
+ Controls:
7
+ - Enter: Submit message
8
+ - Alt-Enter (or Esc then Enter): New line for multiline input
9
+ - Ctrl+C or Ctrl+D: Quit
10
+ """
11
+
12
+ import asyncio
13
+
14
+ from prompt_toolkit import PromptSession
15
+ from prompt_toolkit.key_binding import KeyBindings
16
+ from prompt_toolkit.styles import Style
17
+ from rich.console import Console
18
+ from rich.spinner import Spinner
19
+
20
+ from chat_agent import create_sandbox_chat_agent
21
+
22
+
23
+ # ============================================================================
24
+ # CONFIGURATION
25
+ # ============================================================================
26
+
27
+ COLORS = {
28
+ "primary": "#10b981", # Emerald 500
29
+ "dim": "#6b7280", # Gray 500
30
+ "user": "#ffffff", # White
31
+ "agent": "#10b981", # Emerald 500
32
+ "thinking": "#34d399", # Emerald 400
33
+ "tool": "#fbbf24", # Amber 400
34
+ }
35
+
36
+ DEEP_AGENTS_ASCII = """
37
+ ██████╗ ███████╗ ███████╗ ██████╗
38
+ ██╔══██╗ ██╔════╝ ██╔════╝ ██╔══██╗
39
+ ██║ ██║ █████╗ █████╗ ██████╔╝
40
+ ██║ ██║ ██╔══╝ ██╔══╝ ██╔═══╝
41
+ ██████╔╝ ███████╗ ███████╗ ██║
42
+ ╚═════╝ ╚══════╝ ╚══════╝ ╚═╝
43
+
44
+ █████╗ ██████╗ ███████╗ ███╗ ██╗ ████████╗ ███████╗
45
+ ██╔══██╗ ██╔════╝ ██╔════╝ ████╗ ██║ ╚══██╔══╝ ██╔════╝
46
+ ███████║ ██║ ███╗ █████╗ ██╔██╗ ██║ ██║ ███████╗
47
+ ██╔══██║ ██║ ██║ ██╔══╝ ██║╚██╗██║ ██║ ╚════██║
48
+ ██║ ██║ ╚██████╔╝ ███████╗ ██║ ╚████║ ██║ ███████║
49
+ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ╚═══╝ ╚═╝ ╚══════╝
50
+ """
51
+
52
+ # ============================================================================
53
+ # GLOBALS
54
+ # ============================================================================
55
+
56
+ console = Console()
57
+ agent = None
58
+ config = {"configurable": {"thread_id": "sandbox-chat"}}
59
+
60
+ # ============================================================================
61
+ # UTILITIES
62
+ # ============================================================================
63
+
64
+ MAX_ARG_LENGTH = 150
65
+
66
+ TOOL_ICONS = {
67
+ "shell": "⚡",
68
+ "write_file": "✏️",
69
+ "read_file": "📖",
70
+ "edit_file": "✂️",
71
+ "ls": "📁",
72
+ "glob": "🔍",
73
+ "grep": "🔎",
74
+ }
75
+
76
+
77
def truncate(text: str, max_len: int) -> str:
    """Return *text* unchanged if it fits in *max_len*, else cut it and append an ellipsis."""
    if len(text) > max_len:
        return text[:max_len] + "..."
    return text
80
+
81
+
82
+ # ============================================================================
83
+ # DISPLAY FUNCTIONS
84
+ # ============================================================================
85
+
86
+
87
def show_welcome():
    """Render the DeepAgents banner and block until the user presses Enter."""
    console.clear()
    banner_style = f"bold {COLORS['primary']}"
    console.print(DEEP_AGENTS_ASCII, style=banner_style)
    console.print("\n")
    console.print("Press Enter to start", style=COLORS["dim"])
    input()
    # Intentionally no clear afterwards — keep the ASCII art on screen.
95
+
96
+
97
+ # ============================================================================
98
+ # AGENT INTERACTION
99
+ # ============================================================================
100
+
101
+
102
async def stream_agent_response(user_input: str) -> None:
    """Stream agent response using async iteration.

    Sends ``user_input`` to the global ``agent`` and renders each update as
    it arrives: tool calls as dimmed, icon-prefixed one-liners, then the
    agent's text printed incrementally (only the new suffix each time). A
    spinner runs while nothing visible is produced and is stopped for good
    once real text appears.
    """
    global agent

    has_responded = False  # whether the "... " response prefix was printed yet
    current_text = ""  # text already printed; used to emit only the new suffix

    # Start spinner manually so we can stop it when we have content
    status = console.status("[bold #34d399]Agent is thinking...", spinner="dots")
    status.start()
    spinner_active = True

    # NOTE(review): with subgraphs=True each streamed item appears to be a
    # (namespace, update) pair; the namespace is unused here — confirm
    # against the langgraph astream contract.
    async for _, chunk in agent.astream(
        {"messages": [{"role": "user", "content": user_input}]},
        stream_mode="updates",
        subgraphs=True,
        config=config,
        durability="exit",
    ):
        # Each update maps a node name to its state delta; only the delta is used.
        chunk_data = list(chunk.values())[0]
        if not chunk_data or "messages" not in chunk_data:
            continue

        last_message = chunk_data["messages"][-1]
        message_role = getattr(last_message, "type", None)
        message_content = getattr(last_message, "content", None)

        # Handle tool calls from AI messages (LangChain tool_calls attribute)
        tool_calls = getattr(last_message, "tool_calls", None)
        if tool_calls and message_role == "ai":
            for tool_call in tool_calls:
                tool_name = tool_call.get("name", "unknown")
                tool_args = tool_call.get("args", {})

                icon = TOOL_ICONS.get(tool_name, "🔧")
                args_str = ", ".join(
                    f"{k}={truncate(str(v), 50)}" for k, v in tool_args.items()
                )

                # Stop spinner temporarily to print tool call
                if spinner_active:
                    status.stop()
                console.print(f" {icon} {tool_name}({args_str})", style=f"dim {COLORS['tool']}")
                # Restart spinner for next tool/processing
                if spinner_active:
                    status.start()

        # Skip tool results - they're verbose and the agent will summarize
        if message_role == "tool":
            continue

        if not message_content:
            continue

        # Handle tool calls from content blocks (alternative format)
        if message_role == "ai" and isinstance(message_content, list):
            for block in message_content:
                if isinstance(block, dict) and block.get("type") == "tool_use":
                    tool_name = block.get("name", "unknown")
                    tool_input = block.get("input", {})

                    icon = TOOL_ICONS.get(tool_name, "🔧")
                    args = ", ".join(
                        f"{k}={truncate(str(v), 50)}" for k, v in tool_input.items()
                    )

                    # Stop spinner temporarily to print tool call
                    if spinner_active:
                        status.stop()
                    console.print(f" {icon} {tool_name}({args})", style=f"dim {COLORS['tool']}")
                    # Restart spinner for next tool/processing
                    if spinner_active:
                        status.start()

        # Handle agent text responses
        if message_role == "ai":
            text_content = ""

            if isinstance(message_content, str):
                text_content = message_content
            elif isinstance(message_content, list):
                # Take only the first text block of the content list.
                for block in message_content:
                    if isinstance(block, dict) and block.get("type") == "text":
                        text_content = block.get("text", "")
                        break

            if text_content.strip():
                # Stop spinner when we have actual text to display
                if spinner_active:
                    status.stop()
                    spinner_active = False

                # Print prefix on first response
                if not has_responded:
                    console.print("... ", style=COLORS["agent"], end="")
                    has_responded = True

                # Stream new content: assumes each update carries the full
                # accumulated text, so print only the unseen tail.
                if text_content != current_text:
                    new_text = text_content[len(current_text) :]
                    console.print(new_text, style=COLORS["agent"], end="")
                    current_text = text_content

    # Make sure spinner is stopped (in case loop ended without content)
    if spinner_active:
        status.stop()

    if has_responded:
        console.print()  # Newline
        console.print()  # Blank line
212
+
213
+
214
+ # ============================================================================
215
+ # MAIN
216
+ # ============================================================================
217
+
218
+
219
async def main():
    """Entry point: show the banner, build the agent, then run the chat loop."""
    global agent

    show_welcome()

    # Bring the agent up before accepting any input.
    console.print("\nInitializing agent...", style=COLORS["dim"])
    agent = create_sandbox_chat_agent()

    console.print("\n... Ready to code! What would you like to build?", style=COLORS["agent"])
    console.print()
    console.print(" Tip: Alt-Enter for newline, Enter to submit", style=f"dim {COLORS['dim']}")

    # Enter submits (only when non-empty); Alt-Enter inserts a literal newline.
    bindings = KeyBindings()

    @bindings.add('enter')
    def _(event):
        active_buffer = event.current_buffer
        # Submit only when there is real content; otherwise swallow the key
        # (no newline inserted, nothing submitted).
        if active_buffer.text.strip():
            active_buffer.validate_and_handle()

    @bindings.add('escape', 'enter')  # Alt-Enter (or Esc then Enter) for newline
    def _(event):
        event.current_buffer.insert_text('\n')

    # Multiline-capable prompt session using the bindings above.
    session = PromptSession(
        message="> ",
        style=Style.from_dict({"prompt": COLORS["user"]}),
        multiline=True,
        prompt_continuation=lambda width, line_number, is_soft_wrap: " ",
        key_bindings=bindings,
    )

    # Read/respond loop until the user quits or interrupts.
    while True:
        try:
            user_input = await session.prompt_async()

            stripped = user_input.strip()
            if stripped.lower() in ["quit", "exit", "q"]:
                break
            if not stripped:
                continue

            # Blank line between the prompt and the agent's output.
            console.print()
            await stream_agent_response(user_input)
        except (KeyboardInterrupt, EOFError):
            # Ctrl+C / Ctrl+D — either at the prompt or mid-stream — exits.
            break

    console.print("\nGoodbye!", style=COLORS["primary"])


if __name__ == "__main__":
    asyncio.run(main())
@@ -1,20 +1,22 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: deepagents-cli
3
- Version: 0.0.1
3
+ Version: 0.0.3
4
4
  Summary: General purpose 'deep agent' with sub-agent spawning, todo list capabilities, and mock file system. Built on LangGraph.
5
5
  License: MIT
6
6
  Requires-Python: <4.0,>=3.11
7
7
  Description-Content-Type: text/markdown
8
8
  License-File: LICENSE
9
- Requires-Dist: langchain-anthropic==1.0.0
10
- Requires-Dist: langchain==1.0.0
11
- Requires-Dist: langchain-core==1.0.0
12
9
  Requires-Dist: tavily-python
13
10
  Requires-Dist: python-dotenv
14
11
  Requires-Dist: requests
15
12
  Requires-Dist: rich>=13.0.0
13
+ Requires-Dist: langchain-anthropic<2.0.0,>=1.0.0
14
+ Requires-Dist: langchain<2.0.0,>=1.0.0
15
+ Requires-Dist: langchain-core<2.0.0,>=1.0.0
16
+ Requires-Dist: wcmatch
16
17
  Provides-Extra: dev
17
18
  Requires-Dist: pytest; extra == "dev"
19
+ Requires-Dist: pytest-recording; extra == "dev"
18
20
  Requires-Dist: pytest-cov; extra == "dev"
19
21
  Requires-Dist: build; extra == "dev"
20
22
  Requires-Dist: twine; extra == "dev"
@@ -319,40 +321,30 @@ agent = create_deep_agent(
319
321
  )
320
322
  ```
321
323
 
322
- ### `use_longterm_memory`
323
- Deep agents come with a local filesystem to offload memory to. This filesystem is stored in state, and is therefore transient to a single thread.
324
+ ### `memory_backend`
325
+ Deep agents come with a local filesystem to offload memory to. By default, this filesystem is stored in state (ephemeral, transient to a single thread).
324
326
 
325
- You can extend deep agents with long-term memory by providing a Store and setting use_longterm_memory=True.
327
+ You can configure persistent long-term memory using a CompositeBackend with StoreBackend:
326
328
 
327
329
  ```python
328
330
  from deepagents import create_deep_agent
331
+ from deepagents.memory.backends import StateBackend, StoreBackend, CompositeBackend
329
332
  from langgraph.store.memory import InMemoryStore
330
333
 
331
334
  store = InMemoryStore() # Or any other Store object
332
- agent = create_deep_agent(
333
- store=store,
334
- use_longterm_memory=True
335
- )
336
- ```
337
335
 
338
- ### `use_local_filesystem`
339
- If you prefer the agent to operate on your machine's filesystem (read/write actual files), enable local mode:
340
-
341
- ```python
342
- from deepagents import create_deep_agent
336
+ # Create a hybrid backend: ephemeral files in / and persistent files in /memories/
337
+ backend = CompositeBackend(
338
+ default=StateBackend(),
339
+ routes={"/memories/": StoreBackend()}
340
+ )
343
341
 
344
342
  agent = create_deep_agent(
345
- use_local_filesystem=True, # Tools operate on disk
346
- # use_longterm_memory=False # Must be False when using local filesystem
343
+ memory_backend=backend,
344
+ store=store
347
345
  )
348
346
  ```
349
347
 
350
- Notes:
351
- - Local filesystem mode injects `ls`, `read_file`, `write_file`, `edit_file`, plus `glob` and `grep` (ripgrep-powered) for discovery/search.
352
- - Long-term memory is not supported in local mode; attempting to set both `use_local_filesystem=True` and `use_longterm_memory=True` raises a ValueError.
353
- - Large tool outputs are automatically evicted to the in-memory filesystem state as files under `/large_tool_results/{tool_call_id}`; use `read_file` with `offset`/`limit` to page through results.
354
- - Be careful when enabling local mode in sensitive environments — tools can read and write files on disk.
355
-
356
348
  ### `interrupt_on`
357
349
  A common reality for agents is that some tool operations may be sensitive and require human approval before execution. Deep Agents supports human-in-the-loop workflows through LangGraph’s interrupt capabilities. You can configure which tools require approval using a checkpointer.
358
350
 
@@ -416,17 +408,16 @@ agent = create_agent(
416
408
  ### FilesystemMiddleware
417
409
 
418
410
  Context engineering is one of the main challenges in building effective agents. This can be particularly hard when using tools that can return variable length results (ex. web_search, rag), as long ToolResults can quickly fill up your context window.
419
- **FilesystemMiddleware** provides four tools to your agent to interact with short-term memory (in agent state) and, optionally, long-term memory (via a Store).
411
+ **FilesystemMiddleware** provides four tools to your agent to interact with both short-term and long-term memory.
420
412
  - **ls**: List the files in your filesystem
421
413
  - **read_file**: Read an entire file, or a certain number of lines from a file
422
414
  - **write_file**: Write a new file to your filesystem
423
415
  - **edit_file**: Edit an existing file in your filesystem
424
416
 
425
- If you want these tools to operate on your host filesystem instead of the agent state, use `create_deep_agent(..., use_local_filesystem=True)` or attach `LocalFilesystemMiddleware` directly. Local mode also provides `glob` and `grep` helpers for file discovery and content search.
426
-
427
417
  ```python
428
418
  from langchain.agents import create_agent
429
419
  from deepagents.middleware.filesystem import FilesystemMiddleware
420
+ from deepagents.memory.backends import StateBackend, StoreBackend, CompositeBackend
430
421
 
431
422
  # FilesystemMiddleware is included by default in create_deep_agent
432
423
  # You can customize it if building a custom agent
@@ -434,8 +425,13 @@ agent = create_agent(
434
425
  model="anthropic:claude-sonnet-4-20250514",
435
426
  middleware=[
436
427
  FilesystemMiddleware(
437
- long_term_memory=False, # Enables access to long-term memory, defaults to False. You must attach a store to use long-term memory.
438
- system_prompt="Write to the filesystem when...", # Optional custom addition to the system prompt
428
+ memory_backend=StateBackend(), # Optional: customize storage backend (defaults to StateBackend)
429
+ # For persistent memory, use CompositeBackend:
430
+ # memory_backend=CompositeBackend(
431
+ # default=StateBackend(),
432
+ # routes={"/memories/": StoreBackend()}
433
+ # )
434
+ system_prompt="Write to the filesystem when...", # Optional custom system prompt override
439
435
  custom_tool_descriptions={
440
436
  "ls": "Use the ls tool when...",
441
437
  "read_file": "Use the read_file tool to..."
@@ -0,0 +1,24 @@
1
+ deepagents/__init__.py,sha256=9BVNn4lfF5N8l2KY8Ttxi82zO609I-fGqoSIF7DAxiU,342
2
+ deepagents/cli.py,sha256=3UA2SQvAGmUv_elLhi03LjT5U1uyPJ4_XC6wg8Qpl4E,21381
3
+ deepagents/default_agent_prompt.md,sha256=4UbINqwOHbkolh3TTeV3FZWadrVdViHbvIuewnf7wxQ,2925
4
+ deepagents/graph.py,sha256=238tz_eU8HUXFz2PE6uqbW64cPbTKxT8VNfzmNEoLBM,6245
5
+ deepagents/pretty_cli.py,sha256=h43O2j-NA3d6_blvB9_fikSstSY6Nske5K4PjYaOajc,10326
6
+ deepagents/memory/__init__.py,sha256=AnII-JcpwkJlBNzReZy2mgWbZQ7nrc-QN9kNxPoK4fg,353
7
+ deepagents/memory/protocol.py,sha256=Rb_I6Fs27xL-iwHIiD60JluWcho_B_LilamAVlRs8RE,6100
8
+ deepagents/memory/backends/__init__.py,sha256=PdBH3KkEuVAp5SHmGlitM80OV5JXzhfjfehTMOe5Zg4,472
9
+ deepagents/memory/backends/composite.py,sha256=Ar53CGX8yzJrjOqV1dy3i7iGvzEkTfJgwIUYK9ffV14,10261
10
+ deepagents/memory/backends/filesystem.py,sha256=-i_OG2K-dyPROXlKbKgIBvHdj0MWQUWzy7xm603kjWY,11684
11
+ deepagents/memory/backends/state.py,sha256=iP46YlS1lR65WE5lGj2J1cH09UGjntoESVQ_v42Co9Y,6754
12
+ deepagents/memory/backends/store.py,sha256=aWhpiR43-32HwO2VlL238_OqhU7qVabUL6i49UINaH8,11822
13
+ deepagents/memory/backends/utils.py,sha256=n7z73Jes7qZUN8-Tjbz6fFAoZ3XHqZpm5rD9i-0Uvbk,9605
14
+ deepagents/middleware/__init__.py,sha256=Uf4L69XweeHTcQiFz-IEd27wM5L8Mrq5u8OpJ3nwvQY,400
15
+ deepagents/middleware/agent_memory.py,sha256=4bMs-EFbp0-pqDDfOjh_kQkP49M_p5KvUL7JafC4rag,8061
16
+ deepagents/middleware/filesystem.py,sha256=oPTlcW35ijcB643pX4PldF3Qxpj-o2wEdQ9HILx7SLk,25334
17
+ deepagents/middleware/patch_tool_calls.py,sha256=Cu8rUpt1GjrYgfMvZG6wOowvnmFeYTCauOJhlltNPmo,2045
18
+ deepagents/middleware/subagents.py,sha256=NH7QEShEPAosc3HLbSFwlgtlZv3SZpkPEAqHRbuqE_c,23538
19
+ deepagents_cli-0.0.3.dist-info/licenses/LICENSE,sha256=c__BaxUCK69leo2yEKynf8lWndu8iwYwge1CbyqAe-E,1071
20
+ deepagents_cli-0.0.3.dist-info/METADATA,sha256=r7jWMUaQJIf22JG6x9_bOrqoFVE1gqItgV3oI21B-M4,19792
21
+ deepagents_cli-0.0.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
22
+ deepagents_cli-0.0.3.dist-info/entry_points.txt,sha256=5qO2sNhal5xQqcexm2VtT981A29FBKt-75aE4gatH8Q,55
23
+ deepagents_cli-0.0.3.dist-info/top_level.txt,sha256=drAzchOzPNePwpb3_pbPuvLuayXkN7SNqeIKMBWJoAo,11
24
+ deepagents_cli-0.0.3.dist-info/RECORD,,
@@ -1,16 +0,0 @@
1
- """Common middleware constants shared across filesystem middlewares."""
2
-
3
- # Message used when evicting a large tool result to the in-memory filesystem
4
- TOO_LARGE_TOOL_MSG = (
5
- "Tool result too large, the result of this tool call {tool_call_id} was saved in the filesystem at this path: {file_path}\n"
6
- "You can read the result from the filesystem by using the read_file tool, but make sure to only read part of the result at a time.\n"
7
- "You can do this by specifying an offset and limit in the read_file tool call.\n"
8
- "For example, to read the first 100 lines, you can use the read_file tool with offset=0 and limit=100.\n\n"
9
- "Here are the first 10 lines of the result:\n{content_sample}\n"
10
- )
11
-
12
- # Supplement lines for tools that aren't listed in the base filesystem system prompt
13
- FILESYSTEM_SYSTEM_PROMPT_GLOB_GREP_SUPPLEMENT = (
14
- "\n- glob: find files/directories by pattern\n"
15
- "- grep: search file contents using ripgrep (rg)"
16
- )