deepagents 0.0.12rc2__py3-none-any.whl → 0.0.12rc3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepagents/graph.py +10 -7
- deepagents/middleware/filesystem.py +23 -30
- deepagents/middleware/subagents.py +10 -12
- deepagents-0.0.12rc3.dist-info/METADATA +533 -0
- deepagents-0.0.12rc3.dist-info/RECORD +10 -0
- deepagents-0.0.12rc2.dist-info/METADATA +0 -455
- deepagents-0.0.12rc2.dist-info/RECORD +0 -10
- {deepagents-0.0.12rc2.dist-info → deepagents-0.0.12rc3.dist-info}/WHEEL +0 -0
- {deepagents-0.0.12rc2.dist-info → deepagents-0.0.12rc3.dist-info}/licenses/LICENSE +0 -0
- {deepagents-0.0.12rc2.dist-info → deepagents-0.0.12rc3.dist-info}/top_level.txt +0 -0
deepagents/graph.py
CHANGED
|
@@ -9,6 +9,7 @@ from langchain.agents.middleware.summarization import SummarizationMiddleware
|
|
|
9
9
|
from langchain.agents.middleware.types import AgentMiddleware
|
|
10
10
|
from langchain.agents.structured_output import ResponseFormat
|
|
11
11
|
from langchain_anthropic import ChatAnthropic
|
|
12
|
+
from langchain_anthropic.middleware import AnthropicPromptCachingMiddleware
|
|
12
13
|
from langchain_core.language_models import BaseChatModel
|
|
13
14
|
from langchain_core.tools import BaseTool
|
|
14
15
|
from langgraph.cache.base import BaseCache
|
|
@@ -29,8 +30,8 @@ def get_default_model() -> ChatAnthropic:
|
|
|
29
30
|
ChatAnthropic instance configured with Claude Sonnet 4.
|
|
30
31
|
"""
|
|
31
32
|
return ChatAnthropic(
|
|
32
|
-
model_name="claude-sonnet-4-
|
|
33
|
-
max_tokens=
|
|
33
|
+
model_name="claude-sonnet-4-5-20250929",
|
|
34
|
+
max_tokens=20000,
|
|
34
35
|
)
|
|
35
36
|
|
|
36
37
|
|
|
@@ -107,18 +108,20 @@ def create_deep_agent(
|
|
|
107
108
|
),
|
|
108
109
|
SummarizationMiddleware(
|
|
109
110
|
model=model,
|
|
110
|
-
max_tokens_before_summary=
|
|
111
|
-
messages_to_keep=
|
|
111
|
+
max_tokens_before_summary=170000,
|
|
112
|
+
messages_to_keep=6,
|
|
112
113
|
),
|
|
114
|
+
AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore"),
|
|
113
115
|
],
|
|
114
116
|
default_interrupt_on=interrupt_on,
|
|
115
117
|
general_purpose_agent=True,
|
|
116
118
|
),
|
|
117
119
|
SummarizationMiddleware(
|
|
118
120
|
model=model,
|
|
119
|
-
max_tokens_before_summary=
|
|
120
|
-
messages_to_keep=
|
|
121
|
+
max_tokens_before_summary=170000,
|
|
122
|
+
messages_to_keep=6,
|
|
121
123
|
),
|
|
124
|
+
AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore"),
|
|
122
125
|
]
|
|
123
126
|
if interrupt_on is not None:
|
|
124
127
|
deepagent_middleware.append(HumanInTheLoopMiddleware(interrupt_on=interrupt_on))
|
|
@@ -137,4 +140,4 @@ def create_deep_agent(
|
|
|
137
140
|
debug=debug,
|
|
138
141
|
name=name,
|
|
139
142
|
cache=cache,
|
|
140
|
-
)
|
|
143
|
+
).with_config({"recursion_limit": 1000})
|
|
@@ -17,11 +17,12 @@ from langchain.agents.middleware.types import (
|
|
|
17
17
|
ModelRequest,
|
|
18
18
|
ModelResponse,
|
|
19
19
|
)
|
|
20
|
-
from langchain.tools
|
|
20
|
+
from langchain.tools import ToolRuntime
|
|
21
|
+
from langchain.tools.tool_node import ToolCallRequest
|
|
21
22
|
from langchain_core.messages import ToolMessage
|
|
22
|
-
from langchain_core.tools import BaseTool,
|
|
23
|
+
from langchain_core.tools import BaseTool, tool
|
|
23
24
|
from langgraph.config import get_config
|
|
24
|
-
from langgraph.runtime import Runtime
|
|
25
|
+
from langgraph.runtime import Runtime
|
|
25
26
|
from langgraph.store.base import BaseStore, Item
|
|
26
27
|
from langgraph.types import Command
|
|
27
28
|
from typing_extensions import TypedDict
|
|
@@ -569,10 +570,9 @@ def _ls_tool_generator(custom_description: str | None = None, *, long_term_memor
|
|
|
569
570
|
if long_term_memory:
|
|
570
571
|
|
|
571
572
|
@tool(description=tool_description)
|
|
572
|
-
def ls(
|
|
573
|
-
files = _get_filenames_from_state(state)
|
|
573
|
+
def ls(runtime: ToolRuntime[None, FilesystemState], path: str | None = None) -> list[str]:
|
|
574
|
+
files = _get_filenames_from_state(runtime.state)
|
|
574
575
|
# Add filenames from longterm memory
|
|
575
|
-
runtime = get_runtime()
|
|
576
576
|
store = _get_store(runtime)
|
|
577
577
|
namespace = _get_namespace()
|
|
578
578
|
longterm_files = store.search(namespace)
|
|
@@ -582,8 +582,8 @@ def _ls_tool_generator(custom_description: str | None = None, *, long_term_memor
|
|
|
582
582
|
else:
|
|
583
583
|
|
|
584
584
|
@tool(description=tool_description)
|
|
585
|
-
def ls(
|
|
586
|
-
files = _get_filenames_from_state(state)
|
|
585
|
+
def ls(runtime: ToolRuntime[None, FilesystemState], path: str | None = None) -> list[str]:
|
|
586
|
+
files = _get_filenames_from_state(runtime.state)
|
|
587
587
|
return _filter_files_by_path(files, path)
|
|
588
588
|
|
|
589
589
|
return ls
|
|
@@ -633,14 +633,13 @@ def _read_file_tool_generator(custom_description: str | None = None, *, long_ter
|
|
|
633
633
|
@tool(description=tool_description)
|
|
634
634
|
def read_file(
|
|
635
635
|
file_path: str,
|
|
636
|
-
|
|
636
|
+
runtime: ToolRuntime[None, FilesystemState],
|
|
637
637
|
offset: int = DEFAULT_READ_OFFSET,
|
|
638
638
|
limit: int = DEFAULT_READ_LIMIT,
|
|
639
639
|
) -> str:
|
|
640
640
|
file_path = _validate_path(file_path)
|
|
641
641
|
if _has_memories_prefix(file_path):
|
|
642
642
|
stripped_file_path = _strip_memories_prefix(file_path)
|
|
643
|
-
runtime = get_runtime()
|
|
644
643
|
store = _get_store(runtime)
|
|
645
644
|
namespace = _get_namespace()
|
|
646
645
|
item: Item | None = store.get(namespace, stripped_file_path)
|
|
@@ -649,7 +648,7 @@ def _read_file_tool_generator(custom_description: str | None = None, *, long_ter
|
|
|
649
648
|
file_data = _convert_store_item_to_file_data(item)
|
|
650
649
|
else:
|
|
651
650
|
try:
|
|
652
|
-
file_data = _get_file_data_from_state(state, file_path)
|
|
651
|
+
file_data = _get_file_data_from_state(runtime.state, file_path)
|
|
653
652
|
except ValueError as e:
|
|
654
653
|
return str(e)
|
|
655
654
|
return _read_file_data_content(file_data, offset, limit)
|
|
@@ -659,13 +658,13 @@ def _read_file_tool_generator(custom_description: str | None = None, *, long_ter
|
|
|
659
658
|
@tool(description=tool_description)
|
|
660
659
|
def read_file(
|
|
661
660
|
file_path: str,
|
|
662
|
-
|
|
661
|
+
runtime: ToolRuntime[None, FilesystemState],
|
|
663
662
|
offset: int = DEFAULT_READ_OFFSET,
|
|
664
663
|
limit: int = DEFAULT_READ_LIMIT,
|
|
665
664
|
) -> str:
|
|
666
665
|
file_path = _validate_path(file_path)
|
|
667
666
|
try:
|
|
668
|
-
file_data = _get_file_data_from_state(state, file_path)
|
|
667
|
+
file_data = _get_file_data_from_state(runtime.state, file_path)
|
|
669
668
|
except ValueError as e:
|
|
670
669
|
return str(e)
|
|
671
670
|
return _read_file_data_content(file_data, offset, limit)
|
|
@@ -719,13 +718,11 @@ def _write_file_tool_generator(custom_description: str | None = None, *, long_te
|
|
|
719
718
|
def write_file(
|
|
720
719
|
file_path: str,
|
|
721
720
|
content: str,
|
|
722
|
-
|
|
723
|
-
tool_call_id: Annotated[str, InjectedToolCallId],
|
|
721
|
+
runtime: ToolRuntime[None, FilesystemState],
|
|
724
722
|
) -> Command | str:
|
|
725
723
|
file_path = _validate_path(file_path)
|
|
726
724
|
if _has_memories_prefix(file_path):
|
|
727
725
|
stripped_file_path = _strip_memories_prefix(file_path)
|
|
728
|
-
runtime = get_runtime()
|
|
729
726
|
store = _get_store(runtime)
|
|
730
727
|
namespace = _get_namespace()
|
|
731
728
|
if store.get(namespace, stripped_file_path) is not None:
|
|
@@ -733,7 +730,7 @@ def _write_file_tool_generator(custom_description: str | None = None, *, long_te
|
|
|
733
730
|
new_file_data = _create_file_data(content)
|
|
734
731
|
store.put(namespace, stripped_file_path, _convert_file_data_to_store_item(new_file_data))
|
|
735
732
|
return f"Updated longterm memories file {file_path}"
|
|
736
|
-
return _write_file_to_state(state, tool_call_id, file_path, content)
|
|
733
|
+
return _write_file_to_state(runtime.state, runtime.tool_call_id, file_path, content)
|
|
737
734
|
|
|
738
735
|
else:
|
|
739
736
|
|
|
@@ -741,11 +738,10 @@ def _write_file_tool_generator(custom_description: str | None = None, *, long_te
|
|
|
741
738
|
def write_file(
|
|
742
739
|
file_path: str,
|
|
743
740
|
content: str,
|
|
744
|
-
|
|
745
|
-
tool_call_id: Annotated[str, InjectedToolCallId],
|
|
741
|
+
runtime: ToolRuntime[None, FilesystemState],
|
|
746
742
|
) -> Command | str:
|
|
747
743
|
file_path = _validate_path(file_path)
|
|
748
|
-
return _write_file_to_state(state, tool_call_id, file_path, content)
|
|
744
|
+
return _write_file_to_state(runtime.state, runtime.tool_call_id, file_path, content)
|
|
749
745
|
|
|
750
746
|
return write_file
|
|
751
747
|
|
|
@@ -803,8 +799,7 @@ def _edit_file_tool_generator(custom_description: str | None = None, *, long_ter
|
|
|
803
799
|
file_path: str,
|
|
804
800
|
old_string: str,
|
|
805
801
|
new_string: str,
|
|
806
|
-
|
|
807
|
-
tool_call_id: Annotated[str, InjectedToolCallId],
|
|
802
|
+
runtime: ToolRuntime[None, FilesystemState],
|
|
808
803
|
*,
|
|
809
804
|
replace_all: bool = False,
|
|
810
805
|
) -> Command | str:
|
|
@@ -814,7 +809,6 @@ def _edit_file_tool_generator(custom_description: str | None = None, *, long_ter
|
|
|
814
809
|
# Retrieve file data from appropriate storage
|
|
815
810
|
if is_longterm_memory:
|
|
816
811
|
stripped_file_path = _strip_memories_prefix(file_path)
|
|
817
|
-
runtime = get_runtime()
|
|
818
812
|
store = _get_store(runtime)
|
|
819
813
|
namespace = _get_namespace()
|
|
820
814
|
item: Item | None = store.get(namespace, stripped_file_path)
|
|
@@ -823,7 +817,7 @@ def _edit_file_tool_generator(custom_description: str | None = None, *, long_ter
|
|
|
823
817
|
file_data = _convert_store_item_to_file_data(item)
|
|
824
818
|
else:
|
|
825
819
|
try:
|
|
826
|
-
file_data = _get_file_data_from_state(state, file_path)
|
|
820
|
+
file_data = _get_file_data_from_state(runtime.state, file_path)
|
|
827
821
|
except ValueError as e:
|
|
828
822
|
return str(e)
|
|
829
823
|
|
|
@@ -843,7 +837,7 @@ def _edit_file_tool_generator(custom_description: str | None = None, *, long_ter
|
|
|
843
837
|
return Command(
|
|
844
838
|
update={
|
|
845
839
|
"files": {file_path: new_file_data},
|
|
846
|
-
"messages": [ToolMessage(full_msg, tool_call_id=tool_call_id)],
|
|
840
|
+
"messages": [ToolMessage(full_msg, tool_call_id=runtime.tool_call_id)],
|
|
847
841
|
}
|
|
848
842
|
)
|
|
849
843
|
else:
|
|
@@ -853,8 +847,7 @@ def _edit_file_tool_generator(custom_description: str | None = None, *, long_ter
|
|
|
853
847
|
file_path: str,
|
|
854
848
|
old_string: str,
|
|
855
849
|
new_string: str,
|
|
856
|
-
|
|
857
|
-
tool_call_id: Annotated[str, InjectedToolCallId],
|
|
850
|
+
runtime: ToolRuntime[None, FilesystemState],
|
|
858
851
|
*,
|
|
859
852
|
replace_all: bool = False,
|
|
860
853
|
) -> Command | str:
|
|
@@ -862,7 +855,7 @@ def _edit_file_tool_generator(custom_description: str | None = None, *, long_ter
|
|
|
862
855
|
|
|
863
856
|
# Retrieve file data from state
|
|
864
857
|
try:
|
|
865
|
-
file_data = _get_file_data_from_state(state, file_path)
|
|
858
|
+
file_data = _get_file_data_from_state(runtime.state, file_path)
|
|
866
859
|
except ValueError as e:
|
|
867
860
|
return str(e)
|
|
868
861
|
|
|
@@ -877,7 +870,7 @@ def _edit_file_tool_generator(custom_description: str | None = None, *, long_ter
|
|
|
877
870
|
return Command(
|
|
878
871
|
update={
|
|
879
872
|
"files": {file_path: new_file_data},
|
|
880
|
-
"messages": [ToolMessage(full_msg, tool_call_id=tool_call_id)],
|
|
873
|
+
"messages": [ToolMessage(full_msg, tool_call_id=runtime.tool_call_id)],
|
|
881
874
|
}
|
|
882
875
|
)
|
|
883
876
|
|
|
@@ -958,7 +951,7 @@ class FilesystemMiddleware(AgentMiddleware):
|
|
|
958
951
|
long_term_memory: bool = False,
|
|
959
952
|
system_prompt: str | None = None,
|
|
960
953
|
custom_tool_descriptions: dict[str, str] | None = None,
|
|
961
|
-
tool_token_limit_before_evict: int | None =
|
|
954
|
+
tool_token_limit_before_evict: int | None = 20000,
|
|
962
955
|
) -> None:
|
|
963
956
|
"""Initialize the filesystem middleware.
|
|
964
957
|
|
|
@@ -1,12 +1,12 @@
|
|
|
1
1
|
"""Middleware for providing subagents to an agent via a `task` tool."""
|
|
2
2
|
|
|
3
3
|
from collections.abc import Awaitable, Callable, Sequence
|
|
4
|
-
from typing import
|
|
4
|
+
from typing import Any, NotRequired, TypedDict, cast
|
|
5
5
|
|
|
6
6
|
from langchain.agents import create_agent
|
|
7
7
|
from langchain.agents.middleware import HumanInTheLoopMiddleware, InterruptOnConfig
|
|
8
8
|
from langchain.agents.middleware.types import AgentMiddleware, ModelRequest, ModelResponse
|
|
9
|
-
from langchain.tools import BaseTool,
|
|
9
|
+
from langchain.tools import BaseTool, ToolRuntime
|
|
10
10
|
from langchain_core.language_models import BaseChatModel
|
|
11
11
|
from langchain_core.messages import HumanMessage, ToolMessage
|
|
12
12
|
from langchain_core.runnables import Runnable
|
|
@@ -327,14 +327,14 @@ def _create_task_tool(
|
|
|
327
327
|
}
|
|
328
328
|
)
|
|
329
329
|
|
|
330
|
-
def _validate_and_prepare_state(subagent_type: str, description: str,
|
|
330
|
+
def _validate_and_prepare_state(subagent_type: str, description: str, runtime: ToolRuntime) -> tuple[Runnable, dict]:
|
|
331
331
|
"""Validate subagent type and prepare state for invocation."""
|
|
332
332
|
if subagent_type not in subagent_graphs:
|
|
333
333
|
msg = f"Error: invoked agent of type {subagent_type}, the only allowed types are {[f'`{k}`' for k in subagent_graphs]}"
|
|
334
334
|
raise ValueError(msg)
|
|
335
335
|
subagent = subagent_graphs[subagent_type]
|
|
336
336
|
# Create a new state dict to avoid mutating the original
|
|
337
|
-
subagent_state = {k: v for k, v in state.items() if k not in _EXCLUDED_STATE_KEYS}
|
|
337
|
+
subagent_state = {k: v for k, v in runtime.state.items() if k not in _EXCLUDED_STATE_KEYS}
|
|
338
338
|
subagent_state["messages"] = [HumanMessage(content=description)]
|
|
339
339
|
return subagent, subagent_state
|
|
340
340
|
|
|
@@ -348,22 +348,20 @@ def _create_task_tool(
|
|
|
348
348
|
def task(
|
|
349
349
|
description: str,
|
|
350
350
|
subagent_type: str,
|
|
351
|
-
|
|
352
|
-
tool_call_id: Annotated[str, InjectedToolCallId],
|
|
351
|
+
runtime: ToolRuntime,
|
|
353
352
|
) -> str | Command:
|
|
354
|
-
subagent, subagent_state = _validate_and_prepare_state(subagent_type, description,
|
|
353
|
+
subagent, subagent_state = _validate_and_prepare_state(subagent_type, description, runtime)
|
|
355
354
|
result = subagent.invoke(subagent_state)
|
|
356
|
-
return _return_command_with_state_update(result, tool_call_id)
|
|
355
|
+
return _return_command_with_state_update(result, runtime.tool_call_id)
|
|
357
356
|
|
|
358
357
|
async def atask(
|
|
359
358
|
description: str,
|
|
360
359
|
subagent_type: str,
|
|
361
|
-
|
|
362
|
-
tool_call_id: Annotated[str, InjectedToolCallId],
|
|
360
|
+
runtime: ToolRuntime,
|
|
363
361
|
) -> str | Command:
|
|
364
|
-
subagent, subagent_state = _validate_and_prepare_state(subagent_type, description,
|
|
362
|
+
subagent, subagent_state = _validate_and_prepare_state(subagent_type, description, runtime)
|
|
365
363
|
result = await subagent.ainvoke(subagent_state)
|
|
366
|
-
return _return_command_with_state_update(result, tool_call_id)
|
|
364
|
+
return _return_command_with_state_update(result, runtime.tool_call_id)
|
|
367
365
|
|
|
368
366
|
return StructuredTool.from_function(
|
|
369
367
|
name="task",
|
|
@@ -0,0 +1,533 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: deepagents
|
|
3
|
+
Version: 0.0.12rc3
|
|
4
|
+
Summary: General purpose 'deep agent' with sub-agent spawning, todo list capabilities, and mock file system. Built on LangGraph.
|
|
5
|
+
License: MIT
|
|
6
|
+
Requires-Python: <4.0,>=3.11
|
|
7
|
+
Description-Content-Type: text/markdown
|
|
8
|
+
License-File: LICENSE
|
|
9
|
+
Requires-Dist: langgraph==1.0.0a4
|
|
10
|
+
Requires-Dist: langchain-anthropic==1.0.0a5
|
|
11
|
+
Requires-Dist: langchain==1.0.0rc2
|
|
12
|
+
Requires-Dist: langgraph-prebuilt==0.7.0a2
|
|
13
|
+
Requires-Dist: langchain-core==1.0.0rc3
|
|
14
|
+
Provides-Extra: dev
|
|
15
|
+
Requires-Dist: pytest; extra == "dev"
|
|
16
|
+
Requires-Dist: pytest-cov; extra == "dev"
|
|
17
|
+
Requires-Dist: build; extra == "dev"
|
|
18
|
+
Requires-Dist: twine; extra == "dev"
|
|
19
|
+
Requires-Dist: langchain-openai; extra == "dev"
|
|
20
|
+
Dynamic: license-file
|
|
21
|
+
|
|
22
|
+
# 🧠🤖Deep Agents
|
|
23
|
+
|
|
24
|
+
Using an LLM to call tools in a loop is the simplest form of an agent.
|
|
25
|
+
This architecture, however, can yield agents that are “shallow” and fail to plan and act over longer, more complex tasks.
|
|
26
|
+
|
|
27
|
+
Applications like “Deep Research”, "Manus", and “Claude Code” have gotten around this limitation by implementing a combination of four things:
|
|
28
|
+
a **planning tool**, **sub agents**, access to a **file system**, and a **detailed prompt**.
|
|
29
|
+
|
|
30
|
+
<img src="deep_agents.png" alt="deep agent" width="600"/>
|
|
31
|
+
|
|
32
|
+
`deepagents` is a Python package that implements these in a general purpose way so that you can easily create a Deep Agent for your application.
|
|
33
|
+
|
|
34
|
+
**Acknowledgements: This project was primarily inspired by Claude Code, and initially was largely an attempt to see what made Claude Code general purpose, and make it even more so.**
|
|
35
|
+
|
|
36
|
+
## Installation
|
|
37
|
+
|
|
38
|
+
```bash
|
|
39
|
+
# pip
|
|
40
|
+
pip install deepagents
|
|
41
|
+
|
|
42
|
+
# uv
|
|
43
|
+
uv add deepagents
|
|
44
|
+
|
|
45
|
+
# poetry
|
|
46
|
+
poetry add deepagents
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
## Usage
|
|
50
|
+
|
|
51
|
+
(To run the example below, you will need to `pip install tavily-python`).
|
|
52
|
+
|
|
53
|
+
Make sure to set `TAVILY_API_KEY` in your environment. You can generate one [here](https://www.tavily.com/).
|
|
54
|
+
|
|
55
|
+
```python
|
|
56
|
+
import os
|
|
57
|
+
from typing import Literal
|
|
58
|
+
from tavily import TavilyClient
|
|
59
|
+
from deepagents import create_deep_agent
|
|
60
|
+
|
|
61
|
+
tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
|
|
62
|
+
|
|
63
|
+
# Web search tool
|
|
64
|
+
def internet_search(
|
|
65
|
+
query: str,
|
|
66
|
+
max_results: int = 5,
|
|
67
|
+
topic: Literal["general", "news", "finance"] = "general",
|
|
68
|
+
include_raw_content: bool = False,
|
|
69
|
+
):
|
|
70
|
+
"""Run a web search"""
|
|
71
|
+
return tavily_client.search(
|
|
72
|
+
query,
|
|
73
|
+
max_results=max_results,
|
|
74
|
+
include_raw_content=include_raw_content,
|
|
75
|
+
topic=topic,
|
|
76
|
+
)
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
# System prompt to steer the agent to be an expert researcher
|
|
80
|
+
research_instructions = """You are an expert researcher. Your job is to conduct thorough research, and then write a polished report.
|
|
81
|
+
|
|
82
|
+
You have access to an internet search tool as your primary means of gathering information.
|
|
83
|
+
|
|
84
|
+
## `internet_search`
|
|
85
|
+
|
|
86
|
+
Use this to run an internet search for a given query. You can specify the max number of results to return, the topic, and whether raw content should be included.
|
|
87
|
+
"""
|
|
88
|
+
|
|
89
|
+
# Create the deep agent
|
|
90
|
+
agent = create_deep_agent(
|
|
91
|
+
tools=[internet_search],
|
|
92
|
+
system_prompt=research_instructions,
|
|
93
|
+
)
|
|
94
|
+
|
|
95
|
+
# Invoke the agent
|
|
96
|
+
result = agent.invoke({"messages": [{"role": "user", "content": "What is langgraph?"}]})
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
See [examples/research/research_agent.py](examples/research/research_agent.py) for a more complex example.
|
|
100
|
+
|
|
101
|
+
The agent created with `create_deep_agent` is just a LangGraph graph - so you can interact with it (streaming, human-in-the-loop, memory, studio)
|
|
102
|
+
in the same way you would any LangGraph agent.
|
|
103
|
+
|
|
104
|
+
## Core Capabilities
|
|
105
|
+
**Planning & Task Decomposition**
|
|
106
|
+
|
|
107
|
+
Deep Agents include a built-in `write_todos` tool that enables agents to break down complex tasks into discrete steps, track progress, and adapt plans as new information emerges.
|
|
108
|
+
|
|
109
|
+
**Context Management**
|
|
110
|
+
|
|
111
|
+
File system tools (`ls`, `read_file`, `write_file`, `edit_file`) allow agents to offload large context to memory, preventing context window overflow and enabling work with variable-length tool results.
|
|
112
|
+
|
|
113
|
+
**Subagent Spawning**
|
|
114
|
+
|
|
115
|
+
A built-in `task` tool enables agents to spawn specialized subagents for context isolation. This keeps the main agent’s context clean while still going deep on specific subtasks.
|
|
116
|
+
|
|
117
|
+
**Long-term Memory**
|
|
118
|
+
|
|
119
|
+
Extend agents with persistent memory across threads using LangGraph’s Store. Agents can save and retrieve information from previous conversations.
|
|
120
|
+
|
|
121
|
+
## Customizing Deep Agents
|
|
122
|
+
|
|
123
|
+
There are several parameters you can pass to `create_deep_agent` to create your own custom deep agent.
|
|
124
|
+
|
|
125
|
+
### `model`
|
|
126
|
+
|
|
127
|
+
By default, `deepagents` uses `"claude-sonnet-4-5-20250929"`. You can customize this by passing any [LangChain model object](https://python.langchain.com/docs/integrations/chat/).
|
|
128
|
+
|
|
129
|
+
```python
|
|
130
|
+
from langchain.chat_models import init_chat_model
|
|
131
|
+
from deepagents import create_deep_agent
|
|
132
|
+
|
|
133
|
+
model = init_chat_model(
|
|
134
|
+
model="openai:gpt-5",
|
|
135
|
+
)
|
|
136
|
+
agent = create_deep_agent(
|
|
137
|
+
model=model,
|
|
138
|
+
)
|
|
139
|
+
```
|
|
140
|
+
|
|
141
|
+
### `system_prompt`
|
|
142
|
+
Deep Agents come with a built-in system prompt. This is a relatively detailed prompt that is heavily based on and inspired by [attempts](https://github.com/kn1026/cc/blob/main/claudecode.md) to [replicate](https://github.com/asgeirtj/system_prompts_leaks/blob/main/Anthropic/claude-code.md)
|
|
143
|
+
Claude Code's system prompt. It was made more general purpose than Claude Code's system prompt. The default prompt contains detailed instructions for how to use the built-in planning tool, file system tools, and sub agents.
|
|
144
|
+
|
|
145
|
+
Each deep agent tailored to a use case should include a custom system prompt specific to that use case as well. The importance of prompting for creating a successful deep agent cannot be overstated.
|
|
146
|
+
|
|
147
|
+
```python
|
|
148
|
+
from deepagents import create_deep_agent
|
|
149
|
+
|
|
150
|
+
research_instructions = """You are an expert researcher. Your job is to conduct thorough research, and then write a polished report.
|
|
151
|
+
"""
|
|
152
|
+
|
|
153
|
+
agent = create_deep_agent(
|
|
154
|
+
system_prompt=research_instructions,
|
|
155
|
+
)
|
|
156
|
+
```
|
|
157
|
+
|
|
158
|
+
### `tools`
|
|
159
|
+
|
|
160
|
+
Just like with tool-calling agents, you can provide a deep agent with a set of tools that it has access to.
|
|
161
|
+
|
|
162
|
+
```python
|
|
163
|
+
import os
|
|
164
|
+
from typing import Literal
|
|
165
|
+
from tavily import TavilyClient
|
|
166
|
+
from deepagents import create_deep_agent
|
|
167
|
+
|
|
168
|
+
tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
|
|
169
|
+
|
|
170
|
+
def internet_search(
|
|
171
|
+
query: str,
|
|
172
|
+
max_results: int = 5,
|
|
173
|
+
topic: Literal["general", "news", "finance"] = "general",
|
|
174
|
+
include_raw_content: bool = False,
|
|
175
|
+
):
|
|
176
|
+
"""Run a web search"""
|
|
177
|
+
return tavily_client.search(
|
|
178
|
+
query,
|
|
179
|
+
max_results=max_results,
|
|
180
|
+
include_raw_content=include_raw_content,
|
|
181
|
+
topic=topic,
|
|
182
|
+
)
|
|
183
|
+
|
|
184
|
+
agent = create_deep_agent(
|
|
185
|
+
tools=[internet_search]
|
|
186
|
+
)
|
|
187
|
+
```
|
|
188
|
+
|
|
189
|
+
### `middleware`
|
|
190
|
+
`create_deep_agent` is implemented with middleware that can be customized. You can provide additional middleware to extend functionality, add tools, or implement custom hooks.
|
|
191
|
+
|
|
192
|
+
```python
|
|
193
|
+
from langchain_core.tools import tool
|
|
194
|
+
from deepagents import create_deep_agent
|
|
195
|
+
from langchain.agents.middleware import AgentMiddleware
|
|
196
|
+
|
|
197
|
+
@tool
|
|
198
|
+
def get_weather(city: str) -> str:
|
|
199
|
+
"""Get the weather in a city."""
|
|
200
|
+
return f"The weather in {city} is sunny."
|
|
201
|
+
|
|
202
|
+
@tool
|
|
203
|
+
def get_temperature(city: str) -> str:
|
|
204
|
+
"""Get the temperature in a city."""
|
|
205
|
+
return f"The temperature in {city} is 70 degrees Fahrenheit."
|
|
206
|
+
|
|
207
|
+
class WeatherMiddleware(AgentMiddleware):
|
|
208
|
+
tools = [get_weather, get_temperature]
|
|
209
|
+
|
|
210
|
+
agent = create_deep_agent(
|
|
211
|
+
model="anthropic:claude-sonnet-4-20250514",
|
|
212
|
+
middleware=[WeatherMiddleware()]
|
|
213
|
+
)
|
|
214
|
+
```
|
|
215
|
+
|
|
216
|
+
### `subagents`
|
|
217
|
+
|
|
218
|
+
A main feature of Deep Agents is their ability to spawn subagents. You can specify custom subagents that your agent can hand off work to in the subagents parameter. Sub agents are useful for context quarantine (to help not pollute the overall context of the main agent) as well as custom instructions.
|
|
219
|
+
|
|
220
|
+
`subagents` should be a list of dictionaries, where each dictionary follows this schema:
|
|
221
|
+
|
|
222
|
+
```python
|
|
223
|
+
class SubAgent(TypedDict):
|
|
224
|
+
name: str
|
|
225
|
+
description: str
|
|
226
|
+
prompt: str
|
|
227
|
+
tools: Sequence[BaseTool | Callable | dict[str, Any]]
|
|
228
|
+
model: NotRequired[str | BaseChatModel]
|
|
229
|
+
middleware: NotRequired[list[AgentMiddleware]]
|
|
230
|
+
interrupt_on: NotRequired[dict[str, bool | InterruptOnConfig]]
|
|
231
|
+
|
|
232
|
+
class CompiledSubAgent(TypedDict):
|
|
233
|
+
name: str
|
|
234
|
+
description: str
|
|
235
|
+
runnable: Runnable
|
|
236
|
+
```
|
|
237
|
+
|
|
238
|
+
**SubAgent fields:**
|
|
239
|
+
- **name**: This is the name of the subagent, and how the main agent will call the subagent
|
|
240
|
+
- **description**: This is the description of the subagent that is shown to the main agent
|
|
241
|
+
- **prompt**: This is the prompt used for the subagent
|
|
242
|
+
- **tools**: This is the list of tools that the subagent has access to.
|
|
243
|
+
- **model**: Optional model name or model instance.
|
|
244
|
+
- **middleware**: Additional middleware to attach to the subagent. See [here](https://docs.langchain.com/oss/python/langchain/middleware) for an introduction to middleware and how it works with create_agent.
|
|
245
|
+
- **interrupt_on**: A custom interrupt config that specifies human-in-the-loop interactions for your tools.
|
|
246
|
+
|
|
247
|
+
**CompiledSubAgent fields:**
|
|
248
|
+
- **name**: This is the name of the subagent, and how the main agent will call the subagent
|
|
249
|
+
- **description**: This is the description of the subagent that is shown to the main agent
|
|
250
|
+
- **runnable**: A pre-built LangGraph graph/agent that will be used as the subagent
|
|
251
|
+
|
|
252
|
+
#### Using SubAgent
|
|
253
|
+
|
|
254
|
+
```python
|
|
255
|
+
import os
|
|
256
|
+
from typing import Literal
|
|
257
|
+
from tavily import TavilyClient
|
|
258
|
+
from deepagents import create_deep_agent
|
|
259
|
+
|
|
260
|
+
tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
|
|
261
|
+
|
|
262
|
+
def internet_search(
|
|
263
|
+
query: str,
|
|
264
|
+
max_results: int = 5,
|
|
265
|
+
topic: Literal["general", "news", "finance"] = "general",
|
|
266
|
+
include_raw_content: bool = False,
|
|
267
|
+
):
|
|
268
|
+
"""Run a web search"""
|
|
269
|
+
return tavily_client.search(
|
|
270
|
+
query,
|
|
271
|
+
max_results=max_results,
|
|
272
|
+
include_raw_content=include_raw_content,
|
|
273
|
+
topic=topic,
|
|
274
|
+
)
|
|
275
|
+
|
|
276
|
+
research_subagent = {
|
|
277
|
+
"name": "research-agent",
|
|
278
|
+
"description": "Used to research more in depth questions",
|
|
279
|
+
"system_prompt": "You are a great researcher",
|
|
280
|
+
"tools": [internet_search],
|
|
281
|
+
"model": "openai:gpt-4o", # Optional override, defaults to main agent model
|
|
282
|
+
}
|
|
283
|
+
subagents = [research_subagent]
|
|
284
|
+
|
|
285
|
+
agent = create_deep_agent(
|
|
286
|
+
model="anthropic:claude-sonnet-4-20250514",
|
|
287
|
+
subagents=subagents
|
|
288
|
+
)
|
|
289
|
+
```
|
|
290
|
+
|
|
291
|
+
#### Using CustomSubAgent
|
|
292
|
+
|
|
293
|
+
For more complex use cases, you can provide your own pre-built LangGraph graph as a subagent:
|
|
294
|
+
|
|
295
|
+
```python
|
|
296
|
+
# Create a custom agent graph
|
|
297
|
+
custom_graph = create_agent(
|
|
298
|
+
model=your_model,
|
|
299
|
+
tools=specialized_tools,
|
|
300
|
+
prompt="You are a specialized agent for data analysis..."
|
|
301
|
+
)
|
|
302
|
+
|
|
303
|
+
# Use it as a custom subagent
|
|
304
|
+
custom_subagent = CompiledSubAgent(
|
|
305
|
+
name="data-analyzer",
|
|
306
|
+
description="Specialized agent for complex data analysis tasks",
|
|
307
|
+
runnable=custom_graph
|
|
308
|
+
)
|
|
309
|
+
|
|
310
|
+
subagents = [custom_subagent]
|
|
311
|
+
|
|
312
|
+
agent = create_deep_agent(
|
|
313
|
+
model="anthropic:claude-sonnet-4-20250514",
|
|
314
|
+
tools=[internet_search],
|
|
315
|
+
system_prompt=research_instructions,
|
|
316
|
+
subagents=subagents
|
|
317
|
+
)
|
|
318
|
+
```
|
|
319
|
+
|
|
320
|
+
### `use_longterm_memory`
|
|
321
|
+
Deep agents come with a local filesystem to offload memory to. This filesystem is stored in state, and is therefore transient to a single thread.
|
|
322
|
+
|
|
323
|
+
You can extend deep agents with long-term memory by providing a Store and setting use_longterm_memory=True.
|
|
324
|
+
|
|
325
|
+
```python
|
|
326
|
+
from deepagents import create_deep_agent
|
|
327
|
+
from langgraph.store.memory import InMemoryStore
|
|
328
|
+
|
|
329
|
+
store = InMemoryStore() # Or any other Store object
|
|
330
|
+
agent = create_deep_agent(
|
|
331
|
+
store=store,
|
|
332
|
+
use_longterm_memory=True
|
|
333
|
+
)
|
|
334
|
+
```
|
|
335
|
+
|
|
336
|
+
### `interrupt_on`
|
|
337
|
+
A common reality for agents is that some tool operations may be sensitive and require human approval before execution. Deep Agents supports human-in-the-loop workflows through LangGraph’s interrupt capabilities. You can configure which tools require approval using a checkpointer.
|
|
338
|
+
|
|
339
|
+
These tool configs are passed to our prebuilt [HITL middleware](https://docs.langchain.com/oss/python/langchain/middleware#human-in-the-loop) so that the agent pauses execution and waits for feedback from the user before executing configured tools.
|
|
340
|
+
|
|
341
|
+
```python
|
|
342
|
+
from langchain_core.tools import tool
|
|
343
|
+
from deepagents import create_deep_agent
|
|
344
|
+
|
|
345
|
+
@tool
|
|
346
|
+
def get_weather(city: str) -> str:
|
|
347
|
+
"""Get the weather in a city."""
|
|
348
|
+
return f"The weather in {city} is sunny."
|
|
349
|
+
|
|
350
|
+
agent = create_deep_agent(
|
|
351
|
+
model="anthropic:claude-sonnet-4-20250514",
|
|
352
|
+
tools=[get_weather],
|
|
353
|
+
interrupt_on={
|
|
354
|
+
"get_weather": {
|
|
355
|
+
"allowed_decisions": ["approve", "edit", "reject"]
|
|
356
|
+
},
|
|
357
|
+
}
|
|
358
|
+
)
|
|
359
|
+
|
|
360
|
+
```
|
|
361
|
+
|
|
362
|
+
## Deep Agents Middleware
|
|
363
|
+
|
|
364
|
+
Deep Agents are built with a modular middleware architecture. As a reminder, Deep Agents have access to:
|
|
365
|
+
- A planning tool
|
|
366
|
+
- A filesystem for storing context and long-term memories
|
|
367
|
+
- The ability to spawn subagents
|
|
368
|
+
|
|
369
|
+
Each of these features is implemented as separate middleware. When you create a deep agent with `create_deep_agent`, we automatically attach **PlanningMiddleware**, **FilesystemMiddleware** and **SubAgentMiddleware** to your agent.
|
|
370
|
+
|
|
371
|
+
Middleware is a composable concept, and you can choose to add as many or as few middleware to an agent depending on your use case. That means that you can also use any of the aforementioned middleware independently!
|
|
372
|
+
|
|
373
|
+
### TodoListMiddleware
|
|
374
|
+
|
|
375
|
+
Planning is integral to solving complex problems. If you’ve used Claude Code recently, you’ll notice how it writes out a To-Do list before tackling complex, multi-part tasks. You’ll also notice how it can adapt and update this To-Do list on the fly as more information comes in.
|
|
376
|
+
|
|
377
|
+
**TodoListMiddleware** provides your agent with a tool specifically for updating this To-Do list. Before and while executing a multi-part task, the agent is prompted to use the write_todos tool to keep track of what it’s doing, and what still needs to be done.
|
|
378
|
+
|
|
379
|
+
```python
|
|
380
|
+
from langchain.agents import create_agent
|
|
381
|
+
from langchain.agents.middleware import TodoListMiddleware
|
|
382
|
+
|
|
383
|
+
# TodoListMiddleware is included by default in create_deep_agent
|
|
384
|
+
# You can customize it if building a custom agent
|
|
385
|
+
agent = create_agent(
|
|
386
|
+
model="anthropic:claude-sonnet-4-20250514",
|
|
387
|
+
# Custom planning instructions can be added via middleware
|
|
388
|
+
middleware=[
|
|
389
|
+
TodoListMiddleware(
|
|
390
|
+
system_prompt="Use the write_todos tool to..." # Optional: Custom addition to the system prompt
|
|
391
|
+
),
|
|
392
|
+
],
|
|
393
|
+
)
|
|
394
|
+
```
|
|
395
|
+
|
|
396
|
+
### FilesystemMiddleware
|
|
397
|
+
|
|
398
|
+
Context engineering is one of the main challenges in building effective agents. This can be particularly hard when using tools that can return variable length results (e.g. web_search, RAG), as long ToolResults can quickly fill up your context window.
|
|
399
|
+
**FilesystemMiddleware** provides four tools to your agent to interact with both short-term and long-term memory.
|
|
400
|
+
- **ls**: List the files in your filesystem
|
|
401
|
+
- **read_file**: Read an entire file, or a certain number of lines from a file
|
|
402
|
+
- **write_file**: Write a new file to your filesystem
|
|
403
|
+
- **edit_file**: Edit an existing file in your filesystem
|
|
404
|
+
|
|
405
|
+
```python
|
|
406
|
+
from langchain.agents import create_agent
|
|
407
|
+
from deepagents.middleware.filesystem import FilesystemMiddleware
|
|
408
|
+
|
|
409
|
+
# FilesystemMiddleware is included by default in create_deep_agent
|
|
410
|
+
# You can customize it if building a custom agent
|
|
411
|
+
agent = create_agent(
|
|
412
|
+
model="anthropic:claude-sonnet-4-20250514",
|
|
413
|
+
middleware=[
|
|
414
|
+
FilesystemMiddleware(
|
|
415
|
+
long_term_memory=False, # Enables access to long-term memory, defaults to False. You must attach a store to use long-term memory.
|
|
416
|
+
system_prompt="Write to the filesystem when...", # Optional custom addition to the system prompt
|
|
417
|
+
custom_tool_descriptions={
|
|
418
|
+
"ls": "Use the ls tool when...",
|
|
419
|
+
"read_file": "Use the read_file tool to..."
|
|
420
|
+
} # Optional: Custom descriptions for filesystem tools
|
|
421
|
+
),
|
|
422
|
+
],
|
|
423
|
+
)
|
|
424
|
+
```
|
|
425
|
+
|
|
426
|
+
### SubAgentMiddleware
|
|
427
|
+
|
|
428
|
+
Handing off tasks to subagents is a great way to isolate context, keeping the context window of the main (supervisor) agent clean while still going deep on a task. The subagents middleware allows you to supply subagents through a `task` tool.
|
|
429
|
+
|
|
430
|
+
A subagent is defined with a name, description, system prompt, and tools. You can also provide a subagent with a custom model, or with additional middleware. This can be particularly useful when you want to give the subagent an additional state key to share with the main agent.
|
|
431
|
+
|
|
432
|
+
```python
|
|
433
|
+
from langchain_core.tools import tool
|
|
434
|
+
from langchain.agents import create_agent
|
|
435
|
+
from deepagents.middleware.subagents import SubAgentMiddleware
|
|
436
|
+
|
|
437
|
+
|
|
438
|
+
@tool
|
|
439
|
+
def get_weather(city: str) -> str:
|
|
440
|
+
"""Get the weather in a city."""
|
|
441
|
+
return f"The weather in {city} is sunny."
|
|
442
|
+
|
|
443
|
+
agent = create_agent(
|
|
444
|
+
model="claude-sonnet-4-20250514",
|
|
445
|
+
middleware=[
|
|
446
|
+
SubAgentMiddleware(
|
|
447
|
+
default_model="claude-sonnet-4-20250514",
|
|
448
|
+
default_tools=[],
|
|
449
|
+
subagents=[
|
|
450
|
+
{
|
|
451
|
+
"name": "weather",
|
|
452
|
+
"description": "This subagent can get weather in cities.",
|
|
453
|
+
"system_prompt": "Use the get_weather tool to get the weather in a city.",
|
|
454
|
+
"tools": [get_weather],
|
|
455
|
+
"model": "gpt-4.1",
|
|
456
|
+
"middleware": [],
|
|
457
|
+
}
|
|
458
|
+
],
|
|
459
|
+
)
|
|
460
|
+
],
|
|
461
|
+
)
|
|
462
|
+
```
|
|
463
|
+
|
|
464
|
+
For more complex use cases, you can also provide your own pre-built LangGraph graph as a subagent.
|
|
465
|
+
|
|
466
|
+
```python
|
|
467
|
+
# Create a custom LangGraph graph
|
|
468
|
+
def create_weather_graph():
|
|
469
|
+
workflow = StateGraph(...)
|
|
470
|
+
# Build your custom graph
|
|
471
|
+
return workflow.compile()
|
|
472
|
+
|
|
473
|
+
weather_graph = create_weather_graph()
|
|
474
|
+
|
|
475
|
+
# Wrap it in a CompiledSubAgent
|
|
476
|
+
weather_subagent = CompiledSubAgent(
|
|
477
|
+
name="weather",
|
|
478
|
+
description="This subagent can get weather in cities.",
|
|
479
|
+
runnable=weather_graph
|
|
480
|
+
)
|
|
481
|
+
|
|
482
|
+
agent = create_agent(
|
|
483
|
+
model="anthropic:claude-sonnet-4-20250514",
|
|
484
|
+
middleware=[
|
|
485
|
+
SubAgentMiddleware(
|
|
486
|
+
default_model="claude-sonnet-4-20250514",
|
|
487
|
+
default_tools=[],
|
|
488
|
+
subagents=[weather_subagent],
|
|
489
|
+
)
|
|
490
|
+
],
|
|
491
|
+
)
|
|
492
|
+
```
|
|
493
|
+
|
|
494
|
+
## Sync vs Async
|
|
495
|
+
|
|
496
|
+
Prior versions of deepagents separated sync and async agent factories.
|
|
497
|
+
|
|
498
|
+
`async_create_deep_agent` has been folded into `create_deep_agent`.
|
|
499
|
+
|
|
500
|
+
**You should use `create_deep_agent` as the factory for both sync and async agents**
|
|
501
|
+
|
|
502
|
+
|
|
503
|
+
## MCP
|
|
504
|
+
|
|
505
|
+
The `deepagents` library can be run with MCP tools. This can be achieved by using the [LangChain MCP Adapter library](https://github.com/langchain-ai/langchain-mcp-adapters).
|
|
506
|
+
|
|
507
|
+
**NOTE:** MCP tools are async, so invoke the agent with its async methods (e.g. `ainvoke` / `astream`). `create_deep_agent` supports both sync and async usage, so no separate async factory is needed.
|
|
508
|
+
|
|
509
|
+
(To run the example below, will need to `pip install langchain-mcp-adapters`)
|
|
510
|
+
|
|
511
|
+
```python
|
|
512
|
+
import asyncio
|
|
513
|
+
from langchain_mcp_adapters.client import MultiServerMCPClient
|
|
514
|
+
from deepagents import create_deep_agent
|
|
515
|
+
|
|
516
|
+
async def main():
|
|
517
|
+
# Collect MCP tools
|
|
518
|
+
mcp_client = MultiServerMCPClient(...)
|
|
519
|
+
mcp_tools = await mcp_client.get_tools()
|
|
520
|
+
|
|
521
|
+
# Create agent
|
|
522
|
+
    agent = create_deep_agent(tools=mcp_tools, ...)
|
|
523
|
+
|
|
524
|
+
# Stream the agent
|
|
525
|
+
async for chunk in agent.astream(
|
|
526
|
+
{"messages": [{"role": "user", "content": "what is langgraph?"}]},
|
|
527
|
+
stream_mode="values"
|
|
528
|
+
):
|
|
529
|
+
if "messages" in chunk:
|
|
530
|
+
chunk["messages"][-1].pretty_print()
|
|
531
|
+
|
|
532
|
+
asyncio.run(main())
|
|
533
|
+
```
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
deepagents/__init__.py,sha256=9BVNn4lfF5N8l2KY8Ttxi82zO609I-fGqoSIF7DAxiU,342
|
|
2
|
+
deepagents/graph.py,sha256=z2Vpdl_6WpWIJf6qibE87mHGinXpJA-bvm8IYjTOURk,5854
|
|
3
|
+
deepagents/middleware/__init__.py,sha256=J7372TNGR27OU4C3uuQMryHHpXOBjFV_4aEZ_AoQ6n0,284
|
|
4
|
+
deepagents/middleware/filesystem.py,sha256=mBx0845irJf7Lf558XCBf_ShqJT7YGepu2NmvjIZmeY,43739
|
|
5
|
+
deepagents/middleware/subagents.py,sha256=pBJwBuOw6Y7L94hwOClVQ7msnEnom7U9qQH_xy9LoTY,23343
|
|
6
|
+
deepagents-0.0.12rc3.dist-info/licenses/LICENSE,sha256=c__BaxUCK69leo2yEKynf8lWndu8iwYwge1CbyqAe-E,1071
|
|
7
|
+
deepagents-0.0.12rc3.dist-info/METADATA,sha256=zPnVQ7lUHrkXqws4v_smGP66fRtQkPhDQQ1p5JSzrgQ,19161
|
|
8
|
+
deepagents-0.0.12rc3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
9
|
+
deepagents-0.0.12rc3.dist-info/top_level.txt,sha256=drAzchOzPNePwpb3_pbPuvLuayXkN7SNqeIKMBWJoAo,11
|
|
10
|
+
deepagents-0.0.12rc3.dist-info/RECORD,,
|
|
@@ -1,455 +0,0 @@
|
|
|
1
|
-
Metadata-Version: 2.4
|
|
2
|
-
Name: deepagents
|
|
3
|
-
Version: 0.0.12rc2
|
|
4
|
-
Summary: General purpose 'deep agent' with sub-agent spawning, todo list capabilities, and mock file system. Built on LangGraph.
|
|
5
|
-
License: MIT
|
|
6
|
-
Requires-Python: <4.0,>=3.11
|
|
7
|
-
Description-Content-Type: text/markdown
|
|
8
|
-
License-File: LICENSE
|
|
9
|
-
Requires-Dist: langgraph>=1.0.0a4
|
|
10
|
-
Requires-Dist: langchain-anthropic>=1.0.0a4
|
|
11
|
-
Requires-Dist: langchain>=1.0.0a15
|
|
12
|
-
Requires-Dist: langgraph-prebuilt>=0.7.0a2
|
|
13
|
-
Requires-Dist: langchain-core>=1.0.0a6
|
|
14
|
-
Provides-Extra: dev
|
|
15
|
-
Requires-Dist: pytest; extra == "dev"
|
|
16
|
-
Requires-Dist: pytest-cov; extra == "dev"
|
|
17
|
-
Requires-Dist: build; extra == "dev"
|
|
18
|
-
Requires-Dist: twine; extra == "dev"
|
|
19
|
-
Requires-Dist: langchain-openai; extra == "dev"
|
|
20
|
-
Dynamic: license-file
|
|
21
|
-
|
|
22
|
-
# 🧠🤖Deep Agents
|
|
23
|
-
|
|
24
|
-
Using an LLM to call tools in a loop is the simplest form of an agent.
|
|
25
|
-
This architecture, however, can yield agents that are “shallow” and fail to plan and act over longer, more complex tasks.
|
|
26
|
-
Applications like “Deep Research”, "Manus", and “Claude Code” have gotten around this limitation by implementing a combination of four things:
|
|
27
|
-
a **planning tool**, **sub agents**, access to a **file system**, and a **detailed prompt**.
|
|
28
|
-
|
|
29
|
-
<img src="deep_agents.png" alt="deep agent" width="600"/>
|
|
30
|
-
|
|
31
|
-
`deepagents` is a Python package that implements these in a general purpose way so that you can easily create a Deep Agent for your application.
|
|
32
|
-
|
|
33
|
-
**Acknowledgements: This project was primarily inspired by Claude Code, and initially was largely an attempt to see what made Claude Code general purpose, and make it even more so.**
|
|
34
|
-
|
|
35
|
-
## Installation
|
|
36
|
-
|
|
37
|
-
```bash
|
|
38
|
-
pip install deepagents
|
|
39
|
-
```
|
|
40
|
-
|
|
41
|
-
## Usage
|
|
42
|
-
|
|
43
|
-
(To run the example below, will need to `pip install tavily-python`)
|
|
44
|
-
|
|
45
|
-
```python
|
|
46
|
-
import os
|
|
47
|
-
from typing import Literal
|
|
48
|
-
from tavily import TavilyClient
|
|
49
|
-
from deepagents import create_deep_agent
|
|
50
|
-
|
|
51
|
-
tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
|
|
52
|
-
|
|
53
|
-
# Search tool to use to do research
|
|
54
|
-
def internet_search(
|
|
55
|
-
query: str,
|
|
56
|
-
max_results: int = 5,
|
|
57
|
-
topic: Literal["general", "news", "finance"] = "general",
|
|
58
|
-
include_raw_content: bool = False,
|
|
59
|
-
):
|
|
60
|
-
"""Run a web search"""
|
|
61
|
-
return tavily_client.search(
|
|
62
|
-
query,
|
|
63
|
-
max_results=max_results,
|
|
64
|
-
include_raw_content=include_raw_content,
|
|
65
|
-
topic=topic,
|
|
66
|
-
)
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
# Prompt prefix to steer the agent to be an expert researcher
|
|
70
|
-
research_instructions = """You are an expert researcher. Your job is to conduct thorough research, and then write a polished report.
|
|
71
|
-
|
|
72
|
-
You have access to a few tools.
|
|
73
|
-
|
|
74
|
-
## `internet_search`
|
|
75
|
-
|
|
76
|
-
Use this to run an internet search for a given query. You can specify the number of results, the topic, and whether raw content should be included.
|
|
77
|
-
"""
|
|
78
|
-
|
|
79
|
-
# Create the agent
|
|
80
|
-
agent = create_deep_agent(
|
|
81
|
-
tools=[internet_search],
|
|
82
|
-
system_prompt=research_instructions,
|
|
83
|
-
)
|
|
84
|
-
|
|
85
|
-
# Invoke the agent
|
|
86
|
-
result = agent.invoke({"messages": [{"role": "user", "content": "what is langgraph?"}]})
|
|
87
|
-
```
|
|
88
|
-
|
|
89
|
-
See [examples/research/research_agent.py](examples/research/research_agent.py) for a more complex example.
|
|
90
|
-
|
|
91
|
-
The agent created with `create_deep_agent` is just a LangGraph graph - so you can interact with it (streaming, human-in-the-loop, memory, studio)
|
|
92
|
-
in the same way you would any LangGraph agent.
|
|
93
|
-
|
|
94
|
-
## Creating a custom deep agent
|
|
95
|
-
|
|
96
|
-
There are several parameters you can pass to `create_deep_agent` to create your own custom deep agent.
|
|
97
|
-
|
|
98
|
-
### `tools` (Required)
|
|
99
|
-
|
|
100
|
-
The first argument to `create_deep_agent` is `tools`.
|
|
101
|
-
This should be a list of functions or LangChain `@tool` objects.
|
|
102
|
-
The agent (and any subagents) will have access to these tools.
|
|
103
|
-
|
|
104
|
-
### `instructions` (Required)
|
|
105
|
-
|
|
106
|
-
The second argument to `create_deep_agent` is `instructions`.
|
|
107
|
-
This will serve as part of the prompt of the deep agent.
|
|
108
|
-
Note that our deep agent middleware appends further instructions to the deep agent regarding to-do list, filesystem, and subagent usage, so this is not the *entire* prompt the agent will see.
|
|
109
|
-
|
|
110
|
-
### `subagents` (Optional)
|
|
111
|
-
|
|
112
|
-
A keyword-only argument to `create_deep_agent` is `subagents`.
|
|
113
|
-
This can be used to specify any custom subagents this deep agent will have access to.
|
|
114
|
-
You can read more about why you would want to use subagents [here](#sub-agents)
|
|
115
|
-
|
|
116
|
-
`subagents` should be a list of dictionaries, where each dictionary follow this schema:
|
|
117
|
-
|
|
118
|
-
```python
|
|
119
|
-
class SubAgent(TypedDict):
|
|
120
|
-
name: str
|
|
121
|
-
description: str
|
|
122
|
-
prompt: str
|
|
123
|
-
tools: NotRequired[list[str]]
|
|
124
|
-
model: NotRequired[Union[LanguageModelLike, dict[str, Any]]]
|
|
125
|
-
middleware: NotRequired[list[AgentMiddleware]]
|
|
126
|
-
|
|
127
|
-
class CustomSubAgent(TypedDict):
|
|
128
|
-
name: str
|
|
129
|
-
description: str
|
|
130
|
-
graph: Runnable
|
|
131
|
-
```
|
|
132
|
-
|
|
133
|
-
**SubAgent fields:**
|
|
134
|
-
- **name**: This is the name of the subagent, and how the main agent will call the subagent
|
|
135
|
-
- **description**: This is the description of the subagent that is shown to the main agent
|
|
136
|
-
- **prompt**: This is the prompt used for the subagent
|
|
137
|
-
- **tools**: This is the list of tools that the subagent has access to. By default will have access to all tools passed in, as well as all built-in tools.
|
|
138
|
-
- **model**: Optional model instance OR dictionary for per-subagent model configuration (inherits the main model when omitted).
|
|
139
|
-
- **middleware** Additional middleware to attach to the subagent. See [here](https://docs.langchain.com/oss/python/langchain/middleware) for an introduction into middleware and how it works with create_agent.
|
|
140
|
-
|
|
141
|
-
**CustomSubAgent fields:**
|
|
142
|
-
- **name**: This is the name of the subagent, and how the main agent will call the subagent
|
|
143
|
-
- **description**: This is the description of the subagent that is shown to the main agent
|
|
144
|
-
- **graph**: A pre-built LangGraph graph/agent that will be used as the subagent
|
|
145
|
-
|
|
146
|
-
#### Using SubAgent
|
|
147
|
-
|
|
148
|
-
```python
|
|
149
|
-
research_subagent = {
|
|
150
|
-
"name": "research-agent",
|
|
151
|
-
"description": "Used to research more in depth questions",
|
|
152
|
-
"prompt": sub_research_prompt,
|
|
153
|
-
"tools": [internet_search]
|
|
154
|
-
}
|
|
155
|
-
subagents = [research_subagent]
|
|
156
|
-
agent = create_deep_agent(
|
|
157
|
-
tools,
|
|
158
|
-
prompt,
|
|
159
|
-
subagents=subagents
|
|
160
|
-
)
|
|
161
|
-
```
|
|
162
|
-
|
|
163
|
-
#### Using CustomSubAgent
|
|
164
|
-
|
|
165
|
-
For more complex use cases, you can provide your own pre-built LangGraph graph as a subagent:
|
|
166
|
-
|
|
167
|
-
```python
|
|
168
|
-
from langchain.agents import create_agent
|
|
169
|
-
|
|
170
|
-
# Create a custom agent graph
|
|
171
|
-
custom_graph = create_agent(
|
|
172
|
-
model=your_model,
|
|
173
|
-
tools=specialized_tools,
|
|
174
|
-
prompt="You are a specialized agent for data analysis..."
|
|
175
|
-
)
|
|
176
|
-
|
|
177
|
-
# Use it as a custom subagent
|
|
178
|
-
custom_subagent = {
|
|
179
|
-
"name": "data-analyzer",
|
|
180
|
-
"description": "Specialized agent for complex data analysis tasks",
|
|
181
|
-
"graph": custom_graph
|
|
182
|
-
}
|
|
183
|
-
|
|
184
|
-
subagents = [custom_subagent]
|
|
185
|
-
agent = create_deep_agent(
|
|
186
|
-
tools,
|
|
187
|
-
prompt,
|
|
188
|
-
subagents=subagents
|
|
189
|
-
)
|
|
190
|
-
```
|
|
191
|
-
|
|
192
|
-
### `model` (Optional)
|
|
193
|
-
|
|
194
|
-
By default, `deepagents` uses `"claude-sonnet-4-20250514"`. You can customize this by passing any [LangChain model object](https://python.langchain.com/docs/integrations/chat/).
|
|
195
|
-
|
|
196
|
-
#### Example: Using a Custom Model
|
|
197
|
-
|
|
198
|
-
Here's how to use a custom model (like OpenAI's `gpt-oss` model via Ollama):
|
|
199
|
-
|
|
200
|
-
(Requires `pip install langchain` and then `pip install langchain-ollama` for Ollama models)
|
|
201
|
-
|
|
202
|
-
```python
|
|
203
|
-
from deepagents import create_deep_agent
|
|
204
|
-
|
|
205
|
-
# ... existing agent definitions ...
|
|
206
|
-
|
|
207
|
-
model = init_chat_model(
|
|
208
|
-
model="ollama:gpt-oss:20b",
|
|
209
|
-
)
|
|
210
|
-
agent = create_deep_agent(
|
|
211
|
-
tools=tools,
|
|
212
|
-
instructions=instructions,
|
|
213
|
-
model=model,
|
|
214
|
-
...
|
|
215
|
-
)
|
|
216
|
-
```
|
|
217
|
-
|
|
218
|
-
#### Example: Per-subagent model override (optional)
|
|
219
|
-
|
|
220
|
-
Use a fast, deterministic model for a critique sub-agent, while keeping a different default model for the main agent and others:
|
|
221
|
-
|
|
222
|
-
```python
|
|
223
|
-
from deepagents import create_deep_agent
|
|
224
|
-
|
|
225
|
-
critique_sub_agent = {
|
|
226
|
-
"name": "critique-agent",
|
|
227
|
-
"description": "Critique the final report",
|
|
228
|
-
"prompt": "You are a tough editor.",
|
|
229
|
-
"model_settings": {
|
|
230
|
-
"model": "claude-sonnet-4-202505142",
|
|
231
|
-
"temperature": 0,
|
|
232
|
-
"max_tokens": 8192
|
|
233
|
-
}
|
|
234
|
-
}
|
|
235
|
-
|
|
236
|
-
agent = create_deep_agent(
|
|
237
|
-
tools=[internet_search],
|
|
238
|
-
instructions="You are an expert researcher...",
|
|
239
|
-
model="claude-sonnet-4-20250514", # default for main agent and other sub-agents
|
|
240
|
-
subagents=[critique_sub_agent],
|
|
241
|
-
)
|
|
242
|
-
```
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
### `middleware` (Optional)
|
|
246
|
-
Both the main agent and sub-agents can take additional custom AgentMiddleware. Middleware is the best supported approach for extending the state_schema, adding additional tools, and adding pre / post model hooks. See this [doc](https://docs.langchain.com/oss/python/langchain/middleware) to learn more about Middleware and how you can use it!
|
|
247
|
-
|
|
248
|
-
### `interrupt_on` (Optional)
|
|
249
|
-
Tool configs are used to specify how to handle Human In The Loop interactions on certain tools that require additional human oversight.
|
|
250
|
-
|
|
251
|
-
These tool configs are passed to our prebuilt [HITL middleware](https://docs.langchain.com/oss/python/langchain/middleware#human-in-the-loop) so that the agent pauses execution and waits for feedback from the user before executing configured tools.
|
|
252
|
-
|
|
253
|
-
## Deep Agent Details
|
|
254
|
-
|
|
255
|
-
The below components are built into `deepagents` and helps make it work for deep tasks off-the-shelf.
|
|
256
|
-
|
|
257
|
-
### System Prompt
|
|
258
|
-
|
|
259
|
-
`deepagents` comes with a [built-in system prompt](src/deepagents/prompts.py). This is relatively detailed prompt that is heavily based on and inspired by [attempts](https://github.com/kn1026/cc/blob/main/claudecode.md) to [replicate](https://github.com/asgeirtj/system_prompts_leaks/blob/main/Anthropic/claude-code.md)
|
|
260
|
-
Claude Code's system prompt. It was made more general purpose than Claude Code's system prompt.
|
|
261
|
-
This contains detailed instructions for how to use the built-in planning tool, file system tools, and sub agents.
|
|
262
|
-
Note that part of this system prompt [can be customized](#instructions-required)
|
|
263
|
-
|
|
264
|
-
Without this default system prompt - the agent would not be nearly as successful at going as it is.
|
|
265
|
-
The importance of prompting for creating a "deep" agent cannot be understated.
|
|
266
|
-
|
|
267
|
-
### Planning Tool
|
|
268
|
-
|
|
269
|
-
`deepagents` comes with a built-in planning tool. This planning tool is very simple and is based on ClaudeCode's TodoWrite tool.
|
|
270
|
-
This tool doesn't actually do anything - it is just a way for the agent to come up with a plan, and then have that in the context to help keep it on track.
|
|
271
|
-
|
|
272
|
-
### File System Tools
|
|
273
|
-
|
|
274
|
-
`deepagents` comes with four built-in file system tools: `ls`, `edit_file`, `read_file`, `write_file`.
|
|
275
|
-
These do not actually use a file system - rather, they mock out a file system using LangGraph's State object.
|
|
276
|
-
This means you can easily run many of these agents on the same machine without worrying that they will edit the same underlying files.
|
|
277
|
-
|
|
278
|
-
Right now the "file system" will only be one level deep (no sub directories).
|
|
279
|
-
|
|
280
|
-
These files can be passed in (and also retrieved) by using the `files` key in the LangGraph State object.
|
|
281
|
-
|
|
282
|
-
```python
|
|
283
|
-
agent = create_deep_agent(...)
|
|
284
|
-
|
|
285
|
-
result = agent.invoke({
|
|
286
|
-
"messages": ...,
|
|
287
|
-
# Pass in files to the agent using this key
|
|
288
|
-
# "files": {"foo.txt": "foo", ...}
|
|
289
|
-
})
|
|
290
|
-
|
|
291
|
-
# Access any files afterwards like this
|
|
292
|
-
result["files"]
|
|
293
|
-
```
|
|
294
|
-
|
|
295
|
-
### Sub Agents
|
|
296
|
-
|
|
297
|
-
`deepagents` comes with the built-in ability to call sub agents (based on Claude Code).
|
|
298
|
-
It has access to a `general-purpose` subagent at all times - this is a subagent with the same instructions as the main agent and all the tools that is has access to.
|
|
299
|
-
You can also specify [custom sub agents](#subagents-optional) with their own instructions and tools.
|
|
300
|
-
|
|
301
|
-
Sub agents are useful for ["context quarantine"](https://www.dbreunig.com/2025/06/26/how-to-fix-your-context.html#context-quarantine) (to help not pollute the overall context of the main agent)
|
|
302
|
-
as well as custom instructions.
|
|
303
|
-
|
|
304
|
-
### Built In Tools
|
|
305
|
-
|
|
306
|
-
By default, deep agents come with five built-in tools:
|
|
307
|
-
|
|
308
|
-
- `write_todos`: Tool for writing todos
|
|
309
|
-
- `write_file`: Tool for writing to a file in the virtual filesystem
|
|
310
|
-
- `read_file`: Tool for reading from a file in the virtual filesystem
|
|
311
|
-
- `ls`: Tool for listing files in the virtual filesystem
|
|
312
|
-
- `edit_file`: Tool for editing a file in the virtual filesystem
|
|
313
|
-
|
|
314
|
-
If you want to omit some deepagents functionality, use specific middleware components directly!
|
|
315
|
-
|
|
316
|
-
### Human-in-the-Loop
|
|
317
|
-
|
|
318
|
-
`deepagents` supports human-in-the-loop approval for tool execution. You can configure specific tools to require human approval before execution using the `interrupt_on` parameter, which maps tool names to a `HumanInTheLoopConfig`.
|
|
319
|
-
|
|
320
|
-
`HumanInTheLoopConfig` is how you specify what type of human in the loop patterns are supported.
|
|
321
|
-
It is a dictionary with four specific keys:
|
|
322
|
-
|
|
323
|
-
- `allow_accept`: Whether the human can approve the current action without changes
|
|
324
|
-
- `allow_respond`: Whether the human can reject the current action with feedback
|
|
325
|
-
- `allow_edit`: Whether the human can approve the current action with edited content
|
|
326
|
-
|
|
327
|
-
Instead of specifying a `HumanInTheLoopConfig` for a tool, you can also just set `True`. This will set `allow_ignore`, `allow_respond`, `allow_edit`, and `allow_accept` to be `True`.
|
|
328
|
-
|
|
329
|
-
In order to use human in the loop, you need to have a checkpointer attached.
|
|
330
|
-
Note: if you are using LangGraph Platform, this is automatically attached.
|
|
331
|
-
|
|
332
|
-
Example usage:
|
|
333
|
-
|
|
334
|
-
```python
|
|
335
|
-
from deepagents import create_deep_agent
|
|
336
|
-
from langgraph.checkpoint.memory import InMemorySaver
|
|
337
|
-
|
|
338
|
-
# Create agent with file operations requiring approval
|
|
339
|
-
agent = create_deep_agent(
|
|
340
|
-
tools=[your_tools],
|
|
341
|
-
instructions="Your instructions here",
|
|
342
|
-
interrupt_on={
|
|
343
|
-
# You can specify a dictionary for fine grained control over what interrupt options exist
|
|
344
|
-
"tool_1": {
|
|
345
|
-
"allow_respond": True,
|
|
346
|
-
"allow_edit": True,
|
|
347
|
-
"allow_accept":True,
|
|
348
|
-
},
|
|
349
|
-
# You can specify a boolean for shortcut
|
|
350
|
-
# This is a shortcut for the same functionality as above
|
|
351
|
-
"tool_2": True,
|
|
352
|
-
}
|
|
353
|
-
)
|
|
354
|
-
|
|
355
|
-
checkpointer= InMemorySaver()
|
|
356
|
-
agent.checkpointer = checkpointer
|
|
357
|
-
```
|
|
358
|
-
|
|
359
|
-
#### Approve
|
|
360
|
-
|
|
361
|
-
To "approve" a tool call means the agent will execute the tool call as is.
|
|
362
|
-
|
|
363
|
-
This flow shows how to approve a tool call (assuming the tool requiring approval is called):
|
|
364
|
-
|
|
365
|
-
```python
|
|
366
|
-
config = {"configurable": {"thread_id": "1"}}
|
|
367
|
-
for s in agent.stream({"messages": [{"role": "user", "content": message}]}, config=config):
|
|
368
|
-
print(s)
|
|
369
|
-
# If this calls a tool with an interrupt, this will then return an interrupt
|
|
370
|
-
for s in agent.stream(Command(resume=[{"type": "accept"}]), config=config):
|
|
371
|
-
print(s)
|
|
372
|
-
|
|
373
|
-
```
|
|
374
|
-
|
|
375
|
-
#### Edit
|
|
376
|
-
|
|
377
|
-
To "edit" a tool call means the agent will execute the new tool with the new arguments. You can change both the tool to call or the arguments to pass to that tool.
|
|
378
|
-
|
|
379
|
-
The `args` parameter you pass back should be a dictionary with two keys:
|
|
380
|
-
|
|
381
|
-
- `action`: maps to a string which is the name of the tool to call
|
|
382
|
-
- `args`: maps to a dictionary which is the arguments to pass to the tool
|
|
383
|
-
|
|
384
|
-
This flow shows how to edit a tool call (assuming the tool requiring approval is called):
|
|
385
|
-
|
|
386
|
-
```python
|
|
387
|
-
config = {"configurable": {"thread_id": "1"}}
|
|
388
|
-
for s in agent.stream({"messages": [{"role": "user", "content": message}]}, config=config):
|
|
389
|
-
print(s)
|
|
390
|
-
# If this calls a tool with an interrupt, this will then return an interrupt
|
|
391
|
-
# Replace the `...` with the tool name you want to call, and the arguments
|
|
392
|
-
for s in agent.stream(Command(resume=[{"type": "edit", "args": {"action": "...", "args": {...}}}]), config=config):
|
|
393
|
-
print(s)
|
|
394
|
-
|
|
395
|
-
```
|
|
396
|
-
|
|
397
|
-
#### Respond
|
|
398
|
-
|
|
399
|
-
To "respond" to a tool call means that tool is NOT called. Rather, a tool message is appended with the content you respond with, and the updated messages list is then sent back to the model.
|
|
400
|
-
|
|
401
|
-
The `args` parameter you pass back should be a string with your response.
|
|
402
|
-
|
|
403
|
-
This flow shows how to respond to a tool call (assuming the tool requiring approval is called):
|
|
404
|
-
|
|
405
|
-
```python
|
|
406
|
-
config = {"configurable": {"thread_id": "1"}}
|
|
407
|
-
for s in agent.stream({"messages": [{"role": "user", "content": message}]}, config=config):
|
|
408
|
-
print(s)
|
|
409
|
-
# If this calls a tool with an interrupt, this will then return an interrupt
|
|
410
|
-
# Replace the `...` with the response to use all the ToolMessage content
|
|
411
|
-
for s in agent.stream(Command(resume=[{"type": "response", "args": "..."}]), config=config):
|
|
412
|
-
print(s)
|
|
413
|
-
|
|
414
|
-
```
|
|
415
|
-
## Async
|
|
416
|
-
|
|
417
|
-
If you are passing async tools to your agent, you will want to use `from deepagents import async_create_deep_agent`
|
|
418
|
-
## MCP
|
|
419
|
-
|
|
420
|
-
The `deepagents` library can be ran with MCP tools. This can be achieved by using the [Langchain MCP Adapter library](https://github.com/langchain-ai/langchain-mcp-adapters).
|
|
421
|
-
|
|
422
|
-
**NOTE:** You will want to use `from deepagents import async_create_deep_agent` to use the async version of `deepagents`, since MCP tools are async
|
|
423
|
-
|
|
424
|
-
(To run the example below, will need to `pip install langchain-mcp-adapters`)
|
|
425
|
-
|
|
426
|
-
```python
|
|
427
|
-
import asyncio
|
|
428
|
-
from langchain_mcp_adapters.client import MultiServerMCPClient
|
|
429
|
-
from deepagents import create_deep_agent
|
|
430
|
-
|
|
431
|
-
async def main():
|
|
432
|
-
# Collect MCP tools
|
|
433
|
-
mcp_client = MultiServerMCPClient(...)
|
|
434
|
-
mcp_tools = await mcp_client.get_tools()
|
|
435
|
-
|
|
436
|
-
# Create agent
|
|
437
|
-
agent = async_create_deep_agent(tools=mcp_tools, ....)
|
|
438
|
-
|
|
439
|
-
# Stream the agent
|
|
440
|
-
async for chunk in agent.astream(
|
|
441
|
-
{"messages": [{"role": "user", "content": "what is langgraph?"}]},
|
|
442
|
-
stream_mode="values"
|
|
443
|
-
):
|
|
444
|
-
if "messages" in chunk:
|
|
445
|
-
chunk["messages"][-1].pretty_print()
|
|
446
|
-
|
|
447
|
-
asyncio.run(main())
|
|
448
|
-
```
|
|
449
|
-
|
|
450
|
-
## Roadmap
|
|
451
|
-
- [ ] Allow users to customize full system prompt
|
|
452
|
-
- [ ] Code cleanliness (type hinting, docstrings, formating)
|
|
453
|
-
- [ ] Allow for more of a robust virtual filesystem
|
|
454
|
-
- [ ] Create an example of a deep coding agent built on top of this
|
|
455
|
-
- [ ] Benchmark the example of [deep research agent](examples/research/research_agent.py)
|
|
@@ -1,10 +0,0 @@
|
|
|
1
|
-
deepagents/__init__.py,sha256=9BVNn4lfF5N8l2KY8Ttxi82zO609I-fGqoSIF7DAxiU,342
|
|
2
|
-
deepagents/graph.py,sha256=V4l11BSR435w8taLTluqE9ej1C1qoE9L4KYKx4atcqM,5573
|
|
3
|
-
deepagents/middleware/__init__.py,sha256=J7372TNGR27OU4C3uuQMryHHpXOBjFV_4aEZ_AoQ6n0,284
|
|
4
|
-
deepagents/middleware/filesystem.py,sha256=9yVWCeKYNEdY4IRi2Ik_NcgKyRPtfrzWabHRWoLTP1c,44095
|
|
5
|
-
deepagents/middleware/subagents.py,sha256=v1uxqVfunrFm1SDX_gTlNadFm2OmbvjjTpdE_DKGGzM,23489
|
|
6
|
-
deepagents-0.0.12rc2.dist-info/licenses/LICENSE,sha256=c__BaxUCK69leo2yEKynf8lWndu8iwYwge1CbyqAe-E,1071
|
|
7
|
-
deepagents-0.0.12rc2.dist-info/METADATA,sha256=PPxb1WF9HJf1aMn4vctkpYeBhtHQw1tZZEgZN-h0bOU,17393
|
|
8
|
-
deepagents-0.0.12rc2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
9
|
-
deepagents-0.0.12rc2.dist-info/top_level.txt,sha256=drAzchOzPNePwpb3_pbPuvLuayXkN7SNqeIKMBWJoAo,11
|
|
10
|
-
deepagents-0.0.12rc2.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|