deepagents 0.3.5__py3-none-any.whl → 0.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -114,6 +114,8 @@ from langchain_core.runnables import RunnableConfig
  from langgraph.prebuilt import ToolRuntime
  from langgraph.runtime import Runtime

+ from deepagents.middleware._utils import append_to_system_message
+
  logger = logging.getLogger(__name__)

  # Security: Maximum size for SKILL.md files to prevent DoS attacks (10MB)
@@ -558,18 +560,20 @@ class SkillsMiddleware(AgentMiddleware):
          lines = []
          for skill in skills:
              lines.append(f"- **{skill['name']}**: {skill['description']}")
+             if skill["allowed_tools"]:
+                 lines.append(f" -> Allowed tools: {', '.join(skill['allowed_tools'])}")
              lines.append(f" -> Read `{skill['path']}` for full instructions")

          return "\n".join(lines)

      def modify_request(self, request: ModelRequest) -> ModelRequest:
-         """Inject skills documentation into a model request's system prompt.
+         """Inject skills documentation into a model request's system message.

          Args:
              request: Model request to modify

          Returns:
-             New model request with skills documentation injected into system prompt
+             New model request with skills documentation injected into system message
          """
          skills_metadata = request.state.get("skills_metadata", [])
          skills_locations = self._format_skills_locations()
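
Read alongside the hunk above: when a skill declares `allowed_tools`, the formatted listing gains an extra indented line. A minimal sketch with a hypothetical skill entry (the field names mirror the dict keys used in the loop; the values are illustrative, not taken from the package):

```python
# Hypothetical skill metadata entry; only the keys read by the loop above are shown.
skill = {
    "name": "web-research",
    "description": "Search the web and summarize findings",
    "allowed_tools": ["search", "fetch_page"],
    "path": "/skills/web-research/SKILL.md",
}

lines = []
lines.append(f"- **{skill['name']}**: {skill['description']}")
if skill["allowed_tools"]:
    lines.append(f" -> Allowed tools: {', '.join(skill['allowed_tools'])}")
lines.append(f" -> Read `{skill['path']}` for full instructions")

print("\n".join(lines))
# - **web-research**: Search the web and summarize findings
#  -> Allowed tools: search, fetch_page
#  -> Read `/skills/web-research/SKILL.md` for full instructions
```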
@@ -580,12 +584,9 @@ class SkillsMiddleware(AgentMiddleware):
              skills_list=skills_list,
          )

-         if request.system_prompt:
-             system_prompt = request.system_prompt + "\n\n" + skills_section
-         else:
-             system_prompt = skills_section
+         new_system_message = append_to_system_message(request.system_message, skills_section)

-         return request.override(system_prompt=system_prompt)
+         return request.override(system_message=new_system_message)

      def before_agent(self, state: SkillsState, runtime: Runtime, config: RunnableConfig) -> SkillsStateUpdate | None:
          """Load skills metadata before agent execution (synchronous).
@@ -602,7 +603,7 @@ class SkillsMiddleware(AgentMiddleware):
              config: Runnable config.

          Returns:
-             State update with skills_metadata populated, or None if already present
+             State update with `skills_metadata` populated, or `None` if already present
          """
          # Skip if skills_metadata is already present in state (even if empty)
          if "skills_metadata" in state:
@@ -637,7 +638,7 @@ class SkillsMiddleware(AgentMiddleware):
              config: Runnable config.

          Returns:
-             State update with skills_metadata populated, or None if already present
+             State update with `skills_metadata` populated, or `None` if already present
          """
          # Skip if skills_metadata is already present in state (even if empty)
          if "skills_metadata" in state:
@@ -1,7 +1,7 @@
  """Middleware for providing subagents to an agent via a `task` tool."""

  from collections.abc import Awaitable, Callable, Sequence
- from typing import Any, NotRequired, TypedDict, cast
+ from typing import Annotated, Any, NotRequired, TypedDict, cast

  from langchain.agents import create_agent
  from langchain.agents.middleware import HumanInTheLoopMiddleware, InterruptOnConfig
@@ -13,6 +13,8 @@ from langchain_core.runnables import Runnable
  from langchain_core.tools import StructuredTool
  from langgraph.types import Command

+ from deepagents.middleware._utils import append_to_system_message
+

  class SubAgent(TypedDict):
      """Specification for an agent.
@@ -21,41 +23,83 @@ class SubAgent(TypedDict):
      will be applied first, followed by any `middleware` specified in this spec.
      To use only custom middleware without the defaults, pass `default_middleware=[]`
      to `SubAgentMiddleware`.
+
+     Required fields:
+         name: Unique identifier for the subagent.
+
+             The main agent uses this name when calling the `task()` tool.
+         description: What this subagent does.
+
+             Be specific and action-oriented. The main agent uses this to decide when to delegate.
+         system_prompt: Instructions for the subagent.
+
+             Include tool usage guidance and output format requirements.
+         tools: Tools the subagent can use.
+
+             Keep this minimal and include only what's needed.
+
+     Optional fields:
+         model: Override the main agent's model.
+
+             Use the format `'provider:model-name'` (e.g., `'openai:gpt-4o'`).
+         middleware: Additional middleware for custom behavior, logging, or rate limiting.
+         interrupt_on: Configure human-in-the-loop for specific tools.
+
+             Requires a checkpointer.
      """

      name: str
-     """The name of the agent."""
+     """Unique identifier for the subagent."""

      description: str
-     """The description of the agent."""
+     """What this subagent does. The main agent uses this to decide when to delegate."""

      system_prompt: str
-     """The system prompt to use for the agent."""
+     """Instructions for the subagent."""

      tools: Sequence[BaseTool | Callable | dict[str, Any]]
-     """The tools to use for the agent."""
+     """Tools the subagent can use."""

      model: NotRequired[str | BaseChatModel]
-     """The model for the agent. Defaults to `default_model`."""
+     """Override the main agent's model. Use `'provider:model-name'` format."""

      middleware: NotRequired[list[AgentMiddleware]]
-     """Additional middleware to append after `default_middleware`."""
+     """Additional middleware for custom behavior."""

      interrupt_on: NotRequired[dict[str, bool | InterruptOnConfig]]
-     """The tool configs to use for the agent."""
+     """Configure human-in-the-loop for specific tools."""


  class CompiledSubAgent(TypedDict):
-     """A pre-compiled agent spec."""
+     """A pre-compiled agent spec.
+
+     !!! note
+
+         The runnable's state schema must include a 'messages' key.
+
+         This is required for the subagent to communicate results back to the main agent.
+
+         When the subagent completes, the final message in the 'messages' list will be
+         extracted and returned as a `ToolMessage` to the parent agent.
+     """

      name: str
-     """The name of the agent."""
+     """Unique identifier for the subagent."""

      description: str
-     """The description of the agent."""
+     """What this subagent does."""

      runnable: Runnable
-     """The Runnable to use for the agent."""
+     """A custom agent implementation.
+
+     Create a custom agent using either:
+
+     1. LangChain's [`create_agent()`](https://docs.langchain.com/oss/python/langchain/quickstart)
+     2. A custom graph using [`langgraph`](https://docs.langchain.com/oss/python/langgraph/quickstart)
+
+     If you're creating a custom graph, make sure the state schema includes a 'messages' key.
+     This is required for the subagent to communicate results back to the main agent.
+     """


  DEFAULT_SUBAGENT_PROMPT = "In order to complete the objective that the user asks of you, you have access to a number of standard tools."
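
The reworked `SubAgent` docstring reads as usage guidance; a spec that follows it might look like the sketch below (the tool, prompt, and model string are illustrative and not taken from the package):

```python
from langchain_core.tools import tool


@tool
def search_docs(query: str) -> str:
    """Search internal documentation (stub for illustration)."""
    return f"Results for: {query}"


# A SubAgent is a plain dict matching the TypedDict above.
doc_researcher: dict = {
    # required
    "name": "doc-researcher",  # the main agent passes this to the `task` tool
    "description": "Searches internal docs and returns a cited summary.",
    "system_prompt": (
        "Use search_docs to find relevant passages, then reply with a short "
        "summary followed by the sources you used."
    ),
    "tools": [search_docs],  # keep minimal
    # optional
    "model": "openai:gpt-4o",  # 'provider:model-name' format
}
```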
@@ -251,6 +295,7 @@ def _get_subagents(
              system_prompt=DEFAULT_SUBAGENT_PROMPT,
              tools=default_tools,
              middleware=general_purpose_middleware,
+             name="general-purpose",
          )
          agents["general-purpose"] = general_purpose_subagent
          subagent_descriptions.append(f"- general-purpose: {DEFAULT_GENERAL_PURPOSE_DESCRIPTION}")
@@ -277,6 +322,7 @@ def _get_subagents(
              system_prompt=agent_["system_prompt"],
              tools=_tools,
              middleware=_middleware,
+             name=agent_["name"],
          )
      return agents, subagent_descriptions

@@ -318,6 +364,15 @@ def _create_task_tool(
      subagent_description_str = "\n".join(subagent_descriptions)

      def _return_command_with_state_update(result: dict, tool_call_id: str) -> Command:
+         # Validate that the result contains a 'messages' key
+         if "messages" not in result:
+             error_msg = (
+                 "CompiledSubAgent must return a state containing a 'messages' key. "
+                 "Custom StateGraphs used with CompiledSubAgent should include 'messages' "
+                 "in their state schema to communicate results back to the main agent."
+             )
+             raise ValueError(error_msg)
+
          state_update = {k: v for k, v in result.items() if k not in _EXCLUDED_STATE_KEYS}
          # Strip trailing whitespace to prevent API errors with Anthropic
          message_text = result["messages"][-1].text.rstrip() if result["messages"][-1].text else ""
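
The new guard makes the 'messages' requirement explicit up front instead of surfacing later as a `KeyError` when the final message is extracted. A minimal custom graph that satisfies the contract, sketched with `langgraph`'s prebuilt `MessagesState` (deepagents' own subagent graphs are built via `create_agent`, so this is only an illustration):

```python
from langchain_core.messages import AIMessage
from langgraph.graph import END, START, MessagesState, StateGraph


def respond(state: MessagesState) -> dict:
    # The last entry in 'messages' is what gets returned to the parent agent
    # as the task tool's result.
    return {"messages": [AIMessage(content="Summary of the delegated task.")]}


builder = StateGraph(MessagesState)
builder.add_node("respond", respond)
builder.add_edge(START, "respond")
builder.add_edge("respond", END)

# Usable as the `runnable` of a CompiledSubAgent because the state schema
# includes a 'messages' key.
custom_subagent = {
    "name": "summarizer",
    "description": "Summarizes whatever task description it is given.",
    "runnable": builder.compile(),
}
```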
@@ -344,30 +399,36 @@ def _create_task_tool(
      task_description = task_description.format(available_agents=subagent_description_str)

      def task(
-         description: str,
-         subagent_type: str,
+         description: Annotated[
+             str,
+             "A detailed description of the task for the subagent to perform autonomously. Include all necessary context and specify the expected output format.",  # noqa: E501
+         ],
+         subagent_type: Annotated[str, "The type of subagent to use. Must be one of the available agent types listed in the tool description."],
          runtime: ToolRuntime,
      ) -> str | Command:
          if subagent_type not in subagent_graphs:
              allowed_types = ", ".join([f"`{k}`" for k in subagent_graphs])
              return f"We cannot invoke subagent {subagent_type} because it does not exist, the only allowed types are {allowed_types}"
          subagent, subagent_state = _validate_and_prepare_state(subagent_type, description, runtime)
-         result = subagent.invoke(subagent_state, runtime.config)
+         result = subagent.invoke(subagent_state)
          if not runtime.tool_call_id:
              value_error_msg = "Tool call ID is required for subagent invocation"
              raise ValueError(value_error_msg)
          return _return_command_with_state_update(result, runtime.tool_call_id)

      async def atask(
-         description: str,
-         subagent_type: str,
+         description: Annotated[
+             str,
+             "A detailed description of the task for the subagent to perform autonomously. Include all necessary context and specify the expected output format.",  # noqa: E501
+         ],
+         subagent_type: Annotated[str, "The type of subagent to use. Must be one of the available agent types listed in the tool description."],
          runtime: ToolRuntime,
      ) -> str | Command:
          if subagent_type not in subagent_graphs:
              allowed_types = ", ".join([f"`{k}`" for k in subagent_graphs])
              return f"We cannot invoke subagent {subagent_type} because it does not exist, the only allowed types are {allowed_types}"
          subagent, subagent_state = _validate_and_prepare_state(subagent_type, description, runtime)
-         result = await subagent.ainvoke(subagent_state, runtime.config)
+         result = await subagent.ainvoke(subagent_state)
          if not runtime.tool_call_id:
              value_error_msg = "Tool call ID is required for subagent invocation"
              raise ValueError(value_error_msg)
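
Two things change in these tool bodies: the parameter documentation moves into `typing.Annotated` metadata, and the subagent is now invoked without forwarding `runtime.config`. The `Annotated` strings travel with the type hints, where schema builders can pick them up; a self-contained sketch of that mechanism using only the standard library (independent of how LangChain itself builds the tool schema):

```python
from typing import Annotated, get_type_hints


def task(
    description: Annotated[str, "A detailed description of the task for the subagent to perform."],
    subagent_type: Annotated[str, "The type of subagent to use."],
) -> str:
    return f"{subagent_type}: {description}"


hints = get_type_hints(task, include_extras=True)
for name, hint in hints.items():
    if name == "return":
        continue
    # __metadata__ holds the Annotated extras, i.e. the human-readable descriptions.
    print(f"{name}: {hint.__metadata__[0]}")
# description: A detailed description of the task for the subagent to perform.
# subagent_type: The type of subagent to use.
```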
@@ -399,18 +460,24 @@ class SubAgentMiddleware(AgentMiddleware):

      Args:
          default_model: The model to use for subagents.
-             Can be a LanguageModelLike or a dict for init_chat_model.
+
+             Can be a `LanguageModelLike` or a dict for `init_chat_model`.
          default_tools: The tools to use for the default general-purpose subagent.
-         default_middleware: Default middleware to apply to all subagents. If `None` (default),
-             no default middleware is applied. Pass a list to specify custom middleware.
-         default_interrupt_on: The tool configs to use for the default general-purpose subagent. These
-             are also the fallback for any subagents that don't specify their own tool configs.
+         default_middleware: Default middleware to apply to all subagents.
+
+             If `None`, no default middleware is applied.
+
+             Pass a list to specify custom middleware.
+         default_interrupt_on: The tool configs to use for the default general-purpose subagent.
+
+             These are also the fallback for any subagents that don't specify their own tool configs.
          subagents: A list of additional subagents to provide to the agent.
          system_prompt: Full system prompt override. When provided, completely replaces
              the agent's system prompt.
-         general_purpose_agent: Whether to include the general-purpose agent. Defaults to `True`.
-         task_description: Custom description for the task tool. If `None`, uses the
-             default description template.
+         general_purpose_agent: Whether to include the general-purpose agent.
+         task_description: Custom description for the task tool.
+
+             If `None`, uses the default description template.

      Example:
          ```python
@@ -454,7 +521,7 @@ class SubAgentMiddleware(AgentMiddleware):
          general_purpose_agent: bool = True,
          task_description: str | None = None,
      ) -> None:
-         """Initialize the SubAgentMiddleware."""
+         """Initialize the `SubAgentMiddleware`."""
          super().__init__()
          self.system_prompt = system_prompt
          task_tool = _create_task_tool(
@@ -473,10 +540,10 @@ class SubAgentMiddleware(AgentMiddleware):
          request: ModelRequest,
          handler: Callable[[ModelRequest], ModelResponse],
      ) -> ModelResponse:
-         """Update the system prompt to include instructions on using subagents."""
+         """Update the system message to include instructions on using subagents."""
          if self.system_prompt is not None:
-             system_prompt = request.system_prompt + "\n\n" + self.system_prompt if request.system_prompt else self.system_prompt
-             return handler(request.override(system_prompt=system_prompt))
+             new_system_message = append_to_system_message(request.system_message, self.system_prompt)
+             return handler(request.override(system_message=new_system_message))
          return handler(request)

      async def awrap_model_call(
@@ -484,8 +551,8 @@ class SubAgentMiddleware(AgentMiddleware):
          request: ModelRequest,
          handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
      ) -> ModelResponse:
-         """(async) Update the system prompt to include instructions on using subagents."""
+         """(async) Update the system message to include instructions on using subagents."""
          if self.system_prompt is not None:
-             system_prompt = request.system_prompt + "\n\n" + self.system_prompt if request.system_prompt else self.system_prompt
-             return await handler(request.override(system_prompt=system_prompt))
+             new_system_message = append_to_system_message(request.system_message, self.system_prompt)
+             return await handler(request.override(system_message=new_system_message))
          return await handler(request)