langchain 1.0.0a14__tar.gz → 1.0.0rc1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain might be problematic.

Files changed (113)
  1. langchain-1.0.0rc1/PKG-INFO +85 -0
  2. langchain-1.0.0rc1/README.md +39 -0
  3. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/__init__.py +1 -1
  4. langchain-1.0.0rc1/langchain/agents/__init__.py +15 -0
  5. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/factory.py +74 -49
  6. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/middleware/__init__.py +15 -6
  7. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/middleware/context_editing.py +30 -2
  8. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/middleware/human_in_the_loop.py +24 -20
  9. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/middleware/model_call_limit.py +42 -9
  10. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/middleware/model_fallback.py +36 -3
  11. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/middleware/pii.py +7 -7
  12. langchain-1.0.0a14/langchain/agents/middleware/planning.py → langchain-1.0.0rc1/langchain/agents/middleware/todo.py +18 -5
  13. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/middleware/tool_call_limit.py +89 -16
  14. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/middleware/tool_emulator.py +2 -2
  15. langchain-1.0.0rc1/langchain/agents/middleware/tool_retry.py +384 -0
  16. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/middleware/types.py +111 -63
  17. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/structured_output.py +29 -25
  18. langchain-1.0.0rc1/langchain/chat_models/__init__.py +13 -0
  19. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/chat_models/base.py +98 -108
  20. langchain-1.0.0rc1/langchain/embeddings/__init__.py +16 -0
  21. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/embeddings/base.py +1 -1
  22. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/messages/__init__.py +10 -1
  23. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/tools/__init__.py +9 -3
  24. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/tools/tool_node.py +288 -94
  25. {langchain-1.0.0a14 → langchain-1.0.0rc1}/pyproject.toml +1 -1
  26. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/__snapshots__/test_middleware_agent.ambr +0 -7
  27. langchain-1.0.0rc1/tests/unit_tests/agents/__snapshots__/test_return_direct_graph.ambr +69 -0
  28. langchain-1.0.0rc1/tests/unit_tests/agents/middleware/test_override_methods.py +381 -0
  29. langchain-1.0.0rc1/tests/unit_tests/agents/middleware/test_tool_retry.py +895 -0
  30. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/middleware/test_wrap_tool_call_decorator.py +1 -2
  31. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/test_context_editing_middleware.py +166 -0
  32. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/test_handler_composition.py +3 -3
  33. langchain-1.0.0rc1/tests/unit_tests/agents/test_injected_runtime_create_agent.py +591 -0
  34. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/test_middleware_agent.py +38 -146
  35. langchain-1.0.0rc1/tests/unit_tests/agents/test_model_fallback_middleware.py +215 -0
  36. langchain-1.0.0rc1/tests/unit_tests/agents/test_return_direct_graph.py +73 -0
  37. langchain-1.0.0rc1/tests/unit_tests/agents/test_state_schema.py +188 -0
  38. langchain-1.0.0rc1/tests/unit_tests/agents/test_todo_middleware.py +172 -0
  39. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/test_tool_call_limit.py +29 -9
  40. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/test_tool_node.py +118 -48
  41. langchain-1.0.0rc1/tests/unit_tests/agents/test_tool_node_interceptor_unregistered.py +571 -0
  42. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/tools/test_imports.py +1 -1
  43. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/tools/test_on_tool_call.py +74 -32
  44. {langchain-1.0.0a14 → langchain-1.0.0rc1}/uv.lock +4 -3
  45. langchain-1.0.0a14/PKG-INFO +0 -125
  46. langchain-1.0.0a14/README.md +0 -79
  47. langchain-1.0.0a14/langchain/agents/__init__.py +0 -9
  48. langchain-1.0.0a14/langchain/agents/middleware/prompt_caching.py +0 -89
  49. langchain-1.0.0a14/langchain/chat_models/__init__.py +0 -7
  50. langchain-1.0.0a14/langchain/embeddings/__init__.py +0 -10
  51. {langchain-1.0.0a14 → langchain-1.0.0rc1}/.gitignore +0 -0
  52. {langchain-1.0.0a14 → langchain-1.0.0rc1}/LICENSE +0 -0
  53. {langchain-1.0.0a14 → langchain-1.0.0rc1}/Makefile +0 -0
  54. {langchain-1.0.0a14 → langchain-1.0.0rc1}/extended_testing_deps.txt +0 -0
  55. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/middleware/summarization.py +0 -0
  56. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/middleware/tool_selection.py +0 -0
  57. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/py.typed +0 -0
  58. {langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/rate_limiters/__init__.py +0 -0
  59. {langchain-1.0.0a14 → langchain-1.0.0rc1}/scripts/check_imports.py +0 -0
  60. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/__init__.py +0 -0
  61. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/integration_tests/__init__.py +0 -0
  62. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/integration_tests/agents/__init__.py +0 -0
  63. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/integration_tests/agents/test_response_format.py +0 -0
  64. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/integration_tests/cache/__init__.py +0 -0
  65. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/integration_tests/cache/fake_embeddings.py +0 -0
  66. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/integration_tests/chat_models/__init__.py +0 -0
  67. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/integration_tests/chat_models/test_base.py +0 -0
  68. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/integration_tests/conftest.py +0 -0
  69. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/integration_tests/embeddings/__init__.py +0 -0
  70. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/integration_tests/embeddings/test_base.py +0 -0
  71. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/integration_tests/test_compile.py +0 -0
  72. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/__init__.py +0 -0
  73. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/__init__.py +0 -0
  74. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/__snapshots__/test_middleware_decorators.ambr +0 -0
  75. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/any_str.py +0 -0
  76. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/compose-postgres.yml +0 -0
  77. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/compose-redis.yml +0 -0
  78. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/conftest.py +0 -0
  79. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/conftest_checkpointer.py +0 -0
  80. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/conftest_store.py +0 -0
  81. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/memory_assert.py +0 -0
  82. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/messages.py +0 -0
  83. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/middleware/__init__.py +0 -0
  84. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/middleware/test_before_after_agent.py +0 -0
  85. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/middleware/test_llm_tool_selection.py +0 -0
  86. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/middleware/test_tool_emulator.py +0 -0
  87. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/middleware/test_wrap_model_call_decorator.py +0 -0
  88. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/middleware/test_wrap_model_call_middleware.py +0 -0
  89. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/model.py +0 -0
  90. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/specifications/responses.json +0 -0
  91. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/specifications/return_direct.json +0 -0
  92. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/test_middleware_decorators.py +0 -0
  93. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/test_middleware_tools.py +0 -0
  94. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/test_on_tool_call_middleware.py +0 -0
  95. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/test_pii_middleware.py +0 -0
  96. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/test_react_agent.py +0 -0
  97. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/test_response_format.py +0 -0
  98. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/test_responses.py +0 -0
  99. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/test_responses_spec.py +0 -0
  100. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/test_return_direct_spec.py +0 -0
  101. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/test_sync_async_tool_wrapper_composition.py +0 -0
  102. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/agents/utils.py +0 -0
  103. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/chat_models/__init__.py +0 -0
  104. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/chat_models/test_chat_models.py +0 -0
  105. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/conftest.py +0 -0
  106. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/embeddings/__init__.py +0 -0
  107. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/embeddings/test_base.py +0 -0
  108. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/embeddings/test_imports.py +0 -0
  109. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/stubs.py +0 -0
  110. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/test_dependencies.py +0 -0
  111. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/test_imports.py +0 -0
  112. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/test_pytest_config.py +0 -0
  113. {langchain-1.0.0a14 → langchain-1.0.0rc1}/tests/unit_tests/tools/__init__.py +0 -0
langchain-1.0.0rc1/PKG-INFO
@@ -0,0 +1,85 @@
+ Metadata-Version: 2.4
+ Name: langchain
+ Version: 1.0.0rc1
+ Summary: Building applications with LLMs through composability
+ Project-URL: homepage, https://docs.langchain.com/
+ Project-URL: repository, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
+ Project-URL: changelog, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D1%22
+ Project-URL: twitter, https://x.com/LangChainAI
+ Project-URL: slack, https://www.langchain.com/join-community
+ Project-URL: reddit, https://www.reddit.com/r/LangChain/
+ License: MIT
+ License-File: LICENSE
+ Requires-Python: <4.0.0,>=3.10.0
+ Requires-Dist: langchain-core<2.0.0,>=1.0.0a7
+ Requires-Dist: langgraph<2.0.0,>=1.0.0a4
+ Requires-Dist: pydantic<3.0.0,>=2.7.4
+ Provides-Extra: anthropic
+ Requires-Dist: langchain-anthropic; extra == 'anthropic'
+ Provides-Extra: aws
+ Requires-Dist: langchain-aws; extra == 'aws'
+ Provides-Extra: community
+ Requires-Dist: langchain-community; extra == 'community'
+ Provides-Extra: deepseek
+ Requires-Dist: langchain-deepseek; extra == 'deepseek'
+ Provides-Extra: fireworks
+ Requires-Dist: langchain-fireworks; extra == 'fireworks'
+ Provides-Extra: google-genai
+ Requires-Dist: langchain-google-genai; extra == 'google-genai'
+ Provides-Extra: google-vertexai
+ Requires-Dist: langchain-google-vertexai; extra == 'google-vertexai'
+ Provides-Extra: groq
+ Requires-Dist: langchain-groq; extra == 'groq'
+ Provides-Extra: mistralai
+ Requires-Dist: langchain-mistralai; extra == 'mistralai'
+ Provides-Extra: ollama
+ Requires-Dist: langchain-ollama; extra == 'ollama'
+ Provides-Extra: openai
+ Requires-Dist: langchain-openai; extra == 'openai'
+ Provides-Extra: perplexity
+ Requires-Dist: langchain-perplexity; extra == 'perplexity'
+ Provides-Extra: together
+ Requires-Dist: langchain-together; extra == 'together'
+ Provides-Extra: xai
+ Requires-Dist: langchain-xai; extra == 'xai'
+ Description-Content-Type: text/markdown
+
+ # 🦜️🔗 LangChain
+
+ [![PyPI - Version](https://img.shields.io/pypi/v/langchain?label=%20)](https://pypi.org/project/langchain/#history)
+ [![PyPI - License](https://img.shields.io/pypi/l/langchain)](https://opensource.org/licenses/MIT)
+ [![PyPI - Downloads](https://img.shields.io/pepy/dt/langchain)](https://pypistats.org/packages/langchain)
+ [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)
+
+ Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
+
+ To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
+ [LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
+
+ ## Quick Install
+
+ ```bash
+ pip install langchain
+ ```
+
+ ## 🤔 What is this?
+
+ LangChain is the easiest way to start building agents and applications powered by LLMs. With under 10 lines of code, you can connect to OpenAI, Anthropic, Google, and [more](https://docs.langchain.com/oss/python/integrations/providers/overview). LangChain provides a pre-built agent architecture and model integrations to help you get started quickly and seamlessly incorporate LLMs into your agents and applications.
+
+ We recommend you use LangChain if you want to quickly build agents and autonomous applications. Use [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), our low-level agent orchestration framework and runtime, when you have more advanced needs that require a combination of deterministic and agentic workflows, heavy customization, and carefully controlled latency.
+
+ LangChain [agents](https://docs.langchain.com/oss/python/langchain/agents) are built on top of LangGraph in order to provide durable execution, streaming, human-in-the-loop, persistence, and more. (You do not need to know LangGraph for basic LangChain agent usage.)
+
+ ## 📖 Documentation
+
+ For full documentation, see the [API reference](https://reference.langchain.com/python/langchain_classic).
+
+ ## 📕 Releases & Versioning
+
+ See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
+
+ ## 💁 Contributing
+
+ As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
+
+ For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).
langchain-1.0.0rc1/README.md
@@ -0,0 +1,39 @@
+ # 🦜️🔗 LangChain
+
+ [![PyPI - Version](https://img.shields.io/pypi/v/langchain?label=%20)](https://pypi.org/project/langchain/#history)
+ [![PyPI - License](https://img.shields.io/pypi/l/langchain)](https://opensource.org/licenses/MIT)
+ [![PyPI - Downloads](https://img.shields.io/pepy/dt/langchain)](https://pypistats.org/packages/langchain)
+ [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)
+
+ Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
+
+ To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
+ [LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
+
+ ## Quick Install
+
+ ```bash
+ pip install langchain
+ ```
+
+ ## 🤔 What is this?
+
+ LangChain is the easiest way to start building agents and applications powered by LLMs. With under 10 lines of code, you can connect to OpenAI, Anthropic, Google, and [more](https://docs.langchain.com/oss/python/integrations/providers/overview). LangChain provides a pre-built agent architecture and model integrations to help you get started quickly and seamlessly incorporate LLMs into your agents and applications.
+
+ We recommend you use LangChain if you want to quickly build agents and autonomous applications. Use [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), our low-level agent orchestration framework and runtime, when you have more advanced needs that require a combination of deterministic and agentic workflows, heavy customization, and carefully controlled latency.
+
+ LangChain [agents](https://docs.langchain.com/oss/python/langchain/agents) are built on top of LangGraph in order to provide durable execution, streaming, human-in-the-loop, persistence, and more. (You do not need to know LangGraph for basic LangChain agent usage.)
+
+ ## 📖 Documentation
+
+ For full documentation, see the [API reference](https://reference.langchain.com/python/langchain_classic).
+
+ ## 📕 Releases & Versioning
+
+ See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
+
+ ## 💁 Contributing
+
+ As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
+
+ For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).
{langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/__init__.py
@@ -1,3 +1,3 @@
  """Main entrypoint into LangChain."""

- __version__ = "1.0.0a13"
+ __version__ = "1.0.0rc1"
langchain-1.0.0rc1/langchain/agents/__init__.py
@@ -0,0 +1,15 @@
+ """Entrypoint to building [Agents](https://docs.langchain.com/oss/python/langchain/agents) with LangChain.
+
+ !!! warning "Reference docs"
+ This page contains **reference documentation** for Agents. See
+ [the docs](https://docs.langchain.com/oss/python/langchain/agents) for conceptual
+ guides, tutorials, and examples on using Agents.
+ """ # noqa: E501
+
+ from langchain.agents.factory import create_agent
+ from langchain.agents.middleware.types import AgentState
+
+ __all__ = [
+ "AgentState",
+ "create_agent",
+ ]
{langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/factory.py
@@ -92,7 +92,7 @@ def _chain_model_call_handlers(
  handlers: List of handlers. First handler wraps all others.

  Returns:
- Composed handler, or None if handlers empty.
+ Composed handler, or `None` if handlers empty.

  Example:
  ```python
@@ -195,13 +195,13 @@ def _chain_async_model_call_handlers(
  ]
  | None
  ):
- """Compose multiple async wrap_model_call handlers into single middleware stack.
+ """Compose multiple async `wrap_model_call` handlers into single middleware stack.

  Args:
  handlers: List of async handlers. First handler wraps all others.

  Returns:
- Composed async handler, or None if handlers empty.
+ Composed async handler, or `None` if handlers empty.
  """
  if not handlers:
  return None
@@ -267,12 +267,13 @@ def _chain_async_model_call_handlers(


  def _resolve_schema(schemas: set[type], schema_name: str, omit_flag: str | None = None) -> type:
- """Resolve schema by merging schemas and optionally respecting OmitFromSchema annotations.
+ """Resolve schema by merging schemas and optionally respecting `OmitFromSchema` annotations.

  Args:
  schemas: List of schema types to merge
- schema_name: Name for the generated TypedDict
- omit_flag: If specified, omit fields with this flag set ('input' or 'output')
+ schema_name: Name for the generated `TypedDict`
+ omit_flag: If specified, omit fields with this flag set (`'input'` or
+ `'output'`)
  """
  all_annotations = {}

@@ -312,11 +313,11 @@ def _extract_metadata(type_: type) -> list:


  def _get_can_jump_to(middleware: AgentMiddleware[Any, Any], hook_name: str) -> list[JumpTo]:
- """Get the can_jump_to list from either sync or async hook methods.
+ """Get the `can_jump_to` list from either sync or async hook methods.

  Args:
  middleware: The middleware instance to inspect.
- hook_name: The name of the hook ('before_model' or 'after_model').
+ hook_name: The name of the hook (`'before_model'` or `'after_model'`).

  Returns:
  List of jump destinations, or empty list if not configured.
@@ -350,7 +351,7 @@ def _supports_provider_strategy(model: str | BaseChatModel) -> bool:
  """Check if a model supports provider-specific structured output.

  Args:
- model: Model name string or BaseChatModel instance.
+ model: Model name string or `BaseChatModel` instance.

  Returns:
  `True` if the model supports provider-specific structured output, `False` otherwise.
@@ -373,7 +374,7 @@ def _handle_structured_output_error(
  exception: Exception,
  response_format: ResponseFormat,
  ) -> tuple[bool, str]:
- """Handle structured output error. Returns (should_retry, retry_tool_message)."""
+ """Handle structured output error. Returns `(should_retry, retry_tool_message)`."""
  if not isinstance(response_format, ToolStrategy):
  return False, ""

@@ -408,7 +409,7 @@ def _chain_tool_call_wrappers(
  wrappers: Wrappers in middleware order.

  Returns:
- Composed wrapper, or None if empty.
+ Composed wrapper, or `None` if empty.

  Example:
  wrapper = _chain_tool_call_wrappers([auth, cache, retry])
@@ -465,7 +466,7 @@ def _chain_async_tool_call_wrappers(
  wrappers: Async wrappers in middleware order.

  Returns:
- Composed async wrapper, or None if empty.
+ Composed async wrapper, or `None` if empty.
  """
  if not wrappers:
  return None
@@ -516,6 +517,7 @@ def create_agent( # noqa: PLR0915
  system_prompt: str | None = None,
  middleware: Sequence[AgentMiddleware[AgentState[ResponseT], ContextT]] = (),
  response_format: ResponseFormat[ResponseT] | type[ResponseT] | None = None,
+ state_schema: type[AgentState[ResponseT]] | None = None,
  context_schema: type[ContextT] | None = None,
  checkpointer: Checkpointer | None = None,
  store: BaseStore | None = None,
@@ -534,19 +536,27 @@ def create_agent( # noqa: PLR0915

  Args:
  model: The language model for the agent. Can be a string identifier
- (e.g., `"openai:gpt-4"`), a chat model instance (e.g., `ChatOpenAI()`).
- tools: A list of tools, dicts, or callables. If `None` or an empty list,
+ (e.g., `"openai:gpt-4"`) or a chat model instance (e.g., `ChatOpenAI()`).
+ For a full list of supported model strings, see
+ [`init_chat_model`][langchain.chat_models.init_chat_model(model_provider)].
+ tools: A list of tools, `dicts`, or `Callable`. If `None` or an empty list,
  the agent will consist of a model node without a tool calling loop.
- system_prompt: An optional system prompt for the LLM. If provided as a string,
- it will be converted to a SystemMessage and added to the beginning
- of the message list.
+ system_prompt: An optional system prompt for the LLM. Prompts are converted to a
+ `SystemMessage` and added to the beginning of the message list.
  middleware: A sequence of middleware instances to apply to the agent.
  Middleware can intercept and modify agent behavior at various stages.
  response_format: An optional configuration for structured responses.
- Can be a ToolStrategy, ProviderStrategy, or a Pydantic model class.
+ Can be a `ToolStrategy`, `ProviderStrategy`, or a Pydantic model class.
  If provided, the agent will handle structured output during the
  conversation flow. Raw schemas will be wrapped in an appropriate strategy
  based on model capabilities.
+ state_schema: An optional `TypedDict` schema that extends `AgentState`.
+ When provided, this schema is used instead of `AgentState` as the base
+ schema for merging with middleware state schemas. This allows users to
+ add custom state fields without needing to create custom middleware.
+ Generally, it's recommended to use state_schema extensions via middleware
+ to keep relevant extensions scoped to corresponding hooks / tools.
+ The schema must be a subclass of `AgentState[ResponseT]`.
  context_schema: An optional schema for runtime context.
  checkpointer: An optional checkpoint saver object. This is used for persisting
  the state of the graph (e.g., as chat memory) for a single thread
@@ -554,24 +564,27 @@ def create_agent( # noqa: PLR0915
  store: An optional store object. This is used for persisting data
  across multiple threads (e.g., multiple conversations / users).
  interrupt_before: An optional list of node names to interrupt before.
- This is useful if you want to add a user confirmation or other interrupt
+ Useful if you want to add a user confirmation or other interrupt
  before taking an action.
  interrupt_after: An optional list of node names to interrupt after.
- This is useful if you want to return directly or run additional processing
+ Useful if you want to return directly or run additional processing
  on an output.
- debug: A flag indicating whether to enable debug mode.
- name: An optional name for the CompiledStateGraph.
+ debug: Whether to enable verbose logging for graph execution. When enabled,
+ prints detailed information about each node execution, state updates,
+ and transitions during agent runtime. Useful for debugging middleware
+ behavior and understanding agent execution flow.
+ name: An optional name for the `CompiledStateGraph`.
  This name will be automatically used when adding the agent graph to
  another graph as a subgraph node - particularly useful for building
  multi-agent systems.
- cache: An optional BaseCache instance to enable caching of graph execution.
+ cache: An optional `BaseCache` instance to enable caching of graph execution.

  Returns:
- A compiled StateGraph that can be used for chat interactions.
+ A compiled `StateGraph` that can be used for chat interactions.

  The agent node calls the language model with the messages list (after applying
- the system prompt). If the resulting AIMessage contains `tool_calls`, the graph will
- then call the tools. The tools node executes the tools and adds the responses
+ the system prompt). If the resulting `AIMessage` contains `tool_calls`, the graph
+ will then call the tools. The tools node executes the tools and adds the responses
  to the messages list as `ToolMessage` objects. The agent node then calls the
  language model again. The process repeats until no more `tool_calls` are
  present in the response. The agent then returns the full list of messages.
@@ -587,7 +600,7 @@ def create_agent( # noqa: PLR0915


  graph = create_agent(
- model="anthropic:claude-3-7-sonnet-latest",
+ model="anthropic:claude-sonnet-4-5-20250929",
  tools=[check_weather],
  system_prompt="You are a helpful assistant",
  )
@@ -757,9 +770,11 @@ def create_agent( # noqa: PLR0915
  awrap_model_call_handler = _chain_async_model_call_handlers(async_handlers)

  state_schemas = {m.state_schema for m in middleware}
- state_schemas.add(AgentState)
+ # Use provided state_schema if available, otherwise use base AgentState
+ base_state = state_schema if state_schema is not None else AgentState
+ state_schemas.add(base_state)

- state_schema = _resolve_schema(state_schemas, "StateSchema", None)
+ resolved_state_schema = _resolve_schema(state_schemas, "StateSchema", None)
  input_schema = _resolve_schema(state_schemas, "InputSchema", "input")
  output_schema = _resolve_schema(state_schemas, "OutputSchema", "output")

@@ -767,7 +782,7 @@ def create_agent( # noqa: PLR0915
  graph: StateGraph[
  AgentState[ResponseT], ContextT, PublicAgentState[ResponseT], PublicAgentState[ResponseT]
  ] = StateGraph(
- state_schema=state_schema,
+ state_schema=resolved_state_schema,
  input_schema=input_schema,
  output_schema=output_schema,
  context_schema=context_schema,
@@ -879,8 +894,9 @@ def create_agent( # noqa: PLR0915
  request: The model request containing model, tools, and response format.

  Returns:
- Tuple of (bound_model, effective_response_format) where `effective_response_format`
- is the actual strategy used (may differ from initial if auto-detected).
+ Tuple of `(bound_model, effective_response_format)` where
+ `effective_response_format` is the actual strategy used (may differ from
+ initial if auto-detected).
  """
  # Validate ONLY client-side tools that need to exist in tool_node
  # Build map of available client-side tools from the ToolNode
@@ -986,7 +1002,7 @@ def create_agent( # noqa: PLR0915
  def _execute_model_sync(request: ModelRequest) -> ModelResponse:
  """Execute model and return response.

- This is the core model execution logic wrapped by wrap_model_call handlers.
+ This is the core model execution logic wrapped by `wrap_model_call` handlers.
  Raises any exceptions that occur during model invocation.
  """
  # Get the bound model (with auto-detection if needed)
@@ -1032,16 +1048,14 @@ def create_agent( # noqa: PLR0915
  if response.structured_response is not None:
  state_updates["structured_response"] = response.structured_response

- return {
- "thread_model_call_count": state.get("thread_model_call_count", 0) + 1,
- "run_model_call_count": state.get("run_model_call_count", 0) + 1,
- **state_updates,
- }
+ return state_updates

  async def _execute_model_async(request: ModelRequest) -> ModelResponse:
  """Execute model asynchronously and return response.

- This is the core async model execution logic wrapped by wrap_model_call handlers.
+ This is the core async model execution logic wrapped by `wrap_model_call`
+ handlers.
+
  Raises any exceptions that occur during model invocation.
  """
  # Get the bound model (with auto-detection if needed)
@@ -1087,11 +1101,7 @@ def create_agent( # noqa: PLR0915
  if response.structured_response is not None:
  state_updates["structured_response"] = response.structured_response

- return {
- "thread_model_call_count": state.get("thread_model_call_count", 0) + 1,
- "run_model_call_count": state.get("run_model_call_count", 0) + 1,
- **state_updates,
- }
+ return state_updates

  # Use sync or async based on model capabilities
  graph.add_node("model", RunnableCallable(model_node, amodel_node, trace=False))
@@ -1119,7 +1129,9 @@ def create_agent( # noqa: PLR0915
  else None
  )
  before_agent_node = RunnableCallable(sync_before_agent, async_before_agent, trace=False)
- graph.add_node(f"{m.name}.before_agent", before_agent_node, input_schema=state_schema)
+ graph.add_node(
+ f"{m.name}.before_agent", before_agent_node, input_schema=resolved_state_schema
+ )

  if (
  m.__class__.before_model is not AgentMiddleware.before_model
@@ -1138,7 +1150,9 @@ def create_agent( # noqa: PLR0915
  else None
  )
  before_node = RunnableCallable(sync_before, async_before, trace=False)
- graph.add_node(f"{m.name}.before_model", before_node, input_schema=state_schema)
+ graph.add_node(
+ f"{m.name}.before_model", before_node, input_schema=resolved_state_schema
+ )

  if (
  m.__class__.after_model is not AgentMiddleware.after_model
@@ -1157,7 +1171,7 @@ def create_agent( # noqa: PLR0915
  else None
  )
  after_node = RunnableCallable(sync_after, async_after, trace=False)
- graph.add_node(f"{m.name}.after_model", after_node, input_schema=state_schema)
+ graph.add_node(f"{m.name}.after_model", after_node, input_schema=resolved_state_schema)

  if (
  m.__class__.after_agent is not AgentMiddleware.after_agent
@@ -1176,7 +1190,9 @@ def create_agent( # noqa: PLR0915
  else None
  )
  after_agent_node = RunnableCallable(sync_after_agent, async_after_agent, trace=False)
- graph.add_node(f"{m.name}.after_agent", after_agent_node, input_schema=state_schema)
+ graph.add_node(
+ f"{m.name}.after_agent", after_agent_node, input_schema=resolved_state_schema
+ )

  # Determine the entry node (runs once at start): before_agent -> before_model -> model
  if middleware_w_before_agent:
@@ -1209,6 +1225,15 @@ def create_agent( # noqa: PLR0915
  graph.add_edge(START, entry_node)
  # add conditional edges only if tools exist
  if tool_node is not None:
+ # Only include exit_node in destinations if any tool has return_direct=True
+ # or if there are structured output tools
+ tools_to_model_destinations = [loop_entry_node]
+ if (
+ any(tool.return_direct for tool in tool_node.tools_by_name.values())
+ or structured_output_tools
+ ):
+ tools_to_model_destinations.append(exit_node)
+
  graph.add_conditional_edges(
  "tools",
  _make_tools_to_model_edge(
@@ -1217,7 +1242,7 @@ def create_agent( # noqa: PLR0915
  structured_output_tools=structured_output_tools,
  end_destination=exit_node,
  ),
- [loop_entry_node, exit_node],
+ tools_to_model_destinations,
  )

  # base destinations are tools and exit_node
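For illustration only (not part of the package diff): a minimal sketch of how the new `state_schema` parameter documented above might be used, following the docstring's requirement that the schema subclass `AgentState[ResponseT]`. The `CustomAgentState` class, `get_time` tool, and installed `langchain-anthropic` provider are assumptions.

```python
from typing_extensions import NotRequired

from langchain.agents import AgentState, create_agent


class CustomAgentState(AgentState):
    # Hypothetical extra state field, added without writing custom middleware.
    user_name: NotRequired[str]


def get_time() -> str:
    """Return a fixed timestamp (illustrative tool)."""
    return "2025-01-01T00:00:00Z"


agent = create_agent(
    model="anthropic:claude-sonnet-4-5-20250929",  # model string from the example above
    tools=[get_time],
    system_prompt="You are a helpful assistant",
    state_schema=CustomAgentState,  # new keyword argument in 1.0.0rc1
)
```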
{langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/middleware/__init__.py
@@ -1,4 +1,10 @@
- """Middleware plugins for agents."""
+ """Entrypoint to using [Middleware](https://docs.langchain.com/oss/python/langchain/middleware) plugins with [Agents](https://docs.langchain.com/oss/python/langchain/agents).
+
+ !!! warning "Reference docs"
+ This page contains **reference documentation** for Middleware. See
+ [the docs](https://docs.langchain.com/oss/python/langchain/middleware) for conceptual
+ guides, tutorials, and examples on using Middleware.
+ """ # noqa: E501

  from .context_editing import (
  ClearToolUsesEdit,
@@ -11,16 +17,17 @@ from .human_in_the_loop import (
  from .model_call_limit import ModelCallLimitMiddleware
  from .model_fallback import ModelFallbackMiddleware
  from .pii import PIIDetectionError, PIIMiddleware
- from .planning import PlanningMiddleware
- from .prompt_caching import AnthropicPromptCachingMiddleware
  from .summarization import SummarizationMiddleware
+ from .todo import TodoListMiddleware
  from .tool_call_limit import ToolCallLimitMiddleware
  from .tool_emulator import LLMToolEmulator
+ from .tool_retry import ToolRetryMiddleware
  from .tool_selection import LLMToolSelectorMiddleware
  from .types import (
  AgentMiddleware,
  AgentState,
  ModelRequest,
+ ModelResponse,
  after_agent,
  after_model,
  before_agent,
@@ -28,13 +35,12 @@ from .types import (
  dynamic_prompt,
  hook_config,
  wrap_model_call,
+ wrap_tool_call,
  )

  __all__ = [
  "AgentMiddleware",
  "AgentState",
- # should move to langchain-anthropic if we decide to keep it
- "AnthropicPromptCachingMiddleware",
  "ClearToolUsesEdit",
  "ContextEditingMiddleware",
  "HumanInTheLoopMiddleware",
@@ -44,11 +50,13 @@ __all__ = [
  "ModelCallLimitMiddleware",
  "ModelFallbackMiddleware",
  "ModelRequest",
+ "ModelResponse",
  "PIIDetectionError",
  "PIIMiddleware",
- "PlanningMiddleware",
  "SummarizationMiddleware",
+ "TodoListMiddleware",
  "ToolCallLimitMiddleware",
+ "ToolRetryMiddleware",
  "after_agent",
  "after_model",
  "before_agent",
@@ -56,4 +64,5 @@ __all__ = [
  "dynamic_prompt",
  "hook_config",
  "wrap_model_call",
+ "wrap_tool_call",
  ]
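As a hedged sketch of the export changes above: `TodoListMiddleware` replaces `PlanningMiddleware` (planning.py was renamed to todo.py) and `ToolRetryMiddleware` is newly exported. No-argument construction below is an assumption; the constructors' signatures are not shown in this diff.

```python
from langchain.agents import create_agent
from langchain.agents.middleware import (
    TodoListMiddleware,   # renamed from PlanningMiddleware in this release
    ToolRetryMiddleware,  # new middleware added in this release
)

# Default construction is assumed for both middleware classes.
agent = create_agent(
    model="anthropic:claude-sonnet-4-5-20250929",
    tools=[],
    middleware=[TodoListMiddleware(), ToolRetryMiddleware()],
)
```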
{langchain-1.0.0a14 → langchain-1.0.0rc1}/langchain/agents/middleware/context_editing.py
@@ -8,7 +8,7 @@ with any LangChain chat model.

  from __future__ import annotations

- from collections.abc import Callable, Iterable, Sequence
+ from collections.abc import Awaitable, Callable, Iterable, Sequence
  from dataclasses import dataclass
  from typing import Literal

@@ -198,7 +198,7 @@ class ContextEditingMiddleware(AgentMiddleware):
  edits: Iterable[ContextEdit] | None = None,
  token_count_method: Literal["approximate", "model"] = "approximate", # noqa: S107
  ) -> None:
- """Initialise a context editing middleware instance.
+ """Initializes a context editing middleware instance.

  Args:
  edits: Sequence of edit strategies to apply. Defaults to a single
@@ -239,6 +239,34 @@ class ContextEditingMiddleware(AgentMiddleware):

  return handler(request)

+ async def awrap_model_call(
+ self,
+ request: ModelRequest,
+ handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+ ) -> ModelCallResult:
+ """Apply context edits before invoking the model via handler (async version)."""
+ if not request.messages:
+ return await handler(request)
+
+ if self.token_count_method == "approximate": # noqa: S105
+
+ def count_tokens(messages: Sequence[BaseMessage]) -> int:
+ return count_tokens_approximately(messages)
+ else:
+ system_msg = (
+ [SystemMessage(content=request.system_prompt)] if request.system_prompt else []
+ )
+
+ def count_tokens(messages: Sequence[BaseMessage]) -> int:
+ return request.model.get_num_tokens_from_messages(
+ system_msg + list(messages), request.tools
+ )
+
+ for edit in self.edits:
+ edit.apply(request.messages, count_tokens=count_tokens)
+
+ return await handler(request)
+

  __all__ = [
  "ClearToolUsesEdit",