langchain-1.0.0a14.tar.gz → langchain-1.0.0a15.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain might be problematic; see the release advisory for more details.

Files changed (103)
  1. langchain-1.0.0a15/PKG-INFO +85 -0
  2. langchain-1.0.0a15/README.md +39 -0
  3. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/__init__.py +1 -1
  4. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/agents/factory.py +8 -17
  5. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/agents/middleware/__init__.py +6 -5
  6. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/agents/middleware/context_editing.py +29 -1
  7. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/agents/middleware/human_in_the_loop.py +13 -13
  8. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/agents/middleware/model_call_limit.py +38 -4
  9. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/agents/middleware/model_fallback.py +36 -1
  10. langchain-1.0.0a14/langchain/agents/middleware/planning.py → langchain-1.0.0a15/langchain/agents/middleware/todo.py +18 -5
  11. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/agents/middleware/tool_call_limit.py +88 -15
  12. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/agents/middleware/types.py +44 -5
  13. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/tools/__init__.py +1 -2
  14. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/tools/tool_node.py +32 -0
  15. {langchain-1.0.0a14 → langchain-1.0.0a15}/pyproject.toml +1 -1
  16. langchain-1.0.0a15/tests/unit_tests/agents/middleware/test_override_methods.py +381 -0
  17. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/test_context_editing_middleware.py +166 -0
  18. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/test_middleware_agent.py +38 -146
  19. langchain-1.0.0a15/tests/unit_tests/agents/test_model_fallback_middleware.py +215 -0
  20. langchain-1.0.0a15/tests/unit_tests/agents/test_todo_middleware.py +172 -0
  21. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/test_tool_call_limit.py +29 -9
  22. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/tools/test_imports.py +0 -1
  23. {langchain-1.0.0a14 → langchain-1.0.0a15}/uv.lock +3 -2
  24. langchain-1.0.0a14/PKG-INFO +0 -125
  25. langchain-1.0.0a14/README.md +0 -79
  26. langchain-1.0.0a14/langchain/agents/middleware/prompt_caching.py +0 -89
  27. {langchain-1.0.0a14 → langchain-1.0.0a15}/.gitignore +0 -0
  28. {langchain-1.0.0a14 → langchain-1.0.0a15}/LICENSE +0 -0
  29. {langchain-1.0.0a14 → langchain-1.0.0a15}/Makefile +0 -0
  30. {langchain-1.0.0a14 → langchain-1.0.0a15}/extended_testing_deps.txt +0 -0
  31. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/agents/__init__.py +0 -0
  32. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/agents/middleware/pii.py +0 -0
  33. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/agents/middleware/summarization.py +0 -0
  34. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/agents/middleware/tool_emulator.py +0 -0
  35. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/agents/middleware/tool_selection.py +0 -0
  36. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/agents/structured_output.py +0 -0
  37. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/chat_models/__init__.py +0 -0
  38. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/chat_models/base.py +0 -0
  39. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/embeddings/__init__.py +0 -0
  40. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/embeddings/base.py +0 -0
  41. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/messages/__init__.py +0 -0
  42. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/py.typed +0 -0
  43. {langchain-1.0.0a14 → langchain-1.0.0a15}/langchain/rate_limiters/__init__.py +0 -0
  44. {langchain-1.0.0a14 → langchain-1.0.0a15}/scripts/check_imports.py +0 -0
  45. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/__init__.py +0 -0
  46. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/integration_tests/__init__.py +0 -0
  47. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/integration_tests/agents/__init__.py +0 -0
  48. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/integration_tests/agents/test_response_format.py +0 -0
  49. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/integration_tests/cache/__init__.py +0 -0
  50. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/integration_tests/cache/fake_embeddings.py +0 -0
  51. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/integration_tests/chat_models/__init__.py +0 -0
  52. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/integration_tests/chat_models/test_base.py +0 -0
  53. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/integration_tests/conftest.py +0 -0
  54. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/integration_tests/embeddings/__init__.py +0 -0
  55. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/integration_tests/embeddings/test_base.py +0 -0
  56. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/integration_tests/test_compile.py +0 -0
  57. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/__init__.py +0 -0
  58. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/__init__.py +0 -0
  59. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/__snapshots__/test_middleware_agent.ambr +0 -0
  60. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/__snapshots__/test_middleware_decorators.ambr +0 -0
  61. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/any_str.py +0 -0
  62. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/compose-postgres.yml +0 -0
  63. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/compose-redis.yml +0 -0
  64. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/conftest.py +0 -0
  65. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/conftest_checkpointer.py +0 -0
  66. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/conftest_store.py +0 -0
  67. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/memory_assert.py +0 -0
  68. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/messages.py +0 -0
  69. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/middleware/__init__.py +0 -0
  70. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/middleware/test_before_after_agent.py +0 -0
  71. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/middleware/test_llm_tool_selection.py +0 -0
  72. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/middleware/test_tool_emulator.py +0 -0
  73. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/middleware/test_wrap_model_call_decorator.py +0 -0
  74. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/middleware/test_wrap_model_call_middleware.py +0 -0
  75. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/middleware/test_wrap_tool_call_decorator.py +0 -0
  76. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/model.py +0 -0
  77. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/specifications/responses.json +0 -0
  78. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/specifications/return_direct.json +0 -0
  79. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/test_handler_composition.py +0 -0
  80. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/test_middleware_decorators.py +0 -0
  81. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/test_middleware_tools.py +0 -0
  82. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/test_on_tool_call_middleware.py +0 -0
  83. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/test_pii_middleware.py +0 -0
  84. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/test_react_agent.py +0 -0
  85. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/test_response_format.py +0 -0
  86. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/test_responses.py +0 -0
  87. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/test_responses_spec.py +0 -0
  88. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/test_return_direct_spec.py +0 -0
  89. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/test_sync_async_tool_wrapper_composition.py +0 -0
  90. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/test_tool_node.py +0 -0
  91. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/agents/utils.py +0 -0
  92. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/chat_models/__init__.py +0 -0
  93. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/chat_models/test_chat_models.py +0 -0
  94. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/conftest.py +0 -0
  95. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/embeddings/__init__.py +0 -0
  96. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/embeddings/test_base.py +0 -0
  97. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/embeddings/test_imports.py +0 -0
  98. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/stubs.py +0 -0
  99. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/test_dependencies.py +0 -0
  100. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/test_imports.py +0 -0
  101. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/test_pytest_config.py +0 -0
  102. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/tools/__init__.py +0 -0
  103. {langchain-1.0.0a14 → langchain-1.0.0a15}/tests/unit_tests/tools/test_on_tool_call.py +0 -0
@@ -0,0 +1,85 @@
1
+ Metadata-Version: 2.4
2
+ Name: langchain
3
+ Version: 1.0.0a15
4
+ Summary: Building applications with LLMs through composability
5
+ Project-URL: homepage, https://docs.langchain.com/
6
+ Project-URL: repository, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
7
+ Project-URL: changelog, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D1%22
8
+ Project-URL: twitter, https://x.com/LangChainAI
9
+ Project-URL: slack, https://www.langchain.com/join-community
10
+ Project-URL: reddit, https://www.reddit.com/r/LangChain/
11
+ License: MIT
12
+ License-File: LICENSE
13
+ Requires-Python: <4.0.0,>=3.10.0
14
+ Requires-Dist: langchain-core<2.0.0,>=1.0.0a7
15
+ Requires-Dist: langgraph<2.0.0,>=1.0.0a4
16
+ Requires-Dist: pydantic<3.0.0,>=2.7.4
17
+ Provides-Extra: anthropic
18
+ Requires-Dist: langchain-anthropic; extra == 'anthropic'
19
+ Provides-Extra: aws
20
+ Requires-Dist: langchain-aws; extra == 'aws'
21
+ Provides-Extra: community
22
+ Requires-Dist: langchain-community; extra == 'community'
23
+ Provides-Extra: deepseek
24
+ Requires-Dist: langchain-deepseek; extra == 'deepseek'
25
+ Provides-Extra: fireworks
26
+ Requires-Dist: langchain-fireworks; extra == 'fireworks'
27
+ Provides-Extra: google-genai
28
+ Requires-Dist: langchain-google-genai; extra == 'google-genai'
29
+ Provides-Extra: google-vertexai
30
+ Requires-Dist: langchain-google-vertexai; extra == 'google-vertexai'
31
+ Provides-Extra: groq
32
+ Requires-Dist: langchain-groq; extra == 'groq'
33
+ Provides-Extra: mistralai
34
+ Requires-Dist: langchain-mistralai; extra == 'mistralai'
35
+ Provides-Extra: ollama
36
+ Requires-Dist: langchain-ollama; extra == 'ollama'
37
+ Provides-Extra: openai
38
+ Requires-Dist: langchain-openai; extra == 'openai'
39
+ Provides-Extra: perplexity
40
+ Requires-Dist: langchain-perplexity; extra == 'perplexity'
41
+ Provides-Extra: together
42
+ Requires-Dist: langchain-together; extra == 'together'
43
+ Provides-Extra: xai
44
+ Requires-Dist: langchain-xai; extra == 'xai'
45
+ Description-Content-Type: text/markdown
46
+
47
+ # 🦜️🔗 LangChain
48
+
49
+ [![PyPI - Version](https://img.shields.io/pypi/v/langchain?label=%20)](https://pypi.org/project/langchain/#history)
50
+ [![PyPI - License](https://img.shields.io/pypi/l/langchain)](https://opensource.org/licenses/MIT)
51
+ [![PyPI - Downloads](https://img.shields.io/pepy/dt/langchain)](https://pypistats.org/packages/langchain)
52
+ [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)
53
+
54
+ Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
55
+
56
+ To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
57
+ [LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
58
+
59
+ ## Quick Install
60
+
61
+ ```bash
62
+ pip install langchain
63
+ ```
64
+
65
+ ## 🤔 What is this?
66
+
67
+ LangChain is the easiest way to start building agents and applications powered by LLMs. With under 10 lines of code, you can connect to OpenAI, Anthropic, Google, and [more](https://docs.langchain.com/oss/python/integrations/providers/overview). LangChain provides a pre-built agent architecture and model integrations to help you get started quickly and seamlessly incorporate LLMs into your agents and applications.
68
+
69
+ We recommend you use LangChain if you want to quickly build agents and autonomous applications. Use [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), our low-level agent orchestration framework and runtime, when you have more advanced needs that require a combination of deterministic and agentic workflows, heavy customization, and carefully controlled latency.
70
+
71
+ LangChain [agents](https://docs.langchain.com/oss/python/langchain/agents) are built on top of LangGraph in order to provide durable execution, streaming, human-in-the-loop, persistence, and more. (You do not need to know LangGraph for basic LangChain agent usage.)
72
+
73
+ ## 📖 Documentation
74
+
75
+ For full documentation, see the [API reference](https://reference.langchain.com/python/langchain_classic).
76
+
77
+ ## 📕 Releases & Versioning
78
+
79
+ See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
80
+
81
+ ## 💁 Contributing
82
+
83
+ As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
84
+
85
+ For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).
@@ -0,0 +1,39 @@
1
+ # 🦜️🔗 LangChain
2
+
3
+ [![PyPI - Version](https://img.shields.io/pypi/v/langchain?label=%20)](https://pypi.org/project/langchain/#history)
4
+ [![PyPI - License](https://img.shields.io/pypi/l/langchain)](https://opensource.org/licenses/MIT)
5
+ [![PyPI - Downloads](https://img.shields.io/pepy/dt/langchain)](https://pypistats.org/packages/langchain)
6
+ [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)
7
+
8
+ Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
9
+
10
+ To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
11
+ [LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
12
+
13
+ ## Quick Install
14
+
15
+ ```bash
16
+ pip install langchain
17
+ ```
18
+
19
+ ## 🤔 What is this?
20
+
21
+ LangChain is the easiest way to start building agents and applications powered by LLMs. With under 10 lines of code, you can connect to OpenAI, Anthropic, Google, and [more](https://docs.langchain.com/oss/python/integrations/providers/overview). LangChain provides a pre-built agent architecture and model integrations to help you get started quickly and seamlessly incorporate LLMs into your agents and applications.
22
+
23
+ We recommend you use LangChain if you want to quickly build agents and autonomous applications. Use [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), our low-level agent orchestration framework and runtime, when you have more advanced needs that require a combination of deterministic and agentic workflows, heavy customization, and carefully controlled latency.
24
+
25
+ LangChain [agents](https://docs.langchain.com/oss/python/langchain/agents) are built on top of LangGraph in order to provide durable execution, streaming, human-in-the-loop, persistence, and more. (You do not need to know LangGraph for basic LangChain agent usage.)
26
+
27
+ ## 📖 Documentation
28
+
29
+ For full documentation, see the [API reference](https://reference.langchain.com/python/langchain_classic).
30
+
31
+ ## 📕 Releases & Versioning
32
+
33
+ See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
34
+
35
+ ## 💁 Contributing
36
+
37
+ As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
38
+
39
+ For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).
@@ -1,3 +1,3 @@
1
1
  """Main entrypoint into LangChain."""
2
2
 
3
- __version__ = "1.0.0a13"
3
+ __version__ = "1.0.0a14"
@@ -537,13 +537,12 @@ def create_agent( # noqa: PLR0915
537
537
  (e.g., `"openai:gpt-4"`), a chat model instance (e.g., `ChatOpenAI()`).
538
538
  tools: A list of tools, dicts, or callables. If `None` or an empty list,
539
539
  the agent will consist of a model node without a tool calling loop.
540
- system_prompt: An optional system prompt for the LLM. If provided as a string,
541
- it will be converted to a SystemMessage and added to the beginning
542
- of the message list.
540
+ system_prompt: An optional system prompt for the LLM. Prompts are converted to a
541
+ `SystemMessage` and added to the beginning of the message list.
543
542
  middleware: A sequence of middleware instances to apply to the agent.
544
543
  Middleware can intercept and modify agent behavior at various stages.
545
544
  response_format: An optional configuration for structured responses.
546
- Can be a ToolStrategy, ProviderStrategy, or a Pydantic model class.
545
+ Can be a `ToolStrategy`, `ProviderStrategy`, or a Pydantic model class.
547
546
  If provided, the agent will handle structured output during the
548
547
  conversation flow. Raw schemas will be wrapped in an appropriate strategy
549
548
  based on model capabilities.
@@ -560,14 +559,14 @@ def create_agent( # noqa: PLR0915
560
559
  This is useful if you want to return directly or run additional processing
561
560
  on an output.
562
561
  debug: A flag indicating whether to enable debug mode.
563
- name: An optional name for the CompiledStateGraph.
562
+ name: An optional name for the `CompiledStateGraph`.
564
563
  This name will be automatically used when adding the agent graph to
565
564
  another graph as a subgraph node - particularly useful for building
566
565
  multi-agent systems.
567
- cache: An optional BaseCache instance to enable caching of graph execution.
566
+ cache: An optional `BaseCache` instance to enable caching of graph execution.
568
567
 
569
568
  Returns:
570
- A compiled StateGraph that can be used for chat interactions.
569
+ A compiled `StateGraph` that can be used for chat interactions.
571
570
 
572
571
  The agent node calls the language model with the messages list (after applying
573
572
  the system prompt). If the resulting AIMessage contains `tool_calls`, the graph will
@@ -1032,11 +1031,7 @@ def create_agent( # noqa: PLR0915
1032
1031
  if response.structured_response is not None:
1033
1032
  state_updates["structured_response"] = response.structured_response
1034
1033
 
1035
- return {
1036
- "thread_model_call_count": state.get("thread_model_call_count", 0) + 1,
1037
- "run_model_call_count": state.get("run_model_call_count", 0) + 1,
1038
- **state_updates,
1039
- }
1034
+ return state_updates
1040
1035
 
1041
1036
  async def _execute_model_async(request: ModelRequest) -> ModelResponse:
1042
1037
  """Execute model asynchronously and return response.
@@ -1087,11 +1082,7 @@ def create_agent( # noqa: PLR0915
1087
1082
  if response.structured_response is not None:
1088
1083
  state_updates["structured_response"] = response.structured_response
1089
1084
 
1090
- return {
1091
- "thread_model_call_count": state.get("thread_model_call_count", 0) + 1,
1092
- "run_model_call_count": state.get("run_model_call_count", 0) + 1,
1093
- **state_updates,
1094
- }
1085
+ return state_updates
1095
1086
 
1096
1087
  # Use sync or async based on model capabilities
1097
1088
  graph.add_node("model", RunnableCallable(model_node, amodel_node, trace=False))
@@ -11,9 +11,8 @@ from .human_in_the_loop import (
11
11
  from .model_call_limit import ModelCallLimitMiddleware
12
12
  from .model_fallback import ModelFallbackMiddleware
13
13
  from .pii import PIIDetectionError, PIIMiddleware
14
- from .planning import PlanningMiddleware
15
- from .prompt_caching import AnthropicPromptCachingMiddleware
16
14
  from .summarization import SummarizationMiddleware
15
+ from .todo import TodoListMiddleware
17
16
  from .tool_call_limit import ToolCallLimitMiddleware
18
17
  from .tool_emulator import LLMToolEmulator
19
18
  from .tool_selection import LLMToolSelectorMiddleware
@@ -21,6 +20,7 @@ from .types import (
21
20
  AgentMiddleware,
22
21
  AgentState,
23
22
  ModelRequest,
23
+ ModelResponse,
24
24
  after_agent,
25
25
  after_model,
26
26
  before_agent,
@@ -28,13 +28,12 @@ from .types import (
28
28
  dynamic_prompt,
29
29
  hook_config,
30
30
  wrap_model_call,
31
+ wrap_tool_call,
31
32
  )
32
33
 
33
34
  __all__ = [
34
35
  "AgentMiddleware",
35
36
  "AgentState",
36
- # should move to langchain-anthropic if we decide to keep it
37
- "AnthropicPromptCachingMiddleware",
38
37
  "ClearToolUsesEdit",
39
38
  "ContextEditingMiddleware",
40
39
  "HumanInTheLoopMiddleware",
@@ -44,10 +43,11 @@ __all__ = [
44
43
  "ModelCallLimitMiddleware",
45
44
  "ModelFallbackMiddleware",
46
45
  "ModelRequest",
46
+ "ModelResponse",
47
47
  "PIIDetectionError",
48
48
  "PIIMiddleware",
49
- "PlanningMiddleware",
50
49
  "SummarizationMiddleware",
50
+ "TodoListMiddleware",
51
51
  "ToolCallLimitMiddleware",
52
52
  "after_agent",
53
53
  "after_model",
@@ -56,4 +56,5 @@ __all__ = [
56
56
  "dynamic_prompt",
57
57
  "hook_config",
58
58
  "wrap_model_call",
59
+ "wrap_tool_call",
59
60
  ]
@@ -8,7 +8,7 @@ with any LangChain chat model.
8
8
 
9
9
  from __future__ import annotations
10
10
 
11
- from collections.abc import Callable, Iterable, Sequence
11
+ from collections.abc import Awaitable, Callable, Iterable, Sequence
12
12
  from dataclasses import dataclass
13
13
  from typing import Literal
14
14
 
@@ -239,6 +239,34 @@ class ContextEditingMiddleware(AgentMiddleware):
239
239
 
240
240
  return handler(request)
241
241
 
242
+ async def awrap_model_call(
243
+ self,
244
+ request: ModelRequest,
245
+ handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
246
+ ) -> ModelCallResult:
247
+ """Apply context edits before invoking the model via handler (async version)."""
248
+ if not request.messages:
249
+ return await handler(request)
250
+
251
+ if self.token_count_method == "approximate": # noqa: S105
252
+
253
+ def count_tokens(messages: Sequence[BaseMessage]) -> int:
254
+ return count_tokens_approximately(messages)
255
+ else:
256
+ system_msg = (
257
+ [SystemMessage(content=request.system_prompt)] if request.system_prompt else []
258
+ )
259
+
260
+ def count_tokens(messages: Sequence[BaseMessage]) -> int:
261
+ return request.model.get_num_tokens_from_messages(
262
+ system_msg + list(messages), request.tools
263
+ )
264
+
265
+ for edit in self.edits:
266
+ edit.apply(request.messages, count_tokens=count_tokens)
267
+
268
+ return await handler(request)
269
+
242
270
 
243
271
  __all__ = [
244
272
  "ClearToolUsesEdit",
@@ -11,23 +11,23 @@ from langchain.agents.middleware.types import AgentMiddleware, AgentState
11
11
 
12
12
 
13
13
  class Action(TypedDict):
14
- """Represents an action with a name and arguments."""
14
+ """Represents an action with a name and args."""
15
15
 
16
16
  name: str
17
17
  """The type or name of action being requested (e.g., "add_numbers")."""
18
18
 
19
- arguments: dict[str, Any]
20
- """Key-value pairs of arguments needed for the action (e.g., {"a": 1, "b": 2})."""
19
+ args: dict[str, Any]
20
+ """Key-value pairs of args needed for the action (e.g., {"a": 1, "b": 2})."""
21
21
 
22
22
 
23
23
  class ActionRequest(TypedDict):
24
- """Represents an action request with a name, arguments, and description."""
24
+ """Represents an action request with a name, args, and description."""
25
25
 
26
26
  name: str
27
27
  """The name of the action being requested."""
28
28
 
29
- arguments: dict[str, Any]
30
- """Key-value pairs of arguments needed for the action (e.g., {"a": 1, "b": 2})."""
29
+ args: dict[str, Any]
30
+ """Key-value pairs of args needed for the action (e.g., {"a": 1, "b": 2})."""
31
31
 
32
32
  description: NotRequired[str]
33
33
  """The description of the action to be reviewed."""
@@ -45,8 +45,8 @@ class ReviewConfig(TypedDict):
45
45
  allowed_decisions: list[DecisionType]
46
46
  """The decisions that are allowed for this request."""
47
47
 
48
- arguments_schema: NotRequired[dict[str, Any]]
49
- """JSON schema for the arguments associated with the action, if edits are allowed."""
48
+ args_schema: NotRequired[dict[str, Any]]
49
+ """JSON schema for the args associated with the action, if edits are allowed."""
50
50
 
51
51
 
52
52
  class HITLRequest(TypedDict):
@@ -150,8 +150,8 @@ class InterruptOnConfig(TypedDict):
150
150
  )
151
151
  ```
152
152
  """
153
- arguments_schema: NotRequired[dict[str, Any]]
154
- """JSON schema for the arguments associated with the action, if edits are allowed."""
153
+ args_schema: NotRequired[dict[str, Any]]
154
+ """JSON schema for the args associated with the action, if edits are allowed."""
155
155
 
156
156
 
157
157
  class HumanInTheLoopMiddleware(AgentMiddleware):
@@ -214,12 +214,12 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
214
214
  # Create ActionRequest with description
215
215
  action_request = ActionRequest(
216
216
  name=tool_name,
217
- arguments=tool_args,
217
+ args=tool_args,
218
218
  description=description,
219
219
  )
220
220
 
221
221
  # Create ReviewConfig
222
- # eventually can get tool information and populate arguments_schema from there
222
+ # eventually can get tool information and populate args_schema from there
223
223
  review_config = ReviewConfig(
224
224
  action_name=tool_name,
225
225
  allowed_decisions=config["allowed_decisions"],
@@ -244,7 +244,7 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
244
244
  ToolCall(
245
245
  type="tool_call",
246
246
  name=edited_action["name"],
247
- args=edited_action["arguments"],
247
+ args=edited_action["args"],
248
248
  id=tool_call["id"],
249
249
  ),
250
250
  None,
@@ -2,16 +2,33 @@
2
2
 
3
3
  from __future__ import annotations
4
4
 
5
- from typing import TYPE_CHECKING, Any, Literal
5
+ from typing import TYPE_CHECKING, Annotated, Any, Literal
6
6
 
7
7
  from langchain_core.messages import AIMessage
8
+ from langgraph.channels.untracked_value import UntrackedValue
9
+ from typing_extensions import NotRequired
8
10
 
9
- from langchain.agents.middleware.types import AgentMiddleware, AgentState, hook_config
11
+ from langchain.agents.middleware.types import (
12
+ AgentMiddleware,
13
+ AgentState,
14
+ PrivateStateAttr,
15
+ hook_config,
16
+ )
10
17
 
11
18
  if TYPE_CHECKING:
12
19
  from langgraph.runtime import Runtime
13
20
 
14
21
 
22
+ class ModelCallLimitState(AgentState):
23
+ """State schema for ModelCallLimitMiddleware.
24
+
25
+ Extends AgentState with model call tracking fields.
26
+ """
27
+
28
+ thread_model_call_count: NotRequired[Annotated[int, PrivateStateAttr]]
29
+ run_model_call_count: NotRequired[Annotated[int, UntrackedValue, PrivateStateAttr]]
30
+
31
+
15
32
  def _build_limit_exceeded_message(
16
33
  thread_count: int,
17
34
  run_count: int,
@@ -69,7 +86,7 @@ class ModelCallLimitExceededError(Exception):
69
86
  super().__init__(msg)
70
87
 
71
88
 
72
- class ModelCallLimitMiddleware(AgentMiddleware):
89
+ class ModelCallLimitMiddleware(AgentMiddleware[ModelCallLimitState, Any]):
73
90
  """Middleware that tracks model call counts and enforces limits.
74
91
 
75
92
  This middleware monitors the number of model calls made during agent execution
@@ -97,6 +114,8 @@ class ModelCallLimitMiddleware(AgentMiddleware):
97
114
  ```
98
115
  """
99
116
 
117
+ state_schema = ModelCallLimitState
118
+
100
119
  def __init__(
101
120
  self,
102
121
  *,
@@ -135,7 +154,7 @@ class ModelCallLimitMiddleware(AgentMiddleware):
135
154
  self.exit_behavior = exit_behavior
136
155
 
137
156
  @hook_config(can_jump_to=["end"])
138
- def before_model(self, state: AgentState, runtime: Runtime) -> dict[str, Any] | None: # noqa: ARG002
157
+ def before_model(self, state: ModelCallLimitState, runtime: Runtime) -> dict[str, Any] | None: # noqa: ARG002
139
158
  """Check model call limits before making a model call.
140
159
 
141
160
  Args:
@@ -175,3 +194,18 @@ class ModelCallLimitMiddleware(AgentMiddleware):
175
194
  return {"jump_to": "end", "messages": [limit_ai_message]}
176
195
 
177
196
  return None
197
+
198
+ def after_model(self, state: ModelCallLimitState, runtime: Runtime) -> dict[str, Any] | None: # noqa: ARG002
199
+ """Increment model call counts after a model call.
200
+
201
+ Args:
202
+ state: The current agent state.
203
+ runtime: The langgraph runtime.
204
+
205
+ Returns:
206
+ State updates with incremented call counts.
207
+ """
208
+ return {
209
+ "thread_model_call_count": state.get("thread_model_call_count", 0) + 1,
210
+ "run_model_call_count": state.get("run_model_call_count", 0) + 1,
211
+ }
@@ -13,7 +13,7 @@ from langchain.agents.middleware.types import (
13
13
  from langchain.chat_models import init_chat_model
14
14
 
15
15
  if TYPE_CHECKING:
16
- from collections.abc import Callable
16
+ from collections.abc import Awaitable, Callable
17
17
 
18
18
  from langchain_core.language_models.chat_models import BaseChatModel
19
19
 
@@ -102,3 +102,38 @@ class ModelFallbackMiddleware(AgentMiddleware):
102
102
  continue
103
103
 
104
104
  raise last_exception
105
+
106
+ async def awrap_model_call(
107
+ self,
108
+ request: ModelRequest,
109
+ handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
110
+ ) -> ModelCallResult:
111
+ """Try fallback models in sequence on errors (async version).
112
+
113
+ Args:
114
+ request: Initial model request.
115
+ handler: Async callback to execute the model.
116
+
117
+ Returns:
118
+ AIMessage from successful model call.
119
+
120
+ Raises:
121
+ Exception: If all models fail, re-raises last exception.
122
+ """
123
+ # Try primary model first
124
+ last_exception: Exception
125
+ try:
126
+ return await handler(request)
127
+ except Exception as e: # noqa: BLE001
128
+ last_exception = e
129
+
130
+ # Try fallback models
131
+ for fallback_model in self.models:
132
+ request.model = fallback_model
133
+ try:
134
+ return await handler(request)
135
+ except Exception as e: # noqa: BLE001
136
+ last_exception = e
137
+ continue
138
+
139
+ raise last_exception
@@ -6,7 +6,7 @@ from __future__ import annotations
6
6
  from typing import TYPE_CHECKING, Annotated, Literal
7
7
 
8
8
  if TYPE_CHECKING:
9
- from collections.abc import Callable
9
+ from collections.abc import Awaitable, Callable
10
10
 
11
11
  from langchain_core.messages import ToolMessage
12
12
  from langchain_core.tools import tool
@@ -126,7 +126,7 @@ def write_todos(todos: list[Todo], tool_call_id: Annotated[str, InjectedToolCall
126
126
  )
127
127
 
128
128
 
129
- class PlanningMiddleware(AgentMiddleware):
129
+ class TodoListMiddleware(AgentMiddleware):
130
130
  """Middleware that provides todo list management capabilities to agents.
131
131
 
132
132
  This middleware adds a `write_todos` tool that allows agents to create and manage
@@ -139,10 +139,10 @@ class PlanningMiddleware(AgentMiddleware):
139
139
 
140
140
  Example:
141
141
  ```python
142
- from langchain.agents.middleware.planning import PlanningMiddleware
142
+ from langchain.agents.middleware.todo import TodoListMiddleware
143
143
  from langchain.agents import create_agent
144
144
 
145
- agent = create_agent("openai:gpt-4o", middleware=[PlanningMiddleware()])
145
+ agent = create_agent("openai:gpt-4o", middleware=[TodoListMiddleware()])
146
146
 
147
147
  # Agent now has access to write_todos tool and todo state tracking
148
148
  result = await agent.invoke({"messages": [HumanMessage("Help me refactor my codebase")]})
@@ -165,7 +165,7 @@ class PlanningMiddleware(AgentMiddleware):
165
165
  system_prompt: str = WRITE_TODOS_SYSTEM_PROMPT,
166
166
  tool_description: str = WRITE_TODOS_TOOL_DESCRIPTION,
167
167
  ) -> None:
168
- """Initialize the PlanningMiddleware with optional custom prompts.
168
+ """Initialize the TodoListMiddleware with optional custom prompts.
169
169
 
170
170
  Args:
171
171
  system_prompt: Custom system prompt to guide the agent on using the todo tool.
@@ -204,3 +204,16 @@ class PlanningMiddleware(AgentMiddleware):
204
204
  else self.system_prompt
205
205
  )
206
206
  return handler(request)
207
+
208
+ async def awrap_model_call(
209
+ self,
210
+ request: ModelRequest,
211
+ handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
212
+ ) -> ModelCallResult:
213
+ """Update the system prompt to include the todo system prompt (async version)."""
214
+ request.system_prompt = (
215
+ request.system_prompt + "\n\n" + self.system_prompt
216
+ if request.system_prompt
217
+ else self.system_prompt
218
+ )
219
+ return await handler(request)