langchain 1.0.0a9__tar.gz → 1.0.0a10__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (89)
  1. {langchain-1.0.0a9 → langchain-1.0.0a10}/PKG-INFO +2 -2
  2. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/__init__.py +1 -1
  3. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/agents/__init__.py +1 -1
  4. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/agents/middleware/human_in_the_loop.py +6 -8
  5. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/agents/middleware_agent.py +11 -6
  6. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/agents/react_agent.py +16 -15
  7. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/tools/__init__.py +9 -0
  8. {langchain-1.0.0a9/langchain/agents → langchain-1.0.0a10/langchain/tools}/tool_node.py +8 -10
  9. {langchain-1.0.0a9 → langchain-1.0.0a10}/pyproject.toml +18 -5
  10. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/test_middleware_agent.py +26 -8
  11. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/test_react_agent.py +3 -1
  12. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/test_tool_node.py +2 -4
  13. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/chat_models/test_chat_models.py +4 -8
  14. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/tools/test_imports.py +3 -0
  15. {langchain-1.0.0a9 → langchain-1.0.0a10}/LICENSE +0 -0
  16. {langchain-1.0.0a9 → langchain-1.0.0a10}/README.md +0 -0
  17. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/_internal/__init__.py +0 -0
  18. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/_internal/_documents.py +0 -0
  19. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/_internal/_lazy_import.py +0 -0
  20. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/_internal/_prompts.py +0 -0
  21. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/_internal/_typing.py +0 -0
  22. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/_internal/_utils.py +0 -0
  23. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/agents/_internal/__init__.py +0 -0
  24. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/agents/_internal/_typing.py +0 -0
  25. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/agents/middleware/__init__.py +0 -0
  26. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/agents/middleware/prompt_caching.py +0 -0
  27. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/agents/middleware/summarization.py +0 -0
  28. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/agents/middleware/types.py +0 -0
  29. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/agents/structured_output.py +0 -0
  30. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/chat_models/__init__.py +0 -0
  31. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/chat_models/base.py +0 -0
  32. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/documents/__init__.py +0 -0
  33. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/embeddings/__init__.py +0 -0
  34. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/embeddings/base.py +0 -0
  35. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/embeddings/cache.py +0 -0
  36. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/globals.py +0 -0
  37. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/py.typed +0 -0
  38. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/storage/__init__.py +0 -0
  39. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/storage/encoder_backed.py +0 -0
  40. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/storage/exceptions.py +0 -0
  41. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/storage/in_memory.py +0 -0
  42. {langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/text_splitter.py +0 -0
  43. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/__init__.py +0 -0
  44. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/integration_tests/__init__.py +0 -0
  45. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/integration_tests/agents/__init__.py +0 -0
  46. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/integration_tests/agents/test_response_format.py +0 -0
  47. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/integration_tests/cache/__init__.py +0 -0
  48. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/integration_tests/cache/fake_embeddings.py +0 -0
  49. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/integration_tests/chat_models/__init__.py +0 -0
  50. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/integration_tests/chat_models/test_base.py +0 -0
  51. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/integration_tests/conftest.py +0 -0
  52. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/integration_tests/embeddings/__init__.py +0 -0
  53. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/integration_tests/embeddings/test_base.py +0 -0
  54. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/integration_tests/test_compile.py +0 -0
  55. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/__init__.py +0 -0
  56. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/__init__.py +0 -0
  57. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/__snapshots__/test_middleware_agent.ambr +0 -0
  58. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/__snapshots__/test_react_agent_graph.ambr +0 -0
  59. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/any_str.py +0 -0
  60. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/compose-postgres.yml +0 -0
  61. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/compose-redis.yml +0 -0
  62. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/conftest.py +0 -0
  63. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/conftest_checkpointer.py +0 -0
  64. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/conftest_store.py +0 -0
  65. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/memory_assert.py +0 -0
  66. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/messages.py +0 -0
  67. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/model.py +0 -0
  68. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/specifications/responses.json +0 -0
  69. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/specifications/return_direct.json +0 -0
  70. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/test_middleware_decorators.py +0 -0
  71. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/test_react_agent_graph.py +0 -0
  72. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/test_response_format.py +0 -0
  73. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/test_responses.py +0 -0
  74. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/test_responses_spec.py +0 -0
  75. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/test_return_direct_spec.py +0 -0
  76. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/utils.py +0 -0
  77. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/chat_models/__init__.py +0 -0
  78. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/conftest.py +0 -0
  79. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/embeddings/__init__.py +0 -0
  80. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/embeddings/test_base.py +0 -0
  81. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/embeddings/test_caching.py +0 -0
  82. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/embeddings/test_imports.py +0 -0
  83. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/storage/__init__.py +0 -0
  84. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/storage/test_imports.py +0 -0
  85. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/stubs.py +0 -0
  86. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/test_dependencies.py +0 -0
  87. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/test_imports.py +0 -0
  88. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/test_pytest_config.py +0 -0
  89. {langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/tools/__init__.py +0 -0
{langchain-1.0.0a9 → langchain-1.0.0a10}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: langchain
- Version: 1.0.0a9
+ Version: 1.0.0a10
  Summary: Building applications with LLMs through composability
  License: MIT
  Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
@@ -9,7 +9,7 @@ Project-URL: repository, https://github.com/langchain-ai/langchain
  Requires-Python: <4.0.0,>=3.10.0
  Requires-Dist: langchain-core<2.0.0,>=0.3.75
  Requires-Dist: langchain-text-splitters<2.0.0,>=0.3.11
- Requires-Dist: langgraph<2.0.0,>=0.6.7
+ Requires-Dist: langgraph==1.0.0a4
  Requires-Dist: pydantic<3.0.0,>=2.7.4
  Provides-Extra: community
  Requires-Dist: langchain-community; extra == "community"

{langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/__init__.py

@@ -2,7 +2,7 @@

  from typing import Any

- __version__ = "1.0.0a8"
+ __version__ = "1.0.0a10"


  def __getattr__(name: str) -> Any: # noqa: ANN401

{langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/agents/__init__.py

@@ -1,7 +1,7 @@
  """langgraph.prebuilt exposes a higher-level API for creating and executing agents and tools."""

  from langchain.agents.react_agent import AgentState, create_agent
- from langchain.agents.tool_node import ToolNode
+ from langchain.tools import ToolNode

  __all__ = [
  "AgentState",

{langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/agents/middleware/human_in_the_loop.py

@@ -169,7 +169,7 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
  return None

  # Process all tool calls that require interrupts
- approved_tool_calls: list[ToolCall] = auto_approved_tool_calls.copy()
+ revised_tool_calls: list[ToolCall] = auto_approved_tool_calls.copy()
  artificial_tool_messages: list[ToolMessage] = []

  # Create interrupt requests for all tools that need approval
@@ -210,10 +210,10 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
  config = self.interrupt_on[tool_call["name"]]

  if response["type"] == "accept" and config.get("allow_accept"):
- approved_tool_calls.append(tool_call)
+ revised_tool_calls.append(tool_call)
  elif response["type"] == "edit" and config.get("allow_edit"):
  edited_action = response["args"]
- approved_tool_calls.append(
+ revised_tool_calls.append(
  ToolCall(
  type="tool_call",
  name=edited_action["action"],
@@ -233,6 +233,7 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
  tool_call_id=tool_call["id"],
  status="error",
  )
+ revised_tool_calls.append(tool_call)
  artificial_tool_messages.append(tool_message)
  else:
  allowed_actions = [
@@ -249,9 +250,6 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
  raise ValueError(msg)

  # Update the AI message to only include approved tool calls
- last_ai_msg.tool_calls = approved_tool_calls
+ last_ai_msg.tool_calls = revised_tool_calls

- if len(approved_tool_calls) > 0:
- return {"messages": [last_ai_msg, *artificial_tool_messages]}
-
- return {"jump_to": "model", "messages": artificial_tool_messages}
+ return {"messages": [last_ai_msg, *artificial_tool_messages]}

{langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/agents/middleware_agent.py

@@ -33,8 +33,8 @@ from langchain.agents.structured_output import (
  StructuredOutputValidationError,
  ToolStrategy,
  )
- from langchain.agents.tool_node import ToolNode
  from langchain.chat_models import init_chat_model
+ from langchain.tools import ToolNode

  STRUCTURED_OUTPUT_ERROR_TEMPLATE = "Error: {error}\n Please fix your mistakes."

@@ -532,29 +532,34 @@ def _make_model_to_tools_edge(
  first_node: str, structured_output_tools: dict[str, OutputToolBinding], tool_node: ToolNode
  ) -> Callable[[dict[str, Any]], str | list[Send] | None]:
  def model_to_tools(state: dict[str, Any]) -> str | list[Send] | None:
+ # 1. if there's an explicit jump_to in the state, use it
  if jump_to := state.get("jump_to"):
  return _resolve_jump(jump_to, first_node)

  last_ai_message, tool_messages = _fetch_last_ai_and_tool_messages(state["messages"])
  tool_message_ids = [m.tool_call_id for m in tool_messages]

+ # 2. if the model hasn't called any tools, jump to END
+ # this is the classic exit condition for an agent loop
+ if len(last_ai_message.tool_calls) == 0:
+ return END
+
  pending_tool_calls = [
  c
  for c in last_ai_message.tool_calls
  if c["id"] not in tool_message_ids and c["name"] not in structured_output_tools
  ]

+ # 3. if there are pending tool calls, jump to the tool node
  if pending_tool_calls:
- # imo we should not be injecting state, store here,
- # this should be done by the tool node itself ideally but this is a consequence
- # of using Send w/ tool calls directly which allows more intuitive interrupt behavior
- # largely internal so can be fixed later
  pending_tool_calls = [
  tool_node.inject_tool_args(call, state, None) for call in pending_tool_calls
  ]
  return [Send("tools", [tool_call]) for tool_call in pending_tool_calls]

- return END
+ # 4. AIMessage has tool calls, but there are no pending tool calls
+ # which suggests the injection of artificial tool messages. jump to the first node
+ return first_node

  return model_to_tools

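A simplified sketch of the routing order in the new model_to_tools edge above; the real implementation returns Send objects for the tool node and resolves jump targets via _resolve_jump, but the decision sequence is the same. The function and parameter names here are illustrative, not part of the package.

```python
from typing import Any

END = "__end__"  # stand-in for langgraph's END sentinel in this sketch


def route_after_model(
    state: dict[str, Any],
    first_node: str,
    answered_ids: set[str],
    structured_output_tools: set[str],
) -> str:
    # 1. an explicit jump_to in state wins
    if jump_to := state.get("jump_to"):
        return jump_to

    tool_calls = state["tool_calls"]  # tool calls on the last AIMessage

    # 2. no tool calls at all: classic exit condition, end the agent loop
    if not tool_calls:
        return END

    # 3. unanswered calls (excluding structured-output tools) go to the tool node
    pending = [
        c
        for c in tool_calls
        if c["id"] not in answered_ids and c["name"] not in structured_output_tools
    ]
    if pending:
        return "tools"

    # 4. every tool call already has a (possibly artificial) ToolMessage,
    #    e.g. after a human-in-the-loop rejection: loop back to the first node
    return first_node
```
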
 
{langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/agents/react_agent.py

@@ -54,8 +54,8 @@ from langchain.agents.structured_output import (
  StructuredOutputValidationError,
  ToolStrategy,
  )
- from langchain.agents.tool_node import ToolNode
  from langchain.chat_models import init_chat_model
+ from langchain.tools import ToolNode

  if TYPE_CHECKING:
  from langchain_core.tools import BaseTool
@@ -971,7 +971,7 @@ def create_agent( # noqa: D417
  return model.bind_tools(tools)
  ```

- !!! note "Dynamic Model Requirements"
+ .. note::
  Ensure returned models have appropriate tools bound via
  `.bind_tools()` and support required functionality. Bound tools
  must be a subset of those specified in the `tools` parameter.
@@ -985,10 +985,10 @@ def create_agent( # noqa: D417
  of the list of messages in state["messages"].
  - SystemMessage: this is added to the beginning of the list of messages
  in state["messages"].
- - Callable: This function should take in full graph state and the output is then passed
- to the language model.
- - Runnable: This runnable should take in full graph state and the output is then passed
- to the language model.
+ - Callable: This function should take in full graph state and the output is
+ then passed to the language model.
+ - Runnable: This runnable should take in full graph state and the output is
+ then passed to the language model.

  response_format: An optional UsingToolStrategy configuration for structured responses.

@@ -1002,7 +1002,8 @@ def create_agent( # noqa: D417

  - schemas: A sequence of ResponseSchema objects that define
  the structured output format
- - tool_choice: Either "required" or "auto" to control when structured output is used
+ - tool_choice: Either "required" or "auto" to control when structured
+ output is used

  Each ResponseSchema contains:

@@ -1011,12 +1012,12 @@ def create_agent( # noqa: D417
  - description: Optional custom description (defaults to model docstring)
  - strict: Whether to enforce strict validation

- !!! Important
+ .. important::
  `response_format` requires the model to support tool calling

- !!! Note
- Structured responses are handled directly in the model call node via tool calls,
- eliminating the need for separate structured response nodes.
+ .. note::
+ Structured responses are handled directly in the model call node via
+ tool calls, eliminating the need for separate structured response nodes.

  pre_model_hook: An optional node to add before the `agent` node
  (i.e., the node that calls the LLM).
@@ -1037,12 +1038,12 @@ def create_agent( # noqa: D417
  }
  ```

- !!! Important
+ .. important::
  At least one of `messages` or `llm_input_messages` MUST be provided
  and will be used as an input to the `agent` node.
  The rest of the keys will be added to the graph state.

- !!! Warning
+ .. warning::
  If you are returning `messages` in the pre-model hook,
  you should OVERWRITE the `messages` key by doing the following:

@@ -1059,7 +1060,7 @@ def create_agent( # noqa: D417
  Post-model hook must be a callable or a runnable that takes in
  current graph state and returns a state update.

- !!! Note
+ .. note::
  Only available with `version="v2"`.
  state_schema: An optional state schema that defines graph state.
  Must have `messages` and `remaining_steps` keys.
@@ -1092,7 +1093,7 @@ def create_agent( # noqa: D417
  another graph as a subgraph node -
  particularly useful for building multi-agent systems.

- !!! warning "`config_schema` Deprecated"
+ .. warning::
  The `config_schema` parameter is deprecated in v0.6.0 and support will be removed in v2.0.0.
  Please use `context_schema` instead to specify the schema for run-scoped context.

{langchain-1.0.0a9 → langchain-1.0.0a10}/langchain/tools/__init__.py

@@ -8,10 +8,19 @@ from langchain_core.tools import (
  tool,
  )

+ from langchain.tools.tool_node import (
+ InjectedState,
+ InjectedStore,
+ ToolNode,
+ )
+
  __all__ = [
  "BaseTool",
+ "InjectedState",
+ "InjectedStore",
  "InjectedToolArg",
  "InjectedToolCallId",
  "ToolException",
+ "ToolNode",
  "tool",
  ]

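A quick check of the expanded public surface (this mirrors the updated tests/unit_tests/tools/test_imports.py near the end of this diff):

```python
from langchain import tools

# InjectedState, InjectedStore and ToolNode are now exported from langchain.tools.
assert {"InjectedState", "InjectedStore", "ToolNode"} <= set(tools.__all__)
```
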
{langchain-1.0.0a9/langchain/agents → langchain-1.0.0a10/langchain/tools}/tool_node.py

@@ -21,7 +21,7 @@ Key Components:
  Typical Usage:
  ```python
  from langchain_core.tools import tool
- from langchain.agents import ToolNode
+ from langchain.tools import ToolNode


  @tool
@@ -344,7 +344,7 @@ class ToolNode(RunnableCallable):
  Basic usage:

  ```python
- from langchain.agents import ToolNode
+ from langchain.tools import ToolNode
  from langchain_core.tools import tool

  @tool
@@ -359,7 +359,7 @@ class ToolNode(RunnableCallable):

  ```python
  from typing_extensions import Annotated
- from langgraph.agents.tool_node import InjectedState
+ from langchain.tools import InjectedState

  @tool
  def context_tool(query: str, state: Annotated[dict, InjectedState]) -> str:
@@ -885,7 +885,8 @@ def tools_condition(

  ```python
  from langgraph.graph import StateGraph
- from langgraph.agents.tool_node import ToolNode, tools_condition
+ from langchain.tools import ToolNode
+ from langchain.tools.tool_node import tools_condition
  from typing_extensions import TypedDict


@@ -950,9 +951,7 @@ class InjectedState(InjectedToolArg):
  from typing_extensions import Annotated, TypedDict

  from langchain_core.messages import BaseMessage, AIMessage
- from langchain_core.tools import tool
-
- from langgraph.agents.tool_node import InjectedState, ToolNode
+ from langchain.tools import InjectedState, ToolNode, tool


  class AgentState(TypedDict):
@@ -1020,15 +1019,14 @@ class InjectedStore(InjectedToolArg):
  for maintaining context, user preferences, or any other data that needs to
  persist beyond individual workflow executions.

- !!! Warning
+ .. warning::
  `InjectedStore` annotation requires `langchain-core >= 0.3.8`

  Example:
  ```python
  from typing_extensions import Annotated
- from langchain_core.tools import tool
  from langgraph.store.memory import InMemoryStore
- from langgraph.agents.tool_node import InjectedStore, ToolNode
+ from langchain.tools import InjectedStore, ToolNode, tool

  @tool
  def save_preference(

{langchain-1.0.0a9 → langchain-1.0.0a10}/pyproject.toml

@@ -10,11 +10,11 @@ requires-python = ">=3.10.0,<4.0.0"
  dependencies = [
  "langchain-core>=0.3.75,<2.0.0",
  "langchain-text-splitters>=0.3.11,<2.0.0",
- "langgraph>=0.6.7,<2.0.0",
+ "langgraph==1.0.0a4",
  "pydantic>=2.7.4,<3.0.0",
  ]
  name = "langchain"
- version = "1.0.0a9"
+ version = "1.0.0a10"
  description = "Building applications with LLMs through composability"
  readme = "README.md"

@@ -111,6 +111,9 @@ test_integration = [
  "langchain-text-splitters",
  ]

+ [tool.uv]
+ prerelease = "allow"
+
  [tool.uv.sources.langchain-core]
  path = "../core"
  editable = true
@@ -153,15 +156,16 @@ unfixable = [
  "B028",
  ]

- [tool.ruff.lint.pydocstyle]
- convention = "google"
-
  [tool.ruff.lint.pyupgrade]
  keep-runtime-typing = true

  [tool.ruff.lint.flake8-annotations]
  allow-star-arg-any = true

+ [tool.ruff.lint.pydocstyle]
+ convention = "google"
+ ignore-var-parameters = true
+
  [tool.ruff.lint.per-file-ignores]
  "tests/*" = [
  "D1",
@@ -215,6 +219,15 @@ allow-star-arg-any = true
  "BLE001",
  "N801",
  ]
+ "langchain/tools/tool_node.py" = [
+ "ANN401",
+ "A002",
+ "A001",
+ "B904",
+ "PLR2004",
+ "C901",
+ "TRY004",
+ ]

  [tool.mypy]
  strict = true

{langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/test_middleware_agent.py

@@ -21,7 +21,7 @@ from langchain_core.messages import (
  from langchain_core.tools import tool, InjectedToolCallId

  from langchain.agents.middleware_agent import create_agent
- from langchain.agents.tool_node import InjectedState
+ from langchain.tools import InjectedState
  from langchain.agents.middleware.human_in_the_loop import (
  HumanInTheLoopMiddleware,
  ActionRequest,
@@ -510,11 +510,12 @@ def test_human_in_the_loop_middleware_single_tool_response() -> None:
  result = middleware.after_model(state)
  assert result is not None
  assert "messages" in result
- assert len(result["messages"]) == 1 # Only tool message when no approved tool calls
- assert isinstance(result["messages"][0], ToolMessage)
- assert result["messages"][0].content == "Custom response message"
- assert result["messages"][0].name == "test_tool"
- assert result["messages"][0].tool_call_id == "1"
+ assert len(result["messages"]) == 2
+ assert isinstance(result["messages"][0], AIMessage)
+ assert isinstance(result["messages"][1], ToolMessage)
+ assert result["messages"][1].content == "Custom response message"
+ assert result["messages"][1].name == "test_tool"
+ assert result["messages"][1].tool_call_id == "1"


  def test_human_in_the_loop_middleware_multiple_tools_mixed_responses() -> None:
@@ -552,10 +553,11 @@ def test_human_in_the_loop_middleware_multiple_tools_mixed_responses() -> None:
  len(result["messages"]) == 2
  ) # AI message with accepted tool call + tool message for rejected

- # First message should be the AI message with updated tool calls
+ # First message should be the AI message with both tool calls
  updated_ai_message = result["messages"][0]
- assert len(updated_ai_message.tool_calls) == 1 # Only accepted tool call
+ assert len(updated_ai_message.tool_calls) == 2 # Both tool calls remain
  assert updated_ai_message.tool_calls[0]["name"] == "get_forecast" # Accepted
+ assert updated_ai_message.tool_calls[1]["name"] == "get_temperature" # Got response

  # Second message should be the tool message for the rejected tool call
  tool_message = result["messages"][1]
@@ -1405,3 +1407,19 @@ def test_injected_state_in_middleware_agent() -> None:
  assert tool_message.name == "test_state"
  assert "success" in tool_message.content
  assert tool_message.tool_call_id == "test_call_1"
+
+
+ def test_jump_to_is_ephemeral() -> None:
+ class MyMiddleware(AgentMiddleware):
+ def before_model(self, state: AgentState) -> dict[str, Any]:
+ assert "jump_to" not in state
+ return {"jump_to": "model"}
+
+ def after_model(self, state: AgentState) -> dict[str, Any]:
+ assert "jump_to" not in state
+ return {"jump_to": "model"}
+
+ agent = create_agent(model=FakeToolCallingModel(), middleware=[MyMiddleware()])
+ agent = agent.compile()
+ result = agent.invoke({"messages": [HumanMessage("Hello")]})
+ assert "jump_to" not in result

{langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/test_react_agent.py

@@ -36,9 +36,11 @@ from langchain.agents import (
  create_agent,
  )
  from langchain.agents.react_agent import _validate_chat_history
- from langchain.agents.tool_node import (
+ from langchain.tools import (
  InjectedState,
  InjectedStore,
+ )
+ from langchain.tools.tool_node import (
  _get_state_args,
  _infer_handled_types,
  )

{langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/agents/test_tool_node.py

@@ -35,13 +35,11 @@ from typing_extensions import TypedDict
  from langchain.agents import (
  ToolNode,
  )
- from langchain.agents.tool_node import (
- TOOL_CALL_ERROR_TEMPLATE,
+ from langchain.tools import (
  InjectedState,
  InjectedStore,
- ToolInvocationError,
- tools_condition,
  )
+ from langchain.tools.tool_node import TOOL_CALL_ERROR_TEMPLATE, ToolInvocationError, tools_condition

  from .messages import _AnyIdHumanMessage, _AnyIdToolMessage
  from .model import FakeToolCallingModel

{langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/chat_models/test_chat_models.py

@@ -79,7 +79,7 @@ def test_configurable() -> None:

  Example:

- .. python::
+ .. code-block:: python

  # This creates a configurable model without specifying which model
  model = init_chat_model()
@@ -88,10 +88,7 @@ def test_configurable() -> None:
  model.get_num_tokens("hello") # AttributeError!

  # This works - provides model at runtime
- response = model.invoke(
- "Hello",
- config={"configurable": {"model": "gpt-4o"}}
- )
+ response = model.invoke("Hello", config={"configurable": {"model": "gpt-4o"}})

  """
  model = init_chat_model()
@@ -208,7 +205,7 @@ def test_configurable_with_default() -> None:

  Example:

- .. python::
+ .. code-block:: python

  # This creates a configurable model with default parameters (model)
  model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")
@@ -218,8 +215,7 @@ def test_configurable_with_default() -> None:

  # This also works - switches to Claude at runtime
  response = model.invoke(
- "Hello",
- config={"configurable": {"my_model_model": "claude-3-sonnet-20240229"}}
+ "Hello", config={"configurable": {"my_model_model": "claude-3-sonnet-20240229"}}
  )

  """

{langchain-1.0.0a9 → langchain-1.0.0a10}/tests/unit_tests/tools/test_imports.py

@@ -2,9 +2,12 @@ from langchain import tools

  EXPECTED_ALL = {
  "BaseTool",
+ "InjectedState",
+ "InjectedStore",
  "InjectedToolArg",
  "InjectedToolCallId",
  "ToolException",
+ "ToolNode",
  "tool",
  }