langchain 1.0.1__py3-none-any.whl → 1.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
langchain/__init__.py CHANGED
@@ -1,3 +1,3 @@
1
1
  """Main entrypoint into LangChain."""
2
2
 
3
- __version__ = "1.0.1"
3
+ __version__ = "1.0.5"
@@ -3,7 +3,15 @@
3
3
  from __future__ import annotations
4
4
 
5
5
  import itertools
6
- from typing import TYPE_CHECKING, Annotated, Any, cast, get_args, get_origin, get_type_hints
6
+ from typing import (
7
+ TYPE_CHECKING,
8
+ Annotated,
9
+ Any,
10
+ cast,
11
+ get_args,
12
+ get_origin,
13
+ get_type_hints,
14
+ )
7
15
 
8
16
  from langchain_core.language_models.chat_models import BaseChatModel
9
17
  from langchain_core.messages import AIMessage, AnyMessage, SystemMessage, ToolMessage
@@ -11,10 +19,11 @@ from langchain_core.tools import BaseTool
11
19
  from langgraph._internal._runnable import RunnableCallable
12
20
  from langgraph.constants import END, START
13
21
  from langgraph.graph.state import StateGraph
22
+ from langgraph.prebuilt.tool_node import ToolCallWithContext, ToolNode
14
23
  from langgraph.runtime import Runtime # noqa: TC002
15
24
  from langgraph.types import Command, Send
16
25
  from langgraph.typing import ContextT # noqa: TC002
17
- from typing_extensions import NotRequired, Required, TypedDict, TypeVar
26
+ from typing_extensions import NotRequired, Required, TypedDict
18
27
 
19
28
  from langchain.agents.middleware.types import (
20
29
  AgentMiddleware,
@@ -23,6 +32,8 @@ from langchain.agents.middleware.types import (
23
32
  ModelRequest,
24
33
  ModelResponse,
25
34
  OmitFromSchema,
35
+ ResponseT,
36
+ StateT_co,
26
37
  _InputAgentState,
27
38
  _OutputAgentState,
28
39
  )
@@ -33,11 +44,11 @@ from langchain.agents.structured_output import (
33
44
  ProviderStrategy,
34
45
  ProviderStrategyBinding,
35
46
  ResponseFormat,
47
+ StructuredOutputError,
36
48
  StructuredOutputValidationError,
37
49
  ToolStrategy,
38
50
  )
39
51
  from langchain.chat_models import init_chat_model
40
- from langchain.tools.tool_node import ToolCallWithContext, _ToolNode
41
52
 
42
53
  if TYPE_CHECKING:
43
54
  from collections.abc import Awaitable, Callable, Sequence
@@ -48,12 +59,10 @@ if TYPE_CHECKING:
48
59
  from langgraph.store.base import BaseStore
49
60
  from langgraph.types import Checkpointer
50
61
 
51
- from langchain.tools.tool_node import ToolCallRequest, ToolCallWrapper
62
+ from langchain.agents.middleware.types import ToolCallRequest, ToolCallWrapper
52
63
 
53
64
  STRUCTURED_OUTPUT_ERROR_TEMPLATE = "Error: {error}\n Please fix your mistakes."
54
65
 
55
- ResponseT = TypeVar("ResponseT")
56
-
57
66
 
58
67
  def _normalize_to_model_response(result: ModelResponse | AIMessage) -> ModelResponse:
59
68
  """Normalize middleware return value to ModelResponse."""
@@ -508,7 +517,7 @@ def create_agent( # noqa: PLR0915
508
517
  tools: Sequence[BaseTool | Callable | dict[str, Any]] | None = None,
509
518
  *,
510
519
  system_prompt: str | None = None,
511
- middleware: Sequence[AgentMiddleware[AgentState[ResponseT], ContextT]] = (),
520
+ middleware: Sequence[AgentMiddleware[StateT_co, ContextT]] = (),
512
521
  response_format: ResponseFormat[ResponseT] | type[ResponseT] | None = None,
513
522
  state_schema: type[AgentState[ResponseT]] | None = None,
514
523
  context_schema: type[ContextT] | None = None,
@@ -529,44 +538,65 @@ def create_agent( # noqa: PLR0915
529
538
 
530
539
  Args:
531
540
  model: The language model for the agent. Can be a string identifier
532
- (e.g., `"openai:gpt-4"`) or a chat model instance (e.g., `ChatOpenAI()`).
541
+ (e.g., `"openai:gpt-4"`) or a direct chat model instance (e.g.,
542
+ [`ChatOpenAI`][langchain_openai.ChatOpenAI] or another
543
+ [chat model](https://docs.langchain.com/oss/python/integrations/chat)).
544
+
533
545
  For a full list of supported model strings, see
534
546
  [`init_chat_model`][langchain.chat_models.init_chat_model(model_provider)].
535
- tools: A list of tools, `dicts`, or `Callable`. If `None` or an empty list,
536
- the agent will consist of a model node without a tool calling loop.
537
- system_prompt: An optional system prompt for the LLM. Prompts are converted to a
538
- `SystemMessage` and added to the beginning of the message list.
547
+ tools: A list of tools, `dicts`, or `Callable`.
548
+
549
+ If `None` or an empty list, the agent will consist of a model node without a
550
+ tool calling loop.
551
+ system_prompt: An optional system prompt for the LLM.
552
+
553
+ Prompts are converted to a
554
+ [`SystemMessage`][langchain.messages.SystemMessage] and added to the
555
+ beginning of the message list.
539
556
  middleware: A sequence of middleware instances to apply to the agent.
540
- Middleware can intercept and modify agent behavior at various stages.
557
+
558
+ Middleware can intercept and modify agent behavior at various stages. See
559
+ the [full guide](https://docs.langchain.com/oss/python/langchain/middleware).
541
560
  response_format: An optional configuration for structured responses.
561
+
542
562
  Can be a `ToolStrategy`, `ProviderStrategy`, or a Pydantic model class.
563
+
543
564
  If provided, the agent will handle structured output during the
544
565
  conversation flow. Raw schemas will be wrapped in an appropriate strategy
545
566
  based on model capabilities.
546
567
  state_schema: An optional `TypedDict` schema that extends `AgentState`.
568
+
547
569
  When provided, this schema is used instead of `AgentState` as the base
548
570
  schema for merging with middleware state schemas. This allows users to
549
571
  add custom state fields without needing to create custom middleware.
550
- Generally, it's recommended to use state_schema extensions via middleware
572
+ Generally, it's recommended to use `state_schema` extensions via middleware
551
573
  to keep relevant extensions scoped to corresponding hooks / tools.
574
+
552
575
  The schema must be a subclass of `AgentState[ResponseT]`.
553
576
  context_schema: An optional schema for runtime context.
554
- checkpointer: An optional checkpoint saver object. This is used for persisting
555
- the state of the graph (e.g., as chat memory) for a single thread
556
- (e.g., a single conversation).
557
- store: An optional store object. This is used for persisting data
558
- across multiple threads (e.g., multiple conversations / users).
577
+ checkpointer: An optional checkpoint saver object.
578
+
579
+ Used for persisting the state of the graph (e.g., as chat memory) for a
580
+ single thread (e.g., a single conversation).
581
+ store: An optional store object.
582
+
583
+ Used for persisting data across multiple threads (e.g., multiple
584
+ conversations / users).
559
585
  interrupt_before: An optional list of node names to interrupt before.
586
+
560
587
  Useful if you want to add a user confirmation or other interrupt
561
588
  before taking an action.
562
589
  interrupt_after: An optional list of node names to interrupt after.
590
+
563
591
  Useful if you want to return directly or run additional processing
564
592
  on an output.
565
- debug: Whether to enable verbose logging for graph execution. When enabled,
566
- prints detailed information about each node execution, state updates,
567
- and transitions during agent runtime. Useful for debugging middleware
568
- behavior and understanding agent execution flow.
593
+ debug: Whether to enable verbose logging for graph execution.
594
+
595
+ When enabled, prints detailed information about each node execution, state
596
+ updates, and transitions during agent runtime. Useful for debugging
597
+ middleware behavior and understanding agent execution flow.
569
598
  name: An optional name for the `CompiledStateGraph`.
599
+
570
600
  This name will be automatically used when adding the agent graph to
571
601
  another graph as a subgraph node - particularly useful for building
572
602
  multi-agent systems.
@@ -576,11 +606,12 @@ def create_agent( # noqa: PLR0915
576
606
  A compiled `StateGraph` that can be used for chat interactions.
577
607
 
578
608
  The agent node calls the language model with the messages list (after applying
579
- the system prompt). If the resulting `AIMessage` contains `tool_calls`, the graph
580
- will then call the tools. The tools node executes the tools and adds the responses
581
- to the messages list as `ToolMessage` objects. The agent node then calls the
582
- language model again. The process repeats until no more `tool_calls` are
583
- present in the response. The agent then returns the full list of messages.
609
+ the system prompt). If the resulting [`AIMessage`][langchain.messages.AIMessage]
610
+ contains `tool_calls`, the graph will then call the tools. The tools node executes
611
+ the tools and adds the responses to the messages list as
612
+ [`ToolMessage`][langchain.messages.ToolMessage] objects. The agent node then calls
613
+ the language model again. The process repeats until no more `tool_calls` are present
614
+ in the response. The agent then returns the full list of messages.
584
615
 
585
616
  Example:
586
617
  ```python
@@ -593,7 +624,7 @@ def create_agent( # noqa: PLR0915
593
624
 
594
625
 
595
626
  graph = create_agent(
596
- model="anthropic:claude-sonnet-4-5",
627
+ model="anthropic:claude-sonnet-4-5-20250929",
597
628
  tools=[check_weather],
598
629
  system_prompt="You are a helpful assistant",
599
630
  )
@@ -675,7 +706,7 @@ def create_agent( # noqa: PLR0915
675
706
  awrap_tool_call_wrapper = _chain_async_tool_call_wrappers(async_wrappers)
676
707
 
677
708
  # Setup tools
678
- tool_node: _ToolNode | None = None
709
+ tool_node: ToolNode | None = None
679
710
  # Extract built-in provider tools (dict format) and regular tools (BaseTool/callables)
680
711
  built_in_tools = [t for t in tools if isinstance(t, dict)]
681
712
  regular_tools = [t for t in tools if not isinstance(t, dict)]
@@ -685,7 +716,7 @@ def create_agent( # noqa: PLR0915
685
716
 
686
717
  # Only create ToolNode if we have client-side tools
687
718
  tool_node = (
688
- _ToolNode(
719
+ ToolNode(
689
720
  tools=available_tools,
690
721
  wrap_tool_call=wrap_tool_call_wrapper,
691
722
  awrap_tool_call=awrap_tool_call_wrapper,
@@ -762,7 +793,7 @@ def create_agent( # noqa: PLR0915
762
793
  async_handlers = [m.awrap_model_call for m in middleware_w_awrap_model_call]
763
794
  awrap_model_call_handler = _chain_async_model_call_handlers(async_handlers)
764
795
 
765
- state_schemas = {m.state_schema for m in middleware}
796
+ state_schemas: set[type] = {m.state_schema for m in middleware}
766
797
  # Use provided state_schema if available, otherwise use base AgentState
767
798
  base_state = state_schema if state_schema is not None else AgentState
768
799
  state_schemas.add(base_state)
@@ -797,8 +828,16 @@ def create_agent( # noqa: PLR0915
797
828
  provider_strategy_binding = ProviderStrategyBinding.from_schema_spec(
798
829
  effective_response_format.schema_spec
799
830
  )
800
- structured_response = provider_strategy_binding.parse(output)
801
- return {"messages": [output], "structured_response": structured_response}
831
+ try:
832
+ structured_response = provider_strategy_binding.parse(output)
833
+ except Exception as exc: # noqa: BLE001
834
+ schema_name = getattr(
835
+ effective_response_format.schema_spec.schema, "__name__", "response_format"
836
+ )
837
+ validation_error = StructuredOutputValidationError(schema_name, exc, output)
838
+ raise validation_error
839
+ else:
840
+ return {"messages": [output], "structured_response": structured_response}
802
841
  return {"messages": [output]}
803
842
 
804
843
  # Handle structured output with tool strategy
@@ -812,11 +851,11 @@ def create_agent( # noqa: PLR0915
812
851
  ]
813
852
 
814
853
  if structured_tool_calls:
815
- exception: Exception | None = None
854
+ exception: StructuredOutputError | None = None
816
855
  if len(structured_tool_calls) > 1:
817
856
  # Handle multiple structured outputs error
818
857
  tool_names = [tc["name"] for tc in structured_tool_calls]
819
- exception = MultipleStructuredOutputsError(tool_names)
858
+ exception = MultipleStructuredOutputsError(tool_names, output)
820
859
  should_retry, error_message = _handle_structured_output_error(
821
860
  exception, effective_response_format
822
861
  )
@@ -858,7 +897,7 @@ def create_agent( # noqa: PLR0915
858
897
  "structured_response": structured_response,
859
898
  }
860
899
  except Exception as exc: # noqa: BLE001
861
- exception = StructuredOutputValidationError(tool_call["name"], exc)
900
+ exception = StructuredOutputValidationError(tool_call["name"], exc, output)
862
901
  should_retry, error_message = _handle_structured_output_error(
863
902
  exception, effective_response_format
864
903
  )
@@ -1229,11 +1268,14 @@ def create_agent( # noqa: PLR0915
1229
1268
 
1230
1269
  graph.add_conditional_edges(
1231
1270
  "tools",
1232
- _make_tools_to_model_edge(
1233
- tool_node=tool_node,
1234
- model_destination=loop_entry_node,
1235
- structured_output_tools=structured_output_tools,
1236
- end_destination=exit_node,
1271
+ RunnableCallable(
1272
+ _make_tools_to_model_edge(
1273
+ tool_node=tool_node,
1274
+ model_destination=loop_entry_node,
1275
+ structured_output_tools=structured_output_tools,
1276
+ end_destination=exit_node,
1277
+ ),
1278
+ trace=False,
1237
1279
  ),
1238
1280
  tools_to_model_destinations,
1239
1281
  )
@@ -1250,19 +1292,25 @@ def create_agent( # noqa: PLR0915
1250
1292
 
1251
1293
  graph.add_conditional_edges(
1252
1294
  loop_exit_node,
1253
- _make_model_to_tools_edge(
1254
- model_destination=loop_entry_node,
1255
- structured_output_tools=structured_output_tools,
1256
- end_destination=exit_node,
1295
+ RunnableCallable(
1296
+ _make_model_to_tools_edge(
1297
+ model_destination=loop_entry_node,
1298
+ structured_output_tools=structured_output_tools,
1299
+ end_destination=exit_node,
1300
+ ),
1301
+ trace=False,
1257
1302
  ),
1258
1303
  model_to_tools_destinations,
1259
1304
  )
1260
1305
  elif len(structured_output_tools) > 0:
1261
1306
  graph.add_conditional_edges(
1262
1307
  loop_exit_node,
1263
- _make_model_to_model_edge(
1264
- model_destination=loop_entry_node,
1265
- end_destination=exit_node,
1308
+ RunnableCallable(
1309
+ _make_model_to_model_edge(
1310
+ model_destination=loop_entry_node,
1311
+ end_destination=exit_node,
1312
+ ),
1313
+ trace=False,
1266
1314
  ),
1267
1315
  [loop_entry_node, exit_node],
1268
1316
  )
@@ -1372,7 +1420,7 @@ def create_agent( # noqa: PLR0915
1372
1420
  debug=debug,
1373
1421
  name=name,
1374
1422
  cache=cache,
1375
- )
1423
+ ).with_config({"recursion_limit": 10_000})
1376
1424
 
1377
1425
 
1378
1426
  def _resolve_jump(
@@ -1491,7 +1539,7 @@ def _make_model_to_model_edge(
1491
1539
 
1492
1540
  def _make_tools_to_model_edge(
1493
1541
  *,
1494
- tool_node: _ToolNode,
1542
+ tool_node: ToolNode,
1495
1543
  model_destination: str,
1496
1544
  structured_output_tools: dict[str, OutputToolBinding],
1497
1545
  end_destination: str,
@@ -1563,7 +1611,7 @@ def _add_middleware_edge(
1563
1611
  if "model" in can_jump_to and name != model_destination:
1564
1612
  destinations.append(model_destination)
1565
1613
 
1566
- graph.add_conditional_edges(name, jump_edge, destinations)
1614
+ graph.add_conditional_edges(name, RunnableCallable(jump_edge, trace=False), destinations)
1567
1615
 
1568
1616
  else:
1569
1617
  graph.add_edge(name, default_destination)
@@ -182,7 +182,7 @@ class ClearToolUsesEdit(ContextEdit):
182
182
 
183
183
 
184
184
  class ContextEditingMiddleware(AgentMiddleware):
185
- """Middleware that automatically prunes tool results to manage context size.
185
+ """Automatically prunes tool results to manage context size.
186
186
 
187
187
  The middleware applies a sequence of edits when the total input token count
188
188
  exceeds configured thresholds. Currently the `ClearToolUsesEdit` strategy is
@@ -87,7 +87,7 @@ class ModelCallLimitExceededError(Exception):
87
87
 
88
88
 
89
89
  class ModelCallLimitMiddleware(AgentMiddleware[ModelCallLimitState, Any]):
90
- """Middleware that tracks model call counts and enforces limits.
90
+ """Tracks model call counts and enforces limits.
91
91
 
92
92
  This middleware monitors the number of model calls made during agent execution
93
93
  and can terminate the agent when specified limits are reached. It supports
@@ -31,7 +31,7 @@ class ModelFallbackMiddleware(AgentMiddleware):
31
31
 
32
32
  fallback = ModelFallbackMiddleware(
33
33
  "openai:gpt-4o-mini", # Try first on error
34
- "anthropic:claude-3-5-sonnet-20241022", # Then this
34
+ "anthropic:claude-sonnet-4-5-20250929", # Then this
35
35
  )
36
36
 
37
37
  agent = create_agent(
@@ -39,7 +39,7 @@ class ModelFallbackMiddleware(AgentMiddleware):
39
39
  middleware=[fallback],
40
40
  )
41
41
 
42
- # If primary fails: tries gpt-4o-mini, then claude-3-5-sonnet
42
+ # If primary fails: tries gpt-4o-mini, then claude-sonnet-4-5-20250929
43
43
  result = await agent.invoke({"messages": [HumanMessage("Hello")]})
44
44
  ```
45
45
  """
@@ -45,7 +45,7 @@ if TYPE_CHECKING:
45
45
  from langgraph.runtime import Runtime
46
46
  from langgraph.types import Command
47
47
 
48
- from langchain.tools.tool_node import ToolCallRequest
48
+ from langchain.agents.middleware.types import ToolCallRequest
49
49
 
50
50
  LOGGER = logging.getLogger(__name__)
51
51
  _DONE_MARKER_PREFIX = "__LC_SHELL_DONE__"
@@ -60,7 +60,7 @@ _SEARCH_RANGE_FOR_TOOL_PAIRS = 5
60
60
 
61
61
 
62
62
  class SummarizationMiddleware(AgentMiddleware):
63
- """Middleware that summarizes conversation history when token limits are approached.
63
+ """Summarizes conversation history when token limits are approached.
64
64
 
65
65
  This middleware monitors message token counts and automatically summarizes older
66
66
  messages when a threshold is reached, preserving recent messages and maintaining
@@ -19,6 +19,7 @@ from langchain.agents.middleware.types import (
19
19
  ModelCallResult,
20
20
  ModelRequest,
21
21
  ModelResponse,
22
+ OmitFromInput,
22
23
  )
23
24
  from langchain.tools import InjectedToolCallId
24
25
 
@@ -36,7 +37,7 @@ class Todo(TypedDict):
36
37
  class PlanningState(AgentState):
37
38
  """State schema for the todo middleware."""
38
39
 
39
- todos: NotRequired[list[Todo]]
40
+ todos: Annotated[NotRequired[list[Todo]], OmitFromInput]
40
41
  """List of todo items for tracking task progress."""
41
42
 
42
43