langchain 1.0.0a4__py3-none-any.whl → 1.0.0a5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/__init__.py +1 -1
- langchain/_internal/_lazy_import.py +2 -3
- langchain/_internal/_prompts.py +11 -18
- langchain/_internal/_typing.py +3 -3
- langchain/agents/_internal/_typing.py +2 -2
- langchain/agents/interrupt.py +14 -9
- langchain/agents/middleware/human_in_the_loop.py +9 -2
- langchain/agents/middleware/prompt_caching.py +11 -6
- langchain/agents/middleware/summarization.py +1 -1
- langchain/agents/middleware/types.py +2 -1
- langchain/agents/middleware_agent.py +29 -26
- langchain/agents/react_agent.py +86 -61
- langchain/agents/structured_output.py +29 -24
- langchain/agents/tool_node.py +71 -65
- langchain/chat_models/base.py +28 -32
- langchain/embeddings/base.py +4 -10
- langchain/embeddings/cache.py +5 -8
- langchain/storage/encoder_backed.py +7 -4
- {langchain-1.0.0a4.dist-info → langchain-1.0.0a5.dist-info}/METADATA +13 -17
- langchain-1.0.0a5.dist-info/RECORD +40 -0
- langchain-1.0.0a4.dist-info/RECORD +0 -40
- {langchain-1.0.0a4.dist-info → langchain-1.0.0a5.dist-info}/WHEEL +0 -0
- {langchain-1.0.0a4.dist-info → langchain-1.0.0a5.dist-info}/entry_points.txt +0 -0
- {langchain-1.0.0a4.dist-info → langchain-1.0.0a5.dist-info}/licenses/LICENSE +0 -0
langchain/agents/react_agent.py
CHANGED
@@ -11,7 +11,6 @@ from typing import (
     Any,
     Generic,
     Literal,
-    Union,
     cast,
     get_type_hints,
 )
@@ -104,12 +103,12 @@ class AgentStateWithStructuredResponsePydantic(AgentStatePydantic, Generic[StructuredResponseT]):
 
 PROMPT_RUNNABLE_NAME = "Prompt"
 
-Prompt = Union[
-    SystemMessage,
-    str,
-    Callable[[StateT], LanguageModelInput],
-    Runnable[StateT, LanguageModelInput],
-]
+Prompt = (
+    SystemMessage
+    | str
+    | Callable[[StateT], LanguageModelInput]
+    | Runnable[StateT, LanguageModelInput]
+)
 
 
 def _get_state_value(state: StateT, key: str, default: Any = None) -> Any:
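For orientation, the rewritten `Prompt` alias is the type accepted by the `prompt` parameter later in this module. A minimal sketch of values that satisfy each arm of the union, using only `langchain_core` imports; the variable names are illustrative:

```python
from langchain_core.messages import SystemMessage
from langchain_core.runnables import RunnableLambda

system = SystemMessage(content="You are a helpful assistant.")

# str arm
as_str = "You are a helpful assistant."

# SystemMessage arm
as_message = system


def as_callable(state) -> list:
    # Callable arm: receives the full graph state and returns the model input.
    return [system, *state["messages"]]


# Runnable arm: wraps the same logic in a Runnable.
as_runnable = RunnableLambda(as_callable)
```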
@@ -177,8 +176,9 @@ def _validate_chat_history(
     error_message = create_error_message(
         message="Found AIMessages with tool_calls that do not have a corresponding ToolMessage. "
         f"Here are the first few of those tool calls: {tool_calls_without_results[:3]}.\n\n"
-        "Every tool call (LLM requesting to call a tool) in the message history MUST have a corresponding ToolMessage "
-        "(result of a tool invocation to return to the LLM) - this is required by most LLM providers.",
+        "Every tool call (LLM requesting to call a tool) in the message history "
+        "MUST have a corresponding ToolMessage (result of a tool invocation to return to the LLM) -"
+        " this is required by most LLM providers.",
         error_code=ErrorCode.INVALID_CHAT_HISTORY,
     )
     raise ValueError(error_message)
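The requirement this error message describes can be made concrete with a small, self-contained sketch using `langchain_core` message types; the tool name and call id are made up:

```python
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage

# A history that passes this validation: the AIMessage's tool call is answered
# by a ToolMessage whose tool_call_id matches before the next model turn.
messages = [
    HumanMessage(content="What's the weather in Paris?"),
    AIMessage(
        content="",
        tool_calls=[{"name": "get_weather", "args": {"city": "Paris"}, "id": "call_1"}],
    ),
    ToolMessage(content="It is sunny in Paris.", tool_call_id="call_1"),
]
```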
@@ -189,12 +189,8 @@ class _AgentBuilder(Generic[StateT, ContextT, StructuredResponseT]):
 
     def __init__(
         self,
-        model: Union[
-            str,
-            BaseChatModel,
-            SyncOrAsync[[StateT, Runtime[ContextT]], BaseChatModel],
-        ],
-        tools: Union[Sequence[Union[BaseTool, Callable, dict[str, Any]]], ToolNode],
+        model: str | BaseChatModel | SyncOrAsync[[StateT, Runtime[ContextT]], BaseChatModel],
+        tools: Sequence[BaseTool | Callable | dict[str, Any]] | ToolNode,
         *,
         prompt: Prompt | None = None,
         response_format: ResponseFormat[StructuredResponseT] | None = None,
@@ -221,7 +217,8 @@ class _AgentBuilder(Generic[StateT, ContextT, StructuredResponseT]):
         if isinstance(model, Runnable) and not isinstance(model, BaseChatModel):
             msg = (
                 "Expected `model` to be a BaseChatModel or a string, got {type(model)}."
-                "The `model` parameter should not have pre-bound tools, simply pass the model and tools separately."
+                "The `model` parameter should not have pre-bound tools, "
+                "simply pass the model and tools separately."
             )
             raise ValueError(msg)
 
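The error above is the path hit when a tool-bound runnable is passed as `model`. A hedged sketch of the distinction, assuming `langchain-openai` is installed with an API key configured and that `create_agent` is importable from `langchain.agents` in this version:

```python
from langchain.agents import create_agent  # assumed import path in 1.0.0a5
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI  # assumes OPENAI_API_KEY is set


@tool
def get_weather(city: str) -> str:
    """Return a canned weather report for a city."""
    return f"It is sunny in {city}."


model = ChatOpenAI(model="gpt-4o")

# Rejected: bind_tools() returns a Runnable that is no longer a BaseChatModel,
# so the isinstance check above raises the ValueError.
# agent = create_agent(model.bind_tools([get_weather]), tools=[get_weather])

# Accepted: pass the bare chat model and the tools separately.
agent = create_agent(model, tools=[get_weather])
```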
@@ -313,7 +310,8 @@ class _AgentBuilder(Generic[StateT, ContextT, StructuredResponseT]):
             Command with structured response update if found, None otherwise
 
         Raises:
-            MultipleStructuredOutputsError: If multiple structured responses are returned and error handling is disabled
+            MultipleStructuredOutputsError: If multiple structured responses are returned
+                and error handling is disabled
             StructuredOutputParsingError: If parsing fails and error handling is disabled
         """
         if not isinstance(self.response_format, ToolStrategy) or not response.tool_calls:
@@ -457,7 +455,11 @@ class _AgentBuilder(Generic[StateT, ContextT, StructuredResponseT]):
         return model.bind(**kwargs)
 
     def _handle_structured_response_native(self, response: AIMessage) -> Command | None:
-        """Handle structured output using the native output. If native output is configured and there are no tool calls, parse using ProviderStrategyBinding."""
+        """Handle structured output using the native output.
+
+        If native output is configured and there are no tool calls,
+        parse using ProviderStrategyBinding.
+        """
         if self.native_output_binding is None:
             return None
         if response.tool_calls:
@@ -691,10 +693,10 @@ class _AgentBuilder(Generic[StateT, ContextT, StructuredResponseT]):
             return CallModelInputSchema
         return self._final_state_schema
 
-    def create_model_router(self) -> Callable[[StateT], Union[str, list[Send]]]:
+    def create_model_router(self) -> Callable[[StateT], str | list[Send]]:
         """Create routing function for model node conditional edges."""
 
-        def should_continue(state: StateT) -> Union[str, list[Send]]:
+        def should_continue(state: StateT) -> str | list[Send]:
             messages = _get_state_value(state, "messages")
             last_message = messages[-1]
 
@@ -731,10 +733,10 @@ class _AgentBuilder(Generic[StateT, ContextT, StructuredResponseT]):
 
     def create_post_model_hook_router(
         self,
-    ) -> Callable[[StateT], Union[str, list[Send]]]:
+    ) -> Callable[[StateT], str | list[Send]]:
         """Create a routing function for post_model_hook node conditional edges."""
 
-        def post_model_hook_router(state: StateT) -> Union[str, list[Send]]:
+        def post_model_hook_router(state: StateT) -> str | list[Send]:
             messages = _get_state_value(state, "messages")
 
             # Check if the last message is a ToolMessage from a structured tool.
@@ -882,7 +884,7 @@ class _AgentBuilder(Generic[StateT, ContextT, StructuredResponseT]):
 
 
 def _supports_native_structured_output(
-    model: Union[str, BaseChatModel, SyncOrAsync[[StateT, Runtime[ContextT]], BaseChatModel]],
+    model: str | BaseChatModel | SyncOrAsync[[StateT, Runtime[ContextT]], BaseChatModel],
 ) -> bool:
     """Check if a model supports native structured output.
 
@@ -903,20 +905,14 @@ def _supports_native_structured_output(
 
 
 def create_agent(  # noqa: D417
-    model: Union[
-        str,
-        BaseChatModel,
-        SyncOrAsync[[StateT, Runtime[ContextT]], BaseChatModel],
-    ],
-    tools: Union[Sequence[Union[BaseTool, Callable, dict[str, Any]]], ToolNode],
+    model: str | BaseChatModel | SyncOrAsync[[StateT, Runtime[ContextT]], BaseChatModel],
+    tools: Sequence[BaseTool | Callable | dict[str, Any]] | ToolNode,
     *,
     middleware: Sequence[AgentMiddleware] = (),
    prompt: Prompt | None = None,
-    response_format: Union[
-        ToolStrategy[StructuredResponseT],
-        ProviderStrategy[StructuredResponseT],
-        type[StructuredResponseT],
-    ]
+    response_format: ToolStrategy[StructuredResponseT]
+    | ProviderStrategy[StructuredResponseT]
+    | type[StructuredResponseT]
     | None = None,
     pre_model_hook: RunnableLike | None = None,
     post_model_hook: RunnableLike | None = None,
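A small sketch of a call that exercises the reworked signature: the model as a provider string, tools as a plain sequence, and a Pydantic class as the `type[StructuredResponseT]` arm of the `response_format` union. The import path and the `"openai:gpt-4o"` identifier are assumptions, not taken from this diff:

```python
from pydantic import BaseModel

from langchain.agents import create_agent  # assumed import path in 1.0.0a5


class WeatherReport(BaseModel):
    city: str
    conditions: str


def get_weather(city: str) -> str:
    """Return a canned weather report for a city."""
    return f"It is sunny in {city}."


agent = create_agent(
    "openai:gpt-4o",                # str arm of the model union
    tools=[get_weather],            # plain sequence of callables
    response_format=WeatherReport,  # type[StructuredResponseT] arm
)
```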
@@ -933,7 +929,8 @@ def create_agent( # noqa: D417
 ) -> CompiledStateGraph[StateT, ContextT]:
     """Creates an agent graph that calls tools in a loop until a stopping condition is met.
 
-    For more details on using `create_agent`, visit [Agents](https://langchain-ai.github.io/langgraph/agents/overview/) documentation.
+    For more details on using `create_agent`,
+    visit [Agents](https://langchain-ai.github.io/langgraph/agents/overview/) documentation.
 
     Args:
         model: The language model for the agent. Supports static and dynamic
@@ -957,14 +954,17 @@ def create_agent( # noqa: D417
             ```python
             from dataclasses import dataclass
 
+
             @dataclass
             class ModelContext:
                 model_name: str = "gpt-3.5-turbo"
 
+
             # Instantiate models globally
             gpt4_model = ChatOpenAI(model="gpt-4")
             gpt35_model = ChatOpenAI(model="gpt-3.5-turbo")
 
+
             def select_model(state: AgentState, runtime: Runtime[ModelContext]) -> ChatOpenAI:
                 model_name = runtime.context.model_name
                 model = gpt4_model if model_name == "gpt-4" else gpt35_model
@@ -977,25 +977,35 @@ def create_agent( # noqa: D417
             must be a subset of those specified in the `tools` parameter.
 
         tools: A list of tools or a ToolNode instance.
-            If an empty list is provided, the agent will consist of a single LLM node without tool calling.
+            If an empty list is provided, the agent will consist of a single LLM node
+            without tool calling.
         prompt: An optional prompt for the LLM. Can take a few different forms:
 
-            - str: This is converted to a SystemMessage and added to the beginning of the list of messages in state["messages"].
-            - SystemMessage: this is added to the beginning of the list of messages in state["messages"].
-            - Callable: This function should take in full graph state and the output is then passed to the language model.
-            - Runnable: This runnable should take in full graph state and the output is then passed to the language model.
+            - str: This is converted to a SystemMessage and added to the beginning
+              of the list of messages in state["messages"].
+            - SystemMessage: this is added to the beginning of the list of messages
+              in state["messages"].
+            - Callable: This function should take in full graph state and the output is then passed
+              to the language model.
+            - Runnable: This runnable should take in full graph state and the output is then passed
+              to the language model.
 
         response_format: An optional UsingToolStrategy configuration for structured responses.
 
-            If provided, the agent will handle structured output via tool calls during the normal conversation flow.
-            When the model calls a structured output tool, the response will be captured and returned in the 'structured_response' state key.
+            If provided, the agent will handle structured output via tool calls
+            during the normal conversation flow.
+            When the model calls a structured output tool, the response will be captured
+            and returned in the 'structured_response' state key.
             If not provided, `structured_response` will not be present in the output state.
 
             The UsingToolStrategy should contain:
-            - schemas: A sequence of ResponseSchema objects that define the structured output format
+
+            - schemas: A sequence of ResponseSchema objects that define
+              the structured output format
            - tool_choice: Either "required" or "auto" to control when structured output is used
 
             Each ResponseSchema contains:
+
            - schema: A Pydantic model that defines the structure
            - name: Optional custom name for the tool (defaults to model name)
            - description: Optional custom description (defaults to model docstring)
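As a concrete illustration of the `structured_response` state key described above, a sketch that assumes the `agent` built with `response_format=WeatherReport` in the earlier example and real model credentials:

```python
# Invoke the agent as usual; structured output is captured alongside messages.
result = agent.invoke(
    {"messages": [{"role": "user", "content": "What's the weather in Paris?"}]}
)

# The parsed structured output appears under the documented state key.
report = result["structured_response"]  # a WeatherReport instance
print(report.city, report.conditions)
```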
@@ -1005,11 +1015,15 @@ def create_agent( # noqa: D417
                 `response_format` requires the model to support tool calling
 
             !!! Note
-                Structured responses are handled directly in the model call node via tool calls, eliminating the need for separate structured response nodes.
-
-        pre_model_hook: An optional node to add before the `agent` node (i.e., the node that calls the LLM).
-            Useful for managing long message histories (e.g., message trimming, summarization, etc.).
-            Pre-model hook must be a callable or a runnable that takes in current graph state and returns a state update in the form of
+                Structured responses are handled directly in the model call node via tool calls,
+                eliminating the need for separate structured response nodes.
+
+        pre_model_hook: An optional node to add before the `agent` node
+            (i.e., the node that calls the LLM).
+            Useful for managing long message histories
+            (e.g., message trimming, summarization, etc.).
+            Pre-model hook must be a callable or a runnable that takes in current
+            graph state and returns a state update in the form of
            ```python
            # At least one of `messages` or `llm_input_messages` MUST be provided
            {
@@ -1024,11 +1038,13 @@ def create_agent( # noqa: D417
            ```
 
            !!! Important
-                At least one of `messages` or `llm_input_messages` MUST be provided and will be used as an input to the `agent` node.
+                At least one of `messages` or `llm_input_messages` MUST be provided
+                and will be used as an input to the `agent` node.
                The rest of the keys will be added to the graph state.
 
            !!! Warning
-                If you are returning `messages` in the pre-model hook, you should OVERWRITE the `messages` key by doing the following:
+                If you are returning `messages` in the pre-model hook,
+                you should OVERWRITE the `messages` key by doing the following:
 
                ```python
                {
@@ -1036,9 +1052,12 @@ def create_agent( # noqa: D417
                    ...
                }
                ```
-        post_model_hook: An optional node to add after the `agent` node (i.e., the node that calls the LLM).
-            Useful for implementing human-in-the-loop, guardrails, validation, or other post-processing.
-            Post-model hook must be a callable or a runnable that takes in current graph state and returns a state update.
+        post_model_hook: An optional node to add after the `agent` node
+            (i.e., the node that calls the LLM).
+            Useful for implementing human-in-the-loop, guardrails, validation,
+            or other post-processing.
+            Post-model hook must be a callable or a runnable that takes in
+            current graph state and returns a state update.
 
             !!! Note
                 Only available with `version="v2"`.
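A hedged sketch of a `pre_model_hook` that follows the Warning above by overwriting `messages`. It uses `trim_messages` with `token_counter=len`, so the "budget" is simply the last eight messages; `REMOVE_ALL_MESSAGES` is assumed to be available from `langgraph.graph.message` in the langgraph version this wheel targets:

```python
from langchain_core.messages import RemoveMessage, trim_messages
from langgraph.graph.message import REMOVE_ALL_MESSAGES  # assumed available


def pre_model_hook(state: dict) -> dict:
    # Keep only the most recent messages and OVERWRITE the `messages` key,
    # as the Warning above requires when returning `messages`.
    trimmed = trim_messages(
        state["messages"],
        max_tokens=8,        # with token_counter=len this means "last 8 messages"
        token_counter=len,
        strategy="last",
        start_on="human",
    )
    return {"messages": [RemoveMessage(id=REMOVE_ALL_MESSAGES), *trimmed]}


# agent = create_agent(model, tools=[get_weather], pre_model_hook=pre_model_hook)
```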
@@ -1047,12 +1066,14 @@ def create_agent( # noqa: D417
             Defaults to `AgentState` that defines those two keys.
         context_schema: An optional schema for runtime context.
         checkpointer: An optional checkpoint saver object. This is used for persisting
-            the state of the graph (e.g., as chat memory) for a single thread (e.g., a single conversation).
+            the state of the graph (e.g., as chat memory) for a single thread
+            (e.g., a single conversation).
         store: An optional store object. This is used for persisting data
             across multiple threads (e.g., multiple conversations / users).
         interrupt_before: An optional list of node names to interrupt before.
             Should be one of the following: "agent", "tools".
-            This is useful if you want to add a user confirmation or other interrupt before taking an action.
+            This is useful if you want to add a user confirmation or other interrupt
+            before taking an action.
         interrupt_after: An optional list of node names to interrupt after.
             Should be one of the following: "agent", "tools".
             This is useful if you want to return directly or run additional processing on an output.
@@ -1067,7 +1088,8 @@ def create_agent( # noqa: D417
             node using the [Send](https://langchain-ai.github.io/langgraph/concepts/low_level/#send)
             API.
         name: An optional name for the CompiledStateGraph.
-            This name will be automatically used when adding ReAct agent graph to another graph as a subgraph node -
+            This name will be automatically used when adding ReAct agent graph to
+            another graph as a subgraph node -
             particularly useful for building multi-agent systems.
 
     !!! warning "`config_schema` Deprecated"
@@ -1079,9 +1101,11 @@ def create_agent( # noqa: D417
        A compiled LangChain runnable that can be used for chat interactions.
 
    The "agent" node calls the language model with the messages list (after applying the prompt).
-    If the resulting AIMessage contains `tool_calls`, the graph will then call the ["tools"][langgraph.prebuilt.tool_node.ToolNode].
-    The "tools" node executes the tools (1 tool per `tool_call`) and adds the responses to the messages list as `ToolMessage` objects.
-    The agent node then calls the language model again.
+    If the resulting AIMessage contains `tool_calls`,
+    the graph will then call the ["tools"][langgraph.prebuilt.tool_node.ToolNode].
+    The "tools" node executes the tools (1 tool per `tool_call`)
+    and adds the responses to the messages list as `ToolMessage` objects.
+    The agent node then calls the language model again.
    The process repeats until no more `tool_calls` are present in the response.
    The agent then returns the full list of messages as a dictionary containing the key "messages".
 
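To make the loop described above concrete, a minimal invocation sketch; it assumes an `agent` compiled as in the earlier sketches and real model credentials, and only relies on the documented `{"messages": [...]}` input/output shape:

```python
# Input and output both use the "messages" key described above.
result = agent.invoke(
    {"messages": [{"role": "user", "content": "What's the weather in Paris?"}]}
)

# The final list interleaves HumanMessage, AIMessage (with tool_calls),
# ToolMessage, and the closing AIMessage produced once no tool calls remain.
for message in result["messages"]:
    print(type(message).__name__, "->", message.content)
```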
@@ -1143,7 +1167,8 @@ def create_agent( # noqa: D417
     # Handle deprecated config_schema parameter
     if (config_schema := deprecated_kwargs.pop("config_schema", MISSING)) is not MISSING:
         warn(
-            "`config_schema` is deprecated and will be removed. Please use `context_schema` instead.",
+            "`config_schema` is deprecated and will be removed. "
+            "Please use `context_schema` instead.",
             category=DeprecationWarning,
             stacklevel=2,
         )
@@ -1172,7 +1197,7 @@ def create_agent( # noqa: D417
         model=model,
         tools=tools,
         prompt=prompt,
-        response_format=cast("Optional[ResponseFormat[StructuredResponseT]]", response_format),
+        response_format=cast("ResponseFormat[StructuredResponseT] | None", response_format),
         pre_model_hook=pre_model_hook,
         post_model_hook=post_model_hook,
         state_schema=state_schema,
langchain/agents/structured_output.py
CHANGED
@@ -47,7 +47,8 @@ class MultipleStructuredOutputsError(StructuredOutputError):
         self.tool_names = tool_names
 
         super().__init__(
-            f"Model incorrectly returned multiple structured responses ({', '.join(tool_names)}) when only one is expected."
+            "Model incorrectly returned multiple structured responses "
+            f"({', '.join(tool_names)}) when only one is expected."
         )
 
 
@@ -67,7 +68,7 @@ class StructuredOutputValidationError(StructuredOutputError):
 
 
 def _parse_with_schema(
-    schema: Union[type[SchemaT], dict], schema_kind: SchemaKind, data: dict[str, Any]
+    schema: type[SchemaT] | dict, schema_kind: SchemaKind, data: dict[str, Any]
 ) -> Any:
     """Parse data using for any supported schema type.
 
@@ -98,7 +99,8 @@ class _SchemaSpec(Generic[SchemaT]):
     """Describes a structured output schema."""
 
     schema: type[SchemaT]
-    """The schema for the response, can be a Pydantic model, dataclass, TypedDict, or JSON schema dict."""
+    """The schema for the response, can be a Pydantic model, dataclass, TypedDict,
+    or JSON schema dict."""
 
     name: str
     """Name of the schema, used for tool calling.
@@ -178,15 +180,12 @@ class ToolStrategy(Generic[SchemaT]):
     """Schema specs for the tool calls."""
 
     tool_message_content: str | None
-    """The content of the tool message to be returned when the model calls an
-    artificial structured output tool."""
-    handle_errors: Union[
-        bool,
-        str,
-        type[Exception],
-        tuple[type[Exception], ...],
-        Callable[[Exception], str],
-    ]
+    """The content of the tool message to be returned when the model calls
+    an artificial structured output tool."""
+
+    handle_errors: (
+        bool | str | type[Exception] | tuple[type[Exception], ...] | Callable[[Exception], str]
+    )
     """Error handling strategy for structured output via ToolStrategy. Default is True.
 
     - True: Catch all errors with default error template
@@ -202,15 +201,16 @@ class ToolStrategy(Generic[SchemaT]):
         schema: type[SchemaT],
         *,
         tool_message_content: str | None = None,
-        handle_errors: Union[
-            bool,
-            str,
-            type[Exception],
-            tuple[type[Exception], ...],
-            Callable[[Exception], str],
-        ] = True,
+        handle_errors: bool
+        | str
+        | type[Exception]
+        | tuple[type[Exception], ...]
+        | Callable[[Exception], str] = True,
     ) -> None:
-        """Initialize ToolStrategy with schemas, tool message content, and error handling strategy."""
+        """Initialize ToolStrategy.
+
+        Initialize ToolStrategy with schemas, tool message content, and error handling strategy.
+        """
         self.schema = schema
         self.tool_message_content = tool_message_content
         self.handle_errors = handle_errors
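A sketch of the constructor shown in this hunk. The module path is assumed from this wheel's layout (`langchain/agents/structured_output.py`), and the callable passed to `handle_errors` is illustrative:

```python
from pydantic import BaseModel

from langchain.agents.structured_output import ToolStrategy  # assumed module path


class Movie(BaseModel):
    title: str
    year: int


# handle_errors accepts any arm of the union above: a bool, a template string,
# an exception type (or tuple of types), or a callable producing the retry message.
strategy = ToolStrategy(
    Movie,
    tool_message_content="Structured output captured.",
    handle_errors=lambda exc: f"Structured output failed ({exc}); please try again.",
)
```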
@@ -274,7 +274,8 @@ class OutputToolBinding(Generic[SchemaT]):
     """
 
     schema: type[SchemaT]
-    """The original schema provided for structured output (Pydantic model, dataclass, TypedDict, or JSON schema dict)."""
+    """The original schema provided for structured output
+    (Pydantic model, dataclass, TypedDict, or JSON schema dict)."""
 
     schema_kind: SchemaKind
     """Classification of the schema type for proper response construction."""
@@ -327,7 +328,8 @@ class ProviderStrategyBinding(Generic[SchemaT]):
     """
 
     schema: type[SchemaT]
-    """The original schema provided for structured output (Pydantic model, dataclass, TypedDict, or JSON schema dict)."""
+    """The original schema provided for structured output
+    (Pydantic model, dataclass, TypedDict, or JSON schema dict)."""
 
     schema_kind: SchemaKind
     """Classification of the schema type for proper response construction."""
@@ -368,7 +370,10 @@ class ProviderStrategyBinding(Generic[SchemaT]):
             data = json.loads(raw_text)
         except Exception as e:
             schema_name = getattr(self.schema, "__name__", "response_format")
-            msg = f"Native structured output expected valid JSON for {schema_name}, but parsing failed: {e}."
+            msg = (
+                f"Native structured output expected valid JSON for {schema_name}, "
+                f"but parsing failed: {e}."
+            )
             raise ValueError(msg) from e
 
         # Parse according to schema
@@ -400,4 +405,4 @@ class ProviderStrategyBinding(Generic[SchemaT]):
         return str(content)
 
 
-ResponseFormat = Union[ToolStrategy[SchemaT], ProviderStrategy[SchemaT]]
+ResponseFormat = ToolStrategy[SchemaT] | ProviderStrategy[SchemaT]