langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/__init__.py +1 -1
- langchain_core/_api/__init__.py +3 -4
- langchain_core/_api/beta_decorator.py +45 -70
- langchain_core/_api/deprecation.py +80 -80
- langchain_core/_api/path.py +22 -8
- langchain_core/_import_utils.py +10 -4
- langchain_core/agents.py +25 -21
- langchain_core/caches.py +53 -63
- langchain_core/callbacks/__init__.py +1 -8
- langchain_core/callbacks/base.py +341 -348
- langchain_core/callbacks/file.py +55 -44
- langchain_core/callbacks/manager.py +546 -683
- langchain_core/callbacks/stdout.py +29 -30
- langchain_core/callbacks/streaming_stdout.py +35 -36
- langchain_core/callbacks/usage.py +65 -70
- langchain_core/chat_history.py +48 -55
- langchain_core/document_loaders/base.py +46 -21
- langchain_core/document_loaders/langsmith.py +39 -36
- langchain_core/documents/__init__.py +0 -1
- langchain_core/documents/base.py +96 -74
- langchain_core/documents/compressor.py +12 -9
- langchain_core/documents/transformers.py +29 -28
- langchain_core/embeddings/fake.py +56 -57
- langchain_core/env.py +2 -3
- langchain_core/example_selectors/base.py +12 -0
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/example_selectors/semantic_similarity.py +21 -25
- langchain_core/exceptions.py +15 -9
- langchain_core/globals.py +4 -163
- langchain_core/indexing/api.py +132 -125
- langchain_core/indexing/base.py +64 -67
- langchain_core/indexing/in_memory.py +26 -6
- langchain_core/language_models/__init__.py +15 -27
- langchain_core/language_models/_utils.py +267 -117
- langchain_core/language_models/base.py +92 -177
- langchain_core/language_models/chat_models.py +547 -407
- langchain_core/language_models/fake.py +11 -11
- langchain_core/language_models/fake_chat_models.py +72 -118
- langchain_core/language_models/llms.py +168 -242
- langchain_core/load/dump.py +8 -11
- langchain_core/load/load.py +32 -28
- langchain_core/load/mapping.py +2 -4
- langchain_core/load/serializable.py +50 -56
- langchain_core/messages/__init__.py +36 -51
- langchain_core/messages/ai.py +377 -150
- langchain_core/messages/base.py +239 -47
- langchain_core/messages/block_translators/__init__.py +111 -0
- langchain_core/messages/block_translators/anthropic.py +470 -0
- langchain_core/messages/block_translators/bedrock.py +94 -0
- langchain_core/messages/block_translators/bedrock_converse.py +297 -0
- langchain_core/messages/block_translators/google_genai.py +530 -0
- langchain_core/messages/block_translators/google_vertexai.py +21 -0
- langchain_core/messages/block_translators/groq.py +143 -0
- langchain_core/messages/block_translators/langchain_v0.py +301 -0
- langchain_core/messages/block_translators/openai.py +1010 -0
- langchain_core/messages/chat.py +2 -3
- langchain_core/messages/content.py +1423 -0
- langchain_core/messages/function.py +7 -7
- langchain_core/messages/human.py +44 -38
- langchain_core/messages/modifier.py +3 -2
- langchain_core/messages/system.py +40 -27
- langchain_core/messages/tool.py +160 -58
- langchain_core/messages/utils.py +527 -638
- langchain_core/output_parsers/__init__.py +1 -14
- langchain_core/output_parsers/base.py +68 -104
- langchain_core/output_parsers/json.py +13 -17
- langchain_core/output_parsers/list.py +11 -33
- langchain_core/output_parsers/openai_functions.py +56 -74
- langchain_core/output_parsers/openai_tools.py +68 -109
- langchain_core/output_parsers/pydantic.py +15 -13
- langchain_core/output_parsers/string.py +6 -2
- langchain_core/output_parsers/transform.py +17 -60
- langchain_core/output_parsers/xml.py +34 -44
- langchain_core/outputs/__init__.py +1 -1
- langchain_core/outputs/chat_generation.py +26 -11
- langchain_core/outputs/chat_result.py +1 -3
- langchain_core/outputs/generation.py +17 -6
- langchain_core/outputs/llm_result.py +15 -8
- langchain_core/prompt_values.py +29 -123
- langchain_core/prompts/__init__.py +3 -27
- langchain_core/prompts/base.py +48 -63
- langchain_core/prompts/chat.py +259 -288
- langchain_core/prompts/dict.py +19 -11
- langchain_core/prompts/few_shot.py +84 -90
- langchain_core/prompts/few_shot_with_templates.py +14 -12
- langchain_core/prompts/image.py +19 -14
- langchain_core/prompts/loading.py +6 -8
- langchain_core/prompts/message.py +7 -8
- langchain_core/prompts/prompt.py +42 -43
- langchain_core/prompts/string.py +37 -16
- langchain_core/prompts/structured.py +43 -46
- langchain_core/rate_limiters.py +51 -60
- langchain_core/retrievers.py +52 -192
- langchain_core/runnables/base.py +1727 -1683
- langchain_core/runnables/branch.py +52 -73
- langchain_core/runnables/config.py +89 -103
- langchain_core/runnables/configurable.py +128 -130
- langchain_core/runnables/fallbacks.py +93 -82
- langchain_core/runnables/graph.py +127 -127
- langchain_core/runnables/graph_ascii.py +63 -41
- langchain_core/runnables/graph_mermaid.py +87 -70
- langchain_core/runnables/graph_png.py +31 -36
- langchain_core/runnables/history.py +145 -161
- langchain_core/runnables/passthrough.py +141 -144
- langchain_core/runnables/retry.py +84 -68
- langchain_core/runnables/router.py +33 -37
- langchain_core/runnables/schema.py +79 -72
- langchain_core/runnables/utils.py +95 -139
- langchain_core/stores.py +85 -131
- langchain_core/structured_query.py +11 -15
- langchain_core/sys_info.py +31 -32
- langchain_core/tools/__init__.py +1 -14
- langchain_core/tools/base.py +221 -247
- langchain_core/tools/convert.py +144 -161
- langchain_core/tools/render.py +10 -10
- langchain_core/tools/retriever.py +12 -19
- langchain_core/tools/simple.py +52 -29
- langchain_core/tools/structured.py +56 -60
- langchain_core/tracers/__init__.py +1 -9
- langchain_core/tracers/_streaming.py +6 -7
- langchain_core/tracers/base.py +103 -112
- langchain_core/tracers/context.py +29 -48
- langchain_core/tracers/core.py +142 -105
- langchain_core/tracers/evaluation.py +30 -34
- langchain_core/tracers/event_stream.py +162 -117
- langchain_core/tracers/langchain.py +34 -36
- langchain_core/tracers/log_stream.py +87 -49
- langchain_core/tracers/memory_stream.py +3 -3
- langchain_core/tracers/root_listeners.py +18 -34
- langchain_core/tracers/run_collector.py +8 -20
- langchain_core/tracers/schemas.py +0 -125
- langchain_core/tracers/stdout.py +3 -3
- langchain_core/utils/__init__.py +1 -4
- langchain_core/utils/_merge.py +47 -9
- langchain_core/utils/aiter.py +70 -66
- langchain_core/utils/env.py +12 -9
- langchain_core/utils/function_calling.py +139 -206
- langchain_core/utils/html.py +7 -8
- langchain_core/utils/input.py +6 -6
- langchain_core/utils/interactive_env.py +6 -2
- langchain_core/utils/iter.py +48 -45
- langchain_core/utils/json.py +14 -4
- langchain_core/utils/json_schema.py +159 -43
- langchain_core/utils/mustache.py +32 -25
- langchain_core/utils/pydantic.py +67 -40
- langchain_core/utils/strings.py +5 -5
- langchain_core/utils/usage.py +1 -1
- langchain_core/utils/utils.py +104 -62
- langchain_core/vectorstores/base.py +131 -179
- langchain_core/vectorstores/in_memory.py +113 -182
- langchain_core/vectorstores/utils.py +23 -17
- langchain_core/version.py +1 -1
- langchain_core-1.0.0.dist-info/METADATA +68 -0
- langchain_core-1.0.0.dist-info/RECORD +172 -0
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
- langchain_core/beta/__init__.py +0 -1
- langchain_core/beta/runnables/__init__.py +0 -1
- langchain_core/beta/runnables/context.py +0 -448
- langchain_core/memory.py +0 -116
- langchain_core/messages/content_blocks.py +0 -1435
- langchain_core/prompts/pipeline.py +0 -133
- langchain_core/pydantic_v1/__init__.py +0 -30
- langchain_core/pydantic_v1/dataclasses.py +0 -23
- langchain_core/pydantic_v1/main.py +0 -23
- langchain_core/tracers/langchain_v1.py +0 -23
- langchain_core/utils/loading.py +0 -31
- langchain_core/v1/__init__.py +0 -1
- langchain_core/v1/chat_models.py +0 -1047
- langchain_core/v1/messages.py +0 -755
- langchain_core-0.4.0.dev0.dist-info/METADATA +0 -108
- langchain_core-0.4.0.dev0.dist-info/RECORD +0 -177
- langchain_core-0.4.0.dev0.dist-info/entry_points.txt +0 -4
langchain_core/output_parsers/openai_functions.py

@@ -3,7 +3,7 @@
 import copy
 import json
 from types import GenericAlias
-from typing import Any
+from typing import Any
 
 import jsonpatch  # type: ignore[import-untyped]
 from pydantic import BaseModel, model_validator
@@ -17,7 +17,6 @@ from langchain_core.output_parsers import (
 )
 from langchain_core.output_parsers.json import parse_partial_json
 from langchain_core.outputs import ChatGeneration, Generation
-from langchain_core.v1.messages import AIMessage
 
 
 class OutputFunctionsParser(BaseGenerationOutputParser[Any]):
@@ -27,27 +26,19 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):
     """Whether to only return the arguments to the function call."""
 
     @override
-    def parse_result(
-        self, result: Union[list[Generation], AIMessage], *, partial: bool = False
-    ) -> Any:
+    def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
         """Parse the result of an LLM call to a JSON object.
 
         Args:
             result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects.
+            partial: Whether to parse partial JSON objects.
 
         Returns:
             The parsed JSON object.
 
         Raises:
-            OutputParserException
+            `OutputParserException`: If the output is not valid JSON.
         """
-        if isinstance(result, AIMessage):
-            msg = (
-                "This output parser does not support v1 AIMessages. Use "
-                "JsonOutputToolsParser instead."
-            )
-            raise TypeError(msg)
         generation = result[0]
         if not isinstance(generation, ChatGeneration):
             msg = "This output parser can only be used with a chat generation."
@@ -65,7 +56,7 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):
 
 
 class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
-    """Parse an output as the
+    """Parse an output as the JSON object."""
 
     strict: bool = False
     """Whether to allow non-JSON-compliant strings.
@@ -83,30 +74,22 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
         return "json_functions"
 
     @override
-    def _diff(self, prev:
+    def _diff(self, prev: Any | None, next: Any) -> Any:
         return jsonpatch.make_patch(prev, next).patch
 
-    def parse_result(
-        self, result: Union[list[Generation], AIMessage], *, partial: bool = False
-    ) -> Any:
+    def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
         """Parse the result of an LLM call to a JSON object.
 
         Args:
             result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects.
+            partial: Whether to parse partial JSON objects.
 
         Returns:
             The parsed JSON object.
 
         Raises:
-
+            `OutputParserException`: If the output is not valid JSON.
         """
-        if isinstance(result, AIMessage):
-            msg = (
-                "This output parser does not support v1 AIMessages. Use "
-                "JsonOutputToolsParser instead."
-            )
-            raise TypeError(msg)
         if len(result) != 1:
             msg = f"Expected exactly one result, but got {len(result)}"
             raise OutputParserException(msg)
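As context for the narrowed `parse_result` signatures above, here is a minimal sketch of driving `JsonOutputFunctionsParser` with a plain `list[Generation]`, the only input type accepted after this change. The message payload and the expected output are illustrative assumptions, not taken from the package.

```python
import json

from langchain_core.messages import AIMessage
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain_core.outputs import ChatGeneration

# A legacy OpenAI function-call payload carried in additional_kwargs
# (hypothetical values, for illustration only).
message = AIMessage(
    content="",
    additional_kwargs={
        "function_call": {
            "name": "get_weather",
            "arguments": json.dumps({"city": "Paris"}),
        }
    },
)

parser = JsonOutputFunctionsParser()
# parse_result now accepts only list[Generation]; the v1 AIMessage branch is gone.
print(parser.parse_result([ChatGeneration(message=message)]))
# Expected (assuming the default args_only=True): {'city': 'Paris'}
```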
@@ -172,19 +155,17 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
 
 
 class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):
-    """Parse an output as the element of the
+    """Parse an output as the element of the JSON object."""
 
     key_name: str
     """The name of the key to return."""
 
-    def parse_result(
-        self, result: Union[list[Generation], AIMessage], *, partial: bool = False
-    ) -> Any:
+    def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
         """Parse the result of an LLM call to a JSON object.
 
         Args:
             result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects.
+            partial: Whether to parse partial JSON objects.
 
         Returns:
             The parsed JSON object.
@@ -196,48 +177,50 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):
 
 
 class PydanticOutputFunctionsParser(OutputFunctionsParser):
-    """Parse an output as a
+    """Parse an output as a Pydantic object.
 
-    This parser is used to parse the output of a
-
+    This parser is used to parse the output of a chat model that uses OpenAI function
+    format to invoke functions.
 
-    The parser extracts the function call invocation and matches
-
+    The parser extracts the function call invocation and matches them to the Pydantic
+    schema provided.
 
-    An exception will be raised if the function call does not match
-    the provided schema.
+    An exception will be raised if the function call does not match the provided schema.
 
     Example:
-
-
-
-
-
-        "
-
-
-
-
-
-        chat_generation = ChatGeneration(message=message)
+        ```python
+        message = AIMessage(
+            content="This is a test message",
+            additional_kwargs={
+                "function_call": {
+                    "name": "cookie",
+                    "arguments": json.dumps({"name": "value", "age": 10}),
+                }
+            },
+        )
+        chat_generation = ChatGeneration(message=message)
 
-        class Cookie(BaseModel):
-            name: str
-            age: int
 
-
-
+        class Cookie(BaseModel):
+            name: str
+            age: int
+
+
+        class Dog(BaseModel):
+            species: str
 
-
-
-
-
-
+
+        # Full output
+        parser = PydanticOutputFunctionsParser(
+            pydantic_schema={"cookie": Cookie, "dog": Dog}
+        )
+        result = parser.parse_result([chat_generation])
+        ```
 
     """
 
-    pydantic_schema:
-    """The
+    pydantic_schema: type[BaseModel] | dict[str, type[BaseModel]]
+    """The Pydantic schema to parse the output with.
 
     If multiple schemas are provided, then the function name will be used to
     determine which schema to use.
@@ -246,7 +229,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
     @model_validator(mode="before")
     @classmethod
     def validate_schema(cls, values: dict) -> Any:
-        """Validate the
+        """Validate the Pydantic schema.
 
         Args:
             values: The values to validate.
@@ -255,7 +238,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
             The validated values.
 
         Raises:
-            ValueError: If the schema is not a
+            ValueError: If the schema is not a Pydantic schema.
         """
         schema = values["pydantic_schema"]
         if "args_only" not in values:
@@ -273,14 +256,15 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
         return values
 
     @override
-    def parse_result(
-        self, result: Union[list[Generation], AIMessage], *, partial: bool = False
-    ) -> Any:
+    def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
         """Parse the result of an LLM call to a JSON object.
 
         Args:
             result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects.
+            partial: Whether to parse partial JSON objects.
+
+        Raises:
+            ValueError: If the Pydantic schema is not valid.
 
         Returns:
             The parsed JSON object.
@@ -303,26 +287,24 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
         elif issubclass(pydantic_schema, BaseModelV1):
             pydantic_args = pydantic_schema.parse_raw(args)
         else:
-            msg = f"Unsupported
+            msg = f"Unsupported Pydantic schema: {pydantic_schema}"
             raise ValueError(msg)
         return pydantic_args
 
 
 class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):
-    """Parse an output as an attribute of a
+    """Parse an output as an attribute of a Pydantic object."""
 
     attr_name: str
     """The name of the attribute to return."""
 
     @override
-    def parse_result(
-        self, result: Union[list[Generation], AIMessage], *, partial: bool = False
-    ) -> Any:
+    def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
         """Parse the result of an LLM call to a JSON object.
 
         Args:
             result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects.
+            partial: Whether to parse partial JSON objects.
 
         Returns:
             The parsed JSON object.
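The restored docstring example above can be exercised roughly as follows; this sketch reuses the docstring's `cookie` payload and adds `PydanticAttrOutputFunctionsParser` to pull out a single attribute. The concrete values are illustrative, not taken from the package.

```python
import json

from langchain_core.messages import AIMessage
from langchain_core.output_parsers.openai_functions import (
    PydanticAttrOutputFunctionsParser,
)
from langchain_core.outputs import ChatGeneration
from pydantic import BaseModel


class Cookie(BaseModel):
    name: str
    age: int


message = AIMessage(
    content="",
    additional_kwargs={
        "function_call": {
            "name": "cookie",
            "arguments": json.dumps({"name": "value", "age": 10}),
        }
    },
)

# Validates the arguments against Cookie, then returns only the `name` attribute.
parser = PydanticAttrOutputFunctionsParser(pydantic_schema=Cookie, attr_name="name")
print(parser.parse_result([ChatGeneration(message=message)]))  # -> 'value'
```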
langchain_core/output_parsers/openai_tools.py

@@ -4,7 +4,7 @@ import copy
 import json
 import logging
 from json import JSONDecodeError
-from typing import Annotated, Any
+from typing import Annotated, Any
 
 from pydantic import SkipValidation, ValidationError
 
@@ -16,7 +16,6 @@ from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
 from langchain_core.outputs import ChatGeneration, Generation
 from langchain_core.utils.json import parse_partial_json
 from langchain_core.utils.pydantic import TypeBaseModel
-from langchain_core.v1.messages import AIMessage as AIMessageV1
 
 logger = logging.getLogger(__name__)
 
@@ -27,15 +26,14 @@ def parse_tool_call(
     partial: bool = False,
     strict: bool = False,
     return_id: bool = True,
-) ->
+) -> dict[str, Any] | None:
     """Parse a single tool call.
 
     Args:
         raw_tool_call: The raw tool call to parse.
-        partial: Whether to parse partial JSON.
+        partial: Whether to parse partial JSON.
         strict: Whether to allow non-JSON-compliant strings.
-
-        return_id: Whether to return the tool call id. Default is True.
+        return_id: Whether to return the tool call id.
 
     Returns:
         The parsed tool call.
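A small sketch of `parse_tool_call` on an OpenAI-style raw tool call, matching the clarified `dict[str, Any] | None` return type above. The id, name, and arguments are made-up values, and the printed shape is an assumption about the current tool-call dict format.

```python
import json

from langchain_core.output_parsers.openai_tools import parse_tool_call

# An OpenAI-style raw tool call (hypothetical values).
raw_tool_call = {
    "id": "call_abc123",
    "type": "function",
    "function": {
        "name": "multiply",
        "arguments": json.dumps({"a": 3, "b": 4}),
    },
}

# With return_id=True (the default) the parsed dict keeps the call id.
print(parse_tool_call(raw_tool_call, strict=False, return_id=True))
# e.g. {'name': 'multiply', 'args': {'a': 3, 'b': 4}, 'id': 'call_abc123', 'type': 'tool_call'}
```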
@@ -76,7 +74,7 @@
 
 def make_invalid_tool_call(
     raw_tool_call: dict[str, Any],
-    error_msg:
+    error_msg: str | None,
 ) -> InvalidToolCall:
     """Create an InvalidToolCall from a raw tool call.
 
@@ -106,10 +104,9 @@ def parse_tool_calls(
 
     Args:
         raw_tool_calls: The raw tool calls to parse.
-        partial: Whether to parse partial JSON.
+        partial: Whether to parse partial JSON.
         strict: Whether to allow non-JSON-compliant strings.
-
-        return_id: Whether to return the tool call id. Default is True.
+        return_id: Whether to return the tool call id.
 
     Returns:
         The parsed tool calls.
@@ -149,7 +146,7 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
     first_tool_only: bool = False
     """Whether to return only the first tool call.
 
-    If False
+    If `False`, the result will be a list of tool calls, or an empty list
     if no tool calls are found.
 
     If true, and multiple tool calls are found, only the first one will be returned,
@@ -157,18 +154,15 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
     If no tool calls are found, None will be returned.
     """
 
-    def parse_result(
-        self, result: Union[list[Generation], AIMessageV1], *, partial: bool = False
-    ) -> Any:
+    def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
         """Parse the result of an LLM call to a list of tool calls.
 
         Args:
             result: The result of the LLM call.
             partial: Whether to parse partial JSON.
-                If True
+                If `True`, the output will be a JSON object containing
                 all the keys that have been returned so far.
-                If False
-                Default is False.
+                If `False`, the output will be the full JSON object.
 
         Returns:
             The parsed tool calls.
@@ -176,45 +170,31 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
         Raises:
             OutputParserException: If the output is not valid JSON.
         """
-
-
-
-
-
-
-
-                raise OutputParserException(msg)
-            message = generation.message
-            if isinstance(message, AIMessage) and message.tool_calls:
-                tool_calls = [dict(tc) for tc in message.tool_calls]
-                for tool_call in tool_calls:
-                    if not self.return_id:
-                        _ = tool_call.pop("id")
-            else:
-                try:
-                    raw_tool_calls = copy.deepcopy(
-                        message.additional_kwargs["tool_calls"]
-                    )
-                except KeyError:
-                    return []
-                tool_calls = parse_tool_calls(
-                    raw_tool_calls,
-                    partial=partial,
-                    strict=self.strict,
-                    return_id=self.return_id,
-                )
-        elif result.tool_calls:
-            # v1 message
-            tool_calls = [dict(tc) for tc in result.tool_calls]
+        generation = result[0]
+        if not isinstance(generation, ChatGeneration):
+            msg = "This output parser can only be used with a chat generation."
+            raise OutputParserException(msg)
+        message = generation.message
+        if isinstance(message, AIMessage) and message.tool_calls:
+            tool_calls = [dict(tc) for tc in message.tool_calls]
             for tool_call in tool_calls:
                 if not self.return_id:
                     _ = tool_call.pop("id")
         else:
-
-
+            try:
+                raw_tool_calls = copy.deepcopy(message.additional_kwargs["tool_calls"])
+            except KeyError:
+                return []
+            tool_calls = parse_tool_calls(
+                raw_tool_calls,
+                partial=partial,
+                strict=self.strict,
+                return_id=self.return_id,
+            )
         # for backwards compatibility
         for tc in tool_calls:
             tc["type"] = tc.pop("name")
+
         if self.first_tool_only:
             return tool_calls[0] if tool_calls else None
         return tool_calls
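A minimal sketch of the surviving code path in `JsonOutputToolsParser.parse_result`: tool calls are read from `message.tool_calls`, and for backwards compatibility each entry's `name` key is renamed to `type`. The tool call values here are illustrative.

```python
from langchain_core.messages import AIMessage
from langchain_core.output_parsers.openai_tools import JsonOutputToolsParser
from langchain_core.outputs import ChatGeneration

# A structured tool call attached to the message itself (hypothetical values).
message = AIMessage(
    content="",
    tool_calls=[
        {"name": "multiply", "args": {"a": 3, "b": 4}, "id": "call_1", "type": "tool_call"}
    ],
)

parser = JsonOutputToolsParser(return_id=False)
print(parser.parse_result([ChatGeneration(message=message)]))
# e.g. [{'args': {'a': 3, 'b': 4}, 'type': 'multiply'}]
```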
@@ -237,63 +217,50 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
     key_name: str
     """The type of tools to return."""
 
-    def parse_result(
-        self, result: Union[list[Generation], AIMessageV1], *, partial: bool = False
-    ) -> Any:
+    def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
         """Parse the result of an LLM call to a list of tool calls.
 
         Args:
             result: The result of the LLM call.
             partial: Whether to parse partial JSON.
-                If True
+                If `True`, the output will be a JSON object containing
                 all the keys that have been returned so far.
-                If False
-
+                If `False`, the output will be the full JSON object.
+
+        Raises:
+            OutputParserException: If the generation is not a chat generation.
 
         Returns:
             The parsed tool calls.
         """
-
-
-
-
-
-
-
-            parsed_tool_calls = [dict(tc) for tc in message.tool_calls]
-            for tool_call in parsed_tool_calls:
-                if not self.return_id:
-                    _ = tool_call.pop("id")
-        else:
-            try:
-                raw_tool_calls = copy.deepcopy(
-                    message.additional_kwargs["tool_calls"]
-                )
-            except KeyError:
-                if self.first_tool_only:
-                    return None
-                return []
-            parsed_tool_calls = parse_tool_calls(
-                raw_tool_calls,
-                partial=partial,
-                strict=self.strict,
-                return_id=self.return_id,
-            )
-        elif result.tool_calls:
-            # v1 message
-            parsed_tool_calls = [dict(tc) for tc in result.tool_calls]
+        generation = result[0]
+        if not isinstance(generation, ChatGeneration):
+            msg = "This output parser can only be used with a chat generation."
+            raise OutputParserException(msg)
+        message = generation.message
+        if isinstance(message, AIMessage) and message.tool_calls:
+            parsed_tool_calls = [dict(tc) for tc in message.tool_calls]
             for tool_call in parsed_tool_calls:
                 if not self.return_id:
                     _ = tool_call.pop("id")
         else:
-
-
-
-
+            try:
+                # This exists purely for backward compatibility / cached messages
+                # All new messages should use `message.tool_calls`
+                raw_tool_calls = copy.deepcopy(message.additional_kwargs["tool_calls"])
+            except KeyError:
+                if self.first_tool_only:
+                    return None
+                return []
+            parsed_tool_calls = parse_tool_calls(
+                raw_tool_calls,
+                partial=partial,
+                strict=self.strict,
+                return_id=self.return_id,
+            )
         # For backwards compatibility
         for tc in parsed_tool_calls:
             tc["type"] = tc.pop("name")
-
         if self.first_tool_only:
             parsed_result = list(
                 filter(lambda x: x["type"] == self.key_name, parsed_tool_calls)
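A sketch of `JsonOutputKeyToolsParser` filtering on `key_name`, assuming the default `return_id=False`; with `first_tool_only=True` only the first matching call's arguments come back. The values are made up.

```python
from langchain_core.messages import AIMessage
from langchain_core.output_parsers.openai_tools import JsonOutputKeyToolsParser
from langchain_core.outputs import ChatGeneration

message = AIMessage(
    content="",
    tool_calls=[
        {"name": "multiply", "args": {"a": 3, "b": 4}, "id": "call_1", "type": "tool_call"},
        {"name": "add", "args": {"a": 1, "b": 2}, "id": "call_2", "type": "tool_call"},
    ],
)

# Keep only calls whose name matches key_name; return the first one's args.
parser = JsonOutputKeyToolsParser(key_name="add", first_tool_only=True)
print(parser.parse_result([ChatGeneration(message=message)]))
# e.g. {'a': 1, 'b': 2}
```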
@@ -333,24 +300,23 @@ class PydanticToolsParser(JsonOutputToolsParser):
 
     # TODO: Support more granular streaming of objects. Currently only streams once all
     # Pydantic object fields are present.
-    def parse_result(
-        self, result: Union[list[Generation], AIMessageV1], *, partial: bool = False
-    ) -> Any:
+    def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
         """Parse the result of an LLM call to a list of Pydantic objects.
 
         Args:
             result: The result of the LLM call.
             partial: Whether to parse partial JSON.
-                If True
+                If `True`, the output will be a JSON object containing
                 all the keys that have been returned so far.
-                If False
-                Default is False.
+                If `False`, the output will be the full JSON object.
 
         Returns:
             The parsed Pydantic objects.
 
         Raises:
-
+            ValueError: If the tool call arguments are not a dict.
+            ValidationError: If the tool call arguments do not conform
+                to the Pydantic model.
         """
         json_results = super().parse_result(result, partial=partial)
         if not json_results:
@@ -373,19 +339,12 @@
             except (ValidationError, ValueError):
                 if partial:
                     continue
-                has_max_tokens_stop_reason =
-
-
-
-
-
-                    if isinstance(generation, ChatGeneration)
-                )
-        else:
-            # v1 message
-            has_max_tokens_stop_reason = (
-                result.response_metadata.get("stop_reason") == "max_tokens"
-            )
+                has_max_tokens_stop_reason = any(
+                    generation.message.response_metadata.get("stop_reason")
+                    == "max_tokens"
+                    for generation in result
+                    if isinstance(generation, ChatGeneration)
+                )
                 if has_max_tokens_stop_reason:
                     logger.exception(_MAX_TOKENS_ERROR)
                 raise
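A sketch of `PydanticToolsParser`, which validates each tool call's arguments against the supplied model whose class name matches the call name; the reworked `has_max_tokens_stop_reason` check above only comes into play when validation fails on a truncated generation. The values here are illustrative.

```python
from langchain_core.messages import AIMessage
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.outputs import ChatGeneration
from pydantic import BaseModel


class Multiply(BaseModel):
    a: int
    b: int


message = AIMessage(
    content="",
    tool_calls=[
        {"name": "Multiply", "args": {"a": 3, "b": 4}, "id": "call_1", "type": "tool_call"}
    ],
)

parser = PydanticToolsParser(tools=[Multiply])
print(parser.parse_result([ChatGeneration(message=message)]))
# e.g. [Multiply(a=3, b=4)]
```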
langchain_core/output_parsers/pydantic.py

@@ -1,7 +1,7 @@
 """Output parsers using Pydantic."""
 
 import json
-from typing import Annotated, Generic
+from typing import Annotated, Generic
 
 import pydantic
 from pydantic import SkipValidation
@@ -14,14 +14,13 @@ from langchain_core.utils.pydantic import (
     PydanticBaseModel,
     TBaseModel,
 )
-from langchain_core.v1.messages import AIMessage
 
 
 class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
-    """Parse an output using a
+    """Parse an output using a Pydantic model."""
 
     pydantic_object: Annotated[type[TBaseModel], SkipValidation()]
-    """The
+    """The Pydantic model to parse."""
 
     def _parse_obj(self, obj: dict) -> TBaseModel:
         try:
@@ -44,19 +43,22 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
         return OutputParserException(msg, llm_output=json_string)
 
     def parse_result(
-        self, result:
-    ) ->
-        """Parse the result of an LLM call to a
+        self, result: list[Generation], *, partial: bool = False
+    ) -> TBaseModel | None:
+        """Parse the result of an LLM call to a Pydantic object.
 
         Args:
             result: The result of the LLM call.
             partial: Whether to parse partial JSON objects.
-                If True
+                If `True`, the output will be a JSON object containing
                 all the keys that have been returned so far.
-
+
+        Raises:
+            `OutputParserException`: If the result is not valid JSON
+                or does not conform to the Pydantic model.
 
         Returns:
-            The parsed
+            The parsed Pydantic object.
         """
         try:
             json_object = super().parse_result(result)
@@ -67,13 +69,13 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
             raise
 
     def parse(self, text: str) -> TBaseModel:
-        """Parse the output of an LLM call to a
+        """Parse the output of an LLM call to a Pydantic object.
 
         Args:
             text: The output of the LLM call.
 
         Returns:
-            The parsed
+            The parsed Pydantic object.
         """
         return super().parse(text)
 
@@ -104,7 +106,7 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
     @property
     @override
     def OutputType(self) -> type[TBaseModel]:
-        """Return the
+        """Return the Pydantic model."""
        return self.pydantic_object
 
 
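A sketch of `PydanticOutputParser` in use; the JSON string stands in for a model response and is a made-up value.

```python
from langchain_core.output_parsers import PydanticOutputParser
from pydantic import BaseModel


class Joke(BaseModel):
    setup: str
    punchline: str


parser = PydanticOutputParser(pydantic_object=Joke)

# parse() validates the raw LLM text into a Joke instance.
joke = parser.parse(
    '{"setup": "Why did the chicken cross the road?", '
    '"punchline": "To get to the other side."}'
)
print(joke.punchline)
```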
langchain_core/output_parsers/string.py

@@ -1,5 +1,7 @@
 """String output parser."""
 
+from typing_extensions import override
+
 from langchain_core.output_parsers.transform import BaseTransformOutputParser
 
 
@@ -17,9 +19,10 @@ class StrOutputParser(BaseTransformOutputParser[str]):
 
     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the
+        """Get the namespace of the LangChain object.
 
-
+        Returns:
+            `["langchain", "schema", "output_parser"]`
         """
         return ["langchain", "schema", "output_parser"]
 
@@ -28,6 +31,7 @@ class StrOutputParser(BaseTransformOutputParser[str]):
         """Return the output parser type for serialization."""
         return "default"
 
+    @override
    def parse(self, text: str) -> str:
         """Returns the input text with no changes."""
         return text