langchain-core 1.0.0a6__py3-none-any.whl → 1.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (165)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +3 -4
  3. langchain_core/_api/beta_decorator.py +23 -26
  4. langchain_core/_api/deprecation.py +51 -64
  5. langchain_core/_api/path.py +3 -6
  6. langchain_core/_import_utils.py +3 -4
  7. langchain_core/agents.py +55 -48
  8. langchain_core/caches.py +65 -66
  9. langchain_core/callbacks/__init__.py +1 -8
  10. langchain_core/callbacks/base.py +321 -336
  11. langchain_core/callbacks/file.py +44 -44
  12. langchain_core/callbacks/manager.py +454 -514
  13. langchain_core/callbacks/stdout.py +29 -30
  14. langchain_core/callbacks/streaming_stdout.py +32 -32
  15. langchain_core/callbacks/usage.py +60 -57
  16. langchain_core/chat_history.py +53 -68
  17. langchain_core/document_loaders/base.py +27 -25
  18. langchain_core/document_loaders/blob_loaders.py +1 -1
  19. langchain_core/document_loaders/langsmith.py +44 -48
  20. langchain_core/documents/__init__.py +23 -3
  21. langchain_core/documents/base.py +102 -94
  22. langchain_core/documents/compressor.py +10 -10
  23. langchain_core/documents/transformers.py +34 -35
  24. langchain_core/embeddings/fake.py +50 -54
  25. langchain_core/example_selectors/length_based.py +2 -2
  26. langchain_core/example_selectors/semantic_similarity.py +28 -32
  27. langchain_core/exceptions.py +21 -20
  28. langchain_core/globals.py +3 -151
  29. langchain_core/indexing/__init__.py +1 -1
  30. langchain_core/indexing/api.py +121 -126
  31. langchain_core/indexing/base.py +73 -75
  32. langchain_core/indexing/in_memory.py +4 -6
  33. langchain_core/language_models/__init__.py +14 -29
  34. langchain_core/language_models/_utils.py +58 -61
  35. langchain_core/language_models/base.py +82 -172
  36. langchain_core/language_models/chat_models.py +329 -402
  37. langchain_core/language_models/fake.py +11 -11
  38. langchain_core/language_models/fake_chat_models.py +42 -36
  39. langchain_core/language_models/llms.py +189 -269
  40. langchain_core/load/dump.py +9 -12
  41. langchain_core/load/load.py +18 -28
  42. langchain_core/load/mapping.py +2 -4
  43. langchain_core/load/serializable.py +42 -40
  44. langchain_core/messages/__init__.py +10 -16
  45. langchain_core/messages/ai.py +148 -148
  46. langchain_core/messages/base.py +53 -51
  47. langchain_core/messages/block_translators/__init__.py +19 -22
  48. langchain_core/messages/block_translators/anthropic.py +6 -6
  49. langchain_core/messages/block_translators/bedrock_converse.py +5 -5
  50. langchain_core/messages/block_translators/google_genai.py +10 -7
  51. langchain_core/messages/block_translators/google_vertexai.py +4 -32
  52. langchain_core/messages/block_translators/groq.py +117 -21
  53. langchain_core/messages/block_translators/langchain_v0.py +5 -5
  54. langchain_core/messages/block_translators/openai.py +11 -11
  55. langchain_core/messages/chat.py +2 -6
  56. langchain_core/messages/content.py +339 -330
  57. langchain_core/messages/function.py +6 -10
  58. langchain_core/messages/human.py +24 -31
  59. langchain_core/messages/modifier.py +2 -2
  60. langchain_core/messages/system.py +19 -29
  61. langchain_core/messages/tool.py +74 -90
  62. langchain_core/messages/utils.py +484 -510
  63. langchain_core/output_parsers/__init__.py +13 -10
  64. langchain_core/output_parsers/base.py +61 -61
  65. langchain_core/output_parsers/format_instructions.py +9 -4
  66. langchain_core/output_parsers/json.py +12 -10
  67. langchain_core/output_parsers/list.py +21 -23
  68. langchain_core/output_parsers/openai_functions.py +49 -47
  69. langchain_core/output_parsers/openai_tools.py +30 -23
  70. langchain_core/output_parsers/pydantic.py +13 -14
  71. langchain_core/output_parsers/string.py +5 -5
  72. langchain_core/output_parsers/transform.py +15 -17
  73. langchain_core/output_parsers/xml.py +35 -34
  74. langchain_core/outputs/__init__.py +1 -1
  75. langchain_core/outputs/chat_generation.py +18 -18
  76. langchain_core/outputs/chat_result.py +1 -3
  77. langchain_core/outputs/generation.py +16 -16
  78. langchain_core/outputs/llm_result.py +10 -10
  79. langchain_core/prompt_values.py +13 -19
  80. langchain_core/prompts/__init__.py +3 -27
  81. langchain_core/prompts/base.py +81 -86
  82. langchain_core/prompts/chat.py +308 -351
  83. langchain_core/prompts/dict.py +6 -6
  84. langchain_core/prompts/few_shot.py +81 -88
  85. langchain_core/prompts/few_shot_with_templates.py +11 -13
  86. langchain_core/prompts/image.py +12 -14
  87. langchain_core/prompts/loading.py +4 -6
  88. langchain_core/prompts/message.py +7 -7
  89. langchain_core/prompts/prompt.py +24 -39
  90. langchain_core/prompts/string.py +26 -10
  91. langchain_core/prompts/structured.py +49 -53
  92. langchain_core/rate_limiters.py +51 -60
  93. langchain_core/retrievers.py +61 -198
  94. langchain_core/runnables/base.py +1551 -1656
  95. langchain_core/runnables/branch.py +68 -70
  96. langchain_core/runnables/config.py +72 -89
  97. langchain_core/runnables/configurable.py +145 -161
  98. langchain_core/runnables/fallbacks.py +102 -96
  99. langchain_core/runnables/graph.py +91 -97
  100. langchain_core/runnables/graph_ascii.py +27 -28
  101. langchain_core/runnables/graph_mermaid.py +42 -51
  102. langchain_core/runnables/graph_png.py +43 -16
  103. langchain_core/runnables/history.py +175 -177
  104. langchain_core/runnables/passthrough.py +151 -167
  105. langchain_core/runnables/retry.py +46 -51
  106. langchain_core/runnables/router.py +30 -35
  107. langchain_core/runnables/schema.py +75 -80
  108. langchain_core/runnables/utils.py +60 -67
  109. langchain_core/stores.py +85 -121
  110. langchain_core/structured_query.py +8 -8
  111. langchain_core/sys_info.py +29 -29
  112. langchain_core/tools/__init__.py +1 -14
  113. langchain_core/tools/base.py +306 -245
  114. langchain_core/tools/convert.py +160 -155
  115. langchain_core/tools/render.py +10 -10
  116. langchain_core/tools/retriever.py +12 -11
  117. langchain_core/tools/simple.py +19 -24
  118. langchain_core/tools/structured.py +32 -39
  119. langchain_core/tracers/__init__.py +1 -9
  120. langchain_core/tracers/base.py +97 -99
  121. langchain_core/tracers/context.py +29 -52
  122. langchain_core/tracers/core.py +49 -53
  123. langchain_core/tracers/evaluation.py +11 -11
  124. langchain_core/tracers/event_stream.py +65 -64
  125. langchain_core/tracers/langchain.py +21 -21
  126. langchain_core/tracers/log_stream.py +45 -45
  127. langchain_core/tracers/memory_stream.py +3 -3
  128. langchain_core/tracers/root_listeners.py +16 -16
  129. langchain_core/tracers/run_collector.py +2 -4
  130. langchain_core/tracers/schemas.py +0 -129
  131. langchain_core/tracers/stdout.py +3 -3
  132. langchain_core/utils/__init__.py +1 -4
  133. langchain_core/utils/_merge.py +2 -2
  134. langchain_core/utils/aiter.py +57 -61
  135. langchain_core/utils/env.py +9 -9
  136. langchain_core/utils/function_calling.py +94 -188
  137. langchain_core/utils/html.py +7 -8
  138. langchain_core/utils/input.py +9 -6
  139. langchain_core/utils/interactive_env.py +1 -1
  140. langchain_core/utils/iter.py +36 -40
  141. langchain_core/utils/json.py +4 -3
  142. langchain_core/utils/json_schema.py +9 -9
  143. langchain_core/utils/mustache.py +8 -10
  144. langchain_core/utils/pydantic.py +35 -37
  145. langchain_core/utils/strings.py +6 -9
  146. langchain_core/utils/usage.py +1 -1
  147. langchain_core/utils/utils.py +66 -62
  148. langchain_core/vectorstores/base.py +182 -216
  149. langchain_core/vectorstores/in_memory.py +101 -176
  150. langchain_core/vectorstores/utils.py +5 -5
  151. langchain_core/version.py +1 -1
  152. langchain_core-1.0.4.dist-info/METADATA +69 -0
  153. langchain_core-1.0.4.dist-info/RECORD +172 -0
  154. {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.4.dist-info}/WHEEL +1 -1
  155. langchain_core/memory.py +0 -120
  156. langchain_core/messages/block_translators/ollama.py +0 -47
  157. langchain_core/prompts/pipeline.py +0 -138
  158. langchain_core/pydantic_v1/__init__.py +0 -30
  159. langchain_core/pydantic_v1/dataclasses.py +0 -23
  160. langchain_core/pydantic_v1/main.py +0 -23
  161. langchain_core/tracers/langchain_v1.py +0 -31
  162. langchain_core/utils/loading.py +0 -35
  163. langchain_core-1.0.0a6.dist-info/METADATA +0 -67
  164. langchain_core-1.0.0a6.dist-info/RECORD +0 -181
  165. langchain_core-1.0.0a6.dist-info/entry_points.txt +0 -4
langchain_core/output_parsers/openai_functions.py
@@ -3,7 +3,7 @@
 import copy
 import json
 from types import GenericAlias
-from typing import Any, Optional, Union
+from typing import Any
 
 import jsonpatch  # type: ignore[import-untyped]
 from pydantic import BaseModel, model_validator
@@ -31,13 +31,13 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):
 
         Args:
             result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects. Default is False.
+            partial: Whether to parse partial JSON objects.
 
         Returns:
             The parsed JSON object.
 
         Raises:
-            OutputParserException: If the output is not valid JSON.
+            `OutputParserException`: If the output is not valid JSON.
         """
         generation = result[0]
         if not isinstance(generation, ChatGeneration):
@@ -56,7 +56,7 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):
 
 
 class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
-    """Parse an output as the Json object."""
+    """Parse an output as the JSON object."""
 
     strict: bool = False
     """Whether to allow non-JSON-compliant strings.
@@ -74,7 +74,7 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
         return "json_functions"
 
     @override
-    def _diff(self, prev: Optional[Any], next: Any) -> Any:
+    def _diff(self, prev: Any | None, next: Any) -> Any:
         return jsonpatch.make_patch(prev, next).patch
 
     def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
@@ -82,13 +82,13 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
 
         Args:
             result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects. Default is False.
+            partial: Whether to parse partial JSON objects.
 
         Returns:
             The parsed JSON object.
 
         Raises:
-            OutputParserException: If the output is not valid JSON.
+            `OutputParserException`: If the output is not valid JSON.
         """
         if len(result) != 1:
             msg = f"Expected exactly one result, but got {len(result)}"
@@ -155,7 +155,7 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
 
 
 class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):
-    """Parse an output as the element of the Json object."""
+    """Parse an output as the element of the JSON object."""
 
     key_name: str
     """The name of the key to return."""
@@ -165,7 +165,7 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):
 
         Args:
             result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects. Default is False.
+            partial: Whether to parse partial JSON objects.
 
         Returns:
             The parsed JSON object.
@@ -177,48 +177,50 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):
 
 
 class PydanticOutputFunctionsParser(OutputFunctionsParser):
-    """Parse an output as a pydantic object.
+    """Parse an output as a Pydantic object.
 
-    This parser is used to parse the output of a ChatModel that uses
-    OpenAI function format to invoke functions.
+    This parser is used to parse the output of a chat model that uses OpenAI function
+    format to invoke functions.
 
-    The parser extracts the function call invocation and matches
-    them to the pydantic schema provided.
+    The parser extracts the function call invocation and matches them to the Pydantic
+    schema provided.
 
-    An exception will be raised if the function call does not match
-    the provided schema.
+    An exception will be raised if the function call does not match the provided schema.
 
     Example:
-        ... code-block:: python
-
-            message = AIMessage(
-                content="This is a test message",
-                additional_kwargs={
-                    "function_call": {
-                        "name": "cookie",
-                        "arguments": json.dumps({"name": "value", "age": 10}),
-                    }
-                },
-            )
-            chat_generation = ChatGeneration(message=message)
+        ```python
+        message = AIMessage(
+            content="This is a test message",
+            additional_kwargs={
+                "function_call": {
+                    "name": "cookie",
+                    "arguments": json.dumps({"name": "value", "age": 10}),
+                }
+            },
+        )
+        chat_generation = ChatGeneration(message=message)
 
-            class Cookie(BaseModel):
-                name: str
-                age: int
 
-            class Dog(BaseModel):
-                species: str
+        class Cookie(BaseModel):
+            name: str
+            age: int
 
-            # Full output
-            parser = PydanticOutputFunctionsParser(
-                pydantic_schema={"cookie": Cookie, "dog": Dog}
-            )
-            result = parser.parse_result([chat_generation])
+
+        class Dog(BaseModel):
+            species: str
+
+
+        # Full output
+        parser = PydanticOutputFunctionsParser(
+            pydantic_schema={"cookie": Cookie, "dog": Dog}
+        )
+        result = parser.parse_result([chat_generation])
+        ```
 
     """
 
-    pydantic_schema: Union[type[BaseModel], dict[str, type[BaseModel]]]
-    """The pydantic schema to parse the output with.
+    pydantic_schema: type[BaseModel] | dict[str, type[BaseModel]]
+    """The Pydantic schema to parse the output with.
 
     If multiple schemas are provided, then the function name will be used to
     determine which schema to use.
@@ -227,7 +229,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
     @model_validator(mode="before")
     @classmethod
     def validate_schema(cls, values: dict) -> Any:
-        """Validate the pydantic schema.
+        """Validate the Pydantic schema.
 
         Args:
             values: The values to validate.
@@ -236,7 +238,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
             The validated values.
 
         Raises:
-            ValueError: If the schema is not a pydantic schema.
+            ValueError: If the schema is not a Pydantic schema.
         """
         schema = values["pydantic_schema"]
         if "args_only" not in values:
@@ -259,10 +261,10 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
 
         Args:
             result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects. Default is False.
+            partial: Whether to parse partial JSON objects.
 
         Raises:
-            ValueError: If the pydantic schema is not valid.
+            ValueError: If the Pydantic schema is not valid.
 
         Returns:
             The parsed JSON object.
@@ -285,13 +287,13 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
         elif issubclass(pydantic_schema, BaseModelV1):
             pydantic_args = pydantic_schema.parse_raw(args)
         else:
-            msg = f"Unsupported pydantic schema: {pydantic_schema}"
+            msg = f"Unsupported Pydantic schema: {pydantic_schema}"
             raise ValueError(msg)
         return pydantic_args
 
 
 class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):
-    """Parse an output as an attribute of a pydantic object."""
+    """Parse an output as an attribute of a Pydantic object."""
 
     attr_name: str
     """The name of the attribute to return."""
@@ -302,7 +304,7 @@ class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):
 
         Args:
             result: The result of the LLM call.
-            partial: Whether to parse partial JSON objects. Default is False.
+            partial: Whether to parse partial JSON objects.
 
         Returns:
             The parsed JSON object.
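The `_diff` hook shown above (now typed `Any | None`) still delegates to `jsonpatch.make_patch`, which is what the cumulative parser emits when run in diff mode during streaming. A minimal sketch of that patch format, using only the third-party `jsonpatch` package; the intermediate values are invented for illustration:

```python
import jsonpatch

# Two successive partially parsed outputs, as a streaming run might produce them.
prev = {"name": "Sally", "hair_color": "bro"}
nxt = {"name": "Sally", "hair_color": "brown", "height": 6}

# Per the hunk above, _diff returns make_patch(prev, next).patch:
# a list of JSON Patch operations describing how prev became nxt.
patch = jsonpatch.make_patch(prev, nxt).patch
print(patch)
# e.g. [{'op': 'replace', 'path': '/hair_color', 'value': 'brown'},
#       {'op': 'add', 'path': '/height', 'value': 6}]
```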
langchain_core/output_parsers/openai_tools.py
@@ -4,7 +4,7 @@ import copy
 import json
 import logging
 from json import JSONDecodeError
-from typing import Annotated, Any, Optional
+from typing import Annotated, Any
 
 from pydantic import SkipValidation, ValidationError
 
@@ -15,7 +15,11 @@ from langchain_core.messages.tool import tool_call as create_tool_call
 from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
 from langchain_core.outputs import ChatGeneration, Generation
 from langchain_core.utils.json import parse_partial_json
-from langchain_core.utils.pydantic import TypeBaseModel
+from langchain_core.utils.pydantic import (
+    TypeBaseModel,
+    is_pydantic_v1_subclass,
+    is_pydantic_v2_subclass,
+)
 
 logger = logging.getLogger(__name__)
 
@@ -26,15 +30,14 @@ def parse_tool_call(
     partial: bool = False,
     strict: bool = False,
     return_id: bool = True,
-) -> Optional[dict[str, Any]]:
+) -> dict[str, Any] | None:
     """Parse a single tool call.
 
     Args:
         raw_tool_call: The raw tool call to parse.
-        partial: Whether to parse partial JSON. Default is False.
+        partial: Whether to parse partial JSON.
         strict: Whether to allow non-JSON-compliant strings.
-            Default is False.
-        return_id: Whether to return the tool call id. Default is True.
+        return_id: Whether to return the tool call id.
 
     Returns:
         The parsed tool call.
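For context on the signature change above, a small usage sketch of `parse_tool_call` on an OpenAI-style raw tool call; the payload values are invented, and the exact return shape should be checked against the source:

```python
import json

from langchain_core.output_parsers.openai_tools import parse_tool_call

# An OpenAI-format tool call with the arguments serialized as a JSON string.
raw_tool_call = {
    "id": "call_abc123",  # made-up id
    "type": "function",
    "function": {
        "name": "get_weather",
        "arguments": json.dumps({"location": "Paris"}),
    },
}

parsed = parse_tool_call(raw_tool_call, partial=False, return_id=True)
# Roughly: {"name": "get_weather", "args": {"location": "Paris"},
#           "id": "call_abc123", "type": "tool_call"}
print(parsed)
```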
@@ -75,7 +78,7 @@
 
 def make_invalid_tool_call(
     raw_tool_call: dict[str, Any],
-    error_msg: Optional[str],
+    error_msg: str | None,
 ) -> InvalidToolCall:
     """Create an InvalidToolCall from a raw tool call.
 
@@ -105,10 +108,9 @@
 
     Args:
         raw_tool_calls: The raw tool calls to parse.
-        partial: Whether to parse partial JSON. Default is False.
+        partial: Whether to parse partial JSON.
         strict: Whether to allow non-JSON-compliant strings.
-            Default is False.
-        return_id: Whether to return the tool call id. Default is True.
+        return_id: Whether to return the tool call id.
 
     Returns:
         The parsed tool calls.
@@ -148,7 +150,7 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
     first_tool_only: bool = False
     """Whether to return only the first tool call.
 
-    If False, the result will be a list of tool calls, or an empty list
+    If `False`, the result will be a list of tool calls, or an empty list
     if no tool calls are found.
 
     If true, and multiple tool calls are found, only the first one will be returned,
@@ -162,10 +164,9 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
         Args:
             result: The result of the LLM call.
             partial: Whether to parse partial JSON.
-                If True, the output will be a JSON object containing
+                If `True`, the output will be a JSON object containing
                 all the keys that have been returned so far.
-                If False, the output will be the full JSON object.
-                Default is False.
+                If `False`, the output will be the full JSON object.
 
         Returns:
             The parsed tool calls.
@@ -226,10 +227,9 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
         Args:
             result: The result of the LLM call.
             partial: Whether to parse partial JSON.
-                If True, the output will be a JSON object containing
-                all the keys that have been returned so far.
-                If False, the output will be the full JSON object.
-                Default is False.
+                If `True`, the output will be a JSON object containing
+                all the keys that have been returned so far.
+                If `False`, the output will be the full JSON object.
 
         Raises:
             OutputParserException: If the generation is not a chat generation.
@@ -310,10 +310,9 @@ class PydanticToolsParser(JsonOutputToolsParser):
         Args:
             result: The result of the LLM call.
             partial: Whether to parse partial JSON.
-                If True, the output will be a JSON object containing
-                all the keys that have been returned so far.
-                If False, the output will be the full JSON object.
-                Default is False.
+                If `True`, the output will be a JSON object containing
+                all the keys that have been returned so far.
+                If `False`, the output will be the full JSON object.
 
         Returns:
             The parsed Pydantic objects.
@@ -328,7 +327,15 @@ class PydanticToolsParser(JsonOutputToolsParser):
             return None if self.first_tool_only else []
 
         json_results = [json_results] if self.first_tool_only else json_results
-        name_dict = {tool.__name__: tool for tool in self.tools}
+        name_dict_v2: dict[str, TypeBaseModel] = {
+            tool.model_config.get("title") or tool.__name__: tool
+            for tool in self.tools
+            if is_pydantic_v2_subclass(tool)
+        }
+        name_dict_v1: dict[str, TypeBaseModel] = {
+            tool.__name__: tool for tool in self.tools if is_pydantic_v1_subclass(tool)
+        }
+        name_dict: dict[str, TypeBaseModel] = {**name_dict_v2, **name_dict_v1}
         pydantic_objects = []
         for res in json_results:
             if not isinstance(res["args"], dict):
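The new `name_dict_v2` lookup above means a Pydantic v2 tool whose `model_config` sets a custom `title` is matched by that title rather than only by its class name. A minimal sketch of the behavior this enables, with made-up model and tool-call names:

```python
from pydantic import BaseModel, ConfigDict

from langchain_core.messages import AIMessage
from langchain_core.output_parsers.openai_tools import PydanticToolsParser


class GetWeather(BaseModel):
    # The schema title no longer has to equal the class name for lookup to succeed.
    model_config = ConfigDict(title="get_weather")

    location: str


parser = PydanticToolsParser(tools=[GetWeather], first_tool_only=True)

message = AIMessage(
    content="",
    tool_calls=[{"name": "get_weather", "args": {"location": "Paris"}, "id": "call_1"}],
)

# Expected to resolve "get_weather" to GetWeather via its model_config title.
result = parser.invoke(message)
print(result)  # GetWeather(location='Paris')
```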
langchain_core/output_parsers/pydantic.py
@@ -1,7 +1,7 @@
 """Output parsers using Pydantic."""
 
 import json
-from typing import Annotated, Generic, Optional
+from typing import Annotated, Generic
 
 import pydantic
 from pydantic import SkipValidation
@@ -17,10 +17,10 @@ from langchain_core.utils.pydantic import (
 
 
 class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
-    """Parse an output using a pydantic model."""
+    """Parse an output using a Pydantic model."""
 
     pydantic_object: Annotated[type[TBaseModel], SkipValidation()]
-    """The pydantic model to parse."""
+    """The Pydantic model to parse."""
 
     def _parse_obj(self, obj: dict) -> TBaseModel:
         try:
@@ -44,22 +44,21 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
 
     def parse_result(
         self, result: list[Generation], *, partial: bool = False
-    ) -> Optional[TBaseModel]:
-        """Parse the result of an LLM call to a pydantic object.
+    ) -> TBaseModel | None:
+        """Parse the result of an LLM call to a Pydantic object.
 
         Args:
             result: The result of the LLM call.
             partial: Whether to parse partial JSON objects.
-                If True, the output will be a JSON object containing
+                If `True`, the output will be a JSON object containing
                 all the keys that have been returned so far.
-                Defaults to False.
 
         Raises:
-            OutputParserException: If the result is not valid JSON
-                or does not conform to the pydantic model.
+            `OutputParserException`: If the result is not valid JSON
+                or does not conform to the Pydantic model.
 
         Returns:
-            The parsed pydantic object.
+            The parsed Pydantic object.
         """
         try:
             json_object = super().parse_result(result)
@@ -70,13 +69,13 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
             raise
 
     def parse(self, text: str) -> TBaseModel:
-        """Parse the output of an LLM call to a pydantic object.
+        """Parse the output of an LLM call to a Pydantic object.
 
         Args:
             text: The output of the LLM call.
 
         Returns:
-            The parsed pydantic object.
+            The parsed Pydantic object.
         """
         return super().parse(text)
 
@@ -87,7 +86,7 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
             The format instructions for the JSON output.
         """
         # Copy schema to avoid altering original Pydantic schema.
-        schema = dict(self.pydantic_object.model_json_schema().items())
+        schema = dict(self._get_schema(self.pydantic_object).items())
 
         # Remove extraneous fields.
         reduced_schema = schema
@@ -107,7 +106,7 @@
     @property
     @override
     def OutputType(self) -> type[TBaseModel]:
-        """Return the pydantic model."""
+        """Return the Pydantic model."""
        return self.pydantic_object
 
 
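For reference, the round trip through `PydanticOutputParser` that these docstrings describe; a minimal sketch with an invented `Joke` model:

```python
from pydantic import BaseModel, Field

from langchain_core.output_parsers import PydanticOutputParser


class Joke(BaseModel):
    setup: str = Field(description="the setup of the joke")
    punchline: str = Field(description="the punchline of the joke")


parser = PydanticOutputParser(pydantic_object=Joke)

# JSON-schema-based instructions to embed in a prompt.
print(parser.get_format_instructions())

# Parse a model response back into the Pydantic object.
text = '{"setup": "Why did the chicken cross the road?", "punchline": "To get to the other side."}'
joke = parser.parse(text)
print(joke.punchline)
```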
langchain_core/output_parsers/string.py
@@ -6,23 +6,23 @@ from langchain_core.output_parsers.transform import BaseTransformOutputParser
 
 
 class StrOutputParser(BaseTransformOutputParser[str]):
-    """OutputParser that parses LLMResult into the top likely string."""
+    """OutputParser that parses `LLMResult` into the top likely string."""
 
     @classmethod
     def is_lc_serializable(cls) -> bool:
-        """StrOutputParser is serializable.
+        """`StrOutputParser` is serializable.
 
         Returns:
-            True
+            `True`
         """
         return True
 
     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.
 
         Returns:
-            ``["langchain", "schema", "output_parser"]``
+            `["langchain", "schema", "output_parser"]`
         """
         return ["langchain", "schema", "output_parser"]
 
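The `is_lc_serializable` / `get_lc_namespace` pair above is what LangChain's serializer consults. A small sketch of how the namespace surfaces in the serialized form, assuming `dumpd` from `langchain_core.load`; the exact output keys should be checked against the current serializer:

```python
from langchain_core.load import dumpd
from langchain_core.output_parsers import StrOutputParser

serialized = dumpd(StrOutputParser())
# The "id" path is built from get_lc_namespace() plus the class name, roughly:
# {"lc": 1, "type": "constructor",
#  "id": ["langchain", "schema", "output_parser", "StrOutputParser"], "kwargs": {}}
print(serialized)
```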
langchain_core/output_parsers/transform.py
@@ -5,8 +5,6 @@ from __future__ import annotations
 from typing import (
     TYPE_CHECKING,
     Any,
-    Optional,
-    Union,
 )
 
 from typing_extensions import override
@@ -32,7 +30,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
 
     def _transform(
         self,
-        input: Iterator[Union[str, BaseMessage]],
+        input: Iterator[str | BaseMessage],
     ) -> Iterator[T]:
         for chunk in input:
             if isinstance(chunk, BaseMessage):
@@ -42,7 +40,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
 
     async def _atransform(
         self,
-        input: AsyncIterator[Union[str, BaseMessage]],
+        input: AsyncIterator[str | BaseMessage],
     ) -> AsyncIterator[T]:
         async for chunk in input:
             if isinstance(chunk, BaseMessage):
@@ -57,8 +55,8 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
     @override
     def transform(
         self,
-        input: Iterator[Union[str, BaseMessage]],
-        config: Optional[RunnableConfig] = None,
+        input: Iterator[str | BaseMessage],
+        config: RunnableConfig | None = None,
         **kwargs: Any,
     ) -> Iterator[T]:
         """Transform the input into the output format.
@@ -66,7 +64,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
         Args:
             input: The input to transform.
             config: The configuration to use for the transformation.
-            kwargs: Additional keyword arguments.
+            **kwargs: Additional keyword arguments.
 
         Yields:
             The transformed output.
@@ -78,8 +76,8 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
     @override
     async def atransform(
         self,
-        input: AsyncIterator[Union[str, BaseMessage]],
-        config: Optional[RunnableConfig] = None,
+        input: AsyncIterator[str | BaseMessage],
+        config: RunnableConfig | None = None,
         **kwargs: Any,
     ) -> AsyncIterator[T]:
         """Async transform the input into the output format.
@@ -87,7 +85,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
         Args:
             input: The input to transform.
             config: The configuration to use for the transformation.
-            kwargs: Additional keyword arguments.
+            **kwargs: Additional keyword arguments.
 
         Yields:
             The transformed output.
@@ -108,7 +106,7 @@ class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]):
 
     def _diff(
         self,
-        prev: Optional[T],
+        prev: T | None,
         next: T,  # noqa: A002
     ) -> T:
         """Convert parsed outputs into a diff format.
@@ -125,11 +123,11 @@ class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]):
         raise NotImplementedError
 
     @override
-    def _transform(self, input: Iterator[Union[str, BaseMessage]]) -> Iterator[Any]:
+    def _transform(self, input: Iterator[str | BaseMessage]) -> Iterator[Any]:
         prev_parsed = None
-        acc_gen: Union[GenerationChunk, ChatGenerationChunk, None] = None
+        acc_gen: GenerationChunk | ChatGenerationChunk | None = None
         for chunk in input:
-            chunk_gen: Union[GenerationChunk, ChatGenerationChunk]
+            chunk_gen: GenerationChunk | ChatGenerationChunk
             if isinstance(chunk, BaseMessageChunk):
                 chunk_gen = ChatGenerationChunk(message=chunk)
             elif isinstance(chunk, BaseMessage):
@@ -151,12 +149,12 @@ class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]):
 
     @override
     async def _atransform(
-        self, input: AsyncIterator[Union[str, BaseMessage]]
+        self, input: AsyncIterator[str | BaseMessage]
     ) -> AsyncIterator[T]:
         prev_parsed = None
-        acc_gen: Union[GenerationChunk, ChatGenerationChunk, None] = None
+        acc_gen: GenerationChunk | ChatGenerationChunk | None = None
         async for chunk in input:
-            chunk_gen: Union[GenerationChunk, ChatGenerationChunk]
+            chunk_gen: GenerationChunk | ChatGenerationChunk
             if isinstance(chunk, BaseMessageChunk):
                 chunk_gen = ChatGenerationChunk(message=chunk)
             elif isinstance(chunk, BaseMessage):
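To make the `_transform` accumulation above concrete: a cumulative parser folds incoming chunks into one growing generation and re-parses on each step, yielding whenever the parse changes. A minimal sketch using `JsonOutputParser` (a `BaseCumulativeTransformOutputParser` subclass); the chunk contents are invented:

```python
from langchain_core.messages import AIMessageChunk
from langchain_core.output_parsers import JsonOutputParser

parser = JsonOutputParser()

# Simulated streamed message chunks carrying a JSON payload split mid-token.
chunks = [
    AIMessageChunk(content='{"setup": "Why did'),
    AIMessageChunk(content=' the chicken cross the road?"'),
    AIMessageChunk(content=', "punchline": "To get to the other side."}'),
]

# Each yielded value is the best-effort parse of everything accumulated so far,
# e.g. {'setup': 'Why did'} ... then the complete object on the final chunk.
for partial in parser.transform(iter(chunks)):
    print(partial)
```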