langchain-core 1.0.0a1__py3-none-any.whl → 1.0.0a3__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Note: this version of langchain-core has been flagged as potentially problematic.

Files changed (131)
  1. langchain_core/_api/beta_decorator.py +17 -40
  2. langchain_core/_api/deprecation.py +20 -7
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/callbacks/base.py +28 -15
  7. langchain_core/callbacks/manager.py +81 -69
  8. langchain_core/callbacks/usage.py +4 -2
  9. langchain_core/chat_history.py +29 -21
  10. langchain_core/document_loaders/base.py +34 -9
  11. langchain_core/document_loaders/langsmith.py +3 -0
  12. langchain_core/documents/base.py +35 -10
  13. langchain_core/documents/transformers.py +4 -2
  14. langchain_core/embeddings/fake.py +8 -5
  15. langchain_core/env.py +2 -3
  16. langchain_core/example_selectors/base.py +12 -0
  17. langchain_core/exceptions.py +7 -0
  18. langchain_core/globals.py +17 -28
  19. langchain_core/indexing/api.py +57 -45
  20. langchain_core/indexing/base.py +5 -8
  21. langchain_core/indexing/in_memory.py +23 -3
  22. langchain_core/language_models/__init__.py +6 -2
  23. langchain_core/language_models/_utils.py +28 -4
  24. langchain_core/language_models/base.py +33 -21
  25. langchain_core/language_models/chat_models.py +103 -29
  26. langchain_core/language_models/fake_chat_models.py +5 -7
  27. langchain_core/language_models/llms.py +54 -20
  28. langchain_core/load/dump.py +2 -3
  29. langchain_core/load/load.py +15 -1
  30. langchain_core/load/serializable.py +38 -43
  31. langchain_core/memory.py +7 -3
  32. langchain_core/messages/__init__.py +7 -17
  33. langchain_core/messages/ai.py +41 -34
  34. langchain_core/messages/base.py +16 -7
  35. langchain_core/messages/block_translators/__init__.py +10 -8
  36. langchain_core/messages/block_translators/anthropic.py +3 -1
  37. langchain_core/messages/block_translators/bedrock.py +3 -1
  38. langchain_core/messages/block_translators/bedrock_converse.py +3 -1
  39. langchain_core/messages/block_translators/google_genai.py +3 -1
  40. langchain_core/messages/block_translators/google_vertexai.py +3 -1
  41. langchain_core/messages/block_translators/groq.py +3 -1
  42. langchain_core/messages/block_translators/langchain_v0.py +3 -136
  43. langchain_core/messages/block_translators/ollama.py +3 -1
  44. langchain_core/messages/block_translators/openai.py +252 -10
  45. langchain_core/messages/content.py +26 -124
  46. langchain_core/messages/human.py +2 -13
  47. langchain_core/messages/system.py +2 -6
  48. langchain_core/messages/tool.py +34 -14
  49. langchain_core/messages/utils.py +189 -74
  50. langchain_core/output_parsers/base.py +5 -2
  51. langchain_core/output_parsers/json.py +4 -4
  52. langchain_core/output_parsers/list.py +7 -22
  53. langchain_core/output_parsers/openai_functions.py +3 -0
  54. langchain_core/output_parsers/openai_tools.py +6 -1
  55. langchain_core/output_parsers/pydantic.py +4 -0
  56. langchain_core/output_parsers/string.py +5 -1
  57. langchain_core/output_parsers/xml.py +19 -19
  58. langchain_core/outputs/chat_generation.py +18 -7
  59. langchain_core/outputs/generation.py +14 -3
  60. langchain_core/outputs/llm_result.py +8 -1
  61. langchain_core/prompt_values.py +10 -4
  62. langchain_core/prompts/base.py +6 -11
  63. langchain_core/prompts/chat.py +88 -60
  64. langchain_core/prompts/dict.py +16 -8
  65. langchain_core/prompts/few_shot.py +9 -11
  66. langchain_core/prompts/few_shot_with_templates.py +5 -1
  67. langchain_core/prompts/image.py +12 -5
  68. langchain_core/prompts/loading.py +2 -2
  69. langchain_core/prompts/message.py +5 -6
  70. langchain_core/prompts/pipeline.py +13 -8
  71. langchain_core/prompts/prompt.py +22 -8
  72. langchain_core/prompts/string.py +18 -10
  73. langchain_core/prompts/structured.py +7 -2
  74. langchain_core/rate_limiters.py +2 -2
  75. langchain_core/retrievers.py +7 -6
  76. langchain_core/runnables/base.py +387 -246
  77. langchain_core/runnables/branch.py +11 -28
  78. langchain_core/runnables/config.py +20 -17
  79. langchain_core/runnables/configurable.py +34 -19
  80. langchain_core/runnables/fallbacks.py +20 -13
  81. langchain_core/runnables/graph.py +48 -38
  82. langchain_core/runnables/graph_ascii.py +40 -17
  83. langchain_core/runnables/graph_mermaid.py +54 -25
  84. langchain_core/runnables/graph_png.py +27 -31
  85. langchain_core/runnables/history.py +55 -58
  86. langchain_core/runnables/passthrough.py +44 -21
  87. langchain_core/runnables/retry.py +44 -23
  88. langchain_core/runnables/router.py +9 -8
  89. langchain_core/runnables/schema.py +9 -0
  90. langchain_core/runnables/utils.py +53 -90
  91. langchain_core/stores.py +19 -31
  92. langchain_core/sys_info.py +9 -8
  93. langchain_core/tools/base.py +36 -27
  94. langchain_core/tools/convert.py +25 -14
  95. langchain_core/tools/simple.py +36 -8
  96. langchain_core/tools/structured.py +25 -12
  97. langchain_core/tracers/base.py +2 -2
  98. langchain_core/tracers/context.py +5 -1
  99. langchain_core/tracers/core.py +110 -46
  100. langchain_core/tracers/evaluation.py +22 -26
  101. langchain_core/tracers/event_stream.py +97 -42
  102. langchain_core/tracers/langchain.py +12 -3
  103. langchain_core/tracers/langchain_v1.py +10 -2
  104. langchain_core/tracers/log_stream.py +56 -17
  105. langchain_core/tracers/root_listeners.py +4 -20
  106. langchain_core/tracers/run_collector.py +6 -16
  107. langchain_core/tracers/schemas.py +5 -1
  108. langchain_core/utils/aiter.py +14 -6
  109. langchain_core/utils/env.py +3 -0
  110. langchain_core/utils/function_calling.py +46 -20
  111. langchain_core/utils/interactive_env.py +6 -2
  112. langchain_core/utils/iter.py +12 -5
  113. langchain_core/utils/json.py +12 -3
  114. langchain_core/utils/json_schema.py +156 -40
  115. langchain_core/utils/loading.py +5 -1
  116. langchain_core/utils/mustache.py +25 -16
  117. langchain_core/utils/pydantic.py +38 -9
  118. langchain_core/utils/utils.py +25 -9
  119. langchain_core/vectorstores/base.py +7 -20
  120. langchain_core/vectorstores/in_memory.py +20 -14
  121. langchain_core/vectorstores/utils.py +18 -12
  122. langchain_core/version.py +1 -1
  123. langchain_core-1.0.0a3.dist-info/METADATA +77 -0
  124. langchain_core-1.0.0a3.dist-info/RECORD +181 -0
  125. langchain_core/beta/__init__.py +0 -1
  126. langchain_core/beta/runnables/__init__.py +0 -1
  127. langchain_core/beta/runnables/context.py +0 -448
  128. langchain_core-1.0.0a1.dist-info/METADATA +0 -106
  129. langchain_core-1.0.0a1.dist-info/RECORD +0 -184
  130. {langchain_core-1.0.0a1.dist-info → langchain_core-1.0.0a3.dist-info}/WHEEL +0 -0
  131. {langchain_core-1.0.0a1.dist-info → langchain_core-1.0.0a3.dist-info}/entry_points.txt +0 -0
@@ -83,7 +83,7 @@ The module defines several types of content blocks, including:
 
  - ``TextContentBlock``: Standard text output.
  - ``Citation``: For annotations that link text output to a source document.
- - ``ToolCallContentBlock``: For function calling.
+ - ``ToolCall``: For function calling.
  - ``ReasoningContentBlock``: To capture a model's thought process.
  - Multimodal data:
  - ``ImageContentBlock``
@@ -99,8 +99,8 @@ The module defines several types of content blocks, including:
  # Direct construction:
  from langchain_core.messages.content import TextContentBlock, ImageContentBlock
 
- multimodal_message: AIMessage(content_blocks=
- [
+ multimodal_message: AIMessage(
+ content_blocks=[
  TextContentBlock(type="text", text="What is shown in this image?"),
  ImageContentBlock(
  type="image",
@@ -113,8 +113,8 @@ The module defines several types of content blocks, including:
  # Using factories:
  from langchain_core.messages.content import create_text_block, create_image_block
 
- multimodal_message: AIMessage(content=
- [
+ multimodal_message: AIMessage(
+ content=[
  create_text_block("What is shown in this image?"),
  create_image_block(
  url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
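
A minimal runnable sketch of the factory-based construction shown in the corrected example above (assuming langchain-core 1.0.0a3 is installed; the final print is only for illustration):

    from langchain_core.messages import AIMessage
    from langchain_core.messages.content import create_image_block, create_text_block

    multimodal_message = AIMessage(
        content=[
            create_text_block("What is shown in this image?"),
            create_image_block(
                url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
            ),
        ]
    )
    print(multimodal_message.content)
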
@@ -129,10 +129,9 @@ Factory functions offer benefits such as:
 
  """
 
- import warnings
  from typing import Any, Literal, Optional, Union, get_args, get_type_hints
 
- from typing_extensions import NotRequired, TypedDict, TypeGuard
+ from typing_extensions import NotRequired, TypedDict
 
  from langchain_core.utils.utils import ensure_id
 
@@ -262,11 +261,7 @@ class ToolCall(TypedDict):
 
  .. code-block:: python
 
- {
- "name": "foo",
- "args": {"a": 1},
- "id": "123"
- }
+ {"name": "foo", "args": {"a": 1}, "id": "123"}
 
  This represents a request to call the tool named "foo" with arguments {"a": 1}
  and an identifier of "123".
@@ -317,12 +312,12 @@ class ToolCallChunk(TypedDict):
  .. code-block:: python
 
  left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
- right_chunks = [ToolCallChunk(name=None, args='1}', index=0)]
+ right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
 
  (
  AIMessageChunk(content="", tool_call_chunks=left_chunks)
  + AIMessageChunk(content="", tool_call_chunks=right_chunks)
- ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)]
+ ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
 
  """
 
@@ -834,7 +829,7 @@ class NonStandardContentBlock(TypedDict):
  The purpose of this block should be to simply hold a provider-specific payload.
  If a provider's non-standard output includes reasoning and tool calls, it should be
  the adapter's job to parse that payload and emit the corresponding standard
- ``ReasoningContentBlock`` and ``ToolCallContentBlocks``.
+ ``ReasoningContentBlock`` and ``ToolCalls``.
 
  Has no ``extras`` field, as provider-specific data should be included in the
  ``value`` field.
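
The adapter responsibility described above can be sketched as follows. This is a hypothetical illustration: the helper name parse_provider_payload and the payload keys "thinking" and "tool_calls" are assumptions, not part of langchain_core.

    from typing import Any

    from langchain_core.messages.content import ReasoningContentBlock
    from langchain_core.messages.tool import tool_call


    def parse_provider_payload(payload: dict[str, Any]) -> list[dict[str, Any]]:
        """Hypothetical adapter: map a provider payload onto standard blocks."""
        blocks: list[dict[str, Any]] = []
        if thinking := payload.get("thinking"):  # assumed provider-specific key
            # "reasoning" is the v1 ReasoningContentBlock field; verify against your version
            blocks.append(ReasoningContentBlock(type="reasoning", reasoning=thinking))
        for call in payload.get("tool_calls", []):  # assumed provider-specific key
            blocks.append(tool_call(name=call["name"], args=call["args"], id=call.get("id")))
        return blocks
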
@@ -920,7 +915,15 @@ KNOWN_BLOCK_TYPES = {
 
 
  def _get_data_content_block_types() -> tuple[str, ...]:
- """Get type literals from DataContentBlock union members dynamically."""
+ """Get type literals from DataContentBlock union members dynamically.
+
+ Example: ("image", "video", "audio", "text-plain", "file")
+
+ Note that old style multimodal blocks share type literals with new style blocks.
+ Specifically, "image", "audio", and "file".
+
+ See the docstring of `_normalize_messages` in `language_models._utils` for details.
+ """
  data_block_types = []
 
  for block_type in get_args(DataContentBlock):
@@ -936,7 +939,9 @@ def _get_data_content_block_types() -> tuple[str, ...]:
 
 
  def is_data_content_block(block: dict) -> bool:
- """Check if the provided content block is a standard v1 data content block.
+ """Check if the provided content block is a data content block.
+
+ Returns for both v0 (old-style) and v1 (new-style) multimodal data blocks.
 
  Args:
  block: The content block to check.
@@ -949,6 +954,8 @@ def is_data_content_block(block: dict) -> bool:
  return False
 
  if any(key in block for key in ("url", "base64", "file_id", "text")):
+ # Type is valid and at least one data field is present
+ # (Accepts old-style image and audio URLContentBlock)
  return True
 
  # Verify data presence based on source type
@@ -963,116 +970,11 @@ def is_data_content_block(block: dict) -> bool:
  ):
  return True
 
+ # Type may be valid, but no data fields are present
+ # (required case since each is optional and we have no validation)
  return False
 
 
- def is_tool_call_block(block: ContentBlock) -> TypeGuard[ToolCall]:
- """Type guard to check if a content block is a ``ToolCall``."""
- return block.get("type") == "tool_call"
-
-
- def is_tool_call_chunk(block: ContentBlock) -> TypeGuard[ToolCallChunk]:
- """Type guard to check if a content block is a ``ToolCallChunk``."""
- return block.get("type") == "tool_call_chunk"
-
-
- def is_text_block(block: ContentBlock) -> TypeGuard[TextContentBlock]:
- """Type guard to check if a content block is a ``TextContentBlock``."""
- return block.get("type") == "text"
-
-
- def is_reasoning_block(block: ContentBlock) -> TypeGuard[ReasoningContentBlock]:
- """Type guard to check if a content block is a ``ReasoningContentBlock``."""
- return block.get("type") == "reasoning"
-
-
- def is_invalid_tool_call_block(
- block: ContentBlock,
- ) -> TypeGuard[InvalidToolCall]:
- """Type guard to check if a content block is an ``InvalidToolCall``."""
- return block.get("type") == "invalid_tool_call"
-
-
- def convert_to_openai_image_block(block: dict[str, Any]) -> dict:
- """Convert ``ImageContentBlock`` to format expected by OpenAI Chat Completions."""
- if "url" in block:
- return {
- "type": "image_url",
- "image_url": {
- "url": block["url"],
- },
- }
- if "base64" in block or block.get("source_type") == "base64":
- if "mime_type" not in block:
- error_message = "mime_type key is required for base64 data."
- raise ValueError(error_message)
- mime_type = block["mime_type"]
- base64_data = block["data"] if "data" in block else block["base64"]
- return {
- "type": "image_url",
- "image_url": {
- "url": f"data:{mime_type};base64,{base64_data}",
- },
- }
- error_message = "Unsupported source type. Only 'url' and 'base64' are supported."
- raise ValueError(error_message)
-
-
- def convert_to_openai_data_block(block: dict) -> dict:
- """Format standard data content block to format expected by OpenAI."""
- if block["type"] == "image":
- formatted_block = convert_to_openai_image_block(block)
-
- elif block["type"] == "file":
- if "base64" in block or block.get("source_type") == "base64":
- # Handle v0 format: {"source_type": "base64", "data": "...", ...}
- # Handle v1 format: {"base64": "...", ...}
- base64_data = block["data"] if "source_type" in block else block["base64"]
- file = {"file_data": f"data:{block['mime_type']};base64,{base64_data}"}
- if filename := block.get("filename"):
- file["filename"] = filename
- elif (extras := block.get("extras")) and ("filename" in extras):
- file["filename"] = extras["filename"]
- elif (extras := block.get("metadata")) and ("filename" in extras):
- # Backward compat
- file["filename"] = extras["filename"]
- else:
- warnings.warn(
- "OpenAI may require a filename for file inputs. Specify a filename "
- "in the content block: {'type': 'file', 'mime_type': "
- "'application/pdf', 'base64': '...', 'filename': 'my-pdf'}",
- stacklevel=1,
- )
- formatted_block = {"type": "file", "file": file}
- elif "file_id" in block or block.get("source_type") == "id":
- # Handle v0 format: {"source_type": "id", "id": "...", ...}
- # Handle v1 format: {"file_id": "...", ...}
- file_id = block["id"] if "source_type" in block else block["file_id"]
- formatted_block = {"type": "file", "file": {"file_id": file_id}}
- else:
- error_msg = "Keys base64 or file_id required for file blocks."
- raise ValueError(error_msg)
-
- elif block["type"] == "audio":
- if "base64" in block or block.get("source_type") == "base64":
- # Handle v0 format: {"source_type": "base64", "data": "...", ...}
- # Handle v1 format: {"base64": "...", ...}
- base64_data = block["data"] if "source_type" in block else block["base64"]
- audio_format = block["mime_type"].split("/")[-1]
- formatted_block = {
- "type": "input_audio",
- "input_audio": {"data": base64_data, "format": audio_format},
- }
- else:
- error_msg = "Key base64 is required for audio blocks."
- raise ValueError(error_msg)
- else:
- error_msg = f"Block of type {block['type']} is not supported."
- raise ValueError(error_msg)
-
- return formatted_block
-
-
  def create_text_block(
  text: str,
  *,
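
A small sketch of the behavior the new comments describe, assuming langchain-core 1.0.0a3; the expected results follow the v0/v1 handling shown in the hunks above:

    from langchain_core.messages import is_data_content_block

    # v1 (new-style) block: the "url" field carries the data -> expected True
    print(is_data_content_block({"type": "image", "url": "https://example.com/cat.png"}))

    # v0 (old-style) block: "source_type" says where the data lives -> expected True
    print(
        is_data_content_block(
            {"type": "image", "source_type": "base64", "data": "...", "mime_type": "image/png"}
        )
    )

    # Valid type but no data fields -> False, per the comment added above
    print(is_data_content_block({"type": "image"}))
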
@@ -18,12 +18,8 @@ class HumanMessage(BaseMessage):
  from langchain_core.messages import HumanMessage, SystemMessage
 
  messages = [
- SystemMessage(
- content="You are a helpful assistant! Your name is Bob."
- ),
- HumanMessage(
- content="What is your name?"
- )
+ SystemMessage(content="You are a helpful assistant! Your name is Bob."),
+ HumanMessage(content="What is your name?"),
  ]
 
  # Instantiate a chat model and invoke it with the messages
@@ -32,13 +28,6 @@ class HumanMessage(BaseMessage):
 
  """
 
- example: bool = False
- """Use to denote that a message is part of an example conversation.
-
- At the moment, this is ignored by most models. Usage is discouraged.
- Defaults to False.
- """
-
  type: Literal["human"] = "human"
  """The type of the message (used for serialization). Defaults to "human"."""
 
@@ -19,12 +19,8 @@ class SystemMessage(BaseMessage):
  from langchain_core.messages import HumanMessage, SystemMessage
 
  messages = [
- SystemMessage(
- content="You are a helpful assistant! Your name is Bob."
- ),
- HumanMessage(
- content="What is your name?"
- )
+ SystemMessage(content="You are a helpful assistant! Your name is Bob."),
+ HumanMessage(content="What is your name?"),
  ]
 
  # Define a chat model and invoke it with the messages
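
The docstring example above needs a chat model to run end to end; here is a minimal sketch using FakeListChatModel from langchain_core so no provider credentials are required (the canned response is an assumption for illustration):

    from langchain_core.language_models.fake_chat_models import FakeListChatModel
    from langchain_core.messages import HumanMessage, SystemMessage

    messages = [
        SystemMessage(content="You are a helpful assistant! Your name is Bob."),
        HumanMessage(content="What is your name?"),
    ]

    # Any chat model works here; FakeListChatModel just replays canned responses.
    model = FakeListChatModel(responses=["My name is Bob."])
    print(model.invoke(messages).content)
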
@@ -9,7 +9,7 @@ from typing_extensions import NotRequired, TypedDict, override
 
  from langchain_core.messages import content as types
  from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content
- from langchain_core.messages.content import InvalidToolCall as InvalidToolCall
+ from langchain_core.messages.content import InvalidToolCall
  from langchain_core.utils._merge import merge_dicts, merge_obj
 
 
@@ -34,7 +34,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
 
  from langchain_core.messages import ToolMessage
 
- ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
+ ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
 
 
  Example: A ToolMessage where only part of the tool output is sent to the model
@@ -47,7 +47,8 @@
  from langchain_core.messages import ToolMessage
 
  tool_output = {
- "stdout": "From the graph we can see that the correlation between x and y is ...",
+ "stdout": "From the graph we can see that the correlation between "
+ "x and y is ...",
  "stderr": None,
  "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
  }
@@ -55,14 +56,14 @@
  ToolMessage(
  content=tool_output["stdout"],
  artifact=tool_output,
- tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL',
+ tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
  )
 
  The tool_call_id field is used to associate the tool call request with the
  tool call response. This is useful in situations where a chat model is able
  to request multiple tool calls in parallel.
 
- """ # noqa: E501
+ """
 
  tool_call_id: str
  """Tool call that this message is responding to."""
@@ -205,11 +206,7 @@ class ToolCall(TypedDict):
 
  .. code-block:: python
 
- {
- "name": "foo",
- "args": {"a": 1},
- "id": "123"
- }
+ {"name": "foo", "args": {"a": 1}, "id": "123"}
 
  This represents a request to call the tool named "foo" with arguments {"a": 1}
  and an identifier of "123".
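
The dict shown above can be produced with the tool_call factory documented in the next hunk; a minimal sketch assuming langchain-core 1.0.0a3:

    from langchain_core.messages.tool import tool_call

    call = tool_call(name="foo", args={"a": 1}, id="123")
    # The factory fills in the "type" discriminator on top of the dict shown above.
    assert call == {"name": "foo", "args": {"a": 1}, "id": "123", "type": "tool_call"}
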
@@ -241,6 +238,9 @@ def tool_call(
  name: The name of the tool to be called.
  args: The arguments to the tool call.
  id: An identifier associated with the tool call.
+
+ Returns:
+ The created tool call.
  """
  return ToolCall(name=name, args=args, id=id, type="tool_call")
 
@@ -257,12 +257,12 @@ class ToolCallChunk(TypedDict):
  .. code-block:: python
 
  left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
- right_chunks = [ToolCallChunk(name=None, args='1}', index=0)]
+ right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
 
  (
  AIMessageChunk(content="", tool_call_chunks=left_chunks)
  + AIMessageChunk(content="", tool_call_chunks=right_chunks)
- ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)]
+ ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
 
  """
 
@@ -291,6 +291,9 @@ def tool_call_chunk(
  args: The arguments to the tool call.
  id: An identifier associated with the tool call.
  index: The index of the tool call in a sequence.
+
+ Returns:
+ The created tool call chunk.
  """
  return ToolCallChunk(
  name=name, args=args, id=id, index=index, type="tool_call_chunk"
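
The tool_call_chunk factory above pairs with the chunk-merging example from the ToolCallChunk docstring; a minimal sketch assuming langchain-core is installed:

    from langchain_core.messages import AIMessageChunk
    from langchain_core.messages.tool import tool_call_chunk

    left = AIMessageChunk(
        content="", tool_call_chunks=[tool_call_chunk(name="foo", args='{"a":', index=0)]
    )
    right = AIMessageChunk(
        content="", tool_call_chunks=[tool_call_chunk(name=None, args="1}", index=0)]
    )

    merged = left + right
    # Chunks sharing an index merge field by field, so args should become '{"a":1}'.
    print(merged.tool_call_chunks)
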
@@ -311,6 +314,9 @@ def invalid_tool_call(
  args: The arguments to the tool call.
  id: An identifier associated with the tool call.
  error: An error message associated with the tool call.
+
+ Returns:
+ The created invalid tool call.
  """
  return InvalidToolCall(
  name=name, args=args, id=id, error=error, type="invalid_tool_call"
@@ -320,7 +326,14 @@ def invalid_tool_call(
  def default_tool_parser(
  raw_tool_calls: list[dict],
  ) -> tuple[list[ToolCall], list[InvalidToolCall]]:
- """Best-effort parsing of tools."""
+ """Best-effort parsing of tools.
+
+ Args:
+ raw_tool_calls: List of raw tool call dicts to parse.
+
+ Returns:
+ A list of tool calls and invalid tool calls.
+ """
  tool_calls = []
  invalid_tool_calls = []
  for raw_tool_call in raw_tool_calls:
@@ -348,7 +361,14 @@
 
 
  def default_tool_chunk_parser(raw_tool_calls: list[dict]) -> list[ToolCallChunk]:
- """Best-effort parsing of tool chunks."""
+ """Best-effort parsing of tool chunks.
+
+ Args:
+ raw_tool_calls: List of raw tool call dicts to parse.
+
+ Returns:
+ List of parsed ToolCallChunk objects.
+ """
  tool_call_chunks = []
  for tool_call in raw_tool_calls:
  if "function" not in tool_call: