langchain-core 1.0.0a2__py3-none-any.whl → 1.0.0a3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic. Click here for more details.

Files changed (130) hide show
  1. langchain_core/_api/beta_decorator.py +17 -40
  2. langchain_core/_api/deprecation.py +20 -7
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/callbacks/base.py +28 -15
  7. langchain_core/callbacks/manager.py +81 -69
  8. langchain_core/callbacks/usage.py +4 -2
  9. langchain_core/chat_history.py +29 -21
  10. langchain_core/document_loaders/base.py +34 -9
  11. langchain_core/document_loaders/langsmith.py +3 -0
  12. langchain_core/documents/base.py +35 -10
  13. langchain_core/documents/transformers.py +4 -2
  14. langchain_core/embeddings/fake.py +8 -5
  15. langchain_core/env.py +2 -3
  16. langchain_core/example_selectors/base.py +12 -0
  17. langchain_core/exceptions.py +7 -0
  18. langchain_core/globals.py +17 -28
  19. langchain_core/indexing/api.py +57 -45
  20. langchain_core/indexing/base.py +5 -8
  21. langchain_core/indexing/in_memory.py +23 -3
  22. langchain_core/language_models/__init__.py +6 -2
  23. langchain_core/language_models/_utils.py +27 -5
  24. langchain_core/language_models/base.py +33 -21
  25. langchain_core/language_models/chat_models.py +99 -27
  26. langchain_core/language_models/fake_chat_models.py +5 -7
  27. langchain_core/language_models/llms.py +54 -20
  28. langchain_core/load/dump.py +2 -3
  29. langchain_core/load/load.py +15 -1
  30. langchain_core/load/serializable.py +38 -43
  31. langchain_core/memory.py +7 -3
  32. langchain_core/messages/__init__.py +1 -1
  33. langchain_core/messages/ai.py +41 -34
  34. langchain_core/messages/base.py +16 -7
  35. langchain_core/messages/block_translators/__init__.py +10 -8
  36. langchain_core/messages/block_translators/anthropic.py +3 -1
  37. langchain_core/messages/block_translators/bedrock.py +3 -1
  38. langchain_core/messages/block_translators/bedrock_converse.py +3 -1
  39. langchain_core/messages/block_translators/google_genai.py +3 -1
  40. langchain_core/messages/block_translators/google_vertexai.py +3 -1
  41. langchain_core/messages/block_translators/groq.py +3 -1
  42. langchain_core/messages/block_translators/ollama.py +3 -1
  43. langchain_core/messages/block_translators/openai.py +50 -20
  44. langchain_core/messages/content.py +23 -13
  45. langchain_core/messages/human.py +2 -13
  46. langchain_core/messages/system.py +2 -6
  47. langchain_core/messages/tool.py +34 -14
  48. langchain_core/messages/utils.py +186 -73
  49. langchain_core/output_parsers/base.py +5 -2
  50. langchain_core/output_parsers/json.py +4 -4
  51. langchain_core/output_parsers/list.py +7 -22
  52. langchain_core/output_parsers/openai_functions.py +3 -0
  53. langchain_core/output_parsers/openai_tools.py +6 -1
  54. langchain_core/output_parsers/pydantic.py +4 -0
  55. langchain_core/output_parsers/string.py +5 -1
  56. langchain_core/output_parsers/xml.py +19 -19
  57. langchain_core/outputs/chat_generation.py +18 -7
  58. langchain_core/outputs/generation.py +14 -3
  59. langchain_core/outputs/llm_result.py +8 -1
  60. langchain_core/prompt_values.py +10 -4
  61. langchain_core/prompts/base.py +6 -11
  62. langchain_core/prompts/chat.py +88 -60
  63. langchain_core/prompts/dict.py +16 -8
  64. langchain_core/prompts/few_shot.py +9 -11
  65. langchain_core/prompts/few_shot_with_templates.py +5 -1
  66. langchain_core/prompts/image.py +12 -5
  67. langchain_core/prompts/loading.py +2 -2
  68. langchain_core/prompts/message.py +5 -6
  69. langchain_core/prompts/pipeline.py +13 -8
  70. langchain_core/prompts/prompt.py +22 -8
  71. langchain_core/prompts/string.py +18 -10
  72. langchain_core/prompts/structured.py +7 -2
  73. langchain_core/rate_limiters.py +2 -2
  74. langchain_core/retrievers.py +7 -6
  75. langchain_core/runnables/base.py +387 -246
  76. langchain_core/runnables/branch.py +11 -28
  77. langchain_core/runnables/config.py +20 -17
  78. langchain_core/runnables/configurable.py +34 -19
  79. langchain_core/runnables/fallbacks.py +20 -13
  80. langchain_core/runnables/graph.py +48 -38
  81. langchain_core/runnables/graph_ascii.py +40 -17
  82. langchain_core/runnables/graph_mermaid.py +54 -25
  83. langchain_core/runnables/graph_png.py +27 -31
  84. langchain_core/runnables/history.py +55 -58
  85. langchain_core/runnables/passthrough.py +44 -21
  86. langchain_core/runnables/retry.py +44 -23
  87. langchain_core/runnables/router.py +9 -8
  88. langchain_core/runnables/schema.py +9 -0
  89. langchain_core/runnables/utils.py +53 -90
  90. langchain_core/stores.py +19 -31
  91. langchain_core/sys_info.py +9 -8
  92. langchain_core/tools/base.py +36 -27
  93. langchain_core/tools/convert.py +25 -14
  94. langchain_core/tools/simple.py +36 -8
  95. langchain_core/tools/structured.py +25 -12
  96. langchain_core/tracers/base.py +2 -2
  97. langchain_core/tracers/context.py +5 -1
  98. langchain_core/tracers/core.py +110 -46
  99. langchain_core/tracers/evaluation.py +22 -26
  100. langchain_core/tracers/event_stream.py +97 -42
  101. langchain_core/tracers/langchain.py +12 -3
  102. langchain_core/tracers/langchain_v1.py +10 -2
  103. langchain_core/tracers/log_stream.py +56 -17
  104. langchain_core/tracers/root_listeners.py +4 -20
  105. langchain_core/tracers/run_collector.py +6 -16
  106. langchain_core/tracers/schemas.py +5 -1
  107. langchain_core/utils/aiter.py +14 -6
  108. langchain_core/utils/env.py +3 -0
  109. langchain_core/utils/function_calling.py +46 -20
  110. langchain_core/utils/interactive_env.py +6 -2
  111. langchain_core/utils/iter.py +12 -5
  112. langchain_core/utils/json.py +12 -3
  113. langchain_core/utils/json_schema.py +156 -40
  114. langchain_core/utils/loading.py +5 -1
  115. langchain_core/utils/mustache.py +25 -16
  116. langchain_core/utils/pydantic.py +38 -9
  117. langchain_core/utils/utils.py +25 -9
  118. langchain_core/vectorstores/base.py +7 -20
  119. langchain_core/vectorstores/in_memory.py +20 -14
  120. langchain_core/vectorstores/utils.py +18 -12
  121. langchain_core/version.py +1 -1
  122. langchain_core-1.0.0a3.dist-info/METADATA +77 -0
  123. langchain_core-1.0.0a3.dist-info/RECORD +181 -0
  124. langchain_core/beta/__init__.py +0 -1
  125. langchain_core/beta/runnables/__init__.py +0 -1
  126. langchain_core/beta/runnables/context.py +0 -448
  127. langchain_core-1.0.0a2.dist-info/METADATA +0 -106
  128. langchain_core-1.0.0a2.dist-info/RECORD +0 -184
  129. {langchain_core-1.0.0a2.dist-info → langchain_core-1.0.0a3.dist-info}/WHEEL +0 -0
  130. {langchain_core-1.0.0a2.dist-info → langchain_core-1.0.0a3.dist-info}/entry_points.txt +0 -0
@@ -53,26 +53,28 @@ def _register_translators() -> None:
53
53
  For translators implemented outside langchain-core, they can be registered by
54
54
  calling ``register_translator`` from within the integration package.
55
55
  """
56
- from langchain_core.messages.block_translators.anthropic import (
56
+ from langchain_core.messages.block_translators.anthropic import ( # noqa: PLC0415
57
57
  _register_anthropic_translator,
58
58
  )
59
- from langchain_core.messages.block_translators.bedrock import (
59
+ from langchain_core.messages.block_translators.bedrock import ( # noqa: PLC0415
60
60
  _register_bedrock_translator,
61
61
  )
62
- from langchain_core.messages.block_translators.bedrock_converse import (
62
+ from langchain_core.messages.block_translators.bedrock_converse import ( # noqa: PLC0415
63
63
  _register_bedrock_converse_translator,
64
64
  )
65
- from langchain_core.messages.block_translators.google_genai import (
65
+ from langchain_core.messages.block_translators.google_genai import ( # noqa: PLC0415
66
66
  _register_google_genai_translator,
67
67
  )
68
- from langchain_core.messages.block_translators.google_vertexai import (
68
+ from langchain_core.messages.block_translators.google_vertexai import ( # noqa: PLC0415
69
69
  _register_google_vertexai_translator,
70
70
  )
71
- from langchain_core.messages.block_translators.groq import _register_groq_translator
72
- from langchain_core.messages.block_translators.ollama import (
71
+ from langchain_core.messages.block_translators.groq import ( # noqa: PLC0415
72
+ _register_groq_translator,
73
+ )
74
+ from langchain_core.messages.block_translators.ollama import ( # noqa: PLC0415
73
75
  _register_ollama_translator,
74
76
  )
75
- from langchain_core.messages.block_translators.openai import (
77
+ from langchain_core.messages.block_translators.openai import ( # noqa: PLC0415
76
78
  _register_openai_translator,
77
79
  )
78
80
 
@@ -443,7 +443,9 @@ def _register_anthropic_translator() -> None:
443
443
 
444
444
  Run automatically when the module is imported.
445
445
  """
446
- from langchain_core.messages.block_translators import register_translator
446
+ from langchain_core.messages.block_translators import ( # noqa: PLC0415
447
+ register_translator,
448
+ )
447
449
 
448
450
  register_translator("anthropic", translate_content, translate_content_chunk)
449
451
 
@@ -37,7 +37,9 @@ def _register_bedrock_translator() -> None:
37
37
 
38
38
  Run automatically when the module is imported.
39
39
  """
40
- from langchain_core.messages.block_translators import register_translator
40
+ from langchain_core.messages.block_translators import ( # noqa: PLC0415
41
+ register_translator,
42
+ )
41
43
 
42
44
  register_translator("bedrock", translate_content, translate_content_chunk)
43
45
 
@@ -39,7 +39,9 @@ def _register_bedrock_converse_translator() -> None:
39
39
 
40
40
  Run automatically when the module is imported.
41
41
  """
42
- from langchain_core.messages.block_translators import register_translator
42
+ from langchain_core.messages.block_translators import ( # noqa: PLC0415
43
+ register_translator,
44
+ )
43
45
 
44
46
  register_translator("bedrock_converse", translate_content, translate_content_chunk)
45
47
 
@@ -37,7 +37,9 @@ def _register_google_genai_translator() -> None:
37
37
 
38
38
  Run automatically when the module is imported.
39
39
  """
40
- from langchain_core.messages.block_translators import register_translator
40
+ from langchain_core.messages.block_translators import ( # noqa: PLC0415
41
+ register_translator,
42
+ )
41
43
 
42
44
  register_translator("google_genai", translate_content, translate_content_chunk)
43
45
 
@@ -39,7 +39,9 @@ def _register_google_vertexai_translator() -> None:
39
39
 
40
40
  Run automatically when the module is imported.
41
41
  """
42
- from langchain_core.messages.block_translators import register_translator
42
+ from langchain_core.messages.block_translators import ( # noqa: PLC0415
43
+ register_translator,
44
+ )
43
45
 
44
46
  register_translator("google_vertexai", translate_content, translate_content_chunk)
45
47
 
@@ -37,7 +37,9 @@ def _register_groq_translator() -> None:
37
37
 
38
38
  Run automatically when the module is imported.
39
39
  """
40
- from langchain_core.messages.block_translators import register_translator
40
+ from langchain_core.messages.block_translators import ( # noqa: PLC0415
41
+ register_translator,
42
+ )
41
43
 
42
44
  register_translator("groq", translate_content, translate_content_chunk)
43
45
 
@@ -37,7 +37,9 @@ def _register_ollama_translator() -> None:
37
37
 
38
38
  Run automatically when the module is imported.
39
39
  """
40
- from langchain_core.messages.block_translators import register_translator
40
+ from langchain_core.messages.block_translators import ( # noqa: PLC0415
41
+ register_translator,
42
+ )
41
43
 
42
44
  register_translator("ollama", translate_content, translate_content_chunk)
43
45
 
@@ -5,11 +5,11 @@ from __future__ import annotations
5
5
  import json
6
6
  import warnings
7
7
  from collections.abc import Iterable
8
- from typing import TYPE_CHECKING, Any, Optional, Union, cast
8
+ from typing import TYPE_CHECKING, Any, Literal, Optional, Union, cast
9
9
 
10
10
  from langchain_core.language_models._utils import (
11
- _is_openai_data_block,
12
11
  _parse_data_uri,
12
+ is_openai_data_block,
13
13
  )
14
14
  from langchain_core.messages import content as types
15
15
 
@@ -42,15 +42,32 @@ def convert_to_openai_image_block(block: dict[str, Any]) -> dict:
42
42
  raise ValueError(error_message)
43
43
 
44
44
 
45
- def convert_to_openai_data_block(block: dict) -> dict:
46
- """Format standard data content block to format expected by OpenAI."""
45
+ def convert_to_openai_data_block(
46
+ block: dict, api: Literal["chat/completions", "responses"] = "chat/completions"
47
+ ) -> dict:
48
+ """Format standard data content block to format expected by OpenAI.
49
+
50
+ "Standard data content block" can include old-style LangChain v0 blocks
51
+ (URLContentBlock, Base64ContentBlock, IDContentBlock) or new ones.
52
+ """
47
53
  if block["type"] == "image":
48
- formatted_block = convert_to_openai_image_block(block)
54
+ chat_completions_block = convert_to_openai_image_block(block)
55
+ if api == "responses":
56
+ formatted_block = {
57
+ "type": "input_image",
58
+ "image_url": chat_completions_block["image_url"]["url"],
59
+ }
60
+ if chat_completions_block["image_url"].get("detail"):
61
+ formatted_block["detail"] = chat_completions_block["image_url"][
62
+ "detail"
63
+ ]
64
+ else:
65
+ formatted_block = chat_completions_block
49
66
 
50
67
  elif block["type"] == "file":
51
- if "base64" in block or block.get("source_type") == "base64":
52
- # Handle v0 format: {"source_type": "base64", "data": "...", ...}
53
- # Handle v1 format: {"base64": "...", ...}
68
+ if block.get("source_type") == "base64" or "base64" in block:
69
+ # Handle v0 format (Base64CB): {"source_type": "base64", "data": "...", ...}
70
+ # Handle v1 format (Base64CB): {"base64": "...", ...}
54
71
  base64_data = block["data"] if "source_type" in block else block["base64"]
55
72
  file = {"file_data": f"data:{block['mime_type']};base64,{base64_data}"}
56
73
  if filename := block.get("filename"):
@@ -61,20 +78,31 @@ def convert_to_openai_data_block(block: dict) -> dict:
61
78
  # Backward compat
62
79
  file["filename"] = extras["filename"]
63
80
  else:
81
+ # Can't infer filename
64
82
  warnings.warn(
65
- "OpenAI may require a filename for file inputs. Specify a filename "
66
- "in the content block: {'type': 'file', 'mime_type': "
67
- "'application/pdf', 'base64': '...', 'filename': 'my-pdf'}",
83
+ "OpenAI may require a filename for file uploads. Specify a filename"
84
+ " in the content block, e.g.: {'type': 'file', 'mime_type': "
85
+ "'...', 'base64': '...', 'filename': 'my-file.pdf'}",
68
86
  stacklevel=1,
69
87
  )
70
88
  formatted_block = {"type": "file", "file": file}
71
- elif "file_id" in block or block.get("source_type") == "id":
72
- # Handle v0 format: {"source_type": "id", "id": "...", ...}
73
- # Handle v1 format: {"file_id": "...", ...}
89
+ if api == "responses":
90
+ formatted_block = {"type": "input_file", **formatted_block["file"]}
91
+ elif block.get("source_type") == "id" or "file_id" in block:
92
+ # Handle v0 format (IDContentBlock): {"source_type": "id", "id": "...", ...}
93
+ # Handle v1 format (IDCB): {"file_id": "...", ...}
74
94
  file_id = block["id"] if "source_type" in block else block["file_id"]
75
95
  formatted_block = {"type": "file", "file": {"file_id": file_id}}
96
+ if api == "responses":
97
+ formatted_block = {"type": "input_file", **formatted_block["file"]}
98
+ elif "url" in block: # Intentionally do not check for source_type="url"
99
+ if api == "chat/completions":
100
+ error_msg = "OpenAI Chat Completions does not support file URLs."
101
+ raise ValueError(error_msg)
102
+ # Only supported by Responses API; return in that format
103
+ formatted_block = {"type": "input_file", "file_url": block["url"]}
76
104
  else:
77
- error_msg = "Keys base64 or file_id required for file blocks."
105
+ error_msg = "Keys base64, url, or file_id required for file blocks."
78
106
  raise ValueError(error_msg)
79
107
 
80
108
  elif block["type"] == "audio":
@@ -136,7 +164,7 @@ def _convert_to_v1_from_chat_completions_input(
136
164
  Returns:
137
165
  Updated list with OpenAI blocks converted to v1 format.
138
166
  """
139
- from langchain_core.messages import content as types
167
+ from langchain_core.messages import content as types # noqa: PLC0415
140
168
 
141
169
  converted_blocks = []
142
170
  unpacked_blocks: list[dict[str, Any]] = [
@@ -150,7 +178,7 @@ def _convert_to_v1_from_chat_completions_input(
150
178
  "image_url",
151
179
  "input_audio",
152
180
  "file",
153
- } and _is_openai_data_block(block):
181
+ } and is_openai_data_block(block):
154
182
  converted_block = _convert_openai_format_to_data_block(block)
155
183
  # If conversion succeeded, use it; otherwise keep as non_standard
156
184
  if (
@@ -232,7 +260,7 @@ _FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"
232
260
 
233
261
  def _convert_from_v03_ai_message(message: AIMessage) -> AIMessage:
234
262
  """Convert v0 AIMessage into ``output_version="responses/v1"`` format."""
235
- from langchain_core.messages import AIMessageChunk
263
+ from langchain_core.messages import AIMessageChunk # noqa: PLC0415
236
264
 
237
265
  # Only update ChatOpenAI v0.3 AIMessages
238
266
  is_chatopenai_v03 = (
@@ -650,7 +678,7 @@ def _convert_to_v1_from_responses(message: AIMessage) -> list[types.ContentBlock
650
678
  ] = None
651
679
  call_id = block.get("call_id", "")
652
680
 
653
- from langchain_core.messages import AIMessageChunk
681
+ from langchain_core.messages import AIMessageChunk # noqa: PLC0415
654
682
 
655
683
  if (
656
684
  isinstance(message, AIMessageChunk)
@@ -790,7 +818,9 @@ def _register_openai_translator() -> None:
790
818
 
791
819
  Run automatically when the module is imported.
792
820
  """
793
- from langchain_core.messages.block_translators import register_translator
821
+ from langchain_core.messages.block_translators import ( # noqa: PLC0415
822
+ register_translator,
823
+ )
794
824
 
795
825
  register_translator("openai", translate_content, translate_content_chunk)
796
826
 
@@ -99,8 +99,8 @@ The module defines several types of content blocks, including:
99
99
  # Direct construction:
100
100
  from langchain_core.messages.content import TextContentBlock, ImageContentBlock
101
101
 
102
- multimodal_message: AIMessage(content_blocks=
103
- [
102
+ multimodal_message: AIMessage(
103
+ content_blocks=[
104
104
  TextContentBlock(type="text", text="What is shown in this image?"),
105
105
  ImageContentBlock(
106
106
  type="image",
@@ -113,8 +113,8 @@ The module defines several types of content blocks, including:
113
113
  # Using factories:
114
114
  from langchain_core.messages.content import create_text_block, create_image_block
115
115
 
116
- multimodal_message: AIMessage(content=
117
- [
116
+ multimodal_message: AIMessage(
117
+ content=[
118
118
  create_text_block("What is shown in this image?"),
119
119
  create_image_block(
120
120
  url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png",
@@ -261,11 +261,7 @@ class ToolCall(TypedDict):
261
261
 
262
262
  .. code-block:: python
263
263
 
264
- {
265
- "name": "foo",
266
- "args": {"a": 1},
267
- "id": "123"
268
- }
264
+ {"name": "foo", "args": {"a": 1}, "id": "123"}
269
265
 
270
266
  This represents a request to call the tool named "foo" with arguments {"a": 1}
271
267
  and an identifier of "123".
@@ -316,12 +312,12 @@ class ToolCallChunk(TypedDict):
316
312
  .. code-block:: python
317
313
 
318
314
  left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
319
- right_chunks = [ToolCallChunk(name=None, args='1}', index=0)]
315
+ right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
320
316
 
321
317
  (
322
318
  AIMessageChunk(content="", tool_call_chunks=left_chunks)
323
319
  + AIMessageChunk(content="", tool_call_chunks=right_chunks)
324
- ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)]
320
+ ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
325
321
 
326
322
  """
327
323
 
@@ -919,7 +915,15 @@ KNOWN_BLOCK_TYPES = {
919
915
 
920
916
 
921
917
  def _get_data_content_block_types() -> tuple[str, ...]:
922
- """Get type literals from DataContentBlock union members dynamically."""
918
+ """Get type literals from DataContentBlock union members dynamically.
919
+
920
+ Example: ("image", "video", "audio", "text-plain", "file")
921
+
922
+ Note that old-style multimodal block type literals overlap with new-style blocks.
923
+ Specifically, "image", "audio", and "file".
924
+
925
+ See the docstring of `_normalize_messages` in `language_models._utils` for details.
926
+ """
923
927
  data_block_types = []
924
928
 
925
929
  for block_type in get_args(DataContentBlock):
@@ -935,7 +939,9 @@ def _get_data_content_block_types() -> tuple[str, ...]:
935
939
 
936
940
 
937
941
  def is_data_content_block(block: dict) -> bool:
938
- """Check if the provided content block is a standard v1 data content block.
942
+ """Check if the provided content block is a data content block.
943
+
944
+ Returns True for both v0 (old-style) and v1 (new-style) multimodal data blocks.
939
945
 
940
946
  Args:
941
947
  block: The content block to check.
@@ -948,6 +954,8 @@ def is_data_content_block(block: dict) -> bool:
948
954
  return False
949
955
 
950
956
  if any(key in block for key in ("url", "base64", "file_id", "text")):
957
+ # Type is valid and at least one data field is present
958
+ # (Accepts old-style image and audio URLContentBlock)
951
959
  return True
952
960
 
953
961
  # Verify data presence based on source type
@@ -962,6 +970,8 @@ def is_data_content_block(block: dict) -> bool:
962
970
  ):
963
971
  return True
964
972
 
973
+ # Type may be valid, but no data fields are present
974
+ # (required case since each is optional and we have no validation)
965
975
  return False
966
976
 
967
977
 
@@ -18,12 +18,8 @@ class HumanMessage(BaseMessage):
18
18
  from langchain_core.messages import HumanMessage, SystemMessage
19
19
 
20
20
  messages = [
21
- SystemMessage(
22
- content="You are a helpful assistant! Your name is Bob."
23
- ),
24
- HumanMessage(
25
- content="What is your name?"
26
- )
21
+ SystemMessage(content="You are a helpful assistant! Your name is Bob."),
22
+ HumanMessage(content="What is your name?"),
27
23
  ]
28
24
 
29
25
  # Instantiate a chat model and invoke it with the messages
@@ -32,13 +28,6 @@ class HumanMessage(BaseMessage):
32
28
 
33
29
  """
34
30
 
35
- example: bool = False
36
- """Use to denote that a message is part of an example conversation.
37
-
38
- At the moment, this is ignored by most models. Usage is discouraged.
39
- Defaults to False.
40
- """
41
-
42
31
  type: Literal["human"] = "human"
43
32
  """The type of the message (used for serialization). Defaults to "human"."""
44
33
 
@@ -19,12 +19,8 @@ class SystemMessage(BaseMessage):
19
19
  from langchain_core.messages import HumanMessage, SystemMessage
20
20
 
21
21
  messages = [
22
- SystemMessage(
23
- content="You are a helpful assistant! Your name is Bob."
24
- ),
25
- HumanMessage(
26
- content="What is your name?"
27
- )
22
+ SystemMessage(content="You are a helpful assistant! Your name is Bob."),
23
+ HumanMessage(content="What is your name?"),
28
24
  ]
29
25
 
30
26
  # Define a chat model and invoke it with the messages
@@ -9,7 +9,7 @@ from typing_extensions import NotRequired, TypedDict, override
9
9
 
10
10
  from langchain_core.messages import content as types
11
11
  from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content
12
- from langchain_core.messages.content import InvalidToolCall as InvalidToolCall
12
+ from langchain_core.messages.content import InvalidToolCall
13
13
  from langchain_core.utils._merge import merge_dicts, merge_obj
14
14
 
15
15
 
@@ -34,7 +34,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
34
34
 
35
35
  from langchain_core.messages import ToolMessage
36
36
 
37
- ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
37
+ ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
38
38
 
39
39
 
40
40
  Example: A ToolMessage where only part of the tool output is sent to the model
@@ -47,7 +47,8 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
47
47
  from langchain_core.messages import ToolMessage
48
48
 
49
49
  tool_output = {
50
- "stdout": "From the graph we can see that the correlation between x and y is ...",
50
+ "stdout": "From the graph we can see that the correlation between "
51
+ "x and y is ...",
51
52
  "stderr": None,
52
53
  "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
53
54
  }
@@ -55,14 +56,14 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
55
56
  ToolMessage(
56
57
  content=tool_output["stdout"],
57
58
  artifact=tool_output,
58
- tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL',
59
+ tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
59
60
  )
60
61
 
61
62
  The tool_call_id field is used to associate the tool call request with the
62
63
  tool call response. This is useful in situations where a chat model is able
63
64
  to request multiple tool calls in parallel.
64
65
 
65
- """ # noqa: E501
66
+ """
66
67
 
67
68
  tool_call_id: str
68
69
  """Tool call that this message is responding to."""
@@ -205,11 +206,7 @@ class ToolCall(TypedDict):
205
206
 
206
207
  .. code-block:: python
207
208
 
208
- {
209
- "name": "foo",
210
- "args": {"a": 1},
211
- "id": "123"
212
- }
209
+ {"name": "foo", "args": {"a": 1}, "id": "123"}
213
210
 
214
211
  This represents a request to call the tool named "foo" with arguments {"a": 1}
215
212
  and an identifier of "123".
@@ -241,6 +238,9 @@ def tool_call(
241
238
  name: The name of the tool to be called.
242
239
  args: The arguments to the tool call.
243
240
  id: An identifier associated with the tool call.
241
+
242
+ Returns:
243
+ The created tool call.
244
244
  """
245
245
  return ToolCall(name=name, args=args, id=id, type="tool_call")
246
246
 
@@ -257,12 +257,12 @@ class ToolCallChunk(TypedDict):
257
257
  .. code-block:: python
258
258
 
259
259
  left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
260
- right_chunks = [ToolCallChunk(name=None, args='1}', index=0)]
260
+ right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
261
261
 
262
262
  (
263
263
  AIMessageChunk(content="", tool_call_chunks=left_chunks)
264
264
  + AIMessageChunk(content="", tool_call_chunks=right_chunks)
265
- ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)]
265
+ ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
266
266
 
267
267
  """
268
268
 
@@ -291,6 +291,9 @@ def tool_call_chunk(
291
291
  args: The arguments to the tool call.
292
292
  id: An identifier associated with the tool call.
293
293
  index: The index of the tool call in a sequence.
294
+
295
+ Returns:
296
+ The created tool call chunk.
294
297
  """
295
298
  return ToolCallChunk(
296
299
  name=name, args=args, id=id, index=index, type="tool_call_chunk"
@@ -311,6 +314,9 @@ def invalid_tool_call(
311
314
  args: The arguments to the tool call.
312
315
  id: An identifier associated with the tool call.
313
316
  error: An error message associated with the tool call.
317
+
318
+ Returns:
319
+ The created invalid tool call.
314
320
  """
315
321
  return InvalidToolCall(
316
322
  name=name, args=args, id=id, error=error, type="invalid_tool_call"
@@ -320,7 +326,14 @@ def invalid_tool_call(
320
326
  def default_tool_parser(
321
327
  raw_tool_calls: list[dict],
322
328
  ) -> tuple[list[ToolCall], list[InvalidToolCall]]:
323
- """Best-effort parsing of tools."""
329
+ """Best-effort parsing of tools.
330
+
331
+ Args:
332
+ raw_tool_calls: List of raw tool call dicts to parse.
333
+
334
+ Returns:
335
+ A list of tool calls and invalid tool calls.
336
+ """
324
337
  tool_calls = []
325
338
  invalid_tool_calls = []
326
339
  for raw_tool_call in raw_tool_calls:
@@ -348,7 +361,14 @@ def default_tool_parser(
348
361
 
349
362
 
350
363
  def default_tool_chunk_parser(raw_tool_calls: list[dict]) -> list[ToolCallChunk]:
351
- """Best-effort parsing of tool chunks."""
364
+ """Best-effort parsing of tool chunks.
365
+
366
+ Args:
367
+ raw_tool_calls: List of raw tool call dicts to parse.
368
+
369
+ Returns:
370
+ List of parsed ToolCallChunk objects.
371
+ """
352
372
  tool_call_chunks = []
353
373
  for tool_call in raw_tool_calls:
354
374
  if "function" not in tool_call: