langchain-core 1.0.0rc1__py3-none-any.whl → 1.0.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (92)
  1. langchain_core/agents.py +3 -3
  2. langchain_core/caches.py +44 -48
  3. langchain_core/callbacks/base.py +5 -5
  4. langchain_core/callbacks/file.py +2 -2
  5. langchain_core/callbacks/stdout.py +1 -1
  6. langchain_core/chat_history.py +1 -1
  7. langchain_core/document_loaders/base.py +21 -21
  8. langchain_core/document_loaders/langsmith.py +2 -2
  9. langchain_core/documents/base.py +39 -39
  10. langchain_core/embeddings/fake.py +4 -2
  11. langchain_core/example_selectors/semantic_similarity.py +4 -6
  12. langchain_core/exceptions.py +3 -4
  13. langchain_core/indexing/api.py +8 -14
  14. langchain_core/language_models/__init__.py +11 -25
  15. langchain_core/language_models/_utils.py +2 -1
  16. langchain_core/language_models/base.py +7 -0
  17. langchain_core/language_models/chat_models.py +14 -16
  18. langchain_core/language_models/fake_chat_models.py +3 -3
  19. langchain_core/language_models/llms.py +4 -4
  20. langchain_core/load/dump.py +3 -4
  21. langchain_core/load/load.py +0 -9
  22. langchain_core/load/serializable.py +3 -3
  23. langchain_core/messages/ai.py +20 -22
  24. langchain_core/messages/base.py +8 -8
  25. langchain_core/messages/block_translators/__init__.py +1 -1
  26. langchain_core/messages/block_translators/anthropic.py +1 -1
  27. langchain_core/messages/block_translators/bedrock_converse.py +1 -1
  28. langchain_core/messages/block_translators/google_genai.py +3 -2
  29. langchain_core/messages/block_translators/google_vertexai.py +4 -32
  30. langchain_core/messages/block_translators/langchain_v0.py +1 -1
  31. langchain_core/messages/block_translators/openai.py +1 -1
  32. langchain_core/messages/chat.py +2 -6
  33. langchain_core/messages/content.py +34 -17
  34. langchain_core/messages/function.py +3 -7
  35. langchain_core/messages/human.py +4 -9
  36. langchain_core/messages/modifier.py +1 -1
  37. langchain_core/messages/system.py +2 -10
  38. langchain_core/messages/tool.py +30 -42
  39. langchain_core/messages/utils.py +24 -30
  40. langchain_core/output_parsers/base.py +24 -24
  41. langchain_core/output_parsers/json.py +0 -1
  42. langchain_core/output_parsers/list.py +1 -1
  43. langchain_core/output_parsers/openai_functions.py +2 -2
  44. langchain_core/output_parsers/openai_tools.py +4 -9
  45. langchain_core/output_parsers/string.py +1 -1
  46. langchain_core/outputs/generation.py +1 -1
  47. langchain_core/prompt_values.py +7 -7
  48. langchain_core/prompts/base.py +1 -1
  49. langchain_core/prompts/chat.py +12 -13
  50. langchain_core/prompts/dict.py +2 -2
  51. langchain_core/prompts/few_shot_with_templates.py +1 -1
  52. langchain_core/prompts/image.py +1 -1
  53. langchain_core/prompts/message.py +2 -2
  54. langchain_core/prompts/prompt.py +7 -8
  55. langchain_core/prompts/string.py +1 -1
  56. langchain_core/prompts/structured.py +2 -2
  57. langchain_core/rate_limiters.py +23 -29
  58. langchain_core/retrievers.py +29 -29
  59. langchain_core/runnables/base.py +9 -16
  60. langchain_core/runnables/branch.py +1 -1
  61. langchain_core/runnables/config.py +1 -1
  62. langchain_core/runnables/configurable.py +2 -2
  63. langchain_core/runnables/fallbacks.py +1 -1
  64. langchain_core/runnables/graph.py +23 -28
  65. langchain_core/runnables/graph_mermaid.py +9 -9
  66. langchain_core/runnables/graph_png.py +1 -1
  67. langchain_core/runnables/history.py +2 -2
  68. langchain_core/runnables/passthrough.py +3 -3
  69. langchain_core/runnables/router.py +1 -1
  70. langchain_core/runnables/utils.py +5 -5
  71. langchain_core/tools/base.py +9 -10
  72. langchain_core/tools/convert.py +13 -17
  73. langchain_core/tools/retriever.py +6 -6
  74. langchain_core/tools/simple.py +1 -1
  75. langchain_core/tools/structured.py +5 -10
  76. langchain_core/tracers/memory_stream.py +1 -1
  77. langchain_core/tracers/root_listeners.py +2 -2
  78. langchain_core/tracers/stdout.py +1 -2
  79. langchain_core/utils/__init__.py +1 -1
  80. langchain_core/utils/aiter.py +1 -1
  81. langchain_core/utils/function_calling.py +15 -38
  82. langchain_core/utils/input.py +1 -1
  83. langchain_core/utils/iter.py +1 -1
  84. langchain_core/utils/json.py +1 -1
  85. langchain_core/utils/strings.py +1 -1
  86. langchain_core/vectorstores/base.py +14 -25
  87. langchain_core/vectorstores/utils.py +2 -2
  88. langchain_core/version.py +1 -1
  89. {langchain_core-1.0.0rc1.dist-info → langchain_core-1.0.0rc2.dist-info}/METADATA +1 -1
  90. langchain_core-1.0.0rc2.dist-info/RECORD +172 -0
  91. langchain_core-1.0.0rc1.dist-info/RECORD +0 -172
  92. {langchain_core-1.0.0rc1.dist-info → langchain_core-1.0.0rc2.dist-info}/WHEEL +0 -0
@@ -105,7 +105,7 @@ def _convert_to_v1_from_genai_input(
  Called when message isn't an `AIMessage` or `model_provider` isn't set on
  `response_metadata`.

- During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
+ During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
  block as a `'non_standard'` block with the original block stored in the `value`
  field. This function attempts to unpack those blocks and convert any blocks that
  might be GenAI format to v1 ContentBlocks.
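A minimal sketch of the wrapping this docstring describes (the GenAI payload below is illustrative, not taken from the diff):

```python
# An input block that isn't recognized as a v1 content block gets wrapped as a
# `non_standard` block, with the original payload kept under `value`:
raw_genai_block = {"executable_code": {"language": "PYTHON", "code": "print(2 + 2)"}}

wrapped = {"type": "non_standard", "value": raw_genai_block}

# Converters like `_convert_to_v1_from_genai_input` then try to unpack
# `wrapped["value"]` back into a typed v1 content block.
```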
@@ -453,9 +453,10 @@ def _convert_to_v1_from_genai(message: AIMessage) -> list[types.ContentBlock]:
  "status": status, # type: ignore[typeddict-item]
  "output": item.get("code_execution_result", ""),
  }
+ server_tool_result_block["extras"] = {"block_type": item_type}
  # Preserve original outcome in extras
  if outcome is not None:
- server_tool_result_block["extras"] = {"outcome": outcome}
+ server_tool_result_block["extras"]["outcome"] = outcome
  converted_blocks.append(server_tool_result_block)
  else:
  # Unknown type, preserve as non-standard
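The net effect of this hunk, sketched on a hand-written block (field values, and any field other than `extras`, are illustrative):

```python
# Before the change, `extras` was created only when `outcome` was present and
# replaced anything already stored there. After the change, the original
# `block_type` is always recorded and `outcome` is merged into the same dict.
server_tool_result_block = {
    "type": "server_tool_result",  # assumed discriminator for ServerToolResult
    "status": "success",
    "output": "4\n",  # e.g. taken from item["code_execution_result"]
    "extras": {
        "block_type": "code_execution_result",  # item_type, now always set
        "outcome": "OUTCOME_OK",  # only added when the GenAI outcome is present
    },
}
```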
@@ -1,37 +1,9 @@
  """Derivations of standard content blocks from Google (VertexAI) content."""

- import warnings
-
- from langchain_core.messages import AIMessage, AIMessageChunk
- from langchain_core.messages import content as types
-
- WARNED = False
-
-
- def translate_content(message: AIMessage) -> list[types.ContentBlock]: # noqa: ARG001
- """Derive standard content blocks from a message with Google (VertexAI) content."""
- global WARNED # noqa: PLW0603
- if not WARNED:
- warning_message = (
- "Content block standardization is not yet fully supported for Google "
- "VertexAI."
- )
- warnings.warn(warning_message, stacklevel=2)
- WARNED = True
- raise NotImplementedError
-
-
- def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]: # noqa: ARG001
- """Derive standard content blocks from a chunk with Google (VertexAI) content."""
- global WARNED # noqa: PLW0603
- if not WARNED:
- warning_message = (
- "Content block standardization is not yet fully supported for Google "
- "VertexAI."
- )
- warnings.warn(warning_message, stacklevel=2)
- WARNED = True
- raise NotImplementedError
+ from langchain_core.messages.block_translators.google_genai import (
+ translate_content,
+ translate_content_chunk,
+ )


  def _register_google_vertexai_translator() -> None:
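In effect, the VertexAI translator module now defers to the GenAI implementations; a quick check (assuming both modules import cleanly):

```python
from langchain_core.messages.block_translators import google_genai, google_vertexai

# After this change the VertexAI module re-exports the GenAI translators,
# so both names resolve to the same function objects.
assert google_vertexai.translate_content is google_genai.translate_content
assert google_vertexai.translate_content_chunk is google_genai.translate_content_chunk
```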
@@ -10,7 +10,7 @@ def _convert_v0_multimodal_input_to_v1(
  ) -> list[types.ContentBlock]:
  """Convert v0 multimodal blocks to v1 format.

- During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
+ During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
  block as a `'non_standard'` block with the original block stored in the `value`
  field. This function attempts to unpack those blocks and convert any v0 format
  blocks to v1 format.
@@ -155,7 +155,7 @@ def _convert_to_v1_from_chat_completions_input(
  ) -> list[types.ContentBlock]:
  """Convert OpenAI Chat Completions format blocks to v1 format.

- During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
+ During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
  block as a `'non_standard'` block with the original block stored in the `value`
  field. This function attempts to unpack those blocks and convert any blocks that
  might be OpenAI format to v1 ContentBlocks.
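As a concrete, purely illustrative pass through this converter: an OpenAI Chat Completions image block arrives wrapped as non-standard and is unpacked into a v1 image block (the exact v1 field names below are an assumption):

```python
# What the parser hands to this converter: the original Chat Completions block
# preserved under `value` of a `non_standard` wrapper.
wrapped = {
    "type": "non_standard",
    "value": {
        "type": "image_url",
        "image_url": {"url": "https://example.com/cat.png"},
    },
}

# Roughly what the converter is expected to emit for it (assumed v1 shape):
# {"type": "image", "url": "https://example.com/cat.png"}
```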
@@ -19,7 +19,7 @@ class ChatMessage(BaseMessage):
  """The speaker / role of the Message."""

  type: Literal["chat"] = "chat"
- """The type of the message (used during serialization). Defaults to "chat"."""
+ """The type of the message (used during serialization)."""


  class ChatMessageChunk(ChatMessage, BaseMessageChunk):
@@ -29,11 +29,7 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
  # to make sure that the chunk variant can be discriminated from the
  # non-chunk variant.
  type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore[assignment]
- """The type of the message (used during serialization).
-
- Defaults to `'ChatMessageChunk'`.
-
- """
+ """The type of the message (used during serialization)."""

  @override
  def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]
@@ -143,7 +143,7 @@ class Citation(TypedDict):
  not the source text. This means that the indices are relative to the model's
  response, not the original document (as specified in the `url`).

- !!! note
+ !!! note "Factory function"
  `create_citation` may also be used as a factory to create a `Citation`.
  Benefits include:

@@ -156,7 +156,9 @@ class Citation(TypedDict):
  """Type of the content block. Used for discrimination."""

  id: NotRequired[str]
- """Content block identifier. Either:
+ """Content block identifier.
+
+ Either:

  - Generated by the provider (e.g., OpenAI's file ID)
  - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
@@ -201,6 +203,7 @@ class NonStandardAnnotation(TypedDict):
  """Content block identifier.

  Either:
+
  - Generated by the provider (e.g., OpenAI's file ID)
  - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -211,6 +214,7 @@ class NonStandardAnnotation(TypedDict):


  Annotation = Citation | NonStandardAnnotation
+ """A union of all defined `Annotation` types."""


  class TextContentBlock(TypedDict):
@@ -219,7 +223,7 @@ class TextContentBlock(TypedDict):
  This typically represents the main text content of a message, such as the response
  from a language model or the text of a user message.

- !!! note
+ !!! note "Factory function"
  `create_text_block` may also be used as a factory to create a
  `TextContentBlock`. Benefits include:

@@ -235,6 +239,7 @@ class TextContentBlock(TypedDict):
  """Content block identifier.

  Either:
+
  - Generated by the provider (e.g., OpenAI's file ID)
  - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -254,7 +259,7 @@


  class ToolCall(TypedDict):
- """Represents a request to call a tool.
+ """Represents an AI's request to call a tool.

  Example:
  ```python
@@ -264,7 +269,7 @@ class ToolCall(TypedDict):
  This represents a request to call the tool named "foo" with arguments {"a": 1}
  and an identifier of "123".

- !!! note
+ !!! note "Factory function"
  `create_tool_call` may also be used as a factory to create a
  `ToolCall`. Benefits include:

@@ -299,7 +304,7 @@


  class ToolCallChunk(TypedDict):
- """A chunk of a tool call (e.g., as part of a stream).
+ """A chunk of a tool call (yielded when streaming).

  When merging `ToolCallChunks` (e.g., via `AIMessageChunk.__add__`),
  all string attributes are concatenated. Chunks are only merged if their
@@ -381,7 +386,10 @@ class InvalidToolCall(TypedDict):


  class ServerToolCall(TypedDict):
- """Tool call that is executed server-side."""
+ """Tool call that is executed server-side.
+
+ For example: code execution, web search, etc.
+ """

  type: Literal["server_tool_call"]
  """Used for discrimination."""
@@ -403,7 +411,7 @@ class ServerToolCall(TypedDict):


  class ServerToolCallChunk(TypedDict):
- """A chunk of a tool call (as part of a stream)."""
+ """A chunk of a server-side tool call (yielded when streaming)."""

  type: Literal["server_tool_call_chunk"]
  """Used for discrimination."""
@@ -452,7 +460,7 @@ class ServerToolResult(TypedDict):
  class ReasoningContentBlock(TypedDict):
  """Reasoning output from a LLM.

- !!! note
+ !!! note "Factory function"
  `create_reasoning_block` may also be used as a factory to create a
  `ReasoningContentBlock`. Benefits include:

@@ -468,6 +476,7 @@ class ReasoningContentBlock(TypedDict):
  """Content block identifier.

  Either:
+
  - Generated by the provider (e.g., OpenAI's file ID)
  - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -494,7 +503,7 @@ class ReasoningContentBlock(TypedDict):
  class ImageContentBlock(TypedDict):
  """Image data.

- !!! note
+ !!! note "Factory function"
  `create_image_block` may also be used as a factory to create a
  `ImageContentBlock`. Benefits include:

@@ -510,6 +519,7 @@ class ImageContentBlock(TypedDict):
  """Content block identifier.

  Either:
+
  - Generated by the provider (e.g., OpenAI's file ID)
  - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -541,7 +551,7 @@ class ImageContentBlock(TypedDict):
  class VideoContentBlock(TypedDict):
  """Video data.

- !!! note
+ !!! note "Factory function"
  `create_video_block` may also be used as a factory to create a
  `VideoContentBlock`. Benefits include:

@@ -557,6 +567,7 @@ class VideoContentBlock(TypedDict):
  """Content block identifier.

  Either:
+
  - Generated by the provider (e.g., OpenAI's file ID)
  - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -588,7 +599,7 @@ class VideoContentBlock(TypedDict):
  class AudioContentBlock(TypedDict):
  """Audio data.

- !!! note
+ !!! note "Factory function"
  `create_audio_block` may also be used as a factory to create an
  `AudioContentBlock`. Benefits include:
  * Automatic ID generation (when not provided)
@@ -603,6 +614,7 @@ class AudioContentBlock(TypedDict):
  """Content block identifier.

  Either:
+
  - Generated by the provider (e.g., OpenAI's file ID)
  - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -642,9 +654,9 @@ class PlainTextContentBlock(TypedDict):

  !!! note
  Title and context are optional fields that may be passed to the model. See
- Anthropic [example](https://docs.anthropic.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content).
+ Anthropic [example](https://docs.claude.com/en/docs/build-with-claude/citations#citable-vs-non-citable-content).

- !!! note
+ !!! note "Factory function"
  `create_plaintext_block` may also be used as a factory to create a
  `PlainTextContentBlock`. Benefits include:

@@ -660,6 +672,7 @@ class PlainTextContentBlock(TypedDict):
  """Content block identifier.

  Either:
+
  - Generated by the provider (e.g., OpenAI's file ID)
  - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -694,7 +707,7 @@ class PlainTextContentBlock(TypedDict):


  class FileContentBlock(TypedDict):
- """File data that doesn't fit into other multimodal blocks.
+ """File data that doesn't fit into other multimodal block types.
  This block is intended for files that are not images, audio, or plaintext. For
  example, it can be used for PDFs, Word documents, etc.

@@ -703,7 +716,7 @@ class FileContentBlock(TypedDict):
  content block type (e.g., `ImageContentBlock`, `AudioContentBlock`,
  `PlainTextContentBlock`).

- !!! note
+ !!! note "Factory function"
  `create_file_block` may also be used as a factory to create a
  `FileContentBlock`. Benefits include:

@@ -719,6 +732,7 @@ class FileContentBlock(TypedDict):
  """Content block identifier.

  Either:
+
  - Generated by the provider (e.g., OpenAI's file ID)
  - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -765,7 +779,7 @@ class NonStandardContentBlock(TypedDict):
  Has no `extras` field, as provider-specific data should be included in the
  `value` field.

- !!! note
+ !!! note "Factory function"
  `create_non_standard_block` may also be used as a factory to create a
  `NonStandardContentBlock`. Benefits include:

@@ -781,6 +795,7 @@ class NonStandardContentBlock(TypedDict):
  """Content block identifier.

  Either:
+
  - Generated by the provider (e.g., OpenAI's file ID)
  - Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))

@@ -801,6 +816,7 @@ DataContentBlock = (
  | PlainTextContentBlock
  | FileContentBlock
  )
+ """A union of all defined multimodal data `ContentBlock` types."""

  ToolContentBlock = (
  ToolCall | ToolCallChunk | ServerToolCall | ServerToolCallChunk | ServerToolResult
@@ -814,6 +830,7 @@ ContentBlock = (
  | DataContentBlock
  | ToolContentBlock
  )
+ """A union of all defined `ContentBlock` types and aliases."""


  KNOWN_BLOCK_TYPES = {
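Because every member of these unions is a `TypedDict`, blocks are told apart at runtime by their `type` key rather than with `isinstance`; a minimal sketch:

```python
from langchain_core.messages import content as types


def describe(block: types.ContentBlock) -> str:
    """Illustrative dispatch on the `type` discriminator shared by the block types."""
    block_type = block.get("type")
    if block_type == "text":
        return f"text: {block['text']}"
    if block_type == "tool_call":
        return f"tool call: {block['name']}({block['args']})"
    # Fall back to the raw discriminator for any other block.
    return f"<{block_type} block>"
```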
@@ -19,7 +19,7 @@ class FunctionMessage(BaseMessage):
  do not contain the `tool_call_id` field.

  The `tool_call_id` field is used to associate the tool call request with the
- tool call response. This is useful in situations where a chat model is able
+ tool call response. Useful in situations where a chat model is able
  to request multiple tool calls in parallel.

  """
@@ -28,7 +28,7 @@ class FunctionMessage(BaseMessage):
  """The name of the function that was executed."""

  type: Literal["function"] = "function"
- """The type of the message (used for serialization). Defaults to `'function'`."""
+ """The type of the message (used for serialization)."""


  class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
@@ -38,11 +38,7 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
  # to make sure that the chunk variant can be discriminated from the
  # non-chunk variant.
  type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk" # type: ignore[assignment]
- """The type of the message (used for serialization).
-
- Defaults to `'FunctionMessageChunk'`.
-
- """
+ """The type of the message (used for serialization)."""

  @override
  def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]
@@ -7,9 +7,9 @@ from langchain_core.messages.base import BaseMessage, BaseMessageChunk


  class HumanMessage(BaseMessage):
- """Message from a human.
+ """Message from the user.

- `HumanMessage`s are messages that are passed in from a human to the model.
+ A `HumanMessage` is a message that is passed in from a user to the model.

  Example:
  ```python
@@ -27,11 +27,7 @@ class HumanMessage(BaseMessage):
  """

  type: Literal["human"] = "human"
- """The type of the message (used for serialization).
-
- Defaults to `'human'`.
-
- """
+ """The type of the message (used for serialization)."""

  @overload
  def __init__(
@@ -71,5 +67,4 @@ class HumanMessageChunk(HumanMessage, BaseMessageChunk):
  # to make sure that the chunk variant can be discriminated from the
  # non-chunk variant.
  type: Literal["HumanMessageChunk"] = "HumanMessageChunk" # type: ignore[assignment]
- """The type of the message (used for serialization).
- Defaults to "HumanMessageChunk"."""
+ """The type of the message (used for serialization)."""
@@ -9,7 +9,7 @@ class RemoveMessage(BaseMessage):
  """Message responsible for deleting other messages."""

  type: Literal["remove"] = "remove"
- """The type of the message (used for serialization). Defaults to "remove"."""
+ """The type of the message (used for serialization)."""

  def __init__(
  self,
@@ -27,11 +27,7 @@ class SystemMessage(BaseMessage):
  """

  type: Literal["system"] = "system"
- """The type of the message (used for serialization).
-
- Defaults to `'system'`.
-
- """
+ """The type of the message (used for serialization)."""

  @overload
  def __init__(
@@ -71,8 +67,4 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk):
  # to make sure that the chunk variant can be discriminated from the
  # non-chunk variant.
  type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment]
- """The type of the message (used for serialization).
-
- Defaults to `'SystemMessageChunk'`.
-
- """
+ """The type of the message (used for serialization)."""
@@ -31,36 +31,34 @@ class ToolMessage(BaseMessage, ToolOutputMixin):

  Example: A `ToolMessage` representing a result of `42` from a tool call with id

- ```python
- from langchain_core.messages import ToolMessage
+ ```python
+ from langchain_core.messages import ToolMessage

- ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
- ```
+ ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
+ ```

  Example: A `ToolMessage` where only part of the tool output is sent to the model
- and the full output is passed in to artifact.
-
- !!! version-added "Added in version 0.2.17"
+ and the full output is passed in to artifact.

- ```python
- from langchain_core.messages import ToolMessage
-
- tool_output = {
- "stdout": "From the graph we can see that the correlation between "
- "x and y is ...",
- "stderr": None,
- "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
- }
-
- ToolMessage(
- content=tool_output["stdout"],
- artifact=tool_output,
- tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
- )
- ```
+ ```python
+ from langchain_core.messages import ToolMessage
+
+ tool_output = {
+ "stdout": "From the graph we can see that the correlation between "
+ "x and y is ...",
+ "stderr": None,
+ "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
+ }
+
+ ToolMessage(
+ content=tool_output["stdout"],
+ artifact=tool_output,
+ tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
+ )
+ ```

  The `tool_call_id` field is used to associate the tool call request with the
- tool call response. This is useful in situations where a chat model is able
+ tool call response. Useful in situations where a chat model is able
  to request multiple tool calls in parallel.

  """
@@ -69,11 +67,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
  """Tool call that this message is responding to."""

  type: Literal["tool"] = "tool"
- """The type of the message (used for serialization).
-
- Defaults to `'tool'`.
-
- """
+ """The type of the message (used for serialization)."""

  artifact: Any = None
  """Artifact of the Tool execution which is not meant to be sent to the model.
@@ -82,21 +76,15 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
  a subset of the full tool output is being passed as message content but the full
  output is needed in other parts of the code.

- !!! version-added "Added in version 0.2.17"
-
  """

  status: Literal["success", "error"] = "success"
- """Status of the tool invocation.
-
- !!! version-added "Added in version 0.2.24"
-
- """
+ """Status of the tool invocation."""

  additional_kwargs: dict = Field(default_factory=dict, repr=False)
- """Currently inherited from BaseMessage, but not used."""
+ """Currently inherited from `BaseMessage`, but not used."""
  response_metadata: dict = Field(default_factory=dict, repr=False)
- """Currently inherited from BaseMessage, but not used."""
+ """Currently inherited from `BaseMessage`, but not used."""

  @model_validator(mode="before")
  @classmethod
@@ -164,12 +152,12 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
  content_blocks: list[types.ContentBlock] | None = None,
  **kwargs: Any,
  ) -> None:
- """Initialize `ToolMessage`.
+ """Initialize a `ToolMessage`.

  Specify `content` as positional arg or `content_blocks` for typing.

  Args:
- content: The string contents of the message.
+ content: The contents of the message.
  content_blocks: Typed standard content.
  **kwargs: Additional fields.
  """
@@ -215,7 +203,7 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):


  class ToolCall(TypedDict):
- """Represents a request to call a tool.
+ """Represents an AI's request to call a tool.

  Example:
  ```python
@@ -261,7 +249,7 @@ def tool_call(


  class ToolCallChunk(TypedDict):
- """A chunk of a tool call (e.g., as part of a stream).
+ """A chunk of a tool call (yielded when streaming).

  When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`),
  all string attributes are concatenated. Chunks are only merged if their