langchain-core 1.0.0a5__py3-none-any.whl → 1.0.0a7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain-core might be problematic.

Files changed (132)
  1. langchain_core/_api/__init__.py +3 -3
  2. langchain_core/_api/beta_decorator.py +6 -6
  3. langchain_core/_api/deprecation.py +21 -29
  4. langchain_core/_api/path.py +3 -6
  5. langchain_core/_import_utils.py +2 -3
  6. langchain_core/agents.py +10 -11
  7. langchain_core/caches.py +7 -7
  8. langchain_core/callbacks/base.py +91 -91
  9. langchain_core/callbacks/file.py +11 -11
  10. langchain_core/callbacks/manager.py +86 -89
  11. langchain_core/callbacks/stdout.py +8 -8
  12. langchain_core/callbacks/usage.py +4 -4
  13. langchain_core/chat_history.py +1 -37
  14. langchain_core/document_loaders/base.py +2 -2
  15. langchain_core/document_loaders/langsmith.py +15 -15
  16. langchain_core/documents/base.py +16 -16
  17. langchain_core/documents/compressor.py +4 -4
  18. langchain_core/example_selectors/length_based.py +1 -1
  19. langchain_core/example_selectors/semantic_similarity.py +17 -19
  20. langchain_core/exceptions.py +3 -3
  21. langchain_core/globals.py +3 -151
  22. langchain_core/indexing/api.py +44 -43
  23. langchain_core/indexing/base.py +30 -30
  24. langchain_core/indexing/in_memory.py +3 -3
  25. langchain_core/language_models/_utils.py +5 -7
  26. langchain_core/language_models/base.py +18 -132
  27. langchain_core/language_models/chat_models.py +118 -227
  28. langchain_core/language_models/fake.py +11 -11
  29. langchain_core/language_models/fake_chat_models.py +35 -29
  30. langchain_core/language_models/llms.py +91 -201
  31. langchain_core/load/dump.py +1 -1
  32. langchain_core/load/load.py +11 -12
  33. langchain_core/load/mapping.py +2 -4
  34. langchain_core/load/serializable.py +2 -4
  35. langchain_core/messages/ai.py +17 -20
  36. langchain_core/messages/base.py +28 -26
  37. langchain_core/messages/block_translators/__init__.py +17 -7
  38. langchain_core/messages/block_translators/anthropic.py +3 -3
  39. langchain_core/messages/block_translators/bedrock_converse.py +2 -2
  40. langchain_core/messages/block_translators/google_genai.py +502 -20
  41. langchain_core/messages/block_translators/langchain_v0.py +2 -2
  42. langchain_core/messages/block_translators/openai.py +6 -6
  43. langchain_core/messages/content.py +120 -124
  44. langchain_core/messages/human.py +7 -7
  45. langchain_core/messages/system.py +7 -7
  46. langchain_core/messages/tool.py +24 -24
  47. langchain_core/messages/utils.py +67 -79
  48. langchain_core/output_parsers/base.py +12 -14
  49. langchain_core/output_parsers/json.py +4 -4
  50. langchain_core/output_parsers/list.py +3 -5
  51. langchain_core/output_parsers/openai_functions.py +3 -3
  52. langchain_core/output_parsers/openai_tools.py +3 -3
  53. langchain_core/output_parsers/pydantic.py +2 -2
  54. langchain_core/output_parsers/transform.py +13 -15
  55. langchain_core/output_parsers/xml.py +7 -9
  56. langchain_core/outputs/chat_generation.py +4 -4
  57. langchain_core/outputs/chat_result.py +1 -3
  58. langchain_core/outputs/generation.py +2 -2
  59. langchain_core/outputs/llm_result.py +5 -5
  60. langchain_core/prompts/__init__.py +1 -5
  61. langchain_core/prompts/base.py +10 -15
  62. langchain_core/prompts/chat.py +31 -82
  63. langchain_core/prompts/dict.py +2 -2
  64. langchain_core/prompts/few_shot.py +5 -5
  65. langchain_core/prompts/few_shot_with_templates.py +4 -4
  66. langchain_core/prompts/loading.py +3 -5
  67. langchain_core/prompts/prompt.py +4 -16
  68. langchain_core/prompts/string.py +2 -1
  69. langchain_core/prompts/structured.py +16 -23
  70. langchain_core/rate_limiters.py +3 -4
  71. langchain_core/retrievers.py +14 -14
  72. langchain_core/runnables/base.py +938 -1054
  73. langchain_core/runnables/branch.py +36 -40
  74. langchain_core/runnables/config.py +27 -35
  75. langchain_core/runnables/configurable.py +108 -124
  76. langchain_core/runnables/fallbacks.py +76 -72
  77. langchain_core/runnables/graph.py +39 -45
  78. langchain_core/runnables/graph_ascii.py +9 -11
  79. langchain_core/runnables/graph_mermaid.py +18 -19
  80. langchain_core/runnables/graph_png.py +8 -9
  81. langchain_core/runnables/history.py +114 -127
  82. langchain_core/runnables/passthrough.py +113 -139
  83. langchain_core/runnables/retry.py +43 -48
  84. langchain_core/runnables/router.py +23 -28
  85. langchain_core/runnables/schema.py +42 -44
  86. langchain_core/runnables/utils.py +28 -31
  87. langchain_core/stores.py +9 -13
  88. langchain_core/structured_query.py +8 -8
  89. langchain_core/tools/base.py +63 -115
  90. langchain_core/tools/convert.py +31 -35
  91. langchain_core/tools/render.py +1 -1
  92. langchain_core/tools/retriever.py +4 -4
  93. langchain_core/tools/simple.py +13 -17
  94. langchain_core/tools/structured.py +12 -15
  95. langchain_core/tracers/base.py +62 -64
  96. langchain_core/tracers/context.py +17 -35
  97. langchain_core/tracers/core.py +49 -53
  98. langchain_core/tracers/evaluation.py +11 -11
  99. langchain_core/tracers/event_stream.py +58 -60
  100. langchain_core/tracers/langchain.py +13 -13
  101. langchain_core/tracers/log_stream.py +22 -24
  102. langchain_core/tracers/root_listeners.py +14 -14
  103. langchain_core/tracers/run_collector.py +2 -4
  104. langchain_core/tracers/schemas.py +8 -8
  105. langchain_core/tracers/stdout.py +2 -1
  106. langchain_core/utils/__init__.py +0 -3
  107. langchain_core/utils/_merge.py +2 -2
  108. langchain_core/utils/aiter.py +24 -28
  109. langchain_core/utils/env.py +4 -4
  110. langchain_core/utils/function_calling.py +31 -41
  111. langchain_core/utils/html.py +3 -4
  112. langchain_core/utils/input.py +3 -3
  113. langchain_core/utils/iter.py +15 -19
  114. langchain_core/utils/json.py +3 -2
  115. langchain_core/utils/json_schema.py +6 -6
  116. langchain_core/utils/mustache.py +3 -5
  117. langchain_core/utils/pydantic.py +16 -18
  118. langchain_core/utils/usage.py +1 -1
  119. langchain_core/utils/utils.py +29 -29
  120. langchain_core/vectorstores/base.py +18 -21
  121. langchain_core/vectorstores/in_memory.py +14 -87
  122. langchain_core/vectorstores/utils.py +2 -2
  123. langchain_core/version.py +1 -1
  124. {langchain_core-1.0.0a5.dist-info → langchain_core-1.0.0a7.dist-info}/METADATA +10 -31
  125. langchain_core-1.0.0a7.dist-info/RECORD +176 -0
  126. {langchain_core-1.0.0a5.dist-info → langchain_core-1.0.0a7.dist-info}/WHEEL +1 -1
  127. langchain_core/messages/block_translators/ollama.py +0 -47
  128. langchain_core/prompts/pipeline.py +0 -138
  129. langchain_core/tracers/langchain_v1.py +0 -31
  130. langchain_core/utils/loading.py +0 -35
  131. langchain_core-1.0.0a5.dist-info/RECORD +0 -181
  132. langchain_core-1.0.0a5.dist-info/entry_points.txt +0 -4
langchain_core/load/load.py

@@ -3,7 +3,7 @@
  import importlib
  import json
  import os
- from typing import Any, Optional
+ from typing import Any

  from langchain_core._api import beta
  from langchain_core.load.mapping import (
@@ -50,12 +50,11 @@ class Reviver:

  def __init__(
  self,
- secrets_map: Optional[dict[str, str]] = None,
- valid_namespaces: Optional[list[str]] = None,
+ secrets_map: dict[str, str] | None = None,
+ valid_namespaces: list[str] | None = None,
  secrets_from_env: bool = True, # noqa: FBT001,FBT002
- additional_import_mappings: Optional[
- dict[tuple[str, ...], tuple[str, ...]]
- ] = None,
+ additional_import_mappings: dict[tuple[str, ...], tuple[str, ...]]
+ | None = None,
  *,
  ignore_unserializable_fields: bool = False,
  ) -> None:
@@ -187,10 +186,10 @@ class Reviver:
  def loads(
  text: str,
  *,
- secrets_map: Optional[dict[str, str]] = None,
- valid_namespaces: Optional[list[str]] = None,
+ secrets_map: dict[str, str] | None = None,
+ valid_namespaces: list[str] | None = None,
  secrets_from_env: bool = True,
- additional_import_mappings: Optional[dict[tuple[str, ...], tuple[str, ...]]] = None,
+ additional_import_mappings: dict[tuple[str, ...], tuple[str, ...]] | None = None,
  ignore_unserializable_fields: bool = False,
  ) -> Any:
  """Revive a LangChain class from a JSON string.
@@ -231,10 +230,10 @@ def loads(
  def load(
  obj: Any,
  *,
- secrets_map: Optional[dict[str, str]] = None,
- valid_namespaces: Optional[list[str]] = None,
+ secrets_map: dict[str, str] | None = None,
+ valid_namespaces: list[str] | None = None,
  secrets_from_env: bool = True,
- additional_import_mappings: Optional[dict[tuple[str, ...], tuple[str, ...]]] = None,
+ additional_import_mappings: dict[tuple[str, ...], tuple[str, ...]] | None = None,
  ignore_unserializable_fields: bool = False,
  ) -> Any:
  """Revive a LangChain class from a JSON object.
langchain_core/load/mapping.py

@@ -413,11 +413,10 @@ SERIALIZABLE_MAPPING: dict[tuple[str, ...], tuple[str, ...]] = {
  "few_shot_with_templates",
  "FewShotPromptWithTemplates",
  ),
- ("langchain", "prompts", "pipeline", "PipelinePromptTemplate"): (
+ ("langchain", "prompts", "pipeline"): (
  "langchain_core",
  "prompts",
  "pipeline",
- "PipelinePromptTemplate",
  ),
  ("langchain", "prompts", "base", "StringPromptTemplate"): (
  "langchain_core",
@@ -846,11 +845,10 @@ OLD_CORE_NAMESPACES_MAPPING: dict[tuple[str, ...], tuple[str, ...]] = {
  "few_shot_with_templates",
  "FewShotPromptWithTemplates",
  ),
- ("langchain_core", "prompts", "pipeline", "PipelinePromptTemplate"): (
+ ("langchain_core", "prompts", "pipeline"): (
  "langchain_core",
  "prompts",
  "pipeline",
- "PipelinePromptTemplate",
  ),
  ("langchain_core", "prompts", "string", "StringPromptTemplate"): (
  "langchain_core",

langchain_core/load/serializable.py

@@ -6,9 +6,7 @@ from abc import ABC
  from typing import (
  Any,
  Literal,
- Optional,
  TypedDict,
- Union,
  cast,
  )

@@ -53,7 +51,7 @@ class SerializedNotImplemented(BaseSerialized):

  type: Literal["not_implemented"]
  """The type of the object. Must be ``'not_implemented'``."""
- repr: Optional[str]
+ repr: str | None
  """The representation of the object. Optional."""


@@ -188,7 +186,7 @@ class Serializable(BaseModel, ABC):
  if (k not in type(self).model_fields or try_neq_default(v, k, self))
  ]

- def to_json(self) -> Union[SerializedConstructor, SerializedNotImplemented]:
+ def to_json(self) -> SerializedConstructor | SerializedNotImplemented:
  """Serialize the object to JSON.

  Raises:

langchain_core/messages/ai.py

@@ -4,7 +4,7 @@ import json
  import logging
  import operator
  from collections.abc import Sequence
- from typing import Any, Literal, Optional, Union, cast, overload
+ from typing import Any, Literal, cast, overload

  from pydantic import model_validator
  from typing_extensions import NotRequired, Self, TypedDict, override
@@ -48,7 +48,7 @@ class InputTokenDetails(TypedDict, total=False):
  "cache_read": 100,
  }

- .. versionadded:: 0.3.9
+ !!! version-added "Added in version 0.3.9"

  May also hold extra provider-specific keys.

@@ -83,7 +83,7 @@ class OutputTokenDetails(TypedDict, total=False):
  "reasoning": 200,
  }

- .. versionadded:: 0.3.9
+ !!! version-added "Added in version 0.3.9"

  """

@@ -121,8 +121,7 @@ class UsageMetadata(TypedDict):
  },
  }

- .. versionchanged:: 0.3.9
-
+ !!! warning "Behavior changed in 0.3.9"
  Added ``input_token_details`` and ``output_token_details``.

  """
@@ -161,7 +160,7 @@ class AIMessage(BaseMessage):
  """If provided, tool calls associated with the message."""
  invalid_tool_calls: list[InvalidToolCall] = []
  """If provided, tool calls with parsing errors associated with the message."""
- usage_metadata: Optional[UsageMetadata] = None
+ usage_metadata: UsageMetadata | None = None
  """If provided, usage metadata for a message, such as token counts.

  This is a standard representation of token usage that is consistent across models.
@@ -174,22 +173,22 @@ class AIMessage(BaseMessage):
  @overload
  def __init__(
  self,
- content: Union[str, list[Union[str, dict]]],
+ content: str | list[str | dict],
  **kwargs: Any,
  ) -> None: ...

  @overload
  def __init__(
  self,
- content: Optional[Union[str, list[Union[str, dict]]]] = None,
- content_blocks: Optional[list[types.ContentBlock]] = None,
+ content: str | list[str | dict] | None = None,
+ content_blocks: list[types.ContentBlock] | None = None,
  **kwargs: Any,
  ) -> None: ...

  def __init__(
  self,
- content: Optional[Union[str, list[Union[str, dict]]]] = None,
- content_blocks: Optional[list[types.ContentBlock]] = None,
+ content: str | list[str | dict] | None = None,
+ content_blocks: list[types.ContentBlock] | None = None,
  **kwargs: Any,
  ) -> None:
  """Initialize ``AIMessage``.
@@ -210,7 +209,7 @@ class AIMessage(BaseMessage):
  kwargs["tool_calls"] = content_tool_calls

  super().__init__(
- content=cast("Union[str, list[Union[str, dict]]]", content_blocks),
+ content=cast("str | list[str | dict]", content_blocks),
  **kwargs,
  )
  else:
@@ -345,7 +344,7 @@ class AIMessage(BaseMessage):
  base = super().pretty_repr(html=html)
  lines = []

- def _format_tool_args(tc: Union[ToolCall, InvalidToolCall]) -> list[str]:
+ def _format_tool_args(tc: ToolCall | InvalidToolCall) -> list[str]:
  lines = [
  f" {tc.get('name', 'Tool')} ({tc.get('id')})",
  f" Call ID: {tc.get('id')}",
@@ -388,7 +387,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
  tool_call_chunks: list[ToolCallChunk] = []
  """If provided, tool call chunks associated with the message."""

- chunk_position: Optional[Literal["last"]] = None
+ chunk_position: Literal["last"] | None = None
  """Optional span represented by an aggregated AIMessageChunk.

  If a chunk with ``chunk_position="last"`` is aggregated into a stream,
@@ -633,7 +632,7 @@ def add_ai_message_chunks(

  # Token usage
  if left.usage_metadata or any(o.usage_metadata is not None for o in others):
- usage_metadata: Optional[UsageMetadata] = left.usage_metadata
+ usage_metadata: UsageMetadata | None = left.usage_metadata
  for other in others:
  usage_metadata = add_usage(usage_metadata, other.usage_metadata)
  else:
@@ -663,7 +662,7 @@ def add_ai_message_chunks(
  chunk_id = id_
  break

- chunk_position: Optional[Literal["last"]] = (
+ chunk_position: Literal["last"] | None = (
  "last" if any(x.chunk_position == "last" for x in [left, *others]) else None
  )

@@ -678,9 +677,7 @@ def add_ai_message_chunks(
  )


- def add_usage(
- left: Optional[UsageMetadata], right: Optional[UsageMetadata]
- ) -> UsageMetadata:
+ def add_usage(left: UsageMetadata | None, right: UsageMetadata | None) -> UsageMetadata:
  """Recursively add two UsageMetadata objects.

  Example:
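
For orientation, a rough sketch of what the consolidated `add_usage` signature accepts (counts are illustrative):

    from langchain_core.messages.ai import UsageMetadata, add_usage

    left: UsageMetadata = {"input_tokens": 5, "output_tokens": 0, "total_tokens": 5}
    right: UsageMetadata = {"input_tokens": 0, "output_tokens": 10, "total_tokens": 10}

    combined = add_usage(left, right)
    # counts are summed field by field:
    # {"input_tokens": 5, "output_tokens": 10, "total_tokens": 15}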
@@ -741,7 +738,7 @@ def add_usage(


  def subtract_usage(
- left: Optional[UsageMetadata], right: Optional[UsageMetadata]
+ left: UsageMetadata | None, right: UsageMetadata | None
  ) -> UsageMetadata:
  """Recursively subtract two ``UsageMetadata`` objects.


langchain_core/messages/base.py

@@ -2,7 +2,7 @@

  from __future__ import annotations

- from typing import TYPE_CHECKING, Any, Optional, Union, cast, overload
+ from typing import TYPE_CHECKING, Any, cast, overload

  from pydantic import ConfigDict, Field
  from typing_extensions import Self
@@ -22,7 +22,7 @@ if TYPE_CHECKING:

  def _extract_reasoning_from_additional_kwargs(
  message: BaseMessage,
- ) -> Optional[types.ReasoningContentBlock]:
+ ) -> types.ReasoningContentBlock | None:
  """Extract `reasoning_content` from `additional_kwargs`.

  Handles reasoning content stored in various formats:
@@ -34,13 +34,11 @@ def _extract_reasoning_from_additional_kwargs(
  Returns:
  A `ReasoningContentBlock` if reasoning content is found, None otherwise.
  """
- from langchain_core.messages.content import create_reasoning_block # noqa: PLC0415
-
  additional_kwargs = getattr(message, "additional_kwargs", {})

  reasoning_content = additional_kwargs.get("reasoning_content")
  if reasoning_content is not None and isinstance(reasoning_content, str):
- return create_reasoning_block(reasoning=reasoning_content)
+ return {"type": "reasoning", "reasoning": reasoning_content}

  return None

@@ -72,9 +70,9 @@ class TextAccessor(str):
  This method exists solely to support legacy code that calls ``.text()``
  as a method. New code should use property access (``.text``) instead.

- .. deprecated:: 1.0.0
- Calling ``.text()`` as a method is deprecated. Use ``.text`` as a property
- instead. This method will be removed in 2.0.0.
+ !!! deprecated
+ As of `langchain-core` 1.0.0, calling ``.text()`` as a method is deprecated.
+ Use ``.text`` as a property instead. This method will be removed in 2.0.0.

  Returns:
  The string content, identical to property access.
@@ -94,10 +92,10 @@
  class BaseMessage(Serializable):
  """Base abstract message class.

- Messages are the inputs and outputs of ``ChatModel``s.
+ Messages are the inputs and outputs of a ``ChatModel``.
  """

- content: Union[str, list[Union[str, dict]]]
+ content: str | list[str | dict]
  """The string contents of the message."""

  additional_kwargs: dict = Field(default_factory=dict)
@@ -119,7 +117,7 @@ class BaseMessage(Serializable):

  """

- name: Optional[str] = None
+ name: str | None = None
  """An optional name for the message.

  This can be used to provide a human-readable name for the message.
@@ -129,7 +127,7 @@ class BaseMessage(Serializable):

  """

- id: Optional[str] = Field(default=None, coerce_numbers_to_str=True)
+ id: str | None = Field(default=None, coerce_numbers_to_str=True)
  """An optional unique identifier for the message.

  This should ideally be provided by the provider/model which created the message.
@@ -143,22 +141,22 @@ class BaseMessage(Serializable):
  @overload
  def __init__(
  self,
- content: Union[str, list[Union[str, dict]]],
+ content: str | list[str | dict],
  **kwargs: Any,
  ) -> None: ...

  @overload
  def __init__(
  self,
- content: Optional[Union[str, list[Union[str, dict]]]] = None,
- content_blocks: Optional[list[types.ContentBlock]] = None,
+ content: str | list[str | dict] | None = None,
+ content_blocks: list[types.ContentBlock] | None = None,
  **kwargs: Any,
  ) -> None: ...

  def __init__(
  self,
- content: Optional[Union[str, list[Union[str, dict]]]] = None,
- content_blocks: Optional[list[types.ContentBlock]] = None,
+ content: str | list[str | dict] | None = None,
+ content_blocks: list[types.ContentBlock] | None = None,
  **kwargs: Any,
  ) -> None:
  """Initialize ``BaseMessage``.
@@ -197,7 +195,7 @@ class BaseMessage(Serializable):
  def content_blocks(self) -> list[types.ContentBlock]:
  r"""Load content blocks from the message content.

- .. versionadded:: 1.0.0
+ !!! version-added "Added in version 1.0.0"

  """
  # Needed here to avoid circular import, as these classes import BaseMessages
@@ -208,6 +206,9 @@ class BaseMessage(Serializable):
  from langchain_core.messages.block_translators.bedrock_converse import ( # noqa: PLC0415
  _convert_to_v1_from_converse_input,
  )
+ from langchain_core.messages.block_translators.google_genai import ( # noqa: PLC0415
+ _convert_to_v1_from_genai_input,
+ )
  from langchain_core.messages.block_translators.langchain_v0 import ( # noqa: PLC0415
  _convert_v0_multimodal_input_to_v1,
  )
@@ -243,11 +244,12 @@ class BaseMessage(Serializable):
  blocks.append(cast("types.ContentBlock", item))

  # Subsequent passes: attempt to unpack non-standard blocks.
- # The block is left as non-standard if conversion fails.
+ # This is the last stop - if we can't parse it here, it is left as non-standard
  for parsing_step in [
  _convert_v0_multimodal_input_to_v1,
  _convert_to_v1_from_chat_completions_input,
  _convert_to_v1_from_anthropic_input,
+ _convert_to_v1_from_genai_input,
  _convert_to_v1_from_converse_input,
  ]:
  blocks = parsing_step(blocks)
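
For orientation, `content_blocks` is the property that runs these passes over legacy-style content; the change above adds a Google GenAI pass to the chain. A minimal sketch of the overall effect (the exact keys of the resulting blocks may differ by block type):

    from langchain_core.messages import HumanMessage

    msg = HumanMessage(
        content=[
            {"type": "text", "text": "Describe this image."},
            # Legacy-style multimodal block, unpacked by the passes above
            {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
        ]
    )
    print(msg.content_blocks)
    # roughly: [{"type": "text", "text": "Describe this image."},
    #           {"type": "image", "url": "https://example.com/cat.png"}]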
@@ -259,9 +261,9 @@

  Can be used as both property (``message.text``) and method (``message.text()``).

- .. deprecated:: 1.0.0
- Calling ``.text()`` as a method is deprecated. Use ``.text`` as a property
- instead. This method will be removed in 2.0.0.
+ !!! deprecated
+ As of langchain-core 1.0.0, calling ``.text()`` as a method is deprecated.
+ Use ``.text`` as a property instead. This method will be removed in 2.0.0.

  Returns:
  The text content of the message.
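
A quick sketch of the migration these deprecation notes describe:

    from langchain_core.messages import AIMessage

    msg = AIMessage(content="Hello!")

    text = msg.text      # preferred: property access
    legacy = msg.text()  # legacy call form, deprecated as of 1.0.0
    assert text == legacy == "Hello!"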
@@ -323,9 +325,9 @@


  def merge_content(
- first_content: Union[str, list[Union[str, dict]]],
- *contents: Union[str, list[Union[str, dict]]],
- ) -> Union[str, list[Union[str, dict]]]:
+ first_content: str | list[str | dict],
+ *contents: str | list[str | dict],
+ ) -> str | list[str | dict]:
  """Merge multiple message contents.

  Args:
@@ -336,7 +338,7 @@ def merge_content(
  The merged content.

  """
- merged: Union[str, list[Union[str, dict]]]
+ merged: str | list[str | dict]
  merged = "" if first_content is None else first_content

  for content in contents:

langchain_core/messages/block_translators/__init__.py

@@ -12,7 +12,8 @@ the implementation in ``BaseMessage``.

  from __future__ import annotations

- from typing import TYPE_CHECKING, Callable
+ from collections.abc import Callable
+ from typing import TYPE_CHECKING

  if TYPE_CHECKING:
  from langchain_core.messages import AIMessage, AIMessageChunk
@@ -20,6 +21,18 @@ if TYPE_CHECKING:

  # Provider to translator mapping
  PROVIDER_TRANSLATORS: dict[str, dict[str, Callable[..., list[types.ContentBlock]]]] = {}
+ """Map model provider names to translator functions.
+
+ The dictionary maps provider names (e.g. ``'openai'``, ``'anthropic'``) to another
+ dictionary with two keys:
+ - ``'translate_content'``: Function to translate ``AIMessage`` content.
+ - ``'translate_content_chunk'``: Function to translate ``AIMessageChunk`` content.
+
+ When calling `.content_blocks` on an ``AIMessage`` or ``AIMessageChunk``, if
+ ``model_provider`` is set in ``response_metadata``, the corresponding translator
+ functions will be used to parse the content into blocks. Otherwise, best-effort parsing
+ in ``BaseMessage`` will be used.
+ """


  def register_translator(
@@ -27,7 +40,7 @@ def register_translator(
  translate_content: Callable[[AIMessage], list[types.ContentBlock]],
  translate_content_chunk: Callable[[AIMessageChunk], list[types.ContentBlock]],
  ) -> None:
- """Register content translators for a provider.
+ """Register content translators for a provider in `PROVIDER_TRANSLATORS`.

  Args:
  provider: The model provider name (e.g. ``'openai'``, ``'anthropic'``).
@@ -50,7 +63,8 @@ def get_translator(

  Returns:
  Dictionary with ``'translate_content'`` and ``'translate_content_chunk'``
- functions, or None if no translator is registered for the provider.
+ functions, or None if no translator is registered for the provider. In such
+ case, best-effort parsing in ``BaseMessage`` will be used.
  """
  return PROVIDER_TRANSLATORS.get(provider)

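
A minimal sketch of how a provider integration hooks into this registry (the provider name and translator bodies are illustrative):

    from langchain_core.messages import AIMessage, AIMessageChunk
    from langchain_core.messages import content as types
    from langchain_core.messages.block_translators import (
        get_translator,
        register_translator,
    )

    def _translate(message: AIMessage) -> list[types.ContentBlock]:
        # Illustrative: expose string content as a single text block
        return [{"type": "text", "text": str(message.content)}]

    def _translate_chunk(chunk: AIMessageChunk) -> list[types.ContentBlock]:
        return [{"type": "text", "text": str(chunk.content)}]

    register_translator("my-provider", _translate, _translate_chunk)
    assert get_translator("my-provider") is not None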
@@ -81,9 +95,6 @@ def _register_translators() -> None:
  from langchain_core.messages.block_translators.groq import ( # noqa: PLC0415
  _register_groq_translator,
  )
- from langchain_core.messages.block_translators.ollama import ( # noqa: PLC0415
- _register_ollama_translator,
- )
  from langchain_core.messages.block_translators.openai import ( # noqa: PLC0415
  _register_openai_translator,
  )
@@ -94,7 +105,6 @@ def _register_translators() -> None:
  _register_google_genai_translator()
  _register_google_vertexai_translator()
  _register_groq_translator()
- _register_ollama_translator()
  _register_openai_translator()



langchain_core/messages/block_translators/anthropic.py

@@ -2,7 +2,7 @@

  import json
  from collections.abc import Iterable
- from typing import Any, Optional, Union, cast
+ from typing import Any, cast

  from langchain_core.messages import AIMessage, AIMessageChunk
  from langchain_core.messages import content as types
@@ -200,7 +200,7 @@ def _convert_citation_to_v1(citation: dict[str, Any]) -> types.Annotation:
  def _convert_to_v1_from_anthropic(message: AIMessage) -> list[types.ContentBlock]:
  """Convert Anthropic message content to v1 format."""
  if isinstance(message.content, str):
- content: list[Union[str, dict]] = [{"type": "text", "text": message.content}]
+ content: list[str | dict] = [{"type": "text", "text": message.content}]
  else:
  content = message.content

@@ -252,7 +252,7 @@ def _convert_to_v1_from_anthropic(message: AIMessage) -> list[types.ContentBlock
  tool_call_chunk["type"] = "tool_call_chunk"
  yield tool_call_chunk
  else:
- tool_call_block: Optional[types.ToolCall] = None
+ tool_call_block: types.ToolCall | None = None
  # Non-streaming or gathered chunk
  if len(message.tool_calls) == 1:
  tool_call_block = {

langchain_core/messages/block_translators/bedrock_converse.py

@@ -2,7 +2,7 @@

  import base64
  from collections.abc import Iterable
- from typing import Any, Optional, cast
+ from typing import Any, cast

  from langchain_core.messages import AIMessage, AIMessageChunk
  from langchain_core.messages import content as types
@@ -216,7 +216,7 @@ def _convert_to_v1_from_converse(message: AIMessage) -> list[types.ContentBlock]
  tool_call_chunk["type"] = "tool_call_chunk"
  yield tool_call_chunk
  else:
- tool_call_block: Optional[types.ToolCall] = None
+ tool_call_block: types.ToolCall | None = None
  # Non-streaming or gathered chunk
  if len(message.tool_calls) == 1:
  tool_call_block = {