langchain-core 1.0.0a8__py3-none-any.whl → 1.0.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic. Click here for more details.

Files changed (142) hide show
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +0 -1
  3. langchain_core/_api/beta_decorator.py +17 -20
  4. langchain_core/_api/deprecation.py +30 -35
  5. langchain_core/_import_utils.py +1 -1
  6. langchain_core/agents.py +10 -9
  7. langchain_core/caches.py +46 -56
  8. langchain_core/callbacks/__init__.py +1 -8
  9. langchain_core/callbacks/base.py +232 -243
  10. langchain_core/callbacks/file.py +33 -33
  11. langchain_core/callbacks/manager.py +353 -416
  12. langchain_core/callbacks/stdout.py +21 -22
  13. langchain_core/callbacks/streaming_stdout.py +32 -32
  14. langchain_core/callbacks/usage.py +54 -51
  15. langchain_core/chat_history.py +43 -58
  16. langchain_core/document_loaders/base.py +21 -21
  17. langchain_core/document_loaders/langsmith.py +22 -22
  18. langchain_core/documents/__init__.py +0 -1
  19. langchain_core/documents/base.py +46 -49
  20. langchain_core/documents/transformers.py +28 -29
  21. langchain_core/embeddings/fake.py +50 -54
  22. langchain_core/example_selectors/semantic_similarity.py +4 -6
  23. langchain_core/exceptions.py +7 -8
  24. langchain_core/indexing/api.py +19 -25
  25. langchain_core/indexing/base.py +24 -24
  26. langchain_core/language_models/__init__.py +11 -27
  27. langchain_core/language_models/_utils.py +53 -54
  28. langchain_core/language_models/base.py +30 -24
  29. langchain_core/language_models/chat_models.py +123 -148
  30. langchain_core/language_models/fake_chat_models.py +7 -7
  31. langchain_core/language_models/llms.py +14 -16
  32. langchain_core/load/dump.py +3 -4
  33. langchain_core/load/load.py +7 -16
  34. langchain_core/load/serializable.py +37 -36
  35. langchain_core/messages/__init__.py +1 -16
  36. langchain_core/messages/ai.py +122 -123
  37. langchain_core/messages/base.py +31 -31
  38. langchain_core/messages/block_translators/__init__.py +17 -17
  39. langchain_core/messages/block_translators/anthropic.py +3 -3
  40. langchain_core/messages/block_translators/bedrock_converse.py +3 -3
  41. langchain_core/messages/block_translators/google_genai.py +5 -4
  42. langchain_core/messages/block_translators/google_vertexai.py +4 -32
  43. langchain_core/messages/block_translators/groq.py +117 -21
  44. langchain_core/messages/block_translators/langchain_v0.py +3 -3
  45. langchain_core/messages/block_translators/openai.py +5 -5
  46. langchain_core/messages/chat.py +2 -6
  47. langchain_core/messages/content.py +222 -209
  48. langchain_core/messages/function.py +6 -10
  49. langchain_core/messages/human.py +17 -24
  50. langchain_core/messages/modifier.py +2 -2
  51. langchain_core/messages/system.py +12 -22
  52. langchain_core/messages/tool.py +53 -69
  53. langchain_core/messages/utils.py +399 -417
  54. langchain_core/output_parsers/__init__.py +1 -14
  55. langchain_core/output_parsers/base.py +46 -47
  56. langchain_core/output_parsers/json.py +3 -4
  57. langchain_core/output_parsers/list.py +2 -2
  58. langchain_core/output_parsers/openai_functions.py +46 -44
  59. langchain_core/output_parsers/openai_tools.py +11 -16
  60. langchain_core/output_parsers/pydantic.py +10 -11
  61. langchain_core/output_parsers/string.py +2 -2
  62. langchain_core/output_parsers/transform.py +2 -2
  63. langchain_core/output_parsers/xml.py +1 -1
  64. langchain_core/outputs/__init__.py +1 -1
  65. langchain_core/outputs/chat_generation.py +14 -14
  66. langchain_core/outputs/generation.py +6 -6
  67. langchain_core/outputs/llm_result.py +5 -5
  68. langchain_core/prompt_values.py +11 -11
  69. langchain_core/prompts/__init__.py +3 -23
  70. langchain_core/prompts/base.py +33 -38
  71. langchain_core/prompts/chat.py +222 -229
  72. langchain_core/prompts/dict.py +3 -3
  73. langchain_core/prompts/few_shot.py +76 -83
  74. langchain_core/prompts/few_shot_with_templates.py +7 -9
  75. langchain_core/prompts/image.py +12 -14
  76. langchain_core/prompts/loading.py +1 -1
  77. langchain_core/prompts/message.py +3 -3
  78. langchain_core/prompts/prompt.py +20 -23
  79. langchain_core/prompts/string.py +20 -8
  80. langchain_core/prompts/structured.py +26 -27
  81. langchain_core/rate_limiters.py +50 -58
  82. langchain_core/retrievers.py +41 -182
  83. langchain_core/runnables/base.py +565 -597
  84. langchain_core/runnables/branch.py +8 -8
  85. langchain_core/runnables/config.py +37 -44
  86. langchain_core/runnables/configurable.py +9 -10
  87. langchain_core/runnables/fallbacks.py +9 -9
  88. langchain_core/runnables/graph.py +46 -50
  89. langchain_core/runnables/graph_ascii.py +19 -18
  90. langchain_core/runnables/graph_mermaid.py +20 -31
  91. langchain_core/runnables/graph_png.py +7 -7
  92. langchain_core/runnables/history.py +22 -22
  93. langchain_core/runnables/passthrough.py +11 -11
  94. langchain_core/runnables/retry.py +3 -3
  95. langchain_core/runnables/router.py +2 -2
  96. langchain_core/runnables/schema.py +33 -33
  97. langchain_core/runnables/utils.py +30 -34
  98. langchain_core/stores.py +72 -102
  99. langchain_core/sys_info.py +27 -29
  100. langchain_core/tools/__init__.py +1 -14
  101. langchain_core/tools/base.py +70 -71
  102. langchain_core/tools/convert.py +100 -104
  103. langchain_core/tools/render.py +9 -9
  104. langchain_core/tools/retriever.py +7 -7
  105. langchain_core/tools/simple.py +6 -7
  106. langchain_core/tools/structured.py +18 -24
  107. langchain_core/tracers/__init__.py +1 -9
  108. langchain_core/tracers/base.py +35 -35
  109. langchain_core/tracers/context.py +12 -17
  110. langchain_core/tracers/event_stream.py +3 -3
  111. langchain_core/tracers/langchain.py +8 -8
  112. langchain_core/tracers/log_stream.py +17 -18
  113. langchain_core/tracers/memory_stream.py +3 -3
  114. langchain_core/tracers/root_listeners.py +2 -2
  115. langchain_core/tracers/schemas.py +0 -129
  116. langchain_core/tracers/stdout.py +1 -2
  117. langchain_core/utils/__init__.py +1 -1
  118. langchain_core/utils/aiter.py +32 -32
  119. langchain_core/utils/env.py +5 -5
  120. langchain_core/utils/function_calling.py +59 -154
  121. langchain_core/utils/html.py +4 -4
  122. langchain_core/utils/input.py +3 -3
  123. langchain_core/utils/interactive_env.py +1 -1
  124. langchain_core/utils/iter.py +20 -20
  125. langchain_core/utils/json.py +1 -1
  126. langchain_core/utils/json_schema.py +2 -2
  127. langchain_core/utils/mustache.py +5 -5
  128. langchain_core/utils/pydantic.py +17 -17
  129. langchain_core/utils/strings.py +5 -5
  130. langchain_core/utils/utils.py +25 -28
  131. langchain_core/vectorstores/base.py +55 -87
  132. langchain_core/vectorstores/in_memory.py +83 -85
  133. langchain_core/vectorstores/utils.py +2 -2
  134. langchain_core/version.py +1 -1
  135. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc2.dist-info}/METADATA +23 -11
  136. langchain_core-1.0.0rc2.dist-info/RECORD +172 -0
  137. langchain_core/memory.py +0 -120
  138. langchain_core/pydantic_v1/__init__.py +0 -30
  139. langchain_core/pydantic_v1/dataclasses.py +0 -23
  140. langchain_core/pydantic_v1/main.py +0 -23
  141. langchain_core-1.0.0a8.dist-info/RECORD +0 -176
  142. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc2.dist-info}/WHEEL +0 -0
@@ -40,13 +40,13 @@ class InputTokenDetails(TypedDict, total=False):
40
40
  Does *not* need to sum to full input token count. Does *not* need to have all keys.
41
41
 
42
42
  Example:
43
- .. code-block:: python
44
-
45
- {
46
- "audio": 10,
47
- "cache_creation": 200,
48
- "cache_read": 100,
49
- }
43
+ ```python
44
+ {
45
+ "audio": 10,
46
+ "cache_creation": 200,
47
+ "cache_read": 100,
48
+ }
49
+ ```
50
50
 
51
51
  !!! version-added "Added in version 0.3.9"
52
52
 
@@ -76,12 +76,12 @@ class OutputTokenDetails(TypedDict, total=False):
76
76
  Does *not* need to sum to full output token count. Does *not* need to have all keys.
77
77
 
78
78
  Example:
79
- .. code-block:: python
80
-
81
- {
82
- "audio": 10,
83
- "reasoning": 200,
84
- }
79
+ ```python
80
+ {
81
+ "audio": 10,
82
+ "reasoning": 200,
83
+ }
84
+ ```
85
85
 
86
86
  !!! version-added "Added in version 0.3.9"
87
87
 
@@ -104,25 +104,25 @@ class UsageMetadata(TypedDict):
104
104
  This is a standard representation of token usage that is consistent across models.
105
105
 
106
106
  Example:
107
- .. code-block:: python
108
-
109
- {
110
- "input_tokens": 350,
111
- "output_tokens": 240,
112
- "total_tokens": 590,
113
- "input_token_details": {
114
- "audio": 10,
115
- "cache_creation": 200,
116
- "cache_read": 100,
117
- },
118
- "output_token_details": {
119
- "audio": 10,
120
- "reasoning": 200,
121
- },
122
- }
107
+ ```python
108
+ {
109
+ "input_tokens": 350,
110
+ "output_tokens": 240,
111
+ "total_tokens": 590,
112
+ "input_token_details": {
113
+ "audio": 10,
114
+ "cache_creation": 200,
115
+ "cache_read": 100,
116
+ },
117
+ "output_token_details": {
118
+ "audio": 10,
119
+ "reasoning": 200,
120
+ },
121
+ }
122
+ ```
123
123
 
124
124
  !!! warning "Behavior changed in 0.3.9"
125
- Added ``input_token_details`` and ``output_token_details``.
125
+ Added `input_token_details` and `output_token_details`.
126
126
 
127
127
  """
128
128
 
@@ -148,27 +148,26 @@ class UsageMetadata(TypedDict):
148
148
  class AIMessage(BaseMessage):
149
149
  """Message from an AI.
150
150
 
151
- AIMessage is returned from a chat model as a response to a prompt.
151
+ An `AIMessage` is returned from a chat model as a response to a prompt.
152
152
 
153
153
  This message represents the output of the model and consists of both
154
- the raw output as returned by the model together standardized fields
154
+ the raw output as returned by the model and standardized fields
155
155
  (e.g., tool calls, usage metadata) added by the LangChain framework.
156
156
 
157
157
  """
158
158
 
159
159
  tool_calls: list[ToolCall] = []
160
- """If provided, tool calls associated with the message."""
160
+ """If present, tool calls associated with the message."""
161
161
  invalid_tool_calls: list[InvalidToolCall] = []
162
- """If provided, tool calls with parsing errors associated with the message."""
162
+ """If present, tool calls with parsing errors associated with the message."""
163
163
  usage_metadata: UsageMetadata | None = None
164
- """If provided, usage metadata for a message, such as token counts.
164
+ """If present, usage metadata for a message, such as token counts.
165
165
 
166
166
  This is a standard representation of token usage that is consistent across models.
167
-
168
167
  """
169
168
 
170
169
  type: Literal["ai"] = "ai"
171
- """The type of the message (used for deserialization). Defaults to "ai"."""
170
+ """The type of the message (used for deserialization)."""
172
171
 
173
172
  @overload
174
173
  def __init__(
@@ -191,14 +190,14 @@ class AIMessage(BaseMessage):
191
190
  content_blocks: list[types.ContentBlock] | None = None,
192
191
  **kwargs: Any,
193
192
  ) -> None:
194
- """Initialize ``AIMessage``.
193
+ """Initialize an `AIMessage`.
195
194
 
196
- Specify ``content`` as positional arg or ``content_blocks`` for typing.
195
+ Specify `content` as positional arg or `content_blocks` for typing.
197
196
 
198
197
  Args:
199
198
  content: The content of the message.
200
199
  content_blocks: Typed standard content.
201
- kwargs: Additional arguments to pass to the parent class.
200
+ **kwargs: Additional arguments to pass to the parent class.
202
201
  """
203
202
  if content_blocks is not None:
204
203
  # If there are tool calls in content_blocks, but not in tool_calls, add them
@@ -217,7 +216,11 @@ class AIMessage(BaseMessage):
217
216
 
218
217
  @property
219
218
  def lc_attributes(self) -> dict:
220
- """Attrs to be serialized even if they are derived from other init args."""
219
+ """Attributes to be serialized.
220
+
221
+ Includes all attributes, even if they are derived from other initialization
222
+ arguments.
223
+ """
221
224
  return {
222
225
  "tool_calls": self.tool_calls,
223
226
  "invalid_tool_calls": self.invalid_tool_calls,
@@ -225,11 +228,11 @@ class AIMessage(BaseMessage):
225
228
 
226
229
  @property
227
230
  def content_blocks(self) -> list[types.ContentBlock]:
228
- """Return content blocks of the message.
231
+ """Return standard, typed `ContentBlock` dicts from the message.
229
232
 
230
233
  If the message has a known model provider, use the provider-specific translator
231
234
  first before falling back to best-effort parsing. For details, see the property
232
- on ``BaseMessage``.
235
+ on `BaseMessage`.
233
236
  """
234
237
  if self.response_metadata.get("output_version") == "v1":
235
238
  return cast("list[types.ContentBlock]", self.content)
@@ -331,11 +334,10 @@ class AIMessage(BaseMessage):
331
334
 
332
335
  @override
333
336
  def pretty_repr(self, html: bool = False) -> str:
334
- """Return a pretty representation of the message.
337
+ """Return a pretty representation of the message for display.
335
338
 
336
339
  Args:
337
340
  html: Whether to return an HTML-formatted string.
338
- Defaults to False.
339
341
 
340
342
  Returns:
341
343
  A pretty representation of the message.
@@ -372,31 +374,27 @@ class AIMessage(BaseMessage):
372
374
 
373
375
 
374
376
  class AIMessageChunk(AIMessage, BaseMessageChunk):
375
- """Message chunk from an AI."""
377
+ """Message chunk from an AI (yielded when streaming)."""
376
378
 
377
379
  # Ignoring mypy re-assignment here since we're overriding the value
378
380
  # to make sure that the chunk variant can be discriminated from the
379
381
  # non-chunk variant.
380
382
  type: Literal["AIMessageChunk"] = "AIMessageChunk" # type: ignore[assignment]
381
- """The type of the message (used for deserialization).
382
-
383
- Defaults to ``AIMessageChunk``.
384
-
385
- """
383
+ """The type of the message (used for deserialization)."""
386
384
 
387
385
  tool_call_chunks: list[ToolCallChunk] = []
388
386
  """If provided, tool call chunks associated with the message."""
389
387
 
390
388
  chunk_position: Literal["last"] | None = None
391
- """Optional span represented by an aggregated AIMessageChunk.
389
+ """Optional span represented by an aggregated `AIMessageChunk`.
392
390
 
393
- If a chunk with ``chunk_position="last"`` is aggregated into a stream,
394
- ``tool_call_chunks`` in message content will be parsed into ``tool_calls``.
391
+ If a chunk with `chunk_position="last"` is aggregated into a stream,
392
+ `tool_call_chunks` in message content will be parsed into `tool_calls`.
395
393
  """
396
394
 
397
395
  @property
398
396
  def lc_attributes(self) -> dict:
399
- """Attrs to be serialized even if they are derived from other init args."""
397
+ """Attributes to be serialized, even if they are derived from other initialization args.""" # noqa: E501
400
398
  return {
401
399
  "tool_calls": self.tool_calls,
402
400
  "invalid_tool_calls": self.invalid_tool_calls,
@@ -404,7 +402,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
404
402
 
405
403
  @property
406
404
  def content_blocks(self) -> list[types.ContentBlock]:
407
- """Return content blocks of the message."""
405
+ """Return standard, typed `ContentBlock` dicts from the message."""
408
406
  if self.response_metadata.get("output_version") == "v1":
409
407
  return cast("list[types.ContentBlock]", self.content)
410
408
 
@@ -545,12 +543,15 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
545
543
  and call_id in id_to_tc
546
544
  ):
547
545
  self.content[idx] = cast("dict[str, Any]", id_to_tc[call_id])
546
+ if "extras" in block:
547
+ # mypy does not account for instance check for dict above
548
+ self.content[idx]["extras"] = block["extras"] # type: ignore[index]
548
549
 
549
550
  return self
550
551
 
551
552
  @model_validator(mode="after")
552
553
  def init_server_tool_calls(self) -> Self:
553
- """Parse server_tool_call_chunks."""
554
+ """Parse `server_tool_call_chunks`."""
554
555
  if (
555
556
  self.chunk_position == "last"
556
557
  and self.response_metadata.get("output_version") == "v1"
@@ -596,14 +597,14 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
596
597
  def add_ai_message_chunks(
597
598
  left: AIMessageChunk, *others: AIMessageChunk
598
599
  ) -> AIMessageChunk:
599
- """Add multiple ``AIMessageChunk``s together.
600
+ """Add multiple `AIMessageChunk`s together.
600
601
 
601
602
  Args:
602
- left: The first ``AIMessageChunk``.
603
- *others: Other ``AIMessageChunk``s to add.
603
+ left: The first `AIMessageChunk`.
604
+ *others: Other `AIMessageChunk`s to add.
604
605
 
605
606
  Returns:
606
- The resulting ``AIMessageChunk``.
607
+ The resulting `AIMessageChunk`.
607
608
 
608
609
  """
609
610
  content = merge_content(left.content, *(o.content for o in others))
@@ -681,43 +682,42 @@ def add_usage(left: UsageMetadata | None, right: UsageMetadata | None) -> UsageM
681
682
  """Recursively add two UsageMetadata objects.
682
683
 
683
684
  Example:
684
- .. code-block:: python
685
-
686
- from langchain_core.messages.ai import add_usage
687
-
688
- left = UsageMetadata(
689
- input_tokens=5,
690
- output_tokens=0,
691
- total_tokens=5,
692
- input_token_details=InputTokenDetails(cache_read=3),
693
- )
694
- right = UsageMetadata(
695
- input_tokens=0,
696
- output_tokens=10,
697
- total_tokens=10,
698
- output_token_details=OutputTokenDetails(reasoning=4),
699
- )
685
+ ```python
686
+ from langchain_core.messages.ai import add_usage
687
+
688
+ left = UsageMetadata(
689
+ input_tokens=5,
690
+ output_tokens=0,
691
+ total_tokens=5,
692
+ input_token_details=InputTokenDetails(cache_read=3),
693
+ )
694
+ right = UsageMetadata(
695
+ input_tokens=0,
696
+ output_tokens=10,
697
+ total_tokens=10,
698
+ output_token_details=OutputTokenDetails(reasoning=4),
699
+ )
700
700
 
701
- add_usage(left, right)
701
+ add_usage(left, right)
702
+ ```
702
703
 
703
704
  results in
704
705
 
705
- .. code-block:: python
706
-
707
- UsageMetadata(
708
- input_tokens=5,
709
- output_tokens=10,
710
- total_tokens=15,
711
- input_token_details=InputTokenDetails(cache_read=3),
712
- output_token_details=OutputTokenDetails(reasoning=4),
713
- )
714
-
706
+ ```python
707
+ UsageMetadata(
708
+ input_tokens=5,
709
+ output_tokens=10,
710
+ total_tokens=15,
711
+ input_token_details=InputTokenDetails(cache_read=3),
712
+ output_token_details=OutputTokenDetails(reasoning=4),
713
+ )
714
+ ```
715
715
  Args:
716
- left: The first ``UsageMetadata`` object.
717
- right: The second ``UsageMetadata`` object.
716
+ left: The first `UsageMetadata` object.
717
+ right: The second `UsageMetadata` object.
718
718
 
719
719
  Returns:
720
- The sum of the two ``UsageMetadata`` objects.
720
+ The sum of the two `UsageMetadata` objects.
721
721
 
722
722
  """
723
723
  if not (left or right):
@@ -740,48 +740,47 @@ def add_usage(left: UsageMetadata | None, right: UsageMetadata | None) -> UsageM
740
740
  def subtract_usage(
741
741
  left: UsageMetadata | None, right: UsageMetadata | None
742
742
  ) -> UsageMetadata:
743
- """Recursively subtract two ``UsageMetadata`` objects.
743
+ """Recursively subtract two `UsageMetadata` objects.
744
744
 
745
- Token counts cannot be negative so the actual operation is ``max(left - right, 0)``.
745
+ Token counts cannot be negative so the actual operation is `max(left - right, 0)`.
746
746
 
747
747
  Example:
748
- .. code-block:: python
749
-
750
- from langchain_core.messages.ai import subtract_usage
751
-
752
- left = UsageMetadata(
753
- input_tokens=5,
754
- output_tokens=10,
755
- total_tokens=15,
756
- input_token_details=InputTokenDetails(cache_read=4),
757
- )
758
- right = UsageMetadata(
759
- input_tokens=3,
760
- output_tokens=8,
761
- total_tokens=11,
762
- output_token_details=OutputTokenDetails(reasoning=4),
763
- )
748
+ ```python
749
+ from langchain_core.messages.ai import subtract_usage
750
+
751
+ left = UsageMetadata(
752
+ input_tokens=5,
753
+ output_tokens=10,
754
+ total_tokens=15,
755
+ input_token_details=InputTokenDetails(cache_read=4),
756
+ )
757
+ right = UsageMetadata(
758
+ input_tokens=3,
759
+ output_tokens=8,
760
+ total_tokens=11,
761
+ output_token_details=OutputTokenDetails(reasoning=4),
762
+ )
764
763
 
765
- subtract_usage(left, right)
764
+ subtract_usage(left, right)
765
+ ```
766
766
 
767
767
  results in
768
768
 
769
- .. code-block:: python
770
-
771
- UsageMetadata(
772
- input_tokens=2,
773
- output_tokens=2,
774
- total_tokens=4,
775
- input_token_details=InputTokenDetails(cache_read=4),
776
- output_token_details=OutputTokenDetails(reasoning=0),
777
- )
778
-
769
+ ```python
770
+ UsageMetadata(
771
+ input_tokens=2,
772
+ output_tokens=2,
773
+ total_tokens=4,
774
+ input_token_details=InputTokenDetails(cache_read=4),
775
+ output_token_details=OutputTokenDetails(reasoning=0),
776
+ )
777
+ ```
779
778
  Args:
780
- left: The first ``UsageMetadata`` object.
781
- right: The second ``UsageMetadata`` object.
779
+ left: The first `UsageMetadata` object.
780
+ right: The second `UsageMetadata` object.
782
781
 
783
782
  Returns:
784
- The resulting ``UsageMetadata`` after subtraction.
783
+ The resulting `UsageMetadata` after subtraction.
785
784
 
786
785
  """
787
786
  if not (left or right):
@@ -48,13 +48,13 @@ class TextAccessor(str):
48
48
 
49
49
  Exists to maintain backward compatibility while transitioning from method-based to
50
50
  property-based text access in message objects. In LangChain <v1.0, message text was
51
- accessed via ``.text()`` method calls. In v1.0=<, the preferred pattern is property
52
- access via ``.text``.
51
+ accessed via `.text()` method calls. In v1.0+, the preferred pattern is property
52
+ access via `.text`.
53
53
 
54
- Rather than breaking existing code immediately, ``TextAccessor`` allows both
54
+ Rather than breaking existing code immediately, `TextAccessor` allows both
55
55
  patterns:
56
- - Modern property access: ``message.text`` (returns string directly)
57
- - Legacy method access: ``message.text()`` (callable, emits deprecation warning)
56
+ - Modern property access: `message.text` (returns string directly)
57
+ - Legacy method access: `message.text()` (callable, emits deprecation warning)
58
58
 
59
59
  """
60
60
 
@@ -67,12 +67,12 @@ class TextAccessor(str):
67
67
  def __call__(self) -> str:
68
68
  """Enable method-style text access for backward compatibility.
69
69
 
70
- This method exists solely to support legacy code that calls ``.text()``
71
- as a method. New code should use property access (``.text``) instead.
70
+ This method exists solely to support legacy code that calls `.text()`
71
+ as a method. New code should use property access (`.text`) instead.
72
72
 
73
73
  !!! deprecated
74
- As of `langchain-core` 1.0.0, calling ``.text()`` as a method is deprecated.
75
- Use ``.text`` as a property instead. This method will be removed in 2.0.0.
74
+ As of `langchain-core` 1.0.0, calling `.text()` as a method is deprecated.
75
+ Use `.text` as a property instead. This method will be removed in 2.0.0.
76
76
 
77
77
  Returns:
78
78
  The string content, identical to property access.
@@ -92,11 +92,11 @@ class TextAccessor(str):
92
92
  class BaseMessage(Serializable):
93
93
  """Base abstract message class.
94
94
 
95
- Messages are the inputs and outputs of a ``ChatModel``.
95
+ Messages are the inputs and outputs of a chat model.
96
96
  """
97
97
 
98
98
  content: str | list[str | dict]
99
- """The string contents of the message."""
99
+ """The contents of the message."""
100
100
 
101
101
  additional_kwargs: dict = Field(default_factory=dict)
102
102
  """Reserved for additional payload data associated with the message.
@@ -159,14 +159,14 @@ class BaseMessage(Serializable):
159
159
  content_blocks: list[types.ContentBlock] | None = None,
160
160
  **kwargs: Any,
161
161
  ) -> None:
162
- """Initialize ``BaseMessage``.
162
+ """Initialize a `BaseMessage`.
163
163
 
164
- Specify ``content`` as positional arg or ``content_blocks`` for typing.
164
+ Specify `content` as positional arg or `content_blocks` for typing.
165
165
 
166
166
  Args:
167
- content: The string contents of the message.
167
+ content: The contents of the message.
168
168
  content_blocks: Typed standard content.
169
- kwargs: Additional arguments to pass to the parent class.
169
+ **kwargs: Additional arguments to pass to the parent class.
170
170
  """
171
171
  if content_blocks is not None:
172
172
  super().__init__(content=content_blocks, **kwargs)
@@ -175,7 +175,7 @@ class BaseMessage(Serializable):
175
175
 
176
176
  @classmethod
177
177
  def is_lc_serializable(cls) -> bool:
178
- """``BaseMessage`` is serializable.
178
+ """`BaseMessage` is serializable.
179
179
 
180
180
  Returns:
181
181
  True
@@ -184,10 +184,10 @@ class BaseMessage(Serializable):
184
184
 
185
185
  @classmethod
186
186
  def get_lc_namespace(cls) -> list[str]:
187
- """Get the namespace of the langchain object.
187
+ """Get the namespace of the LangChain object.
188
188
 
189
189
  Returns:
190
- ``["langchain", "schema", "messages"]``
190
+ `["langchain", "schema", "messages"]`
191
191
  """
192
192
  return ["langchain", "schema", "messages"]
193
193
 
@@ -259,11 +259,11 @@ class BaseMessage(Serializable):
259
259
  def text(self) -> TextAccessor:
260
260
  """Get the text content of the message as a string.
261
261
 
262
- Can be used as both property (``message.text``) and method (``message.text()``).
262
+ Can be used as both property (`message.text`) and method (`message.text()`).
263
263
 
264
264
  !!! deprecated
265
- As of langchain-core 1.0.0, calling ``.text()`` as a method is deprecated.
266
- Use ``.text`` as a property instead. This method will be removed in 2.0.0.
265
+ As of `langchain-core` 1.0.0, calling `.text()` as a method is deprecated.
266
+ Use `.text` as a property instead. This method will be removed in 2.0.0.
267
267
 
268
268
  Returns:
269
269
  The text content of the message.
@@ -306,8 +306,8 @@ class BaseMessage(Serializable):
306
306
  """Get a pretty representation of the message.
307
307
 
308
308
  Args:
309
- html: Whether to format the message as HTML. If True, the message will be
310
- formatted with HTML tags. Default is False.
309
+ html: Whether to format the message as HTML. If `True`, the message will be
310
+ formatted with HTML tags.
311
311
 
312
312
  Returns:
313
313
  A pretty representation of the message.
@@ -331,8 +331,8 @@ def merge_content(
331
331
  """Merge multiple message contents.
332
332
 
333
333
  Args:
334
- first_content: The first ``content``. Can be a string or a list.
335
- contents: The other ``content``s. Can be a string or a list.
334
+ first_content: The first `content`. Can be a string or a list.
335
+ contents: The other `content`s. Can be a string or a list.
336
336
 
337
337
  Returns:
338
338
  The merged content.
@@ -388,9 +388,9 @@ class BaseMessageChunk(BaseMessage):
388
388
 
389
389
  For example,
390
390
 
391
- ``AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")``
391
+ `AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")`
392
392
 
393
- will give ``AIMessageChunk(content="Hello World")``
393
+ will give `AIMessageChunk(content="Hello World")`
394
394
 
395
395
  """
396
396
  if isinstance(other, BaseMessageChunk):
@@ -439,8 +439,8 @@ def message_to_dict(message: BaseMessage) -> dict:
439
439
  message: Message to convert.
440
440
 
441
441
  Returns:
442
- Message as a dict. The dict will have a ``type`` key with the message type
443
- and a ``data`` key with the message data as a dict.
442
+ Message as a dict. The dict will have a `type` key with the message type
443
+ and a `data` key with the message data as a dict.
444
444
 
445
445
  """
446
446
  return {"type": message.type, "data": message.model_dump()}
@@ -450,7 +450,7 @@ def messages_to_dict(messages: Sequence[BaseMessage]) -> list[dict]:
450
450
  """Convert a sequence of Messages to a list of dictionaries.
451
451
 
452
452
  Args:
453
- messages: Sequence of messages (as ``BaseMessage``s) to convert.
453
+ messages: Sequence of messages (as `BaseMessage`s) to convert.
454
454
 
455
455
  Returns:
456
456
  List of messages as dicts.
@@ -464,7 +464,7 @@ def get_msg_title_repr(title: str, *, bold: bool = False) -> str:
464
464
 
465
465
  Args:
466
466
  title: The title.
467
- bold: Whether to bold the title. Default is False.
467
+ bold: Whether to bold the title.
468
468
 
469
469
  Returns:
470
470
  The title representation.
@@ -1,13 +1,13 @@
1
1
  """Derivations of standard content blocks from provider content.
2
2
 
3
- ``AIMessage`` will first attempt to use a provider-specific translator if
4
- ``model_provider`` is set in ``response_metadata`` on the message. Consequently, each
3
+ `AIMessage` will first attempt to use a provider-specific translator if
4
+ `model_provider` is set in `response_metadata` on the message. Consequently, each
5
5
  provider translator must handle all possible content response types from the provider,
6
6
  including text.
7
7
 
8
8
  If no provider is set, or if the provider does not have a registered translator,
9
- ``AIMessage`` will fall back to best-effort parsing of the content into blocks using
10
- the implementation in ``BaseMessage``.
9
+ `AIMessage` will fall back to best-effort parsing of the content into blocks using
10
+ the implementation in `BaseMessage`.
11
11
  """
12
12
 
13
13
  from __future__ import annotations
@@ -23,15 +23,15 @@ if TYPE_CHECKING:
23
23
  PROVIDER_TRANSLATORS: dict[str, dict[str, Callable[..., list[types.ContentBlock]]]] = {}
24
24
  """Map model provider names to translator functions.
25
25
 
26
- The dictionary maps provider names (e.g. ``'openai'``, ``'anthropic'``) to another
26
+ The dictionary maps provider names (e.g. `'openai'`, `'anthropic'`) to another
27
27
  dictionary with two keys:
28
- - ``'translate_content'``: Function to translate ``AIMessage`` content.
29
- - ``'translate_content_chunk'``: Function to translate ``AIMessageChunk`` content.
28
+ - `'translate_content'`: Function to translate `AIMessage` content.
29
+ - `'translate_content_chunk'`: Function to translate `AIMessageChunk` content.
30
30
 
31
- When calling `.content_blocks` on an ``AIMessage`` or ``AIMessageChunk``, if
32
- ``model_provider`` is set in ``response_metadata``, the corresponding translator
31
+ When calling `content_blocks` on an `AIMessage` or `AIMessageChunk`, if
32
+ `model_provider` is set in `response_metadata`, the corresponding translator
33
33
  functions will be used to parse the content into blocks. Otherwise, best-effort parsing
34
- in ``BaseMessage`` will be used.
34
+ in `BaseMessage` will be used.
35
35
  """
36
36
 
37
37
 
@@ -43,9 +43,9 @@ def register_translator(
43
43
  """Register content translators for a provider in `PROVIDER_TRANSLATORS`.
44
44
 
45
45
  Args:
46
- provider: The model provider name (e.g. ``'openai'``, ``'anthropic'``).
47
- translate_content: Function to translate ``AIMessage`` content.
48
- translate_content_chunk: Function to translate ``AIMessageChunk`` content.
46
+ provider: The model provider name (e.g. `'openai'`, `'anthropic'`).
47
+ translate_content: Function to translate `AIMessage` content.
48
+ translate_content_chunk: Function to translate `AIMessageChunk` content.
49
49
  """
50
50
  PROVIDER_TRANSLATORS[provider] = {
51
51
  "translate_content": translate_content,
@@ -62,9 +62,9 @@ def get_translator(
62
62
  provider: The model provider name.
63
63
 
64
64
  Returns:
65
- Dictionary with ``'translate_content'`` and ``'translate_content_chunk'``
65
+ Dictionary with `'translate_content'` and `'translate_content_chunk'`
66
66
  functions, or None if no translator is registered for the provider. In such
67
- case, best-effort parsing in ``BaseMessage`` will be used.
67
+ case, best-effort parsing in `BaseMessage` will be used.
68
68
  """
69
69
  return PROVIDER_TRANSLATORS.get(provider)
70
70
 
@@ -72,10 +72,10 @@ def get_translator(
72
72
  def _register_translators() -> None:
73
73
  """Register all translators in langchain-core.
74
74
 
75
- A unit test ensures all modules in ``block_translators`` are represented here.
75
+ A unit test ensures all modules in `block_translators` are represented here.
76
76
 
77
77
  For translators implemented outside langchain-core, they can be registered by
78
- calling ``register_translator`` from within the integration package.
78
+ calling `register_translator` from within the integration package.
79
79
  """
80
80
  from langchain_core.messages.block_translators.anthropic import ( # noqa: PLC0415
81
81
  _register_anthropic_translator,