langchain-core 1.0.0a2__py3-none-any.whl → 1.0.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: the registry flags that this version of langchain-core might be problematic.

Files changed (130)
  1. langchain_core/_api/beta_decorator.py +17 -40
  2. langchain_core/_api/deprecation.py +20 -7
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/callbacks/base.py +28 -15
  7. langchain_core/callbacks/manager.py +81 -69
  8. langchain_core/callbacks/usage.py +4 -2
  9. langchain_core/chat_history.py +29 -21
  10. langchain_core/document_loaders/base.py +34 -9
  11. langchain_core/document_loaders/langsmith.py +3 -0
  12. langchain_core/documents/base.py +35 -10
  13. langchain_core/documents/transformers.py +4 -2
  14. langchain_core/embeddings/fake.py +8 -5
  15. langchain_core/env.py +2 -3
  16. langchain_core/example_selectors/base.py +12 -0
  17. langchain_core/exceptions.py +7 -0
  18. langchain_core/globals.py +17 -28
  19. langchain_core/indexing/api.py +57 -45
  20. langchain_core/indexing/base.py +5 -8
  21. langchain_core/indexing/in_memory.py +23 -3
  22. langchain_core/language_models/__init__.py +6 -2
  23. langchain_core/language_models/_utils.py +27 -5
  24. langchain_core/language_models/base.py +33 -21
  25. langchain_core/language_models/chat_models.py +104 -31
  26. langchain_core/language_models/fake_chat_models.py +5 -7
  27. langchain_core/language_models/llms.py +54 -20
  28. langchain_core/load/dump.py +2 -3
  29. langchain_core/load/load.py +15 -1
  30. langchain_core/load/serializable.py +38 -43
  31. langchain_core/memory.py +7 -3
  32. langchain_core/messages/__init__.py +1 -1
  33. langchain_core/messages/ai.py +41 -34
  34. langchain_core/messages/base.py +20 -7
  35. langchain_core/messages/block_translators/__init__.py +10 -8
  36. langchain_core/messages/block_translators/anthropic.py +11 -7
  37. langchain_core/messages/block_translators/bedrock.py +76 -27
  38. langchain_core/messages/block_translators/bedrock_converse.py +259 -23
  39. langchain_core/messages/block_translators/google_genai.py +3 -1
  40. langchain_core/messages/block_translators/google_vertexai.py +3 -1
  41. langchain_core/messages/block_translators/groq.py +3 -1
  42. langchain_core/messages/block_translators/ollama.py +3 -1
  43. langchain_core/messages/block_translators/openai.py +50 -20
  44. langchain_core/messages/content.py +23 -13
  45. langchain_core/messages/human.py +2 -13
  46. langchain_core/messages/system.py +2 -6
  47. langchain_core/messages/tool.py +34 -14
  48. langchain_core/messages/utils.py +186 -73
  49. langchain_core/output_parsers/base.py +5 -2
  50. langchain_core/output_parsers/json.py +4 -4
  51. langchain_core/output_parsers/list.py +7 -22
  52. langchain_core/output_parsers/openai_functions.py +3 -0
  53. langchain_core/output_parsers/openai_tools.py +6 -1
  54. langchain_core/output_parsers/pydantic.py +4 -0
  55. langchain_core/output_parsers/string.py +5 -1
  56. langchain_core/output_parsers/xml.py +19 -19
  57. langchain_core/outputs/chat_generation.py +18 -7
  58. langchain_core/outputs/generation.py +14 -3
  59. langchain_core/outputs/llm_result.py +8 -1
  60. langchain_core/prompt_values.py +10 -4
  61. langchain_core/prompts/base.py +6 -11
  62. langchain_core/prompts/chat.py +88 -60
  63. langchain_core/prompts/dict.py +16 -8
  64. langchain_core/prompts/few_shot.py +9 -11
  65. langchain_core/prompts/few_shot_with_templates.py +5 -1
  66. langchain_core/prompts/image.py +12 -5
  67. langchain_core/prompts/loading.py +2 -2
  68. langchain_core/prompts/message.py +5 -6
  69. langchain_core/prompts/pipeline.py +13 -8
  70. langchain_core/prompts/prompt.py +22 -8
  71. langchain_core/prompts/string.py +18 -10
  72. langchain_core/prompts/structured.py +7 -2
  73. langchain_core/rate_limiters.py +2 -2
  74. langchain_core/retrievers.py +7 -6
  75. langchain_core/runnables/base.py +387 -246
  76. langchain_core/runnables/branch.py +11 -28
  77. langchain_core/runnables/config.py +20 -17
  78. langchain_core/runnables/configurable.py +34 -19
  79. langchain_core/runnables/fallbacks.py +20 -13
  80. langchain_core/runnables/graph.py +48 -38
  81. langchain_core/runnables/graph_ascii.py +40 -17
  82. langchain_core/runnables/graph_mermaid.py +54 -25
  83. langchain_core/runnables/graph_png.py +27 -31
  84. langchain_core/runnables/history.py +55 -58
  85. langchain_core/runnables/passthrough.py +44 -21
  86. langchain_core/runnables/retry.py +44 -23
  87. langchain_core/runnables/router.py +9 -8
  88. langchain_core/runnables/schema.py +9 -0
  89. langchain_core/runnables/utils.py +53 -90
  90. langchain_core/stores.py +19 -31
  91. langchain_core/sys_info.py +9 -8
  92. langchain_core/tools/base.py +36 -27
  93. langchain_core/tools/convert.py +25 -14
  94. langchain_core/tools/simple.py +36 -8
  95. langchain_core/tools/structured.py +25 -12
  96. langchain_core/tracers/base.py +2 -2
  97. langchain_core/tracers/context.py +5 -1
  98. langchain_core/tracers/core.py +110 -46
  99. langchain_core/tracers/evaluation.py +22 -26
  100. langchain_core/tracers/event_stream.py +97 -42
  101. langchain_core/tracers/langchain.py +12 -3
  102. langchain_core/tracers/langchain_v1.py +10 -2
  103. langchain_core/tracers/log_stream.py +56 -17
  104. langchain_core/tracers/root_listeners.py +4 -20
  105. langchain_core/tracers/run_collector.py +6 -16
  106. langchain_core/tracers/schemas.py +5 -1
  107. langchain_core/utils/aiter.py +14 -6
  108. langchain_core/utils/env.py +3 -0
  109. langchain_core/utils/function_calling.py +46 -20
  110. langchain_core/utils/interactive_env.py +6 -2
  111. langchain_core/utils/iter.py +12 -5
  112. langchain_core/utils/json.py +12 -3
  113. langchain_core/utils/json_schema.py +156 -40
  114. langchain_core/utils/loading.py +5 -1
  115. langchain_core/utils/mustache.py +25 -16
  116. langchain_core/utils/pydantic.py +38 -9
  117. langchain_core/utils/utils.py +25 -9
  118. langchain_core/vectorstores/base.py +7 -20
  119. langchain_core/vectorstores/in_memory.py +20 -14
  120. langchain_core/vectorstores/utils.py +18 -12
  121. langchain_core/version.py +1 -1
  122. langchain_core-1.0.0a4.dist-info/METADATA +77 -0
  123. langchain_core-1.0.0a4.dist-info/RECORD +181 -0
  124. langchain_core/beta/__init__.py +0 -1
  125. langchain_core/beta/runnables/__init__.py +0 -1
  126. langchain_core/beta/runnables/context.py +0 -448
  127. langchain_core-1.0.0a2.dist-info/METADATA +0 -106
  128. langchain_core-1.0.0a2.dist-info/RECORD +0 -184
  129. {langchain_core-1.0.0a2.dist-info → langchain_core-1.0.0a4.dist-info}/WHEEL +0 -0
  130. {langchain_core-1.0.0a2.dist-info → langchain_core-1.0.0a4.dist-info}/entry_points.txt +0 -0
langchain_core/language_models/llms.py CHANGED
@@ -131,6 +131,7 @@ def create_base_retry_decorator(
 
 def _resolve_cache(*, cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
     """Resolve the cache."""
+    llm_cache: Optional[BaseCache]
     if isinstance(cache, BaseCache):
         llm_cache = cache
     elif cache is None:
@@ -356,7 +357,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             ls_params["ls_stop"] = stop
 
         # model
-        if hasattr(self, "model") and isinstance(self.model, str):
+        if "model" in kwargs and isinstance(kwargs["model"], str):
+            ls_params["ls_model_name"] = kwargs["model"]
+        elif hasattr(self, "model") and isinstance(self.model, str):
             ls_params["ls_model_name"] = self.model
         elif hasattr(self, "model_name") and isinstance(self.model_name, str):
             ls_params["ls_model_name"] = self.model_name
@@ -663,7 +666,18 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Run the LLM on the given prompts."""
+        """Run the LLM on the given prompts.
+
+        Args:
+            prompts: The prompts to generate from.
+            stop: Stop words to use when generating. Model output is cut off at the
+                first occurrence of any of the stop substrings.
+                If stop tokens are not supported consider raising NotImplementedError.
+            run_manager: Callback manager for the run.
+
+        Returns:
+            The LLM result.
+        """
 
     async def _agenerate(
         self,
@@ -672,7 +686,18 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Run the LLM on the given prompts."""
+        """Run the LLM on the given prompts.
+
+        Args:
+            prompts: The prompts to generate from.
+            stop: Stop words to use when generating. Model output is cut off at the
+                first occurrence of any of the stop substrings.
+                If stop tokens are not supported consider raising NotImplementedError.
+            run_manager: Callback manager for the run.
+
+        Returns:
+            The LLM result.
+        """
         return await run_in_executor(
             None,
             self._generate,
@@ -705,8 +730,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.
 
-        Returns:
-            An iterator of GenerationChunks.
+        Yields:
+            Generation chunks.
         """
         raise NotImplementedError
 
@@ -731,8 +756,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.
 
-        Returns:
-            An async iterator of GenerationChunks.
+        Yields:
+            Generation chunks.
         """
         iterator = await run_in_executor(
             None,
@@ -830,10 +855,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+            1. Take advantage of batched calls,
+            2. Need more output from the model than just the top generated value,
+            3. Are building chains that are agnostic to the underlying language model
+                type (e.g., pure text completion models vs chat models).
 
         Args:
             prompts: List of string prompts.
@@ -853,6 +879,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.
 
+        Raises:
+            ValueError: If prompts is not a list.
+            ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
+                ``run_name`` (if provided) does not match the length of prompts.
+
         Returns:
             An LLMResult, which contains a list of candidate Generations for each input
                 prompt and additional model provider-specific output.
@@ -1090,10 +1121,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+            1. Take advantage of batched calls,
+            2. Need more output from the model than just the top generated value,
+            3. Are building chains that are agnostic to the underlying language model
+                type (e.g., pure text completion models vs chat models).
 
         Args:
             prompts: List of string prompts.
@@ -1113,6 +1145,10 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.
 
+        Raises:
+            ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
+                ``run_name`` (if provided) does not match the length of prompts.
+
         Returns:
             An LLMResult, which contains a list of candidate Generations for each input
                 prompt and additional model provider-specific output.
@@ -1388,7 +1424,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         return AIMessage(content=content)
 
     def __str__(self) -> str:
-        """Get a string representation of the object for printing."""
+        """Return a string representation of the object for printing."""
         cls_name = f"\033[1m{self.__class__.__name__}\033[0m"
         return f"{cls_name}\nParams: {self._identifying_params}"
 
@@ -1430,10 +1466,10 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         prompt_dict = self.dict()
 
         if save_path.suffix == ".json":
-            with save_path.open("w") as f:
+            with save_path.open("w", encoding="utf-8") as f:
                 json.dump(prompt_dict, f, indent=4)
         elif save_path.suffix.endswith((".yaml", ".yml")):
-            with save_path.open("w") as f:
+            with save_path.open("w", encoding="utf-8") as f:
                 yaml.dump(prompt_dict, f, default_flow_style=False)
         else:
             msg = f"{save_path} must be json or yaml"
@@ -1536,7 +1572,6 @@ class LLM(BaseLLM):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Run the LLM on the given prompt and input."""
         # TODO: add caching here.
         generations = []
         new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
@@ -1556,7 +1591,6 @@
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Async run the LLM on the given prompt and input."""
        generations = []
        new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
        for prompt in prompts:
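
The llms.py changes above tighten the typing of _resolve_cache, make a per-call "model" kwarg take precedence over instance attributes when populating LangSmith params, and force UTF-8 when saving to disk. A minimal standalone sketch of the new model-name lookup order (resolve_ls_model_name is a hypothetical helper written for illustration, not part of the package):

    from typing import Any, Optional

    def resolve_ls_model_name(obj: Any, **kwargs: Any) -> Optional[str]:
        # Per the diff, an explicit per-call "model" kwarg now wins
        # over the instance's model / model_name attributes.
        if "model" in kwargs and isinstance(kwargs["model"], str):
            return kwargs["model"]
        if hasattr(obj, "model") and isinstance(obj.model, str):
            return obj.model
        if hasattr(obj, "model_name") and isinstance(obj.model_name, str):
            return obj.model_name
        return None
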
langchain_core/load/dump.py CHANGED
@@ -6,6 +6,8 @@ from typing import Any
 from pydantic import BaseModel
 
 from langchain_core.load.serializable import Serializable, to_json_not_implemented
+from langchain_core.messages import AIMessage
+from langchain_core.outputs import ChatGeneration
 
 
 def default(obj: Any) -> Any:
@@ -23,9 +25,6 @@ def default(obj: Any) -> Any:
 
 
 def _dump_pydantic_models(obj: Any) -> Any:
-    from langchain_core.messages import AIMessage
-    from langchain_core.outputs import ChatGeneration
-
     if (
         isinstance(obj, ChatGeneration)
         and isinstance(obj.message, AIMessage)
langchain_core/load/load.py CHANGED
@@ -95,7 +95,21 @@ class Reviver:
         self.ignore_unserializable_fields = ignore_unserializable_fields
 
     def __call__(self, value: dict[str, Any]) -> Any:
-        """Revive the value."""
+        """Revive the value.
+
+        Args:
+            value: The value to revive.
+
+        Returns:
+            The revived value.
+
+        Raises:
+            ValueError: If the namespace is invalid.
+            ValueError: If trying to deserialize something that cannot
+                be deserialized in the current version of langchain-core.
+            NotImplementedError: If the object is not implemented and
+                ``ignore_unserializable_fields`` is False.
+        """
         if (
             value.get("lc") == 1
             and value.get("type") == "secret"
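
The expanded Reviver.__call__ docstring above covers the deserialization path behind loads(). A quick round trip, assuming a serializable class such as AIMessage:

    from langchain_core.load import dumps, loads
    from langchain_core.messages import AIMessage

    serialized = dumps(AIMessage(content="hello"))  # JSON with lc/type/id/kwargs
    revived = loads(serialized)  # Reviver rebuilds the AIMessage
    assert isinstance(revived, AIMessage)
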
langchain_core/load/serializable.py CHANGED
@@ -20,53 +20,41 @@ logger = logging.getLogger(__name__)
 
 
 class BaseSerialized(TypedDict):
-    """Base class for serialized objects.
-
-    Parameters:
-        lc: The version of the serialization format.
-        id: The unique identifier of the object.
-        name: The name of the object. Optional.
-        graph: The graph of the object. Optional.
-    """
+    """Base class for serialized objects."""
 
     lc: int
+    """The version of the serialization format."""
     id: list[str]
+    """The unique identifier of the object."""
     name: NotRequired[str]
+    """The name of the object. Optional."""
     graph: NotRequired[dict[str, Any]]
+    """The graph of the object. Optional."""
 
 
 class SerializedConstructor(BaseSerialized):
-    """Serialized constructor.
-
-    Parameters:
-        type: The type of the object. Must be "constructor".
-        kwargs: The constructor arguments.
-    """
+    """Serialized constructor."""
 
     type: Literal["constructor"]
+    """The type of the object. Must be ``'constructor'``."""
     kwargs: dict[str, Any]
+    """The constructor arguments."""
 
 
 class SerializedSecret(BaseSerialized):
-    """Serialized secret.
-
-    Parameters:
-        type: The type of the object. Must be "secret".
-    """
+    """Serialized secret."""
 
     type: Literal["secret"]
+    """The type of the object. Must be ``'secret'``."""
 
 
 class SerializedNotImplemented(BaseSerialized):
-    """Serialized not implemented.
-
-    Parameters:
-        type: The type of the object. Must be "not_implemented".
-        repr: The representation of the object. Optional.
-    """
+    """Serialized not implemented."""
 
     type: Literal["not_implemented"]
+    """The type of the object. Must be ``'not_implemented'``."""
     repr: Optional[str]
+    """The representation of the object. Optional."""
 
 
 def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
@@ -79,9 +67,6 @@ def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
 
     Returns:
         Whether the value is different from the default.
-
-    Raises:
-        Exception: If the key is not in the model.
     """
     field = type(model).model_fields[key]
     return _try_neq_default(value, field)
@@ -109,19 +94,19 @@ class Serializable(BaseModel, ABC):
 
     It relies on the following methods and properties:
 
-    - `is_lc_serializable`: Is this class serializable?
-      By design, even if a class inherits from Serializable, it is not serializable by
-      default. This is to prevent accidental serialization of objects that should not
-      be serialized.
-    - `get_lc_namespace`: Get the namespace of the langchain object.
-      During deserialization, this namespace is used to identify
-      the correct class to instantiate.
-      Please see the `Reviver` class in `langchain_core.load.load` for more details.
-      During deserialization an additional mapping is handle
-      classes that have moved or been renamed across package versions.
-    - `lc_secrets`: A map of constructor argument names to secret ids.
-    - `lc_attributes`: List of additional attribute names that should be included
-      as part of the serialized representation.
+    - ``is_lc_serializable``: Is this class serializable?
+      By design, even if a class inherits from Serializable, it is not serializable by
+      default. This is to prevent accidental serialization of objects that should not
+      be serialized.
+    - ``get_lc_namespace``: Get the namespace of the langchain object.
+      During deserialization, this namespace is used to identify
+      the correct class to instantiate.
+      Please see the ``Reviver`` class in ``langchain_core.load.load`` for more details.
+      During deserialization an additional mapping is handle
+      classes that have moved or been renamed across package versions.
+    - ``lc_secrets``: A map of constructor argument names to secret ids.
+    - ``lc_attributes``: List of additional attribute names that should be included
+      as part of the serialized representation.
     """
 
     # Remove default BaseModel init docstring.
@@ -148,6 +133,9 @@ class Serializable(BaseModel, ABC):
 
         For example, if the class is `langchain.llms.openai.OpenAI`, then the
        namespace is ["langchain", "llms", "openai"]
+
+        Returns:
+            The namespace as a list of strings.
         """
         return cls.__module__.split(".")
 
@@ -171,7 +159,7 @@ class Serializable(BaseModel, ABC):
 
     @classmethod
     def lc_id(cls) -> list[str]:
-        """A unique identifier for this class for serialization purposes.
+        """Return a unique identifier for this class for serialization purposes.
 
         The unique identifier is a list of strings that describes the path
         to the object.
@@ -203,6 +191,9 @@ class Serializable(BaseModel, ABC):
     def to_json(self) -> Union[SerializedConstructor, SerializedNotImplemented]:
         """Serialize the object to JSON.
 
+        Raises:
+            ValueError: If the class has deprecated attributes.
+
         Returns:
             A json serializable object or a SerializedNotImplemented object.
         """
@@ -276,7 +267,11 @@ class Serializable(BaseModel, ABC):
         }
 
     def to_json_not_implemented(self) -> SerializedNotImplemented:
-        """Serialize a "not implemented" object."""
+        """Serialize a "not implemented" object.
+
+        Returns:
+            SerializedNotImplemented.
+        """
         return to_json_not_implemented(self)
 
 
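The docstrings moved onto the TypedDict fields above document the same wire format as before; calling to_json() on a serializable object still yields a SerializedConstructor-shaped dict, roughly as follows (exact kwargs may vary by version):

    from langchain_core.messages import AIMessage

    doc = AIMessage(content="hi").to_json()
    # Roughly: {"lc": 1, "type": "constructor",
    #           "id": ["langchain", "schema", "messages", "AIMessage"],
    #           "kwargs": {"content": "hi"}}
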
langchain_core/memory.py CHANGED
@@ -45,16 +45,20 @@ class BaseMemory(Serializable, ABC):
                def memory_variables(self) -> list[str]:
                    return list(self.memories.keys())
 
-                def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
+                def load_memory_variables(
+                    self, inputs: dict[str, Any]
+                ) -> dict[str, str]:
                    return self.memories
 
-                def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
+                def save_context(
+                    self, inputs: dict[str, Any], outputs: dict[str, str]
+                ) -> None:
                    pass
 
                def clear(self) -> None:
                    pass
 
-    """  # noqa: E501
+    """
 
     model_config = ConfigDict(
         arbitrary_types_allowed=True,
langchain_core/messages/__init__.py CHANGED
@@ -48,6 +48,7 @@ if TYPE_CHECKING:
         DataContentBlock,
         FileContentBlock,
         ImageContentBlock,
+        InvalidToolCall,
         NonStandardAnnotation,
         NonStandardContentBlock,
         PlainTextContentBlock,
@@ -63,7 +64,6 @@ if TYPE_CHECKING:
     from langchain_core.messages.modifier import RemoveMessage
     from langchain_core.messages.system import SystemMessage, SystemMessageChunk
     from langchain_core.messages.tool import (
-        InvalidToolCall,
         ToolCall,
         ToolCallChunk,
         ToolMessage,
langchain_core/messages/ai.py CHANGED
@@ -4,10 +4,10 @@ import json
 import logging
 import operator
 from collections.abc import Sequence
-from typing import Any, Literal, Optional, Union, cast
+from typing import Any, Literal, Optional, Union, cast, overload
 
 from pydantic import model_validator
-from typing_extensions import NotRequired, Self, TypedDict, overload, override
+from typing_extensions import NotRequired, Self, TypedDict, override
 
 from langchain_core.messages import content as types
 from langchain_core.messages.base import (
@@ -15,8 +15,8 @@ from langchain_core.messages.base import (
     BaseMessageChunk,
     merge_content,
 )
+from langchain_core.messages.content import InvalidToolCall
 from langchain_core.messages.tool import (
-    InvalidToolCall,
     ToolCall,
     ToolCallChunk,
     default_tool_chunk_parser,
@@ -118,7 +118,7 @@ class UsageMetadata(TypedDict):
                 "output_token_details": {
                     "audio": 10,
                     "reasoning": 200,
-                }
+                },
             }
 
     .. versionchanged:: 0.3.9
@@ -155,12 +155,6 @@ class AIMessage(BaseMessage):
     (e.g., tool calls, usage metadata) added by the LangChain framework.
     """
 
-    example: bool = False
-    """Use to denote that a message is part of an example conversation.
-
-    At the moment, this is ignored by most models. Usage is discouraged.
-    """
-
     tool_calls: list[ToolCall] = []
     """If provided, tool calls associated with the message."""
     invalid_tool_calls: list[InvalidToolCall] = []
@@ -227,7 +221,9 @@
 
         model_provider = self.response_metadata.get("model_provider")
         if model_provider:
-            from langchain_core.messages.block_translators import get_translator
+            from langchain_core.messages.block_translators import (  # noqa: PLC0415
+                get_translator,
+            )
 
             translator = get_translator(model_provider)
             if translator:
@@ -386,7 +382,9 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
 
         model_provider = self.response_metadata.get("model_provider")
         if model_provider:
-            from langchain_core.messages.block_translators import get_translator
+            from langchain_core.messages.block_translators import (  # noqa: PLC0415
+                get_translator,
+            )
 
             translator = get_translator(model_provider)
             if translator:
@@ -425,14 +423,8 @@
     def init_tool_calls(self) -> Self:
         """Initialize tool calls from tool call chunks.
 
-        Args:
-            values: The values to validate.
-
         Returns:
-            The values with tool calls initialized.
-
-        Raises:
-            ValueError: If the tool call chunks are malformed.
+            This ``AIMessageChunk``.
         """
         if not self.tool_call_chunks:
             if self.tool_calls:
@@ -473,10 +465,7 @@
 
         for chunk in self.tool_call_chunks:
             try:
-                if chunk["args"] is not None and chunk["args"] != "":
-                    args_ = parse_partial_json(chunk["args"])
-                else:
-                    args_ = {}
+                args_ = parse_partial_json(chunk["args"]) if chunk["args"] else {}
                 if isinstance(args_, dict):
                     tool_calls.append(
                         create_tool_call(
@@ -542,11 +531,16 @@
 def add_ai_message_chunks(
     left: AIMessageChunk, *others: AIMessageChunk
 ) -> AIMessageChunk:
-    """Add multiple AIMessageChunks together."""
-    if any(left.example != o.example for o in others):
-        msg = "Cannot concatenate AIMessageChunks with different example values."
-        raise ValueError(msg)
+    """Add multiple ``AIMessageChunk``s together.
+
+    Args:
+        left: The first ``AIMessageChunk``.
+        *others: Other ``AIMessageChunk``s to add.
+
+    Returns:
+        The resulting ``AIMessageChunk``.
 
+    """
     content = merge_content(left.content, *(o.content for o in others))
     additional_kwargs = merge_dicts(
         left.additional_kwargs, *(o.additional_kwargs for o in others)
@@ -608,7 +602,6 @@ def add_ai_message_chunks(
     )
 
     return left.__class__(
-        example=left.example,
         content=content,
         additional_kwargs=additional_kwargs,
         tool_call_chunks=tool_call_chunks,
@@ -633,13 +626,13 @@ def add_usage(
             input_tokens=5,
             output_tokens=0,
             total_tokens=5,
-            input_token_details=InputTokenDetails(cache_read=3)
+            input_token_details=InputTokenDetails(cache_read=3),
         )
         right = UsageMetadata(
             input_tokens=0,
             output_tokens=10,
             total_tokens=10,
-            output_token_details=OutputTokenDetails(reasoning=4)
+            output_token_details=OutputTokenDetails(reasoning=4),
         )
 
         add_usage(left, right)
@@ -653,9 +646,16 @@
             output_tokens=10,
             total_tokens=15,
             input_token_details=InputTokenDetails(cache_read=3),
-            output_token_details=OutputTokenDetails(reasoning=4)
+            output_token_details=OutputTokenDetails(reasoning=4),
         )
 
+    Args:
+        left: The first ``UsageMetadata`` object.
+        right: The second ``UsageMetadata`` object.
+
+    Returns:
+        The sum of the two ``UsageMetadata`` objects.
+
     """
     if not (left or right):
         return UsageMetadata(input_tokens=0, output_tokens=0, total_tokens=0)
@@ -690,13 +690,13 @@ def subtract_usage(
             input_tokens=5,
             output_tokens=10,
             total_tokens=15,
-            input_token_details=InputTokenDetails(cache_read=4)
+            input_token_details=InputTokenDetails(cache_read=4),
         )
         right = UsageMetadata(
             input_tokens=3,
             output_tokens=8,
             total_tokens=11,
-            output_token_details=OutputTokenDetails(reasoning=4)
+            output_token_details=OutputTokenDetails(reasoning=4),
         )
 
         subtract_usage(left, right)
@@ -710,9 +710,16 @@
             output_tokens=2,
             total_tokens=4,
             input_token_details=InputTokenDetails(cache_read=4),
-            output_token_details=OutputTokenDetails(reasoning=0)
+            output_token_details=OutputTokenDetails(reasoning=0),
         )
 
+    Args:
+        left: The first ``UsageMetadata`` object.
+        right: The second ``UsageMetadata`` object.
+
+    Returns:
+        The resulting ``UsageMetadata`` after subtraction.
+
     """
     if not (left or right):
         return UsageMetadata(input_tokens=0, output_tokens=0, total_tokens=0)
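
With the deprecated example flag gone from AIMessage, add_ai_message_chunks merges only content, additional kwargs, tool call chunks, and usage metadata. Chunk concatenation via + is otherwise unchanged:

    from langchain_core.messages import AIMessageChunk

    merged = AIMessageChunk(content="Hel") + AIMessageChunk(content="lo")
    assert merged.content == "Hello"
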
langchain_core/messages/base.py CHANGED
@@ -150,7 +150,8 @@ class BaseMessage(Serializable):
     def get_lc_namespace(cls) -> list[str]:
         """Get the namespace of the langchain object.
 
-        Default is ["langchain", "schema", "messages"].
+        Returns:
+            ``["langchain", "schema", "messages"]``
         """
         return ["langchain", "schema", "messages"]
 
@@ -179,14 +180,17 @@
         .. versionadded:: 1.0.0
 
         """  # noqa: E501
-        from langchain_core.messages import content as types
-        from langchain_core.messages.block_translators.anthropic import (
+        from langchain_core.messages import content as types  # noqa: PLC0415
+        from langchain_core.messages.block_translators.anthropic import (  # noqa: PLC0415
             _convert_to_v1_from_anthropic_input,
         )
-        from langchain_core.messages.block_translators.langchain_v0 import (
+        from langchain_core.messages.block_translators.bedrock_converse import (  # noqa: PLC0415
+            _convert_to_v1_from_converse_input,
+        )
+        from langchain_core.messages.block_translators.langchain_v0 import (  # noqa: PLC0415
             _convert_v0_multimodal_input_to_v1,
         )
-        from langchain_core.messages.block_translators.openai import (
+        from langchain_core.messages.block_translators.openai import (  # noqa: PLC0415
             _convert_to_v1_from_chat_completions_input,
         )
 
@@ -213,6 +217,7 @@
             _convert_v0_multimodal_input_to_v1,
             _convert_to_v1_from_chat_completions_input,
             _convert_to_v1_from_anthropic_input,
+            _convert_to_v1_from_converse_input,
         ]:
             blocks = parsing_step(blocks)
         return blocks
@@ -246,8 +251,16 @@
             return TextAccessor(text_value)
 
     def __add__(self, other: Any) -> ChatPromptTemplate:
-        """Concatenate this message with another message."""
-        from langchain_core.prompts.chat import ChatPromptTemplate
+        """Concatenate this message with another message.
+
+        Args:
+            other: Another message to concatenate with this one.
+
+        Returns:
+            A ChatPromptTemplate containing both messages.
+        """
+        # Import locally to prevent circular imports.
+        from langchain_core.prompts.chat import ChatPromptTemplate  # noqa: PLC0415
 
         prompt = ChatPromptTemplate(messages=[self])
         return prompt + other
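
The expanded __add__ docstring describes existing behavior: adding messages yields a prompt template. For example:

    from langchain_core.messages import HumanMessage, SystemMessage

    template = SystemMessage(content="Be terse.") + HumanMessage(content="Hi!")
    # template is a ChatPromptTemplate holding both messages
    messages = template.format_messages()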