langchain-core 0.3.75__py3-none-any.whl → 0.3.77__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic.

Files changed (119)
  1. langchain_core/_api/beta_decorator.py +22 -44
  2. langchain_core/_api/deprecation.py +30 -17
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/beta/runnables/context.py +1 -2
  7. langchain_core/callbacks/base.py +28 -15
  8. langchain_core/callbacks/manager.py +83 -71
  9. langchain_core/callbacks/usage.py +6 -4
  10. langchain_core/chat_history.py +29 -21
  11. langchain_core/document_loaders/base.py +34 -9
  12. langchain_core/document_loaders/langsmith.py +4 -1
  13. langchain_core/documents/base.py +35 -10
  14. langchain_core/documents/transformers.py +4 -2
  15. langchain_core/embeddings/fake.py +8 -5
  16. langchain_core/env.py +2 -3
  17. langchain_core/example_selectors/base.py +12 -0
  18. langchain_core/exceptions.py +7 -0
  19. langchain_core/globals.py +17 -28
  20. langchain_core/indexing/api.py +88 -76
  21. langchain_core/indexing/base.py +5 -8
  22. langchain_core/indexing/in_memory.py +23 -3
  23. langchain_core/language_models/__init__.py +3 -2
  24. langchain_core/language_models/base.py +31 -20
  25. langchain_core/language_models/chat_models.py +98 -27
  26. langchain_core/language_models/fake_chat_models.py +10 -9
  27. langchain_core/language_models/llms.py +52 -18
  28. langchain_core/load/dump.py +2 -3
  29. langchain_core/load/load.py +15 -1
  30. langchain_core/load/serializable.py +39 -44
  31. langchain_core/memory.py +7 -3
  32. langchain_core/messages/ai.py +53 -24
  33. langchain_core/messages/base.py +43 -22
  34. langchain_core/messages/chat.py +4 -1
  35. langchain_core/messages/content_blocks.py +23 -2
  36. langchain_core/messages/function.py +9 -5
  37. langchain_core/messages/human.py +13 -10
  38. langchain_core/messages/modifier.py +1 -0
  39. langchain_core/messages/system.py +11 -8
  40. langchain_core/messages/tool.py +60 -29
  41. langchain_core/messages/utils.py +250 -131
  42. langchain_core/output_parsers/base.py +5 -2
  43. langchain_core/output_parsers/json.py +4 -4
  44. langchain_core/output_parsers/list.py +7 -22
  45. langchain_core/output_parsers/openai_functions.py +3 -0
  46. langchain_core/output_parsers/openai_tools.py +6 -1
  47. langchain_core/output_parsers/pydantic.py +4 -0
  48. langchain_core/output_parsers/string.py +5 -1
  49. langchain_core/output_parsers/xml.py +19 -19
  50. langchain_core/outputs/chat_generation.py +25 -10
  51. langchain_core/outputs/generation.py +14 -3
  52. langchain_core/outputs/llm_result.py +8 -1
  53. langchain_core/prompt_values.py +16 -6
  54. langchain_core/prompts/base.py +4 -9
  55. langchain_core/prompts/chat.py +89 -57
  56. langchain_core/prompts/dict.py +16 -8
  57. langchain_core/prompts/few_shot.py +12 -11
  58. langchain_core/prompts/few_shot_with_templates.py +5 -1
  59. langchain_core/prompts/image.py +12 -5
  60. langchain_core/prompts/message.py +5 -6
  61. langchain_core/prompts/pipeline.py +13 -8
  62. langchain_core/prompts/prompt.py +22 -8
  63. langchain_core/prompts/string.py +18 -10
  64. langchain_core/prompts/structured.py +7 -2
  65. langchain_core/rate_limiters.py +2 -2
  66. langchain_core/retrievers.py +7 -6
  67. langchain_core/runnables/base.py +406 -186
  68. langchain_core/runnables/branch.py +14 -19
  69. langchain_core/runnables/config.py +9 -15
  70. langchain_core/runnables/configurable.py +34 -19
  71. langchain_core/runnables/fallbacks.py +20 -13
  72. langchain_core/runnables/graph.py +48 -38
  73. langchain_core/runnables/graph_ascii.py +41 -18
  74. langchain_core/runnables/graph_mermaid.py +54 -25
  75. langchain_core/runnables/graph_png.py +27 -31
  76. langchain_core/runnables/history.py +55 -58
  77. langchain_core/runnables/passthrough.py +44 -21
  78. langchain_core/runnables/retry.py +44 -23
  79. langchain_core/runnables/router.py +9 -8
  80. langchain_core/runnables/schema.py +2 -0
  81. langchain_core/runnables/utils.py +51 -89
  82. langchain_core/stores.py +19 -31
  83. langchain_core/sys_info.py +9 -8
  84. langchain_core/tools/base.py +37 -28
  85. langchain_core/tools/convert.py +26 -15
  86. langchain_core/tools/simple.py +36 -8
  87. langchain_core/tools/structured.py +25 -12
  88. langchain_core/tracers/base.py +2 -2
  89. langchain_core/tracers/context.py +5 -1
  90. langchain_core/tracers/core.py +109 -39
  91. langchain_core/tracers/evaluation.py +22 -26
  92. langchain_core/tracers/event_stream.py +45 -34
  93. langchain_core/tracers/langchain.py +12 -3
  94. langchain_core/tracers/langchain_v1.py +10 -2
  95. langchain_core/tracers/log_stream.py +56 -17
  96. langchain_core/tracers/root_listeners.py +4 -20
  97. langchain_core/tracers/run_collector.py +6 -16
  98. langchain_core/tracers/schemas.py +5 -1
  99. langchain_core/utils/aiter.py +15 -7
  100. langchain_core/utils/env.py +3 -0
  101. langchain_core/utils/function_calling.py +50 -28
  102. langchain_core/utils/interactive_env.py +6 -2
  103. langchain_core/utils/iter.py +12 -4
  104. langchain_core/utils/json.py +12 -3
  105. langchain_core/utils/json_schema.py +156 -40
  106. langchain_core/utils/loading.py +5 -1
  107. langchain_core/utils/mustache.py +24 -15
  108. langchain_core/utils/pydantic.py +38 -9
  109. langchain_core/utils/utils.py +25 -9
  110. langchain_core/vectorstores/base.py +7 -20
  111. langchain_core/vectorstores/in_memory.py +23 -17
  112. langchain_core/vectorstores/utils.py +18 -12
  113. langchain_core/version.py +1 -1
  114. langchain_core-0.3.77.dist-info/METADATA +67 -0
  115. langchain_core-0.3.77.dist-info/RECORD +174 -0
  116. langchain_core-0.3.75.dist-info/METADATA +0 -106
  117. langchain_core-0.3.75.dist-info/RECORD +0 -174
  118. {langchain_core-0.3.75.dist-info → langchain_core-0.3.77.dist-info}/WHEEL +0 -0
  119. {langchain_core-0.3.75.dist-info → langchain_core-0.3.77.dist-info}/entry_points.txt +0 -0
langchain_core/language_models/llms.py CHANGED
@@ -131,6 +131,7 @@ def create_base_retry_decorator(
 
 def _resolve_cache(*, cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
     """Resolve the cache."""
+    llm_cache: Optional[BaseCache]
     if isinstance(cache, BaseCache):
        llm_cache = cache
    elif cache is None:
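The only functional change above is the new ``llm_cache`` annotation: declaring the variable's type once lets a type checker accept branches that assign either a ``BaseCache`` or ``None``. A standalone sketch of the pattern (illustrative stub, not the langchain-core source):

```python
from typing import Optional, Union

class BaseCache:  # stand-in for langchain_core.caches.BaseCache
    ...

def resolve_cache(cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
    llm_cache: Optional[BaseCache]  # declared once, checked in every branch
    if isinstance(cache, BaseCache):
        llm_cache = cache
    else:
        # Without the annotation, mypy infers the type from the first
        # assignment and can reject this branch.
        llm_cache = None
    return llm_cache
```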
@@ -356,7 +357,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             ls_params["ls_stop"] = stop
 
         # model
-        if hasattr(self, "model") and isinstance(self.model, str):
+        if "model" in kwargs and isinstance(kwargs["model"], str):
+            ls_params["ls_model_name"] = kwargs["model"]
+        elif hasattr(self, "model") and isinstance(self.model, str):
             ls_params["ls_model_name"] = self.model
         elif hasattr(self, "model_name") and isinstance(self.model_name, str):
             ls_params["ls_model_name"] = self.model_name
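Effect of the change: a per-call ``model`` kwarg now takes precedence over the instance's ``model``/``model_name`` attributes when populating the LangSmith tracing field ``ls_model_name``. A standalone sketch of the new precedence (hypothetical helper, not the actual method):

```python
from typing import Any, Optional

def resolve_ls_model_name(llm: Any, **kwargs: Any) -> Optional[str]:
    """Mirror of the lookup order introduced in 0.3.77."""
    if "model" in kwargs and isinstance(kwargs["model"], str):
        return kwargs["model"]  # per-call override wins
    if isinstance(getattr(llm, "model", None), str):
        return llm.model
    if isinstance(getattr(llm, "model_name", None), str):
        return llm.model_name
    return None
```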
@@ -663,7 +666,18 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Run the LLM on the given prompts."""
+        """Run the LLM on the given prompts.
+
+        Args:
+            prompts: The prompts to generate from.
+            stop: Stop words to use when generating. Model output is cut off at the
+                first occurrence of any of the stop substrings.
+                If stop tokens are not supported consider raising NotImplementedError.
+            run_manager: Callback manager for the run.
+
+        Returns:
+            The LLM result.
+        """
 
     async def _agenerate(
         self,
@@ -672,7 +686,18 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Run the LLM on the given prompts."""
+        """Run the LLM on the given prompts.
+
+        Args:
+            prompts: The prompts to generate from.
+            stop: Stop words to use when generating. Model output is cut off at the
+                first occurrence of any of the stop substrings.
+                If stop tokens are not supported consider raising NotImplementedError.
+            run_manager: Callback manager for the run.
+
+        Returns:
+            The LLM result.
+        """
         return await run_in_executor(
             None,
             self._generate,
@@ -705,8 +730,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.
 
-        Returns:
-            An iterator of GenerationChunks.
+        Yields:
+            Generation chunks.
         """
         raise NotImplementedError
 
@@ -731,8 +756,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.
 
-        Returns:
-            An async iterator of GenerationChunks.
+        Yields:
+            Generation chunks.
         """
         iterator = await run_in_executor(
             None,
@@ -830,10 +855,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
             prompts: List of string prompts.
@@ -853,6 +879,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.
 
+        Raises:
+            ValueError: If prompts is not a list.
+            ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
+                ``run_name`` (if provided) does not match the length of prompts.
+
         Returns:
             An LLMResult, which contains a list of candidate Generations for each input
                 prompt and additional model provider-specific output.
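The newly documented ``Raises`` cases describe validation ``generate`` already performs: when per-prompt ``callbacks`` lists are supplied, ``tags``, ``metadata``, and ``run_name`` lists must match ``prompts`` in length. A quick check of that contract using langchain-core's fake test LLM (exact error wording may differ):

```python
from langchain_core.language_models import FakeListLLM

llm = FakeListLLM(responses=["a", "b"])

# OK: one callbacks list and one tags list per prompt.
llm.generate(["p1", "p2"], callbacks=[[], []], tags=[["x"], ["y"]])

# ValueError: two prompts, but only one tags entry.
llm.generate(["p1", "p2"], callbacks=[[], []], tags=[["x"]])
```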
@@ -1090,10 +1121,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
             prompts: List of string prompts.
@@ -1113,6 +1145,10 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.
 
+        Raises:
+            ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
+                ``run_name`` (if provided) does not match the length of prompts.
+
         Returns:
             An LLMResult, which contains a list of candidate Generations for each input
                 prompt and additional model provider-specific output.
@@ -1388,7 +1424,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         return AIMessage(content=content)
 
     def __str__(self) -> str:
-        """Get a string representation of the object for printing."""
+        """Return a string representation of the object for printing."""
         cls_name = f"\033[1m{self.__class__.__name__}\033[0m"
         return f"{cls_name}\nParams: {self._identifying_params}"
 
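For reference, ``__str__`` drives ``print(llm)``; only the docstring verb changed. Output shape (approximate):

```python
from langchain_core.language_models import FakeListLLM

print(FakeListLLM(responses=["hi"]))
# FakeListLLM          <- class name, bolded via ANSI escape codes
# Params: {'responses': ['hi']}
```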
@@ -1536,7 +1572,6 @@ class LLM(BaseLLM):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Run the LLM on the given prompt and input."""
         # TODO: add caching here.
         generations = []
         new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
@@ -1556,7 +1591,6 @@ class LLM(BaseLLM):
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Async run the LLM on the given prompt and input."""
         generations = []
         new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
         for prompt in prompts:
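Both ``LLM`` hunks only drop docstrings that duplicated the base class: ``LLM._generate``/``_agenerate`` simply fan prompts out to the user-implemented ``_call``/``_acall``. For context, the minimal subclass contract these methods dispatch to (illustrative class, not from the package):

```python
from typing import Any, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM

class EchoLLM(LLM):
    """Toy LLM that uppercases the prompt."""

    @property
    def _llm_type(self) -> str:
        return "echo"

    def _call(
        self,
        prompt: str,
        stop: Optional[list[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        return prompt.upper()

print(EchoLLM().invoke("hi"))  # -> HI
```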
langchain_core/load/dump.py CHANGED
@@ -6,6 +6,8 @@ from typing import Any
 from pydantic import BaseModel
 
 from langchain_core.load.serializable import Serializable, to_json_not_implemented
+from langchain_core.messages import AIMessage
+from langchain_core.outputs import ChatGeneration
 
 
 def default(obj: Any) -> Any:
@@ -23,9 +25,6 @@ def default(obj: Any) -> Any:
 
 
 def _dump_pydantic_models(obj: Any) -> Any:
-    from langchain_core.messages import AIMessage
-    from langchain_core.outputs import ChatGeneration
-
     if (
         isinstance(obj, ChatGeneration)
         and isinstance(obj.message, AIMessage)
langchain_core/load/load.py CHANGED
@@ -95,7 +95,21 @@ class Reviver:
         self.ignore_unserializable_fields = ignore_unserializable_fields
 
     def __call__(self, value: dict[str, Any]) -> Any:
-        """Revive the value."""
+        """Revive the value.
+
+        Args:
+            value: The value to revive.
+
+        Returns:
+            The revived value.
+
+        Raises:
+            ValueError: If the namespace is invalid.
+            ValueError: If trying to deserialize something that cannot
+                be deserialized in the current version of langchain-core.
+            NotImplementedError: If the object is not implemented and
+                ``ignore_unserializable_fields`` is False.
+        """
         if (
             value.get("lc") == 1
             and value.get("type") == "secret"
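``Reviver`` is the deserialization hook behind ``loads``; the expanded docstring documents its failure modes. A round-trip sketch (``dumps``/``loads`` are public but marked beta in langchain-core):

```python
from langchain_core.load import dumps, loads
from langchain_core.messages import AIMessage

msg = AIMessage(content="hello")
text = dumps(msg)       # JSON string tagged with lc / type / id
restored = loads(text)  # Reviver maps the id path back to AIMessage
assert restored == msg
```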
langchain_core/load/serializable.py CHANGED
@@ -20,53 +20,41 @@ logger = logging.getLogger(__name__)
 
 
 class BaseSerialized(TypedDict):
-    """Base class for serialized objects.
-
-    Parameters:
-        lc: The version of the serialization format.
-        id: The unique identifier of the object.
-        name: The name of the object. Optional.
-        graph: The graph of the object. Optional.
-    """
+    """Base class for serialized objects."""
 
     lc: int
+    """The version of the serialization format."""
     id: list[str]
+    """The unique identifier of the object."""
     name: NotRequired[str]
+    """The name of the object. Optional."""
     graph: NotRequired[dict[str, Any]]
+    """The graph of the object. Optional."""
 
 
 class SerializedConstructor(BaseSerialized):
-    """Serialized constructor.
-
-    Parameters:
-        type: The type of the object. Must be "constructor".
-        kwargs: The constructor arguments.
-    """
+    """Serialized constructor."""
 
     type: Literal["constructor"]
+    """The type of the object. Must be ``'constructor'``."""
     kwargs: dict[str, Any]
+    """The constructor arguments."""
 
 
 class SerializedSecret(BaseSerialized):
-    """Serialized secret.
-
-    Parameters:
-        type: The type of the object. Must be "secret".
-    """
+    """Serialized secret."""
 
     type: Literal["secret"]
+    """The type of the object. Must be ``'secret'``."""
 
 
 class SerializedNotImplemented(BaseSerialized):
-    """Serialized not implemented.
-
-    Parameters:
-        type: The type of the object. Must be "not_implemented".
-        repr: The representation of the object. Optional.
-    """
+    """Serialized not implemented."""
 
     type: Literal["not_implemented"]
+    """The type of the object. Must be ``'not_implemented'``."""
     repr: Optional[str]
+    """The representation of the object. Optional."""
 
 
 def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
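The rewrite replaces single ``Parameters:`` blocks with per-field docstrings, which documentation tooling can attach to each key. For orientation, a value matching the ``SerializedConstructor`` shape (the ``id`` path is just an example):

```python
payload = {
    "lc": 1,  # serialization format version
    "type": "constructor",
    "id": ["langchain", "schema", "messages", "AIMessage"],
    "kwargs": {"content": "hello"},
}
```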
@@ -79,9 +67,6 @@ def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
 
     Returns:
         Whether the value is different from the default.
-
-    Raises:
-        Exception: If the key is not in the model.
     """
     field = type(model).model_fields[key]
     return _try_neq_default(value, field)
@@ -109,24 +94,24 @@ class Serializable(BaseModel, ABC):
 
     It relies on the following methods and properties:
 
-    - `is_lc_serializable`: Is this class serializable?
-      By design, even if a class inherits from Serializable, it is not serializable by
-      default. This is to prevent accidental serialization of objects that should not
-      be serialized.
-    - `get_lc_namespace`: Get the namespace of the langchain object.
-      During deserialization, this namespace is used to identify
-      the correct class to instantiate.
-      Please see the `Reviver` class in `langchain_core.load.load` for more details.
-      During deserialization an additional mapping is handle
-      classes that have moved or been renamed across package versions.
-    - `lc_secrets`: A map of constructor argument names to secret ids.
-    - `lc_attributes`: List of additional attribute names that should be included
-      as part of the serialized representation.
+    - ``is_lc_serializable``: Is this class serializable?
+      By design, even if a class inherits from Serializable, it is not serializable by
+      default. This is to prevent accidental serialization of objects that should not
+      be serialized.
+    - ``get_lc_namespace``: Get the namespace of the langchain object.
+      During deserialization, this namespace is used to identify
+      the correct class to instantiate.
+      Please see the ``Reviver`` class in ``langchain_core.load.load`` for more details.
+      During deserialization an additional mapping is handle
+      classes that have moved or been renamed across package versions.
+    - ``lc_secrets``: A map of constructor argument names to secret ids.
+    - ``lc_attributes``: List of additional attribute names that should be included
+      as part of the serialized representation.
     """
 
     # Remove default BaseModel init docstring.
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        """"""  # noqa: D419
+        """"""  # noqa: D419 # Intentional blank docstring
         super().__init__(*args, **kwargs)
 
     @classmethod
@@ -148,6 +133,9 @@ class Serializable(BaseModel, ABC):
 
         For example, if the class is `langchain.llms.openai.OpenAI`, then the
         namespace is ["langchain", "llms", "openai"]
+
+        Returns:
+            The namespace as a list of strings.
         """
         return cls.__module__.split(".")
 
@@ -171,7 +159,7 @@ class Serializable(BaseModel, ABC):
 
     @classmethod
     def lc_id(cls) -> list[str]:
-        """A unique identifier for this class for serialization purposes.
+        """Return a unique identifier for this class for serialization purposes.
 
         The unique identifier is a list of strings that describes the path
         to the object.
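Only the opening verb changes (imperative mood, in line with pydocstyle's D401). ``lc_id`` still derives the identifier from ``get_lc_namespace()`` plus the class name:

```python
from langchain_core.messages import AIMessage

print(AIMessage.lc_id())
# ['langchain', 'schema', 'messages', 'AIMessage']
```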
@@ -203,6 +191,9 @@ class Serializable(BaseModel, ABC):
     def to_json(self) -> Union[SerializedConstructor, SerializedNotImplemented]:
         """Serialize the object to JSON.
 
+        Raises:
+            ValueError: If the class has deprecated attributes.
+
         Returns:
             A json serializable object or a SerializedNotImplemented object.
         """
@@ -276,7 +267,11 @@ class Serializable(BaseModel, ABC):
         }
 
     def to_json_not_implemented(self) -> SerializedNotImplemented:
-        """Serialize a "not implemented" object."""
+        """Serialize a "not implemented" object.
+
+        Returns:
+            SerializedNotImplemented.
+        """
         return to_json_not_implemented(self)
 
 
langchain_core/memory.py CHANGED
@@ -45,16 +45,20 @@ class BaseMemory(Serializable, ABC):
                 def memory_variables(self) -> list[str]:
                     return list(self.memories.keys())
 
-                def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
+                def load_memory_variables(
+                    self, inputs: dict[str, Any]
+                ) -> dict[str, str]:
                     return self.memories
 
-                def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
+                def save_context(
+                    self, inputs: dict[str, Any], outputs: dict[str, str]
+                ) -> None:
                     pass
 
                 def clear(self) -> None:
                     pass
 
-    """ # noqa: E501
+    """
 
     model_config = ConfigDict(
         arbitrary_types_allowed=True,
langchain_core/messages/ai.py CHANGED
@@ -45,7 +45,6 @@ class InputTokenDetails(TypedDict, total=False):
     Does *not* need to sum to full input token count. Does *not* need to have all keys.
 
     Example:
-
         .. code-block:: python
 
             {
@@ -72,6 +71,7 @@ class InputTokenDetails(TypedDict, total=False):
 
     Since there was a cache hit, the tokens were read from the cache. More precisely,
     the model state given these tokens was read from the cache.
+
     """
 
 
@@ -81,7 +81,6 @@ class OutputTokenDetails(TypedDict, total=False):
     Does *not* need to sum to full output token count. Does *not* need to have all keys.
 
     Example:
-
         .. code-block:: python
 
             {
@@ -100,6 +99,7 @@ class OutputTokenDetails(TypedDict, total=False):
 
     Tokens generated by the model in a chain of thought process (i.e. by OpenAI's o1
     models) that are not returned as part of model output.
+
     """
 
 
@@ -109,7 +109,6 @@ class UsageMetadata(TypedDict):
     This is a standard representation of token usage that is consistent across models.
 
     Example:
-
         .. code-block:: python
 
             {
@@ -124,7 +123,7 @@ class UsageMetadata(TypedDict):
                 "output_token_details": {
                     "audio": 10,
                     "reasoning": 200,
-                }
+                },
             }
 
     .. versionchanged:: 0.3.9
@@ -148,6 +147,7 @@ class UsageMetadata(TypedDict):
     """Breakdown of output token counts.
 
     Does *not* need to sum to full output token count. Does *not* need to have all keys.
+
     """
 
 
@@ -159,12 +159,14 @@ class AIMessage(BaseMessage):
     This message represents the output of the model and consists of both
     the raw output as returned by the model together standardized fields
     (e.g., tool calls, usage metadata) added by the LangChain framework.
+
     """
 
     example: bool = False
     """Use to denote that a message is part of an example conversation.
 
     At the moment, this is ignored by most models. Usage is discouraged.
+
     """
 
     tool_calls: list[ToolCall] = []
@@ -175,15 +177,18 @@ class AIMessage(BaseMessage):
     """If provided, usage metadata for a message, such as token counts.
 
     This is a standard representation of token usage that is consistent across models.
+
     """
 
     type: Literal["ai"] = "ai"
-    """The type of the message (used for deserialization). Defaults to "ai"."""
+    """The type of the message (used for deserialization). Defaults to ``'ai'``."""
 
     def __init__(
-        self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
+        self,
+        content: Union[str, list[Union[str, dict]]],
+        **kwargs: Any,
     ) -> None:
-        """Pass in content as positional arg.
+        """Initialize ``AIMessage``.
 
         Args:
             content: The content of the message.
@@ -254,6 +259,7 @@ class AIMessage(BaseMessage):
 
         Returns:
             A pretty representation of the message.
+
         """
         base = super().pretty_repr(html=html)
         lines = []
@@ -293,7 +299,10 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
     # non-chunk variant.
     type: Literal["AIMessageChunk"] = "AIMessageChunk"  # type: ignore[assignment]
     """The type of the message (used for deserialization).
-    Defaults to "AIMessageChunk"."""
+
+    Defaults to ``AIMessageChunk``.
+
+    """
     tool_call_chunks: list[ToolCallChunk] = []
     """If provided, tool call chunks associated with the message."""
 
@@ -310,9 +319,6 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
     def init_tool_calls(self) -> Self:
         """Initialize tool calls from tool call chunks.
 
-        Args:
-            values: The values to validate.
-
         Returns:
             The values with tool calls initialized.
 
@@ -358,10 +364,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
 
         for chunk in self.tool_call_chunks:
             try:
-                if chunk["args"] is not None and chunk["args"] != "":
-                    args_ = parse_partial_json(chunk["args"])
-                else:
-                    args_ = {}
+                args_ = parse_partial_json(chunk["args"]) if chunk["args"] else {}
                 if isinstance(args_, dict):
                     tool_calls.append(
                         create_tool_call(
@@ -392,7 +395,19 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
 def add_ai_message_chunks(
     left: AIMessageChunk, *others: AIMessageChunk
 ) -> AIMessageChunk:
-    """Add multiple AIMessageChunks together."""
+    """Add multiple ``AIMessageChunk``s together.
+
+    Args:
+        left: The first ``AIMessageChunk``.
+        *others: Other ``AIMessageChunk``s to add.
+
+    Raises:
+        ValueError: If the example values of the chunks are not the same.
+
+    Returns:
+        The resulting ``AIMessageChunk``.
+
+    """
     if any(left.example != o.example for o in others):
         msg = "Cannot concatenate AIMessageChunks with different example values."
         raise ValueError(msg)
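``add_ai_message_chunks`` backs the ``+`` operator on chunks, which streaming code uses to accumulate a full message:

```python
from langchain_core.messages import AIMessageChunk

merged = AIMessageChunk(content="Hello, ") + AIMessageChunk(content="world")
print(merged.content)  # Hello, world
```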
@@ -468,13 +483,13 @@ def add_usage(
                 input_tokens=5,
                 output_tokens=0,
                 total_tokens=5,
-                input_token_details=InputTokenDetails(cache_read=3)
+                input_token_details=InputTokenDetails(cache_read=3),
             )
             right = UsageMetadata(
                 input_tokens=0,
                 output_tokens=10,
                 total_tokens=10,
-                output_token_details=OutputTokenDetails(reasoning=4)
+                output_token_details=OutputTokenDetails(reasoning=4),
             )
 
             add_usage(left, right)
@@ -488,9 +503,16 @@ def add_usage(
                 output_tokens=10,
                 total_tokens=15,
                 input_token_details=InputTokenDetails(cache_read=3),
-                output_token_details=OutputTokenDetails(reasoning=4)
+                output_token_details=OutputTokenDetails(reasoning=4),
             )
 
+    Args:
+        left: The first ``UsageMetadata`` object.
+        right: The second ``UsageMetadata`` object.
+
+    Returns:
+        The sum of the two ``UsageMetadata`` objects.
+
     """
     if not (left or right):
         return UsageMetadata(input_tokens=0, output_tokens=0, total_tokens=0)
@@ -512,9 +534,9 @@ def subtract_usage(
 def subtract_usage(
     left: Optional[UsageMetadata], right: Optional[UsageMetadata]
 ) -> UsageMetadata:
-    """Recursively subtract two UsageMetadata objects.
+    """Recursively subtract two ``UsageMetadata`` objects.
 
-    Token counts cannot be negative so the actual operation is max(left - right, 0).
+    Token counts cannot be negative so the actual operation is ``max(left - right, 0)``.
 
     Example:
         .. code-block:: python
@@ -525,13 +547,13 @@ def subtract_usage(
                 input_tokens=5,
                 output_tokens=10,
                 total_tokens=15,
-                input_token_details=InputTokenDetails(cache_read=4)
+                input_token_details=InputTokenDetails(cache_read=4),
             )
             right = UsageMetadata(
                 input_tokens=3,
                 output_tokens=8,
                 total_tokens=11,
-                output_token_details=OutputTokenDetails(reasoning=4)
+                output_token_details=OutputTokenDetails(reasoning=4),
             )
 
             subtract_usage(left, right)
@@ -545,9 +567,16 @@ def subtract_usage(
                 output_tokens=2,
                 total_tokens=4,
                 input_token_details=InputTokenDetails(cache_read=4),
-                output_token_details=OutputTokenDetails(reasoning=0)
+                output_token_details=OutputTokenDetails(reasoning=0),
             )
 
+    Args:
+        left: The first ``UsageMetadata`` object.
+        right: The second ``UsageMetadata`` object.
+
+    Returns:
+        The resulting ``UsageMetadata`` after subtraction.
+
     """
     if not (left or right):
         return UsageMetadata(input_tokens=0, output_tokens=0, total_tokens=0)
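Usage sketch for the two helpers documented above (``UsageMetadata`` is a TypedDict, so these are plain dicts):

```python
from langchain_core.messages.ai import UsageMetadata, add_usage, subtract_usage

first = UsageMetadata(input_tokens=5, output_tokens=0, total_tokens=5)
second = UsageMetadata(input_tokens=0, output_tokens=10, total_tokens=10)

combined = add_usage(first, second)
print(combined["total_tokens"])  # 15

delta = subtract_usage(combined, first)
print(delta["output_tokens"])  # 10
```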