langchain-core 0.3.74__py3-none-any.whl → 0.3.76__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic. Click here for more details.

Files changed (122)
  1. langchain_core/_api/beta_decorator.py +18 -41
  2. langchain_core/_api/deprecation.py +20 -7
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/beta/runnables/context.py +2 -3
  7. langchain_core/callbacks/base.py +11 -4
  8. langchain_core/callbacks/file.py +13 -2
  9. langchain_core/callbacks/manager.py +129 -78
  10. langchain_core/callbacks/usage.py +4 -2
  11. langchain_core/chat_history.py +10 -12
  12. langchain_core/document_loaders/base.py +34 -9
  13. langchain_core/document_loaders/langsmith.py +3 -0
  14. langchain_core/documents/base.py +36 -11
  15. langchain_core/documents/compressor.py +9 -6
  16. langchain_core/documents/transformers.py +4 -2
  17. langchain_core/embeddings/fake.py +8 -5
  18. langchain_core/env.py +2 -3
  19. langchain_core/example_selectors/base.py +12 -0
  20. langchain_core/exceptions.py +7 -0
  21. langchain_core/globals.py +17 -28
  22. langchain_core/indexing/api.py +56 -44
  23. langchain_core/indexing/base.py +7 -10
  24. langchain_core/indexing/in_memory.py +23 -3
  25. langchain_core/language_models/__init__.py +3 -2
  26. langchain_core/language_models/base.py +64 -39
  27. langchain_core/language_models/chat_models.py +130 -42
  28. langchain_core/language_models/fake_chat_models.py +10 -11
  29. langchain_core/language_models/llms.py +49 -17
  30. langchain_core/load/dump.py +5 -7
  31. langchain_core/load/load.py +15 -1
  32. langchain_core/load/serializable.py +38 -43
  33. langchain_core/memory.py +7 -3
  34. langchain_core/messages/ai.py +36 -16
  35. langchain_core/messages/base.py +13 -6
  36. langchain_core/messages/content_blocks.py +23 -2
  37. langchain_core/messages/human.py +2 -6
  38. langchain_core/messages/modifier.py +1 -1
  39. langchain_core/messages/system.py +2 -6
  40. langchain_core/messages/tool.py +36 -16
  41. langchain_core/messages/utils.py +198 -87
  42. langchain_core/output_parsers/base.py +5 -2
  43. langchain_core/output_parsers/json.py +4 -4
  44. langchain_core/output_parsers/list.py +7 -22
  45. langchain_core/output_parsers/openai_functions.py +3 -0
  46. langchain_core/output_parsers/openai_tools.py +8 -1
  47. langchain_core/output_parsers/pydantic.py +4 -0
  48. langchain_core/output_parsers/string.py +5 -1
  49. langchain_core/output_parsers/transform.py +2 -2
  50. langchain_core/output_parsers/xml.py +23 -22
  51. langchain_core/outputs/chat_generation.py +18 -7
  52. langchain_core/outputs/generation.py +14 -3
  53. langchain_core/outputs/llm_result.py +8 -1
  54. langchain_core/prompt_values.py +10 -4
  55. langchain_core/prompts/base.py +4 -9
  56. langchain_core/prompts/chat.py +88 -61
  57. langchain_core/prompts/dict.py +16 -8
  58. langchain_core/prompts/few_shot.py +9 -11
  59. langchain_core/prompts/few_shot_with_templates.py +5 -1
  60. langchain_core/prompts/image.py +12 -5
  61. langchain_core/prompts/message.py +5 -6
  62. langchain_core/prompts/pipeline.py +13 -8
  63. langchain_core/prompts/prompt.py +22 -8
  64. langchain_core/prompts/string.py +18 -10
  65. langchain_core/prompts/structured.py +7 -2
  66. langchain_core/rate_limiters.py +2 -2
  67. langchain_core/retrievers.py +7 -6
  68. langchain_core/runnables/base.py +842 -567
  69. langchain_core/runnables/branch.py +15 -20
  70. langchain_core/runnables/config.py +11 -17
  71. langchain_core/runnables/configurable.py +34 -19
  72. langchain_core/runnables/fallbacks.py +24 -17
  73. langchain_core/runnables/graph.py +47 -40
  74. langchain_core/runnables/graph_ascii.py +40 -17
  75. langchain_core/runnables/graph_mermaid.py +27 -15
  76. langchain_core/runnables/graph_png.py +27 -31
  77. langchain_core/runnables/history.py +56 -59
  78. langchain_core/runnables/passthrough.py +47 -24
  79. langchain_core/runnables/retry.py +10 -6
  80. langchain_core/runnables/router.py +10 -9
  81. langchain_core/runnables/schema.py +2 -0
  82. langchain_core/runnables/utils.py +51 -89
  83. langchain_core/stores.py +13 -25
  84. langchain_core/structured_query.py +3 -7
  85. langchain_core/sys_info.py +9 -8
  86. langchain_core/tools/base.py +30 -23
  87. langchain_core/tools/convert.py +24 -13
  88. langchain_core/tools/simple.py +35 -3
  89. langchain_core/tools/structured.py +26 -3
  90. langchain_core/tracers/_streaming.py +6 -7
  91. langchain_core/tracers/base.py +2 -2
  92. langchain_core/tracers/context.py +5 -1
  93. langchain_core/tracers/core.py +109 -39
  94. langchain_core/tracers/evaluation.py +22 -26
  95. langchain_core/tracers/event_stream.py +41 -28
  96. langchain_core/tracers/langchain.py +12 -3
  97. langchain_core/tracers/langchain_v1.py +10 -2
  98. langchain_core/tracers/log_stream.py +57 -18
  99. langchain_core/tracers/root_listeners.py +4 -20
  100. langchain_core/tracers/run_collector.py +6 -16
  101. langchain_core/tracers/schemas.py +5 -1
  102. langchain_core/utils/aiter.py +14 -6
  103. langchain_core/utils/env.py +3 -0
  104. langchain_core/utils/function_calling.py +49 -30
  105. langchain_core/utils/interactive_env.py +6 -2
  106. langchain_core/utils/iter.py +11 -3
  107. langchain_core/utils/json.py +5 -2
  108. langchain_core/utils/json_schema.py +15 -5
  109. langchain_core/utils/loading.py +5 -1
  110. langchain_core/utils/mustache.py +24 -15
  111. langchain_core/utils/pydantic.py +32 -4
  112. langchain_core/utils/utils.py +24 -8
  113. langchain_core/vectorstores/base.py +7 -20
  114. langchain_core/vectorstores/in_memory.py +18 -12
  115. langchain_core/vectorstores/utils.py +18 -12
  116. langchain_core/version.py +1 -1
  117. langchain_core-0.3.76.dist-info/METADATA +77 -0
  118. langchain_core-0.3.76.dist-info/RECORD +174 -0
  119. langchain_core-0.3.74.dist-info/METADATA +0 -108
  120. langchain_core-0.3.74.dist-info/RECORD +0 -174
  121. {langchain_core-0.3.74.dist-info → langchain_core-0.3.76.dist-info}/WHEEL +0 -0
  122. {langchain_core-0.3.74.dist-info → langchain_core-0.3.76.dist-info}/entry_points.txt +0 -0
@@ -6,6 +6,8 @@ from typing import Any
6
6
  from pydantic import BaseModel
7
7
 
8
8
  from langchain_core.load.serializable import Serializable, to_json_not_implemented
9
+ from langchain_core.messages import AIMessage
10
+ from langchain_core.outputs import ChatGeneration
9
11
 
10
12
 
11
13
  def default(obj: Any) -> Any:
@@ -23,9 +25,6 @@ def default(obj: Any) -> Any:
23
25
 
24
26
 
25
27
  def _dump_pydantic_models(obj: Any) -> Any:
26
- from langchain_core.messages import AIMessage
27
- from langchain_core.outputs import ChatGeneration
28
-
29
28
  if (
30
29
  isinstance(obj, ChatGeneration)
31
30
  and isinstance(obj.message, AIMessage)
@@ -73,10 +72,9 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
73
72
  def dumpd(obj: Any) -> Any:
74
73
  """Return a dict representation of an object.
75
74
 
76
- Note:
77
- Unfortunately this function is not as efficient as it could be
78
- because it first dumps the object to a json string and then loads it
79
- back into a dictionary.
75
+ .. note::
76
+ Unfortunately this function is not as efficient as it could be because it first
77
+ dumps the object to a json string and then loads it back into a dictionary.
80
78
 
81
79
  Args:
82
80
  obj: The object to dump.
@@ -95,7 +95,21 @@ class Reviver:
95
95
  self.ignore_unserializable_fields = ignore_unserializable_fields
96
96
 
97
97
  def __call__(self, value: dict[str, Any]) -> Any:
98
- """Revive the value."""
98
+ """Revive the value.
99
+
100
+ Args:
101
+ value: The value to revive.
102
+
103
+ Returns:
104
+ The revived value.
105
+
106
+ Raises:
107
+ ValueError: If the namespace is invalid.
108
+ ValueError: If trying to deserialize something that cannot
109
+ be deserialized in the current version of langchain-core.
110
+ NotImplementedError: If the object is not implemented and
111
+ ``ignore_unserializable_fields`` is False.
112
+ """
99
113
  if (
100
114
  value.get("lc") == 1
101
115
  and value.get("type") == "secret"
@@ -20,53 +20,41 @@ logger = logging.getLogger(__name__)
20
20
 
21
21
 
22
22
  class BaseSerialized(TypedDict):
23
- """Base class for serialized objects.
24
-
25
- Parameters:
26
- lc: The version of the serialization format.
27
- id: The unique identifier of the object.
28
- name: The name of the object. Optional.
29
- graph: The graph of the object. Optional.
30
- """
23
+ """Base class for serialized objects."""
31
24
 
32
25
  lc: int
26
+ """The version of the serialization format."""
33
27
  id: list[str]
28
+ """The unique identifier of the object."""
34
29
  name: NotRequired[str]
30
+ """The name of the object. Optional."""
35
31
  graph: NotRequired[dict[str, Any]]
32
+ """The graph of the object. Optional."""
36
33
 
37
34
 
38
35
  class SerializedConstructor(BaseSerialized):
39
- """Serialized constructor.
40
-
41
- Parameters:
42
- type: The type of the object. Must be "constructor".
43
- kwargs: The constructor arguments.
44
- """
36
+ """Serialized constructor."""
45
37
 
46
38
  type: Literal["constructor"]
39
+ """The type of the object. Must be ``'constructor'``."""
47
40
  kwargs: dict[str, Any]
41
+ """The constructor arguments."""
48
42
 
49
43
 
50
44
  class SerializedSecret(BaseSerialized):
51
- """Serialized secret.
52
-
53
- Parameters:
54
- type: The type of the object. Must be "secret".
55
- """
45
+ """Serialized secret."""
56
46
 
57
47
  type: Literal["secret"]
48
+ """The type of the object. Must be ``'secret'``."""
58
49
 
59
50
 
60
51
  class SerializedNotImplemented(BaseSerialized):
61
- """Serialized not implemented.
62
-
63
- Parameters:
64
- type: The type of the object. Must be "not_implemented".
65
- repr: The representation of the object. Optional.
66
- """
52
+ """Serialized not implemented."""
67
53
 
68
54
  type: Literal["not_implemented"]
55
+ """The type of the object. Must be ``'not_implemented'``."""
69
56
  repr: Optional[str]
57
+ """The representation of the object. Optional."""
70
58
 
71
59
 
72
60
  def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
@@ -79,9 +67,6 @@ def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
79
67
 
80
68
  Returns:
81
69
  Whether the value is different from the default.
82
-
83
- Raises:
84
- Exception: If the key is not in the model.
85
70
  """
86
71
  field = type(model).model_fields[key]
87
72
  return _try_neq_default(value, field)
@@ -109,19 +94,19 @@ class Serializable(BaseModel, ABC):
109
94
 
110
95
  It relies on the following methods and properties:
111
96
 
112
- - `is_lc_serializable`: Is this class serializable?
113
- By design, even if a class inherits from Serializable, it is not serializable by
114
- default. This is to prevent accidental serialization of objects that should not
115
- be serialized.
116
- - `get_lc_namespace`: Get the namespace of the langchain object.
117
- During deserialization, this namespace is used to identify
118
- the correct class to instantiate.
119
- Please see the `Reviver` class in `langchain_core.load.load` for more details.
120
- During deserialization an additional mapping is handle
121
- classes that have moved or been renamed across package versions.
122
- - `lc_secrets`: A map of constructor argument names to secret ids.
123
- - `lc_attributes`: List of additional attribute names that should be included
124
- as part of the serialized representation.
97
+ - ``is_lc_serializable``: Is this class serializable?
98
+ By design, even if a class inherits from Serializable, it is not serializable by
99
+ default. This is to prevent accidental serialization of objects that should not
100
+ be serialized.
101
+ - ``get_lc_namespace``: Get the namespace of the langchain object.
102
+ During deserialization, this namespace is used to identify
103
+ the correct class to instantiate.
104
+ Please see the ``Reviver`` class in ``langchain_core.load.load`` for more details.
105
+ During deserialization an additional mapping is used to handle
106
+ classes that have moved or been renamed across package versions.
107
+ - ``lc_secrets``: A map of constructor argument names to secret ids.
108
+ - ``lc_attributes``: List of additional attribute names that should be included
109
+ as part of the serialized representation.
125
110
  """
126
111
 
127
112
  # Remove default BaseModel init docstring.
@@ -148,6 +133,9 @@ class Serializable(BaseModel, ABC):
148
133
 
149
134
  For example, if the class is `langchain.llms.openai.OpenAI`, then the
150
135
  namespace is ["langchain", "llms", "openai"]
136
+
137
+ Returns:
138
+ The namespace as a list of strings.
151
139
  """
152
140
  return cls.__module__.split(".")
153
141
 
@@ -171,7 +159,7 @@ class Serializable(BaseModel, ABC):
171
159
 
172
160
  @classmethod
173
161
  def lc_id(cls) -> list[str]:
174
- """A unique identifier for this class for serialization purposes.
162
+ """Return a unique identifier for this class for serialization purposes.
175
163
 
176
164
  The unique identifier is a list of strings that describes the path
177
165
  to the object.
@@ -203,6 +191,9 @@ class Serializable(BaseModel, ABC):
203
191
  def to_json(self) -> Union[SerializedConstructor, SerializedNotImplemented]:
204
192
  """Serialize the object to JSON.
205
193
 
194
+ Raises:
195
+ ValueError: If the class has deprecated attributes.
196
+
206
197
  Returns:
207
198
  A json serializable object or a SerializedNotImplemented object.
208
199
  """
@@ -276,7 +267,11 @@ class Serializable(BaseModel, ABC):
276
267
  }
277
268
 
278
269
  def to_json_not_implemented(self) -> SerializedNotImplemented:
279
- """Serialize a "not implemented" object."""
270
+ """Serialize a "not implemented" object.
271
+
272
+ Returns:
273
+ SerializedNotImplemented.
274
+ """
280
275
  return to_json_not_implemented(self)
281
276
 
282
277
 
langchain_core/memory.py CHANGED
@@ -45,16 +45,20 @@ class BaseMemory(Serializable, ABC):
45
45
  def memory_variables(self) -> list[str]:
46
46
  return list(self.memories.keys())
47
47
 
48
- def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
48
+ def load_memory_variables(
49
+ self, inputs: dict[str, Any]
50
+ ) -> dict[str, str]:
49
51
  return self.memories
50
52
 
51
- def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
53
+ def save_context(
54
+ self, inputs: dict[str, Any], outputs: dict[str, str]
55
+ ) -> None:
52
56
  pass
53
57
 
54
58
  def clear(self) -> None:
55
59
  pass
56
60
 
57
- """ # noqa: E501
61
+ """
58
62
 
59
63
  model_config = ConfigDict(
60
64
  arbitrary_types_allowed=True,
@@ -124,7 +124,7 @@ class UsageMetadata(TypedDict):
124
124
  "output_token_details": {
125
125
  "audio": 10,
126
126
  "reasoning": 200,
127
- }
127
+ },
128
128
  }
129
129
 
130
130
  .. versionchanged:: 0.3.9
@@ -310,14 +310,8 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
310
310
  def init_tool_calls(self) -> Self:
311
311
  """Initialize tool calls from tool call chunks.
312
312
 
313
- Args:
314
- values: The values to validate.
315
-
316
313
  Returns:
317
- The values with tool calls initialized.
318
-
319
- Raises:
320
- ValueError: If the tool call chunks are malformed.
314
+ This ``AIMessageChunk``.
321
315
  """
322
316
  if not self.tool_call_chunks:
323
317
  if self.tool_calls:
@@ -358,7 +352,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
358
352
 
359
353
  for chunk in self.tool_call_chunks:
360
354
  try:
361
- args_ = parse_partial_json(chunk["args"]) if chunk["args"] != "" else {} # type: ignore[arg-type]
355
+ args_ = parse_partial_json(chunk["args"]) if chunk["args"] else {}
362
356
  if isinstance(args_, dict):
363
357
  tool_calls.append(
364
358
  create_tool_call(
@@ -389,7 +383,19 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
389
383
  def add_ai_message_chunks(
390
384
  left: AIMessageChunk, *others: AIMessageChunk
391
385
  ) -> AIMessageChunk:
392
- """Add multiple AIMessageChunks together."""
386
+ """Add multiple ``AIMessageChunk``s together.
387
+
388
+ Args:
389
+ left: The first ``AIMessageChunk``.
390
+ *others: Other ``AIMessageChunk``s to add.
391
+
392
+ Raises:
393
+ ValueError: If the example values of the chunks are not the same.
394
+
395
+ Returns:
396
+ The resulting ``AIMessageChunk``.
397
+
398
+ """
393
399
  if any(left.example != o.example for o in others):
394
400
  msg = "Cannot concatenate AIMessageChunks with different example values."
395
401
  raise ValueError(msg)
@@ -465,13 +471,13 @@ def add_usage(
465
471
  input_tokens=5,
466
472
  output_tokens=0,
467
473
  total_tokens=5,
468
- input_token_details=InputTokenDetails(cache_read=3)
474
+ input_token_details=InputTokenDetails(cache_read=3),
469
475
  )
470
476
  right = UsageMetadata(
471
477
  input_tokens=0,
472
478
  output_tokens=10,
473
479
  total_tokens=10,
474
- output_token_details=OutputTokenDetails(reasoning=4)
480
+ output_token_details=OutputTokenDetails(reasoning=4),
475
481
  )
476
482
 
477
483
  add_usage(left, right)
@@ -485,9 +491,16 @@ def add_usage(
485
491
  output_tokens=10,
486
492
  total_tokens=15,
487
493
  input_token_details=InputTokenDetails(cache_read=3),
488
- output_token_details=OutputTokenDetails(reasoning=4)
494
+ output_token_details=OutputTokenDetails(reasoning=4),
489
495
  )
490
496
 
497
+ Args:
498
+ left: The first ``UsageMetadata`` object.
499
+ right: The second ``UsageMetadata`` object.
500
+
501
+ Returns:
502
+ The sum of the two ``UsageMetadata`` objects.
503
+
491
504
  """
492
505
  if not (left or right):
493
506
  return UsageMetadata(input_tokens=0, output_tokens=0, total_tokens=0)
@@ -522,13 +535,13 @@ def subtract_usage(
522
535
  input_tokens=5,
523
536
  output_tokens=10,
524
537
  total_tokens=15,
525
- input_token_details=InputTokenDetails(cache_read=4)
538
+ input_token_details=InputTokenDetails(cache_read=4),
526
539
  )
527
540
  right = UsageMetadata(
528
541
  input_tokens=3,
529
542
  output_tokens=8,
530
543
  total_tokens=11,
531
- output_token_details=OutputTokenDetails(reasoning=4)
544
+ output_token_details=OutputTokenDetails(reasoning=4),
532
545
  )
533
546
 
534
547
  subtract_usage(left, right)
@@ -542,9 +555,16 @@ def subtract_usage(
542
555
  output_tokens=2,
543
556
  total_tokens=4,
544
557
  input_token_details=InputTokenDetails(cache_read=4),
545
- output_token_details=OutputTokenDetails(reasoning=0)
558
+ output_token_details=OutputTokenDetails(reasoning=0),
546
559
  )
547
560
 
561
+ Args:
562
+ left: The first ``UsageMetadata`` object.
563
+ right: The second ``UsageMetadata`` object.
564
+
565
+ Returns:
566
+ The resulting ``UsageMetadata`` after subtraction.
567
+
548
568
  """
549
569
  if not (left or right):
550
570
  return UsageMetadata(input_tokens=0, output_tokens=0, total_tokens=0)
@@ -84,7 +84,8 @@ class BaseMessage(Serializable):
84
84
  def get_lc_namespace(cls) -> list[str]:
85
85
  """Get the namespace of the langchain object.
86
86
 
87
- Default is ["langchain", "schema", "messages"].
87
+ Returns:
88
+ ``["langchain", "schema", "messages"]``
88
89
  """
89
90
  return ["langchain", "schema", "messages"]
90
91
 
@@ -109,8 +110,16 @@ class BaseMessage(Serializable):
109
110
  )
110
111
 
111
112
  def __add__(self, other: Any) -> ChatPromptTemplate:
112
- """Concatenate this message with another message."""
113
- from langchain_core.prompts.chat import ChatPromptTemplate
113
+ """Concatenate this message with another message.
114
+
115
+ Args:
116
+ other: Another message to concatenate with this one.
117
+
118
+ Returns:
119
+ A ChatPromptTemplate containing both messages.
120
+ """
121
+ # Import locally to prevent circular imports.
122
+ from langchain_core.prompts.chat import ChatPromptTemplate # noqa: PLC0415
114
123
 
115
124
  prompt = ChatPromptTemplate(messages=[self])
116
125
  return prompt + other
@@ -171,9 +180,7 @@ def merge_content(
171
180
  elif merged and isinstance(merged[-1], str):
172
181
  merged[-1] += content
173
182
  # If second content is an empty string, treat as a no-op
174
- elif content == "":
175
- pass
176
- else:
183
+ elif content:
177
184
  # Otherwise, add the second content as a new element of the list
178
185
  merged.append(content)
179
186
  return merged
@@ -88,7 +88,18 @@ def is_data_content_block(
88
88
 
89
89
 
90
90
  def convert_to_openai_image_block(content_block: dict[str, Any]) -> dict:
91
- """Convert image content block to format expected by OpenAI Chat Completions API."""
91
+ """Convert image content block to format expected by OpenAI Chat Completions API.
92
+
93
+ Args:
94
+ content_block: The content block to convert.
95
+
96
+ Raises:
97
+ ValueError: If the source type is not supported or if ``mime_type`` is missing
98
+ for base64 data.
99
+
100
+ Returns:
101
+ A dictionary formatted for OpenAI's API.
102
+ """
92
103
  if content_block["source_type"] == "url":
93
104
  return {
94
105
  "type": "image_url",
@@ -112,7 +123,17 @@ def convert_to_openai_image_block(content_block: dict[str, Any]) -> dict:
112
123
 
113
124
 
114
125
  def convert_to_openai_data_block(block: dict) -> dict:
115
- """Format standard data content block to format expected by OpenAI."""
126
+ """Format standard data content block to format expected by OpenAI.
127
+
128
+ Args:
129
+ block: A data content block.
130
+
131
+ Raises:
132
+ ValueError: If the block type or source type is not supported.
133
+
134
+ Returns:
135
+ A dictionary formatted for OpenAI's API.
136
+ """
116
137
  if block["type"] == "image":
117
138
  formatted_block = convert_to_openai_image_block(block)
118
139
 
@@ -17,12 +17,8 @@ class HumanMessage(BaseMessage):
17
17
  from langchain_core.messages import HumanMessage, SystemMessage
18
18
 
19
19
  messages = [
20
- SystemMessage(
21
- content="You are a helpful assistant! Your name is Bob."
22
- ),
23
- HumanMessage(
24
- content="What is your name?"
25
- )
20
+ SystemMessage(content="You are a helpful assistant! Your name is Bob."),
21
+ HumanMessage(content="What is your name?"),
26
22
  ]
27
23
 
28
24
  # Instantiate a chat model and invoke it with the messages
@@ -13,7 +13,7 @@ class RemoveMessage(BaseMessage):
13
13
 
14
14
  def __init__(
15
15
  self,
16
- id: str, # noqa: A002
16
+ id: str,
17
17
  **kwargs: Any,
18
18
  ) -> None:
19
19
  """Create a RemoveMessage.
@@ -18,12 +18,8 @@ class SystemMessage(BaseMessage):
18
18
  from langchain_core.messages import HumanMessage, SystemMessage
19
19
 
20
20
  messages = [
21
- SystemMessage(
22
- content="You are a helpful assistant! Your name is Bob."
23
- ),
24
- HumanMessage(
25
- content="What is your name?"
26
- )
21
+ SystemMessage(content="You are a helpful assistant! Your name is Bob."),
22
+ HumanMessage(content="What is your name?"),
27
23
  ]
28
24
 
29
25
  # Define a chat model and invoke it with the messages
@@ -32,7 +32,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
32
32
 
33
33
  from langchain_core.messages import ToolMessage
34
34
 
35
- ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
35
+ ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
36
36
 
37
37
 
38
38
  Example: A ToolMessage where only part of the tool output is sent to the model
@@ -45,7 +45,8 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
45
45
  from langchain_core.messages import ToolMessage
46
46
 
47
47
  tool_output = {
48
- "stdout": "From the graph we can see that the correlation between x and y is ...",
48
+ "stdout": "From the graph we can see that the correlation between "
49
+ "x and y is ...",
49
50
  "stderr": None,
50
51
  "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
51
52
  }
@@ -53,14 +54,14 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
53
54
  ToolMessage(
54
55
  content=tool_output["stdout"],
55
56
  artifact=tool_output,
56
- tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL',
57
+ tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
57
58
  )
58
59
 
59
60
  The tool_call_id field is used to associate the tool call request with the
60
61
  tool call response. This is useful in situations where a chat model is able
61
62
  to request multiple tool calls in parallel.
62
63
 
63
- """ # noqa: E501
64
+ """
64
65
 
65
66
  tool_call_id: str
66
67
  """Tool call that this message is responding to."""
@@ -184,11 +185,7 @@ class ToolCall(TypedDict):
184
185
 
185
186
  .. code-block:: python
186
187
 
187
- {
188
- "name": "foo",
189
- "args": {"a": 1},
190
- "id": "123"
191
- }
188
+ {"name": "foo", "args": {"a": 1}, "id": "123"}
192
189
 
193
190
  This represents a request to call the tool named "foo" with arguments {"a": 1}
194
191
  and an identifier of "123".
@@ -212,7 +209,7 @@ def tool_call(
212
209
  *,
213
210
  name: str,
214
211
  args: dict[str, Any],
215
- id: Optional[str], # noqa: A002
212
+ id: Optional[str],
216
213
  ) -> ToolCall:
217
214
  """Create a tool call.
218
215
 
@@ -220,6 +217,9 @@ def tool_call(
220
217
  name: The name of the tool to be called.
221
218
  args: The arguments to the tool call.
222
219
  id: An identifier associated with the tool call.
220
+
221
+ Returns:
222
+ The created tool call.
223
223
  """
224
224
  return ToolCall(name=name, args=args, id=id, type="tool_call")
225
225
 
@@ -236,12 +236,12 @@ class ToolCallChunk(TypedDict):
236
236
  .. code-block:: python
237
237
 
238
238
  left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
239
- right_chunks = [ToolCallChunk(name=None, args='1}', index=0)]
239
+ right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
240
240
 
241
241
  (
242
242
  AIMessageChunk(content="", tool_call_chunks=left_chunks)
243
243
  + AIMessageChunk(content="", tool_call_chunks=right_chunks)
244
- ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)]
244
+ ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
245
245
 
246
246
  """
247
247
 
@@ -260,7 +260,7 @@ def tool_call_chunk(
260
260
  *,
261
261
  name: Optional[str] = None,
262
262
  args: Optional[str] = None,
263
- id: Optional[str] = None, # noqa: A002
263
+ id: Optional[str] = None,
264
264
  index: Optional[int] = None,
265
265
  ) -> ToolCallChunk:
266
266
  """Create a tool call chunk.
@@ -270,6 +270,9 @@ def tool_call_chunk(
270
270
  args: The arguments to the tool call.
271
271
  id: An identifier associated with the tool call.
272
272
  index: The index of the tool call in a sequence.
273
+
274
+ Returns:
275
+ The created tool call chunk.
273
276
  """
274
277
  return ToolCallChunk(
275
278
  name=name, args=args, id=id, index=index, type="tool_call_chunk"
@@ -298,7 +301,7 @@ def invalid_tool_call(
298
301
  *,
299
302
  name: Optional[str] = None,
300
303
  args: Optional[str] = None,
301
- id: Optional[str] = None, # noqa: A002
304
+ id: Optional[str] = None,
302
305
  error: Optional[str] = None,
303
306
  ) -> InvalidToolCall:
304
307
  """Create an invalid tool call.
@@ -308,6 +311,9 @@ def invalid_tool_call(
308
311
  args: The arguments to the tool call.
309
312
  id: An identifier associated with the tool call.
310
313
  error: An error message associated with the tool call.
314
+
315
+ Returns:
316
+ The created invalid tool call.
311
317
  """
312
318
  return InvalidToolCall(
313
319
  name=name, args=args, id=id, error=error, type="invalid_tool_call"
@@ -317,7 +323,14 @@ def invalid_tool_call(
317
323
  def default_tool_parser(
318
324
  raw_tool_calls: list[dict],
319
325
  ) -> tuple[list[ToolCall], list[InvalidToolCall]]:
320
- """Best-effort parsing of tools."""
326
+ """Best-effort parsing of tools.
327
+
328
+ Args:
329
+ raw_tool_calls: List of raw tool call dicts to parse.
330
+
331
+ Returns:
332
+ A list of tool calls and invalid tool calls.
333
+ """
321
334
  tool_calls = []
322
335
  invalid_tool_calls = []
323
336
  for raw_tool_call in raw_tool_calls:
@@ -345,7 +358,14 @@ def default_tool_parser(
345
358
 
346
359
 
347
360
  def default_tool_chunk_parser(raw_tool_calls: list[dict]) -> list[ToolCallChunk]:
348
- """Best-effort parsing of tool chunks."""
361
+ """Best-effort parsing of tool chunks.
362
+
363
+ Args:
364
+ raw_tool_calls: List of raw tool call dicts to parse.
365
+
366
+ Returns:
367
+ List of parsed ToolCallChunk objects.
368
+ """
349
369
  tool_call_chunks = []
350
370
  for tool_call in raw_tool_calls:
351
371
  if "function" not in tool_call: