langchain-core 0.3.74__py3-none-any.whl → 0.3.76__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic; see the package registry's advisory page for more details.

Files changed (122)
  1. langchain_core/_api/beta_decorator.py +18 -41
  2. langchain_core/_api/deprecation.py +20 -7
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/beta/runnables/context.py +2 -3
  7. langchain_core/callbacks/base.py +11 -4
  8. langchain_core/callbacks/file.py +13 -2
  9. langchain_core/callbacks/manager.py +129 -78
  10. langchain_core/callbacks/usage.py +4 -2
  11. langchain_core/chat_history.py +10 -12
  12. langchain_core/document_loaders/base.py +34 -9
  13. langchain_core/document_loaders/langsmith.py +3 -0
  14. langchain_core/documents/base.py +36 -11
  15. langchain_core/documents/compressor.py +9 -6
  16. langchain_core/documents/transformers.py +4 -2
  17. langchain_core/embeddings/fake.py +8 -5
  18. langchain_core/env.py +2 -3
  19. langchain_core/example_selectors/base.py +12 -0
  20. langchain_core/exceptions.py +7 -0
  21. langchain_core/globals.py +17 -28
  22. langchain_core/indexing/api.py +56 -44
  23. langchain_core/indexing/base.py +7 -10
  24. langchain_core/indexing/in_memory.py +23 -3
  25. langchain_core/language_models/__init__.py +3 -2
  26. langchain_core/language_models/base.py +64 -39
  27. langchain_core/language_models/chat_models.py +130 -42
  28. langchain_core/language_models/fake_chat_models.py +10 -11
  29. langchain_core/language_models/llms.py +49 -17
  30. langchain_core/load/dump.py +5 -7
  31. langchain_core/load/load.py +15 -1
  32. langchain_core/load/serializable.py +38 -43
  33. langchain_core/memory.py +7 -3
  34. langchain_core/messages/ai.py +36 -16
  35. langchain_core/messages/base.py +13 -6
  36. langchain_core/messages/content_blocks.py +23 -2
  37. langchain_core/messages/human.py +2 -6
  38. langchain_core/messages/modifier.py +1 -1
  39. langchain_core/messages/system.py +2 -6
  40. langchain_core/messages/tool.py +36 -16
  41. langchain_core/messages/utils.py +198 -87
  42. langchain_core/output_parsers/base.py +5 -2
  43. langchain_core/output_parsers/json.py +4 -4
  44. langchain_core/output_parsers/list.py +7 -22
  45. langchain_core/output_parsers/openai_functions.py +3 -0
  46. langchain_core/output_parsers/openai_tools.py +8 -1
  47. langchain_core/output_parsers/pydantic.py +4 -0
  48. langchain_core/output_parsers/string.py +5 -1
  49. langchain_core/output_parsers/transform.py +2 -2
  50. langchain_core/output_parsers/xml.py +23 -22
  51. langchain_core/outputs/chat_generation.py +18 -7
  52. langchain_core/outputs/generation.py +14 -3
  53. langchain_core/outputs/llm_result.py +8 -1
  54. langchain_core/prompt_values.py +10 -4
  55. langchain_core/prompts/base.py +4 -9
  56. langchain_core/prompts/chat.py +88 -61
  57. langchain_core/prompts/dict.py +16 -8
  58. langchain_core/prompts/few_shot.py +9 -11
  59. langchain_core/prompts/few_shot_with_templates.py +5 -1
  60. langchain_core/prompts/image.py +12 -5
  61. langchain_core/prompts/message.py +5 -6
  62. langchain_core/prompts/pipeline.py +13 -8
  63. langchain_core/prompts/prompt.py +22 -8
  64. langchain_core/prompts/string.py +18 -10
  65. langchain_core/prompts/structured.py +7 -2
  66. langchain_core/rate_limiters.py +2 -2
  67. langchain_core/retrievers.py +7 -6
  68. langchain_core/runnables/base.py +842 -567
  69. langchain_core/runnables/branch.py +15 -20
  70. langchain_core/runnables/config.py +11 -17
  71. langchain_core/runnables/configurable.py +34 -19
  72. langchain_core/runnables/fallbacks.py +24 -17
  73. langchain_core/runnables/graph.py +47 -40
  74. langchain_core/runnables/graph_ascii.py +40 -17
  75. langchain_core/runnables/graph_mermaid.py +27 -15
  76. langchain_core/runnables/graph_png.py +27 -31
  77. langchain_core/runnables/history.py +56 -59
  78. langchain_core/runnables/passthrough.py +47 -24
  79. langchain_core/runnables/retry.py +10 -6
  80. langchain_core/runnables/router.py +10 -9
  81. langchain_core/runnables/schema.py +2 -0
  82. langchain_core/runnables/utils.py +51 -89
  83. langchain_core/stores.py +13 -25
  84. langchain_core/structured_query.py +3 -7
  85. langchain_core/sys_info.py +9 -8
  86. langchain_core/tools/base.py +30 -23
  87. langchain_core/tools/convert.py +24 -13
  88. langchain_core/tools/simple.py +35 -3
  89. langchain_core/tools/structured.py +26 -3
  90. langchain_core/tracers/_streaming.py +6 -7
  91. langchain_core/tracers/base.py +2 -2
  92. langchain_core/tracers/context.py +5 -1
  93. langchain_core/tracers/core.py +109 -39
  94. langchain_core/tracers/evaluation.py +22 -26
  95. langchain_core/tracers/event_stream.py +41 -28
  96. langchain_core/tracers/langchain.py +12 -3
  97. langchain_core/tracers/langchain_v1.py +10 -2
  98. langchain_core/tracers/log_stream.py +57 -18
  99. langchain_core/tracers/root_listeners.py +4 -20
  100. langchain_core/tracers/run_collector.py +6 -16
  101. langchain_core/tracers/schemas.py +5 -1
  102. langchain_core/utils/aiter.py +14 -6
  103. langchain_core/utils/env.py +3 -0
  104. langchain_core/utils/function_calling.py +49 -30
  105. langchain_core/utils/interactive_env.py +6 -2
  106. langchain_core/utils/iter.py +11 -3
  107. langchain_core/utils/json.py +5 -2
  108. langchain_core/utils/json_schema.py +15 -5
  109. langchain_core/utils/loading.py +5 -1
  110. langchain_core/utils/mustache.py +24 -15
  111. langchain_core/utils/pydantic.py +32 -4
  112. langchain_core/utils/utils.py +24 -8
  113. langchain_core/vectorstores/base.py +7 -20
  114. langchain_core/vectorstores/in_memory.py +18 -12
  115. langchain_core/vectorstores/utils.py +18 -12
  116. langchain_core/version.py +1 -1
  117. langchain_core-0.3.76.dist-info/METADATA +77 -0
  118. langchain_core-0.3.76.dist-info/RECORD +174 -0
  119. langchain_core-0.3.74.dist-info/METADATA +0 -108
  120. langchain_core-0.3.74.dist-info/RECORD +0 -174
  121. {langchain_core-0.3.74.dist-info → langchain_core-0.3.76.dist-info}/WHEEL +0 -0
  122. {langchain_core-0.3.74.dist-info → langchain_core-0.3.76.dist-info}/entry_points.txt +0 -0
@@ -7,6 +7,7 @@ import contextlib
7
7
  import copy
8
8
  import threading
9
9
  from collections import defaultdict
10
+ from pprint import pformat
10
11
  from typing import (
11
12
  TYPE_CHECKING,
12
13
  Any,
@@ -20,10 +21,11 @@ from typing import (
20
21
  import jsonpatch # type: ignore[import-untyped]
21
22
  from typing_extensions import NotRequired, TypedDict, override
22
23
 
24
+ from langchain_core.callbacks.base import BaseCallbackManager
23
25
  from langchain_core.load import dumps
24
26
  from langchain_core.load.load import load
25
27
  from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
26
- from langchain_core.runnables import Runnable, RunnableConfig, ensure_config
28
+ from langchain_core.runnables import RunnableConfig, ensure_config
27
29
  from langchain_core.tracers._streaming import _StreamingCallbackHandler
28
30
  from langchain_core.tracers.base import BaseTracer
29
31
  from langchain_core.tracers.memory_stream import _MemoryStream
@@ -32,6 +34,7 @@ if TYPE_CHECKING:
32
34
  from collections.abc import AsyncIterator, Iterator, Sequence
33
35
  from uuid import UUID
34
36
 
37
+ from langchain_core.runnables import Runnable
35
38
  from langchain_core.runnables.utils import Input, Output
36
39
  from langchain_core.tracers.schemas import Run
37
40
 
@@ -110,7 +113,17 @@ class RunLogPatch:
110
113
  self.ops = list(ops)
111
114
 
112
115
  def __add__(self, other: Union[RunLogPatch, Any]) -> RunLog:
113
- """Combine two RunLogPatch instances."""
116
+ """Combine two ``RunLogPatch`` instances.
117
+
118
+ Args:
119
+ other: The other ``RunLogPatch`` to combine with.
120
+
121
+ Raises:
122
+ TypeError: If the other object is not a ``RunLogPatch``.
123
+
124
+ Returns:
125
+ A new ``RunLog`` representing the combination of the two.
126
+ """
114
127
  if type(other) is RunLogPatch:
115
128
  ops = self.ops + other.ops
116
129
  state = jsonpatch.apply_patch(None, copy.deepcopy(ops))
@@ -121,8 +134,6 @@ class RunLogPatch:
121
134
 
122
135
  @override
123
136
  def __repr__(self) -> str:
124
- from pprint import pformat
125
-
126
137
  # 1:-1 to get rid of the [] around the list
127
138
  return f"RunLogPatch({pformat(self.ops)[1:-1]})"
128
139
 
@@ -150,7 +161,17 @@ class RunLog(RunLogPatch):
150
161
  self.state = state
151
162
 
152
163
  def __add__(self, other: Union[RunLogPatch, Any]) -> RunLog:
153
- """Combine two RunLogs."""
164
+ """Combine two ``RunLog``s.
165
+
166
+ Args:
167
+ other: The other ``RunLog`` or ``RunLogPatch`` to combine with.
168
+
169
+ Raises:
170
+ TypeError: If the other object is not a ``RunLog`` or ``RunLogPatch``.
171
+
172
+ Returns:
173
+ A new ``RunLog`` representing the combination of the two.
174
+ """
154
175
  if type(other) is RunLogPatch:
155
176
  ops = self.ops + other.ops
156
177
  state = jsonpatch.apply_patch(self.state, other.ops)
@@ -161,13 +182,18 @@ class RunLog(RunLogPatch):
161
182
 
162
183
  @override
163
184
  def __repr__(self) -> str:
164
- from pprint import pformat
165
-
166
185
  return f"RunLog({pformat(self.state)})"
167
186
 
168
187
  @override
169
188
  def __eq__(self, other: object) -> bool:
170
- """Check if two RunLogs are equal."""
189
+ """Check if two ``RunLog``s are equal.
190
+
191
+ Args:
192
+ other: The other ``RunLog`` to compare to.
193
+
194
+ Returns:
195
+ True if the ``RunLog``s are equal, False otherwise.
196
+ """
171
197
  # First compare that the state is the same
172
198
  if not isinstance(other, RunLog):
173
199
  return False
@@ -176,7 +202,7 @@ class RunLog(RunLogPatch):
176
202
  # Then compare that the ops are the same
177
203
  return super().__eq__(other)
178
204
 
179
- __hash__ = None # type: ignore[assignment]
205
+ __hash__ = None
180
206
 
181
207
 
182
208
  T = TypeVar("T")
@@ -250,7 +276,11 @@ class LogStreamCallbackHandler(BaseTracer, _StreamingCallbackHandler):
250
276
  self.root_id: Optional[UUID] = None
251
277
 
252
278
  def __aiter__(self) -> AsyncIterator[RunLogPatch]:
253
- """Iterate over the stream of run logs."""
279
+ """Iterate over the stream of run logs.
280
+
281
+ Returns:
282
+ An async iterator over the run log patches.
283
+ """
254
284
  return self.receive_stream.__aiter__()
255
285
 
256
286
  def send(self, *ops: dict[str, Any]) -> bool:
@@ -623,15 +653,24 @@ async def _astream_log_implementation(
623
653
 
624
654
  The implementation has been factored out (at least temporarily) as both
625
655
  astream_log and astream_events relies on it.
626
- """
627
- import jsonpatch
628
-
629
- from langchain_core.callbacks.base import BaseCallbackManager
630
- from langchain_core.tracers.log_stream import (
631
- RunLog,
632
- RunLogPatch,
633
- )
634
656
 
657
+ Args:
658
+ runnable: The runnable to run in streaming mode.
659
+ value: The input to the runnable.
660
+ config: The config to pass to the runnable.
661
+ stream: The stream to send the run logs to.
662
+ diff: Whether to yield run log patches (True) or full run logs (False).
663
+ with_streamed_output_list: Whether to include a list of all streamed
664
+ outputs in each patch. If False, only the final output will be included
665
+ in the patches.
666
+ **kwargs: Additional keyword arguments to pass to the runnable.
667
+
668
+ Raises:
669
+ ValueError: If the callbacks in the config are of an unexpected type.
670
+
671
+ Yields:
672
+ The run log patches or states, depending on the value of ``diff``.
673
+ """
635
674
  # Assign the stream handler to the config
636
675
  config = ensure_config(config)
637
676
  callbacks = config.get("callbacks")
@@ -21,18 +21,10 @@ AsyncListener = Union[
21
21
 
22
22
 
23
23
  class RootListenersTracer(BaseTracer):
24
- """Tracer that calls listeners on run start, end, and error.
25
-
26
- Parameters:
27
- log_missing_parent: Whether to log a warning if the parent is missing.
28
- Default is False.
29
- config: The runnable config.
30
- on_start: The listener to call on run start.
31
- on_end: The listener to call on run end.
32
- on_error: The listener to call on run error.
33
- """
24
+ """Tracer that calls listeners on run start, end, and error."""
34
25
 
35
26
  log_missing_parent = False
27
+ """Whether to log a warning if the parent is missing. Default is False."""
36
28
 
37
29
  def __init__(
38
30
  self,
@@ -84,18 +76,10 @@ class RootListenersTracer(BaseTracer):
84
76
 
85
77
 
86
78
  class AsyncRootListenersTracer(AsyncBaseTracer):
87
- """Async Tracer that calls listeners on run start, end, and error.
88
-
89
- Parameters:
90
- log_missing_parent: Whether to log a warning if the parent is missing.
91
- Default is False.
92
- config: The runnable config.
93
- on_start: The listener to call on run start.
94
- on_end: The listener to call on run end.
95
- on_error: The listener to call on run error.
96
- """
79
+ """Async Tracer that calls listeners on run start, end, and error."""
97
80
 
98
81
  log_missing_parent = False
82
+ """Whether to log a warning if the parent is missing. Default is False."""
99
83
 
100
84
  def __init__(
101
85
  self,
@@ -11,12 +11,6 @@ class RunCollectorCallbackHandler(BaseTracer):
11
11
  """Tracer that collects all nested runs in a list.
12
12
 
13
13
  This tracer is useful for inspection and evaluation purposes.
14
-
15
- Parameters
16
- ----------
17
- name : str, default="run-collector_callback_handler"
18
- example_id : Optional[Union[UUID, str]], default=None
19
- The ID of the example being traced. It can be either a UUID or a string.
20
14
  """
21
15
 
22
16
  name: str = "run-collector_callback_handler"
@@ -26,12 +20,10 @@ class RunCollectorCallbackHandler(BaseTracer):
26
20
  ) -> None:
27
21
  """Initialize the RunCollectorCallbackHandler.
28
22
 
29
- Parameters
30
- ----------
31
- example_id : Optional[Union[UUID, str]], default=None
32
- The ID of the example being traced. It can be either a UUID or a string.
33
- **kwargs : Any
34
- Additional keyword arguments
23
+ Args:
24
+ example_id: The ID of the example being traced. (default: None).
25
+ It can be either a UUID or a string.
26
+ **kwargs: Additional keyword arguments.
35
27
  """
36
28
  super().__init__(**kwargs)
37
29
  self.example_id = (
@@ -42,10 +34,8 @@ class RunCollectorCallbackHandler(BaseTracer):
42
34
  def _persist_run(self, run: Run) -> None:
43
35
  """Persist a run by adding it to the traced_runs list.
44
36
 
45
- Parameters
46
- ----------
47
- run : Run
48
- The run to be persisted.
37
+ Args:
38
+ run: The run to be persisted.
49
39
  """
50
40
  run_ = run.copy()
51
41
  run_.reference_example_id = self.example_id
@@ -18,7 +18,11 @@ from langchain_core._api import deprecated
18
18
 
19
19
  @deprecated("0.1.0", alternative="Use string instead.", removal="1.0")
20
20
  def RunTypeEnum() -> type[RunTypeEnumDep]: # noqa: N802
21
- """RunTypeEnum."""
21
+ """``RunTypeEnum``.
22
+
23
+ Returns:
24
+ The ``RunTypeEnum`` class.
25
+ """
22
26
  warnings.warn(
23
27
  "RunTypeEnum is deprecated. Please directly use a string instead"
24
28
  " (e.g. 'llm', 'chain', 'tool').",
@@ -37,7 +37,7 @@ _no_default = object()
37
37
  # before 3.10, the builtin anext() was not available
38
38
  def py_anext(
39
39
  iterator: AsyncIterator[T], default: Union[T, Any] = _no_default
40
- ) -> Awaitable[Union[T, None, Any]]:
40
+ ) -> Awaitable[Union[T, Any, None]]:
41
41
  """Pure-Python implementation of anext() for testing purposes.
42
42
 
43
43
  Closely matches the builtin anext() C implementation.
@@ -94,7 +94,7 @@ class NoLock:
94
94
  exc_val: Optional[BaseException],
95
95
  exc_tb: Optional[TracebackType],
96
96
  ) -> bool:
97
- """Exception not handled."""
97
+ """Return False, exception not suppressed."""
98
98
  return False
99
99
 
100
100
 
@@ -236,7 +236,11 @@ class Tee(Generic[T]):
236
236
  return self._children[item]
237
237
 
238
238
  def __iter__(self) -> Iterator[AsyncIterator[T]]:
239
- """Iterate over the child iterators."""
239
+ """Iterate over the child iterators.
240
+
241
+ Yields:
242
+ The child iterators.
243
+ """
240
244
  yield from self._children
241
245
 
242
246
  async def __aenter__(self) -> "Tee[T]":
@@ -249,7 +253,11 @@ class Tee(Generic[T]):
249
253
  exc_val: Optional[BaseException],
250
254
  exc_tb: Optional[TracebackType],
251
255
  ) -> bool:
252
- """Close all child iterators."""
256
+ """Close all child iterators.
257
+
258
+ Returns:
259
+ False, exceptions not suppressed.
260
+ """
253
261
  await self.aclose()
254
262
  return False
255
263
 
@@ -318,8 +326,8 @@ async def abatch_iterate(
318
326
  size: The size of the batch.
319
327
  iterable: The async iterable to batch.
320
328
 
321
- Returns:
322
- An async iterator over the batches.
329
+ Yields:
330
+ The batches.
323
331
  """
324
332
  batch: list[T] = []
325
333
  async for element in iterable:
@@ -39,6 +39,9 @@ def get_from_dict_or_env(
39
39
  in the dictionary.
40
40
  default: The default value to return if the key is not in the dictionary
41
41
  or the environment. Defaults to None.
42
+
43
+ Returns:
44
+ The dict value or the environment variable value.
42
45
  """
43
46
  if isinstance(key, (list, tuple)):
44
47
  for k in key:
@@ -21,8 +21,10 @@ from typing import (
21
21
 
22
22
  from pydantic import BaseModel
23
23
  from pydantic.v1 import BaseModel as BaseModelV1
24
+ from pydantic.v1 import Field, create_model
24
25
  from typing_extensions import TypedDict, get_args, get_origin, is_typeddict
25
26
 
27
+ import langchain_core
26
28
  from langchain_core._api import beta, deprecated
27
29
  from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
28
30
  from langchain_core.utils.json_schema import dereference_refs
@@ -146,6 +148,9 @@ def _convert_pydantic_to_openai_function(
146
148
  of the schema will be used.
147
149
  rm_titles: Whether to remove titles from the schema. Defaults to True.
148
150
 
151
+ Raises:
152
+ TypeError: If the model is not a Pydantic model.
153
+
149
154
  Returns:
150
155
  The function description.
151
156
  """
@@ -217,10 +222,8 @@ def _convert_python_function_to_openai_function(
217
222
  Returns:
218
223
  The OpenAI function description.
219
224
  """
220
- from langchain_core.tools.base import create_schema_from_function
221
-
222
225
  func_name = _get_python_function_name(function)
223
- model = create_schema_from_function(
226
+ model = langchain_core.tools.base.create_schema_from_function(
224
227
  func_name,
225
228
  function,
226
229
  filter_args=(),
@@ -261,9 +264,6 @@ def _convert_any_typed_dicts_to_pydantic(
261
264
  visited: dict,
262
265
  depth: int = 0,
263
266
  ) -> type:
264
- from pydantic.v1 import Field as Field_v1
265
- from pydantic.v1 import create_model as create_model_v1
266
-
267
267
  if type_ in visited:
268
268
  return visited[type_]
269
269
  if depth >= _MAX_TYPED_DICT_RECURSION:
@@ -277,7 +277,7 @@ def _convert_any_typed_dicts_to_pydantic(
277
277
  )
278
278
  fields: dict = {}
279
279
  for arg, arg_type in annotations_.items():
280
- if get_origin(arg_type) is Annotated:
280
+ if get_origin(arg_type) is Annotated: # type: ignore[comparison-overlap]
281
281
  annotated_args = get_args(arg_type)
282
282
  new_arg_type = _convert_any_typed_dicts_to_pydantic(
283
283
  annotated_args[0], depth=depth + 1, visited=visited
@@ -294,7 +294,7 @@ def _convert_any_typed_dicts_to_pydantic(
294
294
  raise ValueError(msg)
295
295
  if arg_desc := arg_descriptions.get(arg):
296
296
  field_kwargs["description"] = arg_desc
297
- fields[arg] = (new_arg_type, Field_v1(**field_kwargs))
297
+ fields[arg] = (new_arg_type, Field(**field_kwargs))
298
298
  else:
299
299
  new_arg_type = _convert_any_typed_dicts_to_pydantic(
300
300
  arg_type, depth=depth + 1, visited=visited
@@ -302,8 +302,8 @@ def _convert_any_typed_dicts_to_pydantic(
302
302
  field_kwargs = {"default": ...}
303
303
  if arg_desc := arg_descriptions.get(arg):
304
304
  field_kwargs["description"] = arg_desc
305
- fields[arg] = (new_arg_type, Field_v1(**field_kwargs))
306
- model = create_model_v1(typed_dict.__name__, **fields)
305
+ fields[arg] = (new_arg_type, Field(**field_kwargs))
306
+ model = create_model(typed_dict.__name__, **fields)
307
307
  model.__doc__ = description
308
308
  visited[typed_dict] = model
309
309
  return model
@@ -323,12 +323,15 @@ def _format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription:
323
323
  Args:
324
324
  tool: The tool to format.
325
325
 
326
+ Raises:
327
+ ValueError: If the tool call schema is not supported.
328
+
326
329
  Returns:
327
330
  The function description.
328
331
  """
329
- from langchain_core.tools import simple
330
-
331
- is_simple_oai_tool = isinstance(tool, simple.Tool) and not tool.args_schema
332
+ is_simple_oai_tool = (
333
+ isinstance(tool, langchain_core.tools.simple.Tool) and not tool.args_schema
334
+ )
332
335
  if tool.tool_call_schema and not is_simple_oai_tool:
333
336
  if isinstance(tool.tool_call_schema, dict):
334
337
  return _convert_json_schema_to_openai_function(
@@ -429,8 +432,6 @@ def convert_to_openai_function(
429
432
  'description' and 'parameters' keys are now optional. Only 'name' is
430
433
  required and guaranteed to be part of the output.
431
434
  """
432
- from langchain_core.tools import BaseTool
433
-
434
435
  # an Anthropic format tool
435
436
  if isinstance(function, dict) and all(
436
437
  k in function for k in ("name", "input_schema")
@@ -470,7 +471,7 @@ def convert_to_openai_function(
470
471
  oai_function = cast(
471
472
  "dict", _convert_typed_dict_to_openai_function(cast("type", function))
472
473
  )
473
- elif isinstance(function, BaseTool):
474
+ elif isinstance(function, langchain_core.tools.base.BaseTool):
474
475
  oai_function = cast("dict", _format_tool_to_openai_function(function))
475
476
  elif callable(function):
476
477
  oai_function = cast(
@@ -515,6 +516,7 @@ _WellKnownOpenAITools = (
515
516
  "mcp",
516
517
  "image_generation",
517
518
  "web_search_preview",
519
+ "web_search",
518
520
  )
519
521
 
520
522
 
@@ -575,7 +577,8 @@ def convert_to_openai_tool(
575
577
 
576
578
  Added support for OpenAI's image generation built-in tool.
577
579
  """
578
- from langchain_core.tools import Tool
580
+ # Import locally to prevent circular import
581
+ from langchain_core.tools import Tool # noqa: PLC0415
579
582
 
580
583
  if isinstance(tool, dict):
581
584
  if tool.get("type") in _WellKnownOpenAITools:
@@ -601,7 +604,20 @@ def convert_to_json_schema(
601
604
  *,
602
605
  strict: Optional[bool] = None,
603
606
  ) -> dict[str, Any]:
604
- """Convert a schema representation to a JSON schema."""
607
+ """Convert a schema representation to a JSON schema.
608
+
609
+ Args:
610
+ schema: The schema to convert.
611
+ strict: If True, model output is guaranteed to exactly match the JSON Schema
612
+ provided in the function definition. If None, ``strict`` argument will not
613
+ be included in function definition.
614
+
615
+ Raises:
616
+ ValueError: If the input is not a valid OpenAI-format tool.
617
+
618
+ Returns:
619
+ A JSON schema representation of the input schema.
620
+ """
605
621
  openai_tool = convert_to_openai_tool(schema, strict=strict)
606
622
  if (
607
623
  not isinstance(openai_tool, dict)
@@ -627,7 +643,7 @@ def convert_to_json_schema(
627
643
 
628
644
  @beta()
629
645
  def tool_example_to_messages(
630
- input: str, # noqa: A002
646
+ input: str,
631
647
  tool_calls: list[BaseModel],
632
648
  tool_outputs: Optional[list[str]] = None,
633
649
  *,
@@ -640,15 +656,16 @@ def tool_example_to_messages(
640
656
 
641
657
  The list of messages per example by default corresponds to:
642
658
 
643
- 1) HumanMessage: contains the content from which content should be extracted.
644
- 2) AIMessage: contains the extracted information from the model
645
- 3) ToolMessage: contains confirmation to the model that the model requested a tool
646
- correctly.
659
+ 1. ``HumanMessage``: contains the content from which content should be extracted.
660
+ 2. ``AIMessage``: contains the extracted information from the model
661
+ 3. ``ToolMessage``: contains confirmation to the model that the model requested a
662
+ tool correctly.
647
663
 
648
- If `ai_response` is specified, there will be a final AIMessage with that response.
664
+ If ``ai_response`` is specified, there will be a final ``AIMessage`` with that
665
+ response.
649
666
 
650
- The ToolMessage is required because some chat models are hyper-optimized for agents
651
- rather than for an extraction use case.
667
+ The ``ToolMessage`` is required because some chat models are hyper-optimized for
668
+ agents rather than for an extraction use case.
652
669
 
653
670
  Arguments:
654
671
  input: string, the user input
@@ -657,7 +674,7 @@ def tool_example_to_messages(
657
674
  tool_outputs: Optional[list[str]], a list of tool call outputs.
658
675
  Does not need to be provided. If not provided, a placeholder value
659
676
  will be inserted. Defaults to None.
660
- ai_response: Optional[str], if provided, content for a final AIMessage.
677
+ ai_response: Optional[str], if provided, content for a final ``AIMessage``.
661
678
 
662
679
  Returns:
663
680
  A list of messages
@@ -670,8 +687,10 @@ def tool_example_to_messages(
670
687
  from pydantic import BaseModel, Field
671
688
  from langchain_openai import ChatOpenAI
672
689
 
690
+
673
691
  class Person(BaseModel):
674
692
  '''Information about a person.'''
693
+
675
694
  name: Optional[str] = Field(..., description="The name of the person")
676
695
  hair_color: Optional[str] = Field(
677
696
  ..., description="The color of the person's hair if known"
@@ -680,6 +699,7 @@ def tool_example_to_messages(
680
699
  ..., description="Height in METERS"
681
700
  )
682
701
 
702
+
683
703
  examples = [
684
704
  (
685
705
  "The ocean is vast and blue. It's more than 20,000 feet deep.",
@@ -695,9 +715,7 @@ def tool_example_to_messages(
695
715
  messages = []
696
716
 
697
717
  for txt, tool_call in examples:
698
- messages.extend(
699
- tool_example_to_messages(txt, [tool_call])
700
- )
718
+ messages.extend(tool_example_to_messages(txt, [tool_call]))
701
719
 
702
720
  """
703
721
  messages: list[BaseMessage] = [HumanMessage(content=input)]
@@ -739,6 +757,7 @@ def _parse_google_docstring(
739
757
  """Parse the function and argument descriptions from the docstring of a function.
740
758
 
741
759
  Assumes the function docstring follows Google Python style guide.
760
+
742
761
  """
743
762
  if docstring:
744
763
  docstring_blocks = docstring.split("\n\n")
@@ -1,8 +1,12 @@
1
1
  """Utilities for working with interactive environments."""
2
2
 
3
+ import sys
4
+
3
5
 
4
6
  def is_interactive_env() -> bool:
5
- """Determine if running within IPython or Jupyter."""
6
- import sys
7
+ """Determine if running within IPython or Jupyter.
7
8
 
9
+ Returns:
10
+ True if running in an interactive environment, False otherwise.
11
+ """
8
12
  return hasattr(sys, "ps2")
@@ -31,7 +31,7 @@ class NoLock:
31
31
  exc_val: Optional[BaseException],
32
32
  exc_tb: Optional[TracebackType],
33
33
  ) -> Literal[False]:
34
- """Exception not handled."""
34
+ """Return False (exception not suppressed)."""
35
35
  return False
36
36
 
37
37
 
@@ -173,7 +173,11 @@ class Tee(Generic[T]):
173
173
  return self._children[item]
174
174
 
175
175
  def __iter__(self) -> Iterator[Iterator[T]]:
176
- """Return an iterator over the child iterators."""
176
+ """Return an iterator over the child iterators.
177
+
178
+ Yields:
179
+ The child iterators.
180
+ """
177
181
  yield from self._children
178
182
 
179
183
  def __enter__(self) -> "Tee[T]":
@@ -186,7 +190,11 @@ class Tee(Generic[T]):
186
190
  exc_val: Optional[BaseException],
187
191
  exc_tb: Optional[TracebackType],
188
192
  ) -> Literal[False]:
189
- """Close all child iterators."""
193
+ """Close all child iterators.
194
+
195
+ Returns:
196
+ False (exception not suppressed).
197
+ """
190
198
  self.close()
191
199
  return False
192
200
 
@@ -4,7 +4,7 @@ from __future__ import annotations
4
4
 
5
5
  import json
6
6
  import re
7
- from typing import Any, Callable
7
+ from typing import Any, Callable, Union
8
8
 
9
9
  from langchain_core.exceptions import OutputParserException
10
10
 
@@ -19,13 +19,16 @@ def _replace_new_line(match: re.Match[str]) -> str:
19
19
  return match.group(1) + value + match.group(3)
20
20
 
21
21
 
22
- def _custom_parser(multiline_string: str) -> str:
22
+ def _custom_parser(multiline_string: Union[str, bytes, bytearray]) -> str:
23
23
  r"""Custom parser for multiline strings.
24
24
 
25
25
  The LLM response for `action_input` may be a multiline
26
26
  string containing unescaped newlines, tabs or quotes. This function
27
27
  replaces those characters with their escaped counterparts.
28
28
  (newlines in JSON must be double-escaped: `\\n`).
29
+
30
+ Returns:
31
+ The modified string with escaped newlines, tabs and quotes.
29
32
  """
30
33
  if isinstance(multiline_string, (bytes, bytearray)):
31
34
  multiline_string = multiline_string.decode()
@@ -3,13 +3,13 @@
3
3
  from __future__ import annotations
4
4
 
5
5
  from copy import deepcopy
6
- from typing import TYPE_CHECKING, Any, Optional
6
+ from typing import TYPE_CHECKING, Any, Optional, Union
7
7
 
8
8
  if TYPE_CHECKING:
9
9
  from collections.abc import Sequence
10
10
 
11
11
 
12
- def _retrieve_ref(path: str, schema: dict) -> dict:
12
+ def _retrieve_ref(path: str, schema: dict) -> Union[list, dict]:
13
13
  components = path.split("/")
14
14
  if components[0] != "#":
15
15
  msg = (
@@ -17,9 +17,12 @@ def _retrieve_ref(path: str, schema: dict) -> dict:
17
17
  "with #."
18
18
  )
19
19
  raise ValueError(msg)
20
- out = schema
20
+ out: Union[list, dict] = schema
21
21
  for component in components[1:]:
22
22
  if component in out:
23
+ if isinstance(out, list):
24
+ msg = f"Reference '{path}' not found."
25
+ raise KeyError(msg)
23
26
  out = out[component]
24
27
  elif component.isdigit():
25
28
  index = int(component)
@@ -46,10 +49,14 @@ def _dereference_refs_helper(
46
49
  """Inline every pure {'$ref':...}.
47
50
 
48
51
  But:
52
+
49
53
  - if shallow_refs=True: only break cycles, do not inline nested refs
50
54
  - if shallow_refs=False: deep-inline all nested refs
51
55
 
52
56
  Also skip recursion under any key in skip_keys.
57
+
58
+ Returns:
59
+ The object with refs dereferenced.
53
60
  """
54
61
  if processed_refs is None:
55
62
  processed_refs = set()
@@ -112,9 +119,12 @@ def dereference_refs(
112
119
  full_schema: The complete schema (defaults to schema_obj).
113
120
  skip_keys:
114
121
  - If None (the default), we skip recursion under '$defs' *and* only
115
- shallow-inline refs.
122
+ shallow-inline refs.
116
123
  - If provided (even as an empty list), we will recurse under every key and
117
- deep-inline all refs.
124
+ deep-inline all refs.
125
+
126
+ Returns:
127
+ The schema with refs dereferenced.
118
128
  """
119
129
  full = full_schema or schema_obj
120
130
  keys_to_skip = list(skip_keys) if skip_keys is not None else ["$defs"]