langchain-core 0.3.74__py3-none-any.whl → 0.3.76__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (122)
  1. langchain_core/_api/beta_decorator.py +18 -41
  2. langchain_core/_api/deprecation.py +20 -7
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/beta/runnables/context.py +2 -3
  7. langchain_core/callbacks/base.py +11 -4
  8. langchain_core/callbacks/file.py +13 -2
  9. langchain_core/callbacks/manager.py +129 -78
  10. langchain_core/callbacks/usage.py +4 -2
  11. langchain_core/chat_history.py +10 -12
  12. langchain_core/document_loaders/base.py +34 -9
  13. langchain_core/document_loaders/langsmith.py +3 -0
  14. langchain_core/documents/base.py +36 -11
  15. langchain_core/documents/compressor.py +9 -6
  16. langchain_core/documents/transformers.py +4 -2
  17. langchain_core/embeddings/fake.py +8 -5
  18. langchain_core/env.py +2 -3
  19. langchain_core/example_selectors/base.py +12 -0
  20. langchain_core/exceptions.py +7 -0
  21. langchain_core/globals.py +17 -28
  22. langchain_core/indexing/api.py +56 -44
  23. langchain_core/indexing/base.py +7 -10
  24. langchain_core/indexing/in_memory.py +23 -3
  25. langchain_core/language_models/__init__.py +3 -2
  26. langchain_core/language_models/base.py +64 -39
  27. langchain_core/language_models/chat_models.py +130 -42
  28. langchain_core/language_models/fake_chat_models.py +10 -11
  29. langchain_core/language_models/llms.py +49 -17
  30. langchain_core/load/dump.py +5 -7
  31. langchain_core/load/load.py +15 -1
  32. langchain_core/load/serializable.py +38 -43
  33. langchain_core/memory.py +7 -3
  34. langchain_core/messages/ai.py +36 -16
  35. langchain_core/messages/base.py +13 -6
  36. langchain_core/messages/content_blocks.py +23 -2
  37. langchain_core/messages/human.py +2 -6
  38. langchain_core/messages/modifier.py +1 -1
  39. langchain_core/messages/system.py +2 -6
  40. langchain_core/messages/tool.py +36 -16
  41. langchain_core/messages/utils.py +198 -87
  42. langchain_core/output_parsers/base.py +5 -2
  43. langchain_core/output_parsers/json.py +4 -4
  44. langchain_core/output_parsers/list.py +7 -22
  45. langchain_core/output_parsers/openai_functions.py +3 -0
  46. langchain_core/output_parsers/openai_tools.py +8 -1
  47. langchain_core/output_parsers/pydantic.py +4 -0
  48. langchain_core/output_parsers/string.py +5 -1
  49. langchain_core/output_parsers/transform.py +2 -2
  50. langchain_core/output_parsers/xml.py +23 -22
  51. langchain_core/outputs/chat_generation.py +18 -7
  52. langchain_core/outputs/generation.py +14 -3
  53. langchain_core/outputs/llm_result.py +8 -1
  54. langchain_core/prompt_values.py +10 -4
  55. langchain_core/prompts/base.py +4 -9
  56. langchain_core/prompts/chat.py +88 -61
  57. langchain_core/prompts/dict.py +16 -8
  58. langchain_core/prompts/few_shot.py +9 -11
  59. langchain_core/prompts/few_shot_with_templates.py +5 -1
  60. langchain_core/prompts/image.py +12 -5
  61. langchain_core/prompts/message.py +5 -6
  62. langchain_core/prompts/pipeline.py +13 -8
  63. langchain_core/prompts/prompt.py +22 -8
  64. langchain_core/prompts/string.py +18 -10
  65. langchain_core/prompts/structured.py +7 -2
  66. langchain_core/rate_limiters.py +2 -2
  67. langchain_core/retrievers.py +7 -6
  68. langchain_core/runnables/base.py +842 -567
  69. langchain_core/runnables/branch.py +15 -20
  70. langchain_core/runnables/config.py +11 -17
  71. langchain_core/runnables/configurable.py +34 -19
  72. langchain_core/runnables/fallbacks.py +24 -17
  73. langchain_core/runnables/graph.py +47 -40
  74. langchain_core/runnables/graph_ascii.py +40 -17
  75. langchain_core/runnables/graph_mermaid.py +27 -15
  76. langchain_core/runnables/graph_png.py +27 -31
  77. langchain_core/runnables/history.py +56 -59
  78. langchain_core/runnables/passthrough.py +47 -24
  79. langchain_core/runnables/retry.py +10 -6
  80. langchain_core/runnables/router.py +10 -9
  81. langchain_core/runnables/schema.py +2 -0
  82. langchain_core/runnables/utils.py +51 -89
  83. langchain_core/stores.py +13 -25
  84. langchain_core/structured_query.py +3 -7
  85. langchain_core/sys_info.py +9 -8
  86. langchain_core/tools/base.py +30 -23
  87. langchain_core/tools/convert.py +24 -13
  88. langchain_core/tools/simple.py +35 -3
  89. langchain_core/tools/structured.py +26 -3
  90. langchain_core/tracers/_streaming.py +6 -7
  91. langchain_core/tracers/base.py +2 -2
  92. langchain_core/tracers/context.py +5 -1
  93. langchain_core/tracers/core.py +109 -39
  94. langchain_core/tracers/evaluation.py +22 -26
  95. langchain_core/tracers/event_stream.py +41 -28
  96. langchain_core/tracers/langchain.py +12 -3
  97. langchain_core/tracers/langchain_v1.py +10 -2
  98. langchain_core/tracers/log_stream.py +57 -18
  99. langchain_core/tracers/root_listeners.py +4 -20
  100. langchain_core/tracers/run_collector.py +6 -16
  101. langchain_core/tracers/schemas.py +5 -1
  102. langchain_core/utils/aiter.py +14 -6
  103. langchain_core/utils/env.py +3 -0
  104. langchain_core/utils/function_calling.py +49 -30
  105. langchain_core/utils/interactive_env.py +6 -2
  106. langchain_core/utils/iter.py +11 -3
  107. langchain_core/utils/json.py +5 -2
  108. langchain_core/utils/json_schema.py +15 -5
  109. langchain_core/utils/loading.py +5 -1
  110. langchain_core/utils/mustache.py +24 -15
  111. langchain_core/utils/pydantic.py +32 -4
  112. langchain_core/utils/utils.py +24 -8
  113. langchain_core/vectorstores/base.py +7 -20
  114. langchain_core/vectorstores/in_memory.py +18 -12
  115. langchain_core/vectorstores/utils.py +18 -12
  116. langchain_core/version.py +1 -1
  117. langchain_core-0.3.76.dist-info/METADATA +77 -0
  118. langchain_core-0.3.76.dist-info/RECORD +174 -0
  119. langchain_core-0.3.74.dist-info/METADATA +0 -108
  120. langchain_core-0.3.74.dist-info/RECORD +0 -174
  121. {langchain_core-0.3.74.dist-info → langchain_core-0.3.76.dist-info}/WHEEL +0 -0
  122. {langchain_core-0.3.74.dist-info → langchain_core-0.3.76.dist-info}/entry_points.txt +0 -0
langchain_core/stores.py CHANGED
@@ -16,6 +16,8 @@ from typing import (
     Union,
 )
 
+from typing_extensions import override
+
 from langchain_core.exceptions import LangChainException
 from langchain_core.runnables import run_in_executor
 
@@ -52,8 +54,8 @@ class BaseStore(ABC, Generic[K, V]):
 
         from langchain.storage import BaseStore
 
-        class MyInMemoryStore(BaseStore[str, int]):
 
+        class MyInMemoryStore(BaseStore[str, int]):
             def __init__(self):
                 self.store = {}
 
@@ -206,27 +208,13 @@ class InMemoryBaseStore(BaseStore[str, V], Generic[V]):
         """
         return self.mget(keys)
 
+    @override
     def mset(self, key_value_pairs: Sequence[tuple[str, V]]) -> None:
-        """Set the values for the given keys.
-
-        Args:
-            key_value_pairs (Sequence[tuple[str, V]]): A sequence of key-value pairs.
-
-        Returns:
-            None
-        """
         for key, value in key_value_pairs:
             self.store[key] = value
 
+    @override
     async def amset(self, key_value_pairs: Sequence[tuple[str, V]]) -> None:
-        """Async set the values for the given keys.
-
-        Args:
-            key_value_pairs (Sequence[tuple[str, V]]): A sequence of key-value pairs.
-
-        Returns:
-            None
-        """
         return self.mset(key_value_pairs)
 
     def mdelete(self, keys: Sequence[str]) -> None:
@@ -295,13 +283,13 @@ class InMemoryStore(InMemoryBaseStore[Any]):
         from langchain.storage import InMemoryStore
 
         store = InMemoryStore()
-        store.mset([('key1', 'value1'), ('key2', 'value2')])
-        store.mget(['key1', 'key2'])
+        store.mset([("key1", "value1"), ("key2", "value2")])
+        store.mget(["key1", "key2"])
         # ['value1', 'value2']
-        store.mdelete(['key1'])
+        store.mdelete(["key1"])
         list(store.yield_keys())
         # ['key2']
-        list(store.yield_keys(prefix='k'))
+        list(store.yield_keys(prefix="k"))
         # ['key2']
 
     """
@@ -321,13 +309,13 @@ class InMemoryByteStore(InMemoryBaseStore[bytes]):
         from langchain.storage import InMemoryByteStore
 
         store = InMemoryByteStore()
-        store.mset([('key1', b'value1'), ('key2', b'value2')])
-        store.mget(['key1', 'key2'])
+        store.mset([("key1", b"value1"), ("key2", b"value2")])
+        store.mget(["key1", "key2"])
         # [b'value1', b'value2']
-        store.mdelete(['key1'])
+        store.mdelete(["key1"])
         list(store.yield_keys())
         # ['key2']
-        list(store.yield_keys(prefix='k'))
+        list(store.yield_keys(prefix="k"))
         # ['key2']
 
     """
langchain_core/structured_query.py CHANGED
@@ -143,7 +143,7 @@ class Comparison(FilterDirective):
             value: The value to compare to.
         """
         # super exists from BaseModel
-        super().__init__(  # type: ignore[call-arg]
+        super().__init__(
             comparator=comparator, attribute=attribute, value=value, **kwargs
         )
 
@@ -166,9 +166,7 @@ class Operation(FilterDirective):
             arguments: The arguments to the operator.
         """
         # super exists from BaseModel
-        super().__init__(  # type: ignore[call-arg]
-            operator=operator, arguments=arguments, **kwargs
-        )
+        super().__init__(operator=operator, arguments=arguments, **kwargs)
 
 
 class StructuredQuery(Expr):
@@ -196,6 +194,4 @@ class StructuredQuery(Expr):
             limit: The limit on the number of results.
         """
         # super exists from BaseModel
-        super().__init__(  # type: ignore[call-arg]
-            query=query, filter=filter, limit=limit, **kwargs
-        )
+        super().__init__(query=query, filter=filter, limit=limit, **kwargs)
langchain_core/sys_info.py CHANGED
@@ -1,12 +1,18 @@
-"""**sys_info** prints information about the system and langchain packages for debugging purposes."""  # noqa: E501
+"""**sys_info** implementation.
 
+sys_info prints information about the system and langchain packages for
+debugging purposes.
+"""
+
+import pkgutil
+import platform
+import sys
 from collections.abc import Sequence
+from importlib import metadata, util
 
 
 def _get_sub_deps(packages: Sequence[str]) -> list[str]:
     """Get any specified sub-dependencies."""
-    from importlib import metadata
-
     sub_deps = set()
     underscored_packages = {pkg.replace("-", "_") for pkg in packages}
 
@@ -33,11 +39,6 @@ def print_sys_info(*, additional_pkgs: Sequence[str] = ()) -> None:
     Args:
         additional_pkgs: Additional packages to include in the output.
     """
-    import pkgutil
-    import platform
-    import sys
-    from importlib import metadata, util
-
     # Packages that do not start with "langchain" prefix.
     other_langchain_packages = [
         "langserve",
@@ -81,6 +81,7 @@ TOOL_MESSAGE_BLOCK_TYPES = (
81
81
  "json",
82
82
  "search_result",
83
83
  "custom_tool_call_output",
84
+ "document",
84
85
  )
85
86
 
86
87
 
@@ -271,15 +272,12 @@ def _function_annotations_are_pydantic_v1(
271
272
 
272
273
 
273
274
  class _SchemaConfig:
274
- """Configuration for Pydantic models generated from function signatures.
275
-
276
- Attributes:
277
- extra: Whether to allow extra fields in the model.
278
- arbitrary_types_allowed: Whether to allow arbitrary types in the model.
279
- """
275
+ """Configuration for Pydantic models generated from function signatures."""
280
276
 
281
277
  extra: str = "forbid"
278
+ """Whether to allow extra fields in the model."""
282
279
  arbitrary_types_allowed: bool = True
280
+ """Whether to allow arbitrary types in the model."""
283
281
 
284
282
 
285
283
  def create_schema_from_function(
@@ -506,7 +504,12 @@ class ChildTool(BaseTool):
506
504
  """
507
505
 
508
506
  def __init__(self, **kwargs: Any) -> None:
509
- """Initialize the tool."""
507
+ """Initialize the tool.
508
+
509
+ Raises:
510
+ TypeError: If ``args_schema`` is not a subclass of pydantic ``BaseModel`` or
511
+ dict.
512
+ """
510
513
  if (
511
514
  "args_schema" in kwargs
512
515
  and kwargs["args_schema"] is not None
@@ -628,9 +631,10 @@ class ChildTool(BaseTool):
628
631
  The parsed and validated input.
629
632
 
630
633
  Raises:
631
- ValueError: If string input is provided with JSON schema or if
632
- InjectedToolCallId is required but not provided.
633
- NotImplementedError: If args_schema is not a supported type.
634
+ ValueError: If string input is provided with JSON schema ``args_schema``.
635
+ ValueError: If InjectedToolCallId is required but ``tool_call_id`` is not
636
+ provided.
637
+ TypeError: If args_schema is not a Pydantic ``BaseModel`` or dict.
634
638
  """
635
639
  input_args = self.args_schema
636
640
  if isinstance(tool_input, str):
@@ -655,10 +659,7 @@ class ChildTool(BaseTool):
655
659
  return tool_input
656
660
  if issubclass(input_args, BaseModel):
657
661
  for k, v in get_all_basemodel_annotations(input_args).items():
658
- if (
659
- _is_injected_arg_type(v, injected_type=InjectedToolCallId)
660
- and k not in tool_input
661
- ):
662
+ if _is_injected_arg_type(v, injected_type=InjectedToolCallId):
662
663
  if tool_call_id is None:
663
664
  msg = (
664
665
  "When tool includes an InjectedToolCallId "
@@ -673,10 +674,7 @@ class ChildTool(BaseTool):
673
674
  result_dict = result.model_dump()
674
675
  elif issubclass(input_args, BaseModelV1):
675
676
  for k, v in get_all_basemodel_annotations(input_args).items():
676
- if (
677
- _is_injected_arg_type(v, injected_type=InjectedToolCallId)
678
- and k not in tool_input
679
- ):
677
+ if _is_injected_arg_type(v, injected_type=InjectedToolCallId):
680
678
  if tool_call_id is None:
681
679
  msg = (
682
680
  "When tool includes an InjectedToolCallId "
@@ -725,6 +723,9 @@ class ChildTool(BaseTool):
725
723
 
726
724
  Add run_manager: Optional[CallbackManagerForToolRun] = None
727
725
  to child implementations to enable tracing.
726
+
727
+ Returns:
728
+ The result of the tool execution.
728
729
  """
729
730
 
730
731
  async def _arun(self, *args: Any, **kwargs: Any) -> Any:
@@ -732,6 +733,9 @@ class ChildTool(BaseTool):
732
733
 
733
734
  Add run_manager: Optional[AsyncCallbackManagerForToolRun] = None
734
735
  to child implementations to enable tracing.
736
+
737
+ Returns:
738
+ The result of the tool execution.
735
739
  """
736
740
  if kwargs.get("run_manager") and signature(self._run).parameters.get(
737
741
  "run_manager"
@@ -1285,7 +1289,7 @@ class InjectedToolCallId(InjectedToolArg):
1285
1289
 
1286
1290
 
1287
1291
  def _is_injected_arg_type(
1288
- type_: type, injected_type: Optional[type[InjectedToolArg]] = None
1292
+ type_: Union[type, TypeVar], injected_type: Optional[type[InjectedToolArg]] = None
1289
1293
  ) -> bool:
1290
1294
  """Check if a type annotation indicates an injected argument.
1291
1295
 
@@ -1306,12 +1310,15 @@ def _is_injected_arg_type(
1306
1310
 
1307
1311
  def get_all_basemodel_annotations(
1308
1312
  cls: Union[TypeBaseModel, Any], *, default_to_bound: bool = True
1309
- ) -> dict[str, type]:
1313
+ ) -> dict[str, Union[type, TypeVar]]:
1310
1314
  """Get all annotations from a Pydantic BaseModel and its parents.
1311
1315
 
1312
1316
  Args:
1313
1317
  cls: The Pydantic BaseModel class.
1314
1318
  default_to_bound: Whether to default to the bound of a TypeVar if it exists.
1319
+
1320
+ Returns:
1321
+ A dictionary of field names to their type annotations.
1315
1322
  """
1316
1323
  # cls has no subscript: cls = FooBar
1317
1324
  if isinstance(cls, type):
@@ -1319,7 +1326,7 @@ def get_all_basemodel_annotations(
1319
1326
  fields = getattr(cls, "model_fields", {}) or getattr(cls, "__fields__", {})
1320
1327
  alias_map = {field.alias: name for name, field in fields.items() if field.alias}
1321
1328
 
1322
- annotations: dict[str, type] = {}
1329
+ annotations: dict[str, Union[type, TypeVar]] = {}
1323
1330
  for name, param in inspect.signature(cls).parameters.items():
1324
1331
  # Exclude hidden init args added by pydantic Config. For example if
1325
1332
  # BaseModel(extra="allow") then "extra_data" will part of init sig.
@@ -1373,11 +1380,11 @@ def get_all_basemodel_annotations(
1373
1380
 
1374
1381
 
1375
1382
  def _replace_type_vars(
1376
- type_: type,
1383
+ type_: Union[type, TypeVar],
1377
1384
  generic_map: Optional[dict[TypeVar, type]] = None,
1378
1385
  *,
1379
1386
  default_to_bound: bool = True,
1380
- ) -> type:
1387
+ ) -> Union[type, TypeVar]:
1381
1388
  """Replace TypeVars in a type annotation with concrete types.
1382
1389
 
1383
1390
  Args:
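The `_parse_input` hunks above now run the injected-id check whenever an `InjectedToolCallId` annotation is present, regardless of whether the key already appears in the input, and require a `tool_call_id` in that case. A hedged sketch of how that annotation is normally wired up (the `echo` tool is hypothetical):

```python
from typing import Annotated

from langchain_core.messages import ToolMessage
from langchain_core.tools import InjectedToolCallId, tool


@tool
def echo(text: str, tool_call_id: Annotated[str, InjectedToolCallId]) -> ToolMessage:
    """Echo the input back as a ToolMessage."""
    # tool_call_id is hidden from the model-facing schema and injected at
    # runtime, which is why parsing needs the id whenever the annotation exists.
    return ToolMessage(content=text, tool_call_id=tool_call_id)


# Invoking with a full tool call dict supplies the id that gets injected.
echo.invoke(
    {"name": "echo", "args": {"text": "hi"}, "id": "call_123", "type": "tool_call"}
)
```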
langchain_core/tools/convert.py CHANGED
@@ -92,12 +92,13 @@ def tool(
             positional argument.
         description: Optional description for the tool.
             Precedence for the tool description value is as follows:
-            - `description` argument
-                (used even if docstring and/or `args_schema` are provided)
-            - tool function docstring
-                (used even if `args_schema` is provided)
-            - `args_schema` description
-                (used only if `description` / docstring are not provided)
+
+            - ``description`` argument
+                (used even if docstring and/or ``args_schema`` are provided)
+            - tool function docstring
+                (used even if ``args_schema`` is provided)
+            - ``args_schema`` description
+                (used only if `description` / docstring are not provided)
         *args: Extra positional arguments. Must be empty.
         return_direct: Whether to return directly from the tool rather
             than continuing the agent loop. Defaults to False.
@@ -119,6 +120,17 @@ def tool(
             whether to raise ValueError on invalid Google Style docstrings.
             Defaults to True.
 
+    Raises:
+        ValueError: If too many positional arguments are provided.
+        ValueError: If a runnable is provided without a string name.
+        ValueError: If the first argument is not a string or callable with
+            a ``__name__`` attribute.
+        ValueError: If the function does not have a docstring and description
+            is not provided and ``infer_schema`` is False.
+        ValueError: If ``parse_docstring`` is True and the function has an invalid
+            Google-style docstring and ``error_on_invalid_docstring`` is True.
+        ValueError: If a Runnable is provided that does not have an object schema.
+
     Returns:
         The tool.
 
@@ -134,11 +146,13 @@ def tool(
            # Searches the API for the query.
            return
 
+
        @tool("search", return_direct=True)
        def search_api(query: str) -> str:
            # Searches the API for the query.
            return
 
+
        @tool(response_format="content_and_artifact")
        def search_api(query: str) -> tuple[str, dict]:
            return "partial json of results", {"full": "object of results"}
@@ -171,18 +185,15 @@ def tool(
                "bar": {
                    "title": "Bar",
                    "description": "The bar.",
-                    "type": "string"
+                    "type": "string",
                },
                "baz": {
                    "title": "Baz",
                    "description": "The baz.",
-                    "type": "integer"
-                }
+                    "type": "integer",
+                },
            },
-            "required": [
-                "bar",
-                "baz"
-            ]
+            "required": ["bar", "baz"],
        }
 
     Note that parsing by default will raise ``ValueError`` if the docstring
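Since the docstring above now spells out the description precedence, here is a small sketch of what it means in practice (the `search_api` function is hypothetical):

```python
from langchain_core.tools import tool


@tool(description="Look up a query in the search API.")
def search_api(query: str) -> str:
    """Used only when no explicit description argument is given."""
    return f"results for {query}"


# Per the precedence above, the explicit argument wins over the docstring:
# search_api.description == "Look up a query in the search API."
```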
langchain_core/tools/simple.py CHANGED
@@ -76,7 +76,19 @@ class Tool(BaseTool):
     def _to_args_and_kwargs(
         self, tool_input: Union[str, dict], tool_call_id: Optional[str]
     ) -> tuple[tuple, dict]:
-        """Convert tool input to pydantic model."""
+        """Convert tool input to pydantic model.
+
+        Args:
+            tool_input: The input to the tool.
+            tool_call_id: The ID of the tool call.
+
+        Raises:
+            ToolException: If the tool input is invalid.
+
+        Returns:
+            the pydantic model args and kwargs.
+
+        """
         args, kwargs = super()._to_args_and_kwargs(tool_input, tool_call_id)
         # For backwards compatibility. The tool must be run with a single input
         all_args = list(args) + list(kwargs.values())
@@ -96,7 +108,17 @@ class Tool(BaseTool):
         run_manager: Optional[CallbackManagerForToolRun] = None,
         **kwargs: Any,
     ) -> Any:
-        """Use the tool."""
+        """Use the tool.
+
+        Args:
+            *args: Positional arguments to pass to the tool
+            config: Configuration for the run
+            run_manager: Optional callback manager to use for the run
+            **kwargs: Keyword arguments to pass to the tool
+
+        Returns:
+            The result of the tool execution
+        """
         if self.func:
             if run_manager and signature(self.func).parameters.get("callbacks"):
                 kwargs["callbacks"] = run_manager.get_child()
@@ -113,7 +135,17 @@ class Tool(BaseTool):
         run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
         **kwargs: Any,
     ) -> Any:
-        """Use the tool asynchronously."""
+        """Use the tool asynchronously.
+
+        Args:
+            *args: Positional arguments to pass to the tool
+            config: Configuration for the run
+            run_manager: Optional callback manager to use for the run
+            **kwargs: Keyword arguments to pass to the tool
+
+        Returns:
+            The result of the tool execution
+        """
         if self.coroutine:
             if run_manager and signature(self.coroutine).parameters.get("callbacks"):
                 kwargs["callbacks"] = run_manager.get_child()
langchain_core/tools/structured.py CHANGED
@@ -84,7 +84,17 @@ class StructuredTool(BaseTool):
         run_manager: Optional[CallbackManagerForToolRun] = None,
         **kwargs: Any,
     ) -> Any:
-        """Use the tool."""
+        """Use the tool.
+
+        Args:
+            *args: Positional arguments to pass to the tool
+            config: Configuration for the run
+            run_manager: Optional callback manager to use for the run
+            **kwargs: Keyword arguments to pass to the tool
+
+        Returns:
+            The result of the tool execution
+        """
         if self.func:
             if run_manager and signature(self.func).parameters.get("callbacks"):
                 kwargs["callbacks"] = run_manager.get_child()
@@ -101,7 +111,17 @@ class StructuredTool(BaseTool):
         run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
         **kwargs: Any,
     ) -> Any:
-        """Use the tool asynchronously."""
+        """Use the tool asynchronously.
+
+        Args:
+            *args: Positional arguments to pass to the tool
+            config: Configuration for the run
+            run_manager: Optional callback manager to use for the run
+            **kwargs: Keyword arguments to pass to the tool
+
+        Returns:
+            The result of the tool execution
+        """
         if self.coroutine:
             if run_manager and signature(self.coroutine).parameters.get("callbacks"):
                 kwargs["callbacks"] = run_manager.get_child()
@@ -164,6 +184,9 @@ class StructuredTool(BaseTool):
 
         Raises:
             ValueError: If the function is not provided.
+            ValueError: If the function does not have a docstring and description
+                is not provided.
+            TypeError: If the ``args_schema`` is not a ``BaseModel`` or dict.
 
         Examples:
 
@@ -228,7 +251,7 @@ class StructuredTool(BaseTool):
             name=name,
             func=func,
             coroutine=coroutine,
-            args_schema=args_schema,  # type: ignore[arg-type]
+            args_schema=args_schema,
             description=description_,
             return_direct=return_direct,
             response_format=response_format,
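The new `Raises` entries above concern `StructuredTool.from_function`; a brief sketch of the happy path it documents (the `multiply` function is hypothetical):

```python
from langchain_core.tools import StructuredTool


def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b


# from_function infers the name, args schema, and description from the
# function; omitting both the docstring and description= is what triggers
# the ValueError documented above.
calculator = StructuredTool.from_function(func=multiply)
calculator.invoke({"a": 6, "b": 7})  # 42
```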
langchain_core/tracers/_streaming.py CHANGED
@@ -1,15 +1,16 @@
 """Internal tracers used for stream_log and astream events implementations."""
 
-import abc
+import typing
 from collections.abc import AsyncIterator, Iterator
-from typing import TypeVar
 from uuid import UUID
 
-T = TypeVar("T")
+T = typing.TypeVar("T")
 
 
-class _StreamingCallbackHandler(abc.ABC):
-    """For internal use.
+# THIS IS USED IN LANGGRAPH.
+@typing.runtime_checkable
+class _StreamingCallbackHandler(typing.Protocol[T]):
+    """Types for streaming callback handlers.
 
     This is a common mixin that the callback handlers
     for both astream events and astream log inherit from.
@@ -18,13 +19,11 @@ class _StreamingCallbackHandler(abc.ABC):
     to produce callbacks for intermediate results.
     """
 
-    @abc.abstractmethod
     def tap_output_aiter(
         self, run_id: UUID, output: AsyncIterator[T]
     ) -> AsyncIterator[T]:
         """Used for internal astream_log and astream events implementations."""
 
-    @abc.abstractmethod
     def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]:
         """Used for internal astream_log and astream events implementations."""
 
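Moving `_StreamingCallbackHandler` from an ABC to a runtime-checkable `Protocol` lets callers (the comment says LangGraph) satisfy it structurally instead of subclassing the private class. A rough sketch of the difference, assuming the 0.3.76 Protocol version and a hypothetical `MyTap` handler:

```python
from collections.abc import AsyncIterator, Iterator
from uuid import UUID

from langchain_core.tracers._streaming import _StreamingCallbackHandler


class MyTap:
    """Hypothetical handler that does not subclass _StreamingCallbackHandler."""

    def tap_output_aiter(
        self, run_id: UUID, output: AsyncIterator[str]
    ) -> AsyncIterator[str]:
        return output

    def tap_output_iter(self, run_id: UUID, output: Iterator[str]) -> Iterator[str]:
        return output


# A runtime-checkable Protocol only verifies that the methods exist, so the
# structural isinstance check passes without any inheritance relationship.
print(isinstance(MyTap(), _StreamingCallbackHandler))  # True
```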
langchain_core/tracers/base.py CHANGED
@@ -520,11 +520,11 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC):
         return retrieval_run
 
     def __deepcopy__(self, memo: dict) -> BaseTracer:
-        """Deepcopy the tracer."""
+        """Return self."""
         return self
 
     def __copy__(self) -> BaseTracer:
-        """Copy the tracer."""
+        """Return self."""
         return self
 
 
langchain_core/tracers/context.py CHANGED
@@ -43,7 +43,11 @@ run_collector_var: ContextVar[Optional[RunCollectorCallbackHandler]] = ContextVa
 def tracing_enabled(
     session_name: str = "default",  # noqa: ARG001
 ) -> Generator[TracerSessionV1, None, None]:
-    """Throw an error because this has been replaced by tracing_v2_enabled."""
+    """Throw an error because this has been replaced by ``tracing_v2_enabled``.
+
+    Raises:
+        RuntimeError: Always, because this function is deprecated.
+    """
     msg = (
         "tracing_enabled is no longer supported. Please use tracing_enabled_v2 instead."
     )