langchain-core 1.0.0rc1__py3-none-any.whl → 1.0.0rc3__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain-core might be problematic.

Files changed (92)
  1. langchain_core/agents.py +3 -3
  2. langchain_core/caches.py +44 -48
  3. langchain_core/callbacks/base.py +5 -5
  4. langchain_core/callbacks/file.py +2 -2
  5. langchain_core/callbacks/stdout.py +1 -1
  6. langchain_core/chat_history.py +1 -1
  7. langchain_core/document_loaders/base.py +21 -21
  8. langchain_core/document_loaders/langsmith.py +2 -2
  9. langchain_core/documents/base.py +39 -39
  10. langchain_core/embeddings/fake.py +4 -2
  11. langchain_core/example_selectors/semantic_similarity.py +4 -6
  12. langchain_core/exceptions.py +3 -4
  13. langchain_core/indexing/api.py +8 -14
  14. langchain_core/language_models/__init__.py +11 -25
  15. langchain_core/language_models/_utils.py +2 -1
  16. langchain_core/language_models/base.py +7 -0
  17. langchain_core/language_models/chat_models.py +24 -25
  18. langchain_core/language_models/fake_chat_models.py +3 -3
  19. langchain_core/language_models/llms.py +4 -4
  20. langchain_core/load/dump.py +3 -4
  21. langchain_core/load/load.py +0 -9
  22. langchain_core/load/serializable.py +3 -3
  23. langchain_core/messages/ai.py +20 -22
  24. langchain_core/messages/base.py +8 -8
  25. langchain_core/messages/block_translators/__init__.py +1 -1
  26. langchain_core/messages/block_translators/anthropic.py +1 -1
  27. langchain_core/messages/block_translators/bedrock_converse.py +1 -1
  28. langchain_core/messages/block_translators/google_genai.py +3 -2
  29. langchain_core/messages/block_translators/google_vertexai.py +4 -32
  30. langchain_core/messages/block_translators/langchain_v0.py +1 -1
  31. langchain_core/messages/block_translators/openai.py +1 -1
  32. langchain_core/messages/chat.py +2 -6
  33. langchain_core/messages/content.py +34 -17
  34. langchain_core/messages/function.py +3 -7
  35. langchain_core/messages/human.py +4 -9
  36. langchain_core/messages/modifier.py +1 -1
  37. langchain_core/messages/system.py +2 -10
  38. langchain_core/messages/tool.py +30 -42
  39. langchain_core/messages/utils.py +24 -30
  40. langchain_core/output_parsers/base.py +24 -24
  41. langchain_core/output_parsers/json.py +0 -1
  42. langchain_core/output_parsers/list.py +1 -1
  43. langchain_core/output_parsers/openai_functions.py +2 -2
  44. langchain_core/output_parsers/openai_tools.py +4 -9
  45. langchain_core/output_parsers/string.py +1 -1
  46. langchain_core/outputs/generation.py +1 -1
  47. langchain_core/prompt_values.py +7 -7
  48. langchain_core/prompts/base.py +1 -1
  49. langchain_core/prompts/chat.py +12 -13
  50. langchain_core/prompts/dict.py +2 -2
  51. langchain_core/prompts/few_shot_with_templates.py +1 -1
  52. langchain_core/prompts/image.py +1 -1
  53. langchain_core/prompts/message.py +2 -2
  54. langchain_core/prompts/prompt.py +7 -8
  55. langchain_core/prompts/string.py +1 -1
  56. langchain_core/prompts/structured.py +2 -2
  57. langchain_core/rate_limiters.py +23 -29
  58. langchain_core/retrievers.py +29 -29
  59. langchain_core/runnables/base.py +15 -22
  60. langchain_core/runnables/branch.py +1 -1
  61. langchain_core/runnables/config.py +7 -7
  62. langchain_core/runnables/configurable.py +2 -2
  63. langchain_core/runnables/fallbacks.py +1 -1
  64. langchain_core/runnables/graph.py +23 -28
  65. langchain_core/runnables/graph_mermaid.py +9 -9
  66. langchain_core/runnables/graph_png.py +1 -1
  67. langchain_core/runnables/history.py +2 -2
  68. langchain_core/runnables/passthrough.py +3 -3
  69. langchain_core/runnables/router.py +1 -1
  70. langchain_core/runnables/utils.py +5 -5
  71. langchain_core/tools/base.py +56 -11
  72. langchain_core/tools/convert.py +13 -17
  73. langchain_core/tools/retriever.py +6 -6
  74. langchain_core/tools/simple.py +1 -1
  75. langchain_core/tools/structured.py +5 -10
  76. langchain_core/tracers/memory_stream.py +1 -1
  77. langchain_core/tracers/root_listeners.py +2 -2
  78. langchain_core/tracers/stdout.py +1 -2
  79. langchain_core/utils/__init__.py +1 -1
  80. langchain_core/utils/aiter.py +1 -1
  81. langchain_core/utils/function_calling.py +15 -38
  82. langchain_core/utils/input.py +1 -1
  83. langchain_core/utils/iter.py +1 -1
  84. langchain_core/utils/json.py +1 -1
  85. langchain_core/utils/strings.py +1 -1
  86. langchain_core/vectorstores/base.py +14 -25
  87. langchain_core/vectorstores/utils.py +2 -2
  88. langchain_core/version.py +1 -1
  89. {langchain_core-1.0.0rc1.dist-info → langchain_core-1.0.0rc3.dist-info}/METADATA +1 -1
  90. langchain_core-1.0.0rc3.dist-info/RECORD +172 -0
  91. langchain_core-1.0.0rc1.dist-info/RECORD +0 -172
  92. {langchain_core-1.0.0rc1.dist-info → langchain_core-1.0.0rc3.dist-info}/WHEEL +0 -0
@@ -60,10 +60,10 @@ def draw_mermaid(
  edges: List of edges, object with a source, target and data.
  first_node: Id of the first node.
  last_node: Id of the last node.
- with_styles: Whether to include styles in the graph. Defaults to `True`.
- curve_style: Curve style for the edges. Defaults to CurveStyle.LINEAR.
- node_styles: Node colors for different types. Defaults to NodeStyles().
- wrap_label_n_words: Words to wrap the edge labels. Defaults to 9.
+ with_styles: Whether to include styles in the graph.
+ curve_style: Curve style for the edges.
+ node_styles: Node colors for different types.
+ wrap_label_n_words: Words to wrap the edge labels.
  frontmatter_config: Mermaid frontmatter config.
  Can be used to customize theme and styles. Will be converted to YAML and
  added to the beginning of the mermaid graph.
@@ -287,11 +287,11 @@ def draw_mermaid_png(
  Args:
  mermaid_syntax: Mermaid graph syntax.
  output_file_path: Path to save the PNG image.
- draw_method: Method to draw the graph. Defaults to MermaidDrawMethod.API.
- background_color: Background color of the image. Defaults to "white".
- padding: Padding around the image. Defaults to 10.
- max_retries: Maximum number of retries (MermaidDrawMethod.API). Defaults to 1.
- retry_delay: Delay between retries (MermaidDrawMethod.API). Defaults to 1.0.
+ draw_method: Method to draw the graph.
+ background_color: Background color of the image.
+ padding: Padding around the image.
+ max_retries: Maximum number of retries (MermaidDrawMethod.API).
+ retry_delay: Delay between retries (MermaidDrawMethod.API).
  base_url: Base URL for the Mermaid.ink API.

  Returns:
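The two hunks above are docstring-only changes to `draw_mermaid` and `draw_mermaid_png`; behavior is unchanged. For orientation, a minimal sketch of how these helpers are usually reached through `Runnable.get_graph()` (illustrative only, not part of the diff; the example chain is invented):

```python
# Sketch: render a runnable's graph with the helpers documented above.
from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)

# Mermaid source as a string; styling arguments keep their documented defaults.
print(chain.get_graph().draw_mermaid())

# PNG rendering goes through the Mermaid.ink API by default and may need
# network access, so it is left commented out here.
# png_bytes = chain.get_graph().draw_mermaid_png(output_file_path="graph.png")
```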
@@ -105,7 +105,7 @@ class PngDrawer:
  source: The source node.
  target: The target node.
  label: The label for the edge.
- conditional: Whether the edge is conditional. Defaults to `False`.
+ conditional: Whether the edge is conditional.
  """
  viz.add_edge(
  source,
@@ -296,9 +296,9 @@ class RunnableWithMessageHistory(RunnableBindingBase): # type: ignore[no-redef]
  ```

  input_messages_key: Must be specified if the base runnable accepts a dict
- as input. Default is None.
+ as input.
  output_messages_key: Must be specified if the base runnable returns a dict
- as output. Default is None.
+ as output.
  history_messages_key: Must be specified if the base runnable accepts a dict
  as input and expects a separate key for historical messages.
  history_factory_config: Configure fields that should be passed to the
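The `RunnableWithMessageHistory` hunk above only trims "Default is None" from the docstring. As a reminder of what `input_messages_key` and `history_messages_key` select, here is a hedged sketch (not part of the diff; the prompt, session store, and fake chat model are invented for illustration):

```python
# Sketch: wrapping a dict-input chain so history is threaded per session.
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.language_models import FakeListChatModel
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory

store: dict[str, InMemoryChatMessageHistory] = {}


def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
    return store.setdefault(session_id, InMemoryChatMessageHistory())


prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are helpful."),
        MessagesPlaceholder("history"),
        ("human", "{question}"),
    ]
)
chain = prompt | FakeListChatModel(responses=["Hello!"])

with_history = RunnableWithMessageHistory(
    chain,
    get_session_history,
    input_messages_key="question",   # dict input: which key carries the new user message
    history_messages_key="history",  # where past messages get injected into the prompt
)
print(
    with_history.invoke(
        {"question": "hi"},
        config={"configurable": {"session_id": "demo"}},
    )
)
```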
@@ -185,7 +185,7 @@ class RunnablePassthrough(RunnableSerializable[Other, Other]):

  @classmethod
  def get_lc_namespace(cls) -> list[str]:
- """Get the namespace of the langchain object.
+ """Get the namespace of the LangChain object.

  Returns:
  `["langchain", "schema", "runnable"]`
@@ -409,7 +409,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
  @classmethod
  @override
  def get_lc_namespace(cls) -> list[str]:
- """Get the namespace of the langchain object.
+ """Get the namespace of the LangChain object.

  Returns:
  `["langchain", "schema", "runnable"]`
@@ -714,7 +714,7 @@ class RunnablePick(RunnableSerializable[dict[str, Any], dict[str, Any]]):
  @classmethod
  @override
  def get_lc_namespace(cls) -> list[str]:
- """Get the namespace of the langchain object.
+ """Get the namespace of the LangChain object.

  Returns:
  `["langchain", "schema", "runnable"]`
@@ -96,7 +96,7 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
  @classmethod
  @override
  def get_lc_namespace(cls) -> list[str]:
- """Get the namespace of the langchain object.
+ """Get the namespace of the LangChain object.

  Returns:
  `["langchain", "schema", "runnable"]`
@@ -136,7 +136,7 @@ def coro_with_context(
  Args:
  coro: The coroutine to await.
  context: The context to use.
- create_task: Whether to create a task. Defaults to `False`.
+ create_task: Whether to create a task.

  Returns:
  The coroutine with the context.
@@ -558,7 +558,7 @@ class ConfigurableField(NamedTuple):
  annotation: Any | None = None
  """The annotation of the field. """
  is_shared: bool = False
- """Whether the field is shared. Defaults to `False`."""
+ """Whether the field is shared."""

  @override
  def __hash__(self) -> int:
@@ -579,7 +579,7 @@ class ConfigurableFieldSingleOption(NamedTuple):
  description: str | None = None
  """The description of the field. """
  is_shared: bool = False
- """Whether the field is shared. Defaults to `False`."""
+ """Whether the field is shared."""

  @override
  def __hash__(self) -> int:
@@ -600,7 +600,7 @@ class ConfigurableFieldMultiOption(NamedTuple):
  description: str | None = None
  """The description of the field. """
  is_shared: bool = False
- """Whether the field is shared. Defaults to `False`."""
+ """Whether the field is shared."""

  @override
  def __hash__(self) -> int:
@@ -626,7 +626,7 @@ class ConfigurableFieldSpec(NamedTuple):
  default: Any = None
  """The default value for the field. """
  is_shared: bool = False
- """Whether the field is shared. Defaults to `False`."""
+ """Whether the field is shared."""
  dependencies: list[str] | None = None
  """The dependencies of the field. """
@@ -293,10 +293,9 @@ def create_schema_from_function(
  filter_args: Optional list of arguments to exclude from the schema.
  Defaults to `FILTERED_ARGS`.
  parse_docstring: Whether to parse the function's docstring for descriptions
- for each argument. Defaults to `False`.
+ for each argument.
  error_on_invalid_docstring: if `parse_docstring` is provided, configure
  whether to raise `ValueError` on invalid Google Style docstrings.
- Defaults to `False`.
  include_injected: Whether to include injected arguments in the schema.
  Defaults to `True`, since we want to include them in the schema
  when *validating* tool inputs.
@@ -481,11 +480,11 @@ class ChildTool(BaseTool):
  """Handle the content of the ValidationError thrown."""

  response_format: Literal["content", "content_and_artifact"] = "content"
- """The tool response format. Defaults to 'content'.
+ """The tool response format.

- If "content" then the output of the tool is interpreted as the contents of a
- ToolMessage. If "content_and_artifact" then the output is expected to be a
- two-tuple corresponding to the (content, artifact) of a ToolMessage.
+ If `"content"` then the output of the tool is interpreted as the contents of a
+ ToolMessage. If `"content_and_artifact"` then the output is expected to be a
+ two-tuple corresponding to the (content, artifact) of a `ToolMessage`.
  """

  def __init__(self, **kwargs: Any) -> None:
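The `response_format` hunk above only reformats the docstring. A hedged sketch of what the `"content_and_artifact"` contract looks like in practice (not part of the diff; the tool name and values are invented):

```python
# Sketch: a tool whose return value is split into message content and artifact.
from langchain_core.tools import tool


@tool(response_format="content_and_artifact")
def sample_numbers(n: int) -> tuple[str, list[int]]:
    """Return n sample numbers."""
    numbers = list(range(n))
    return f"Generated {len(numbers)} numbers", numbers


# Invoked with a full tool call, the artifact rides along on the ToolMessage.
msg = sample_numbers.invoke(
    {"name": "sample_numbers", "args": {"n": 3}, "id": "call_1", "type": "tool_call"}
)
print(msg.content)   # "Generated 3 numbers"
print(msg.artifact)  # [0, 1, 2]
```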
@@ -768,8 +767,8 @@ class ChildTool(BaseTool):
  Args:
  tool_input: The input to the tool.
  verbose: Whether to log the tool's progress.
- start_color: The color to use when starting the tool. Defaults to 'green'.
- color: The color to use when ending the tool. Defaults to 'green'.
+ start_color: The color to use when starting the tool.
+ color: The color to use when ending the tool.
  callbacks: Callbacks to be called during tool execution.
  tags: Optional list of tags associated with the tool.
  metadata: Optional metadata associated with the tool.
@@ -880,8 +879,8 @@ class ChildTool(BaseTool):
  Args:
  tool_input: The input to the tool.
  verbose: Whether to log the tool's progress.
- start_color: The color to use when starting the tool. Defaults to 'green'.
- color: The color to use when ending the tool. Defaults to 'green'.
+ start_color: The color to use when starting the tool.
+ color: The color to use when ending the tool.
  callbacks: Callbacks to be called during tool execution.
  tags: Optional list of tags associated with the tool.
  metadata: Optional metadata associated with the tool.
@@ -1211,6 +1210,26 @@ class InjectedToolArg:
  """


+ class _DirectlyInjectedToolArg:
+ """Annotation for tool arguments that are injected at runtime.
+
+ Injected via direct type annotation, rather than annotated metadata.
+
+ For example, ToolRuntime is a directly injected argument.
+ Note the direct annotation rather than the verbose alternative:
+ Annotated[ToolRuntime, InjectedRuntime]
+ ```python
+ from langchain_core.tools import tool, ToolRuntime
+
+
+ @tool
+ def foo(x: int, runtime: ToolRuntime) -> str:
+ # use runtime.state, runtime.context, runtime.store, etc.
+ ...
+ ```
+ """
+
+
  class InjectedToolCallId(InjectedToolArg):
  """Annotation for injecting the tool call ID.

@@ -1238,6 +1257,24 @@ class InjectedToolCallId(InjectedToolArg):
  """


+ def _is_directly_injected_arg_type(type_: Any) -> bool:
+ """Check if a type annotation indicates a directly injected argument.
+
+ This is currently only used for ToolRuntime.
+ Checks if either the annotation itself is a subclass of _DirectlyInjectedToolArg
+ or the origin of the annotation is a subclass of _DirectlyInjectedToolArg.
+
+ Ex: ToolRuntime or ToolRuntime[ContextT, StateT] would both return True.
+ """
+ return (
+ isinstance(type_, type) and issubclass(type_, _DirectlyInjectedToolArg)
+ ) or (
+ (origin := get_origin(type_)) is not None
+ and isinstance(origin, type)
+ and issubclass(origin, _DirectlyInjectedToolArg)
+ )
+
+
  def _is_injected_arg_type(
  type_: type | TypeVar, injected_type: type[InjectedToolArg] | None = None
  ) -> bool:
@@ -1250,7 +1287,15 @@ def _is_injected_arg_type(
  Returns:
  `True` if the type is an injected argument, `False` otherwise.
  """
- injected_type = injected_type or InjectedToolArg
+ if injected_type is None:
+ # if no injected type is specified,
+ # check if the type is a directly injected argument
+ if _is_directly_injected_arg_type(type_):
+ return True
+ injected_type = InjectedToolArg
+
+ # if the type is an Annotated type, check if annotated metadata
+ # is an intance or subclass of the injected type
  return any(
  isinstance(arg, injected_type)
  or (isinstance(arg, type) and issubclass(arg, injected_type))
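The new `_DirectlyInjectedToolArg` marker and `_is_directly_injected_arg_type` helper above hinge on a standard `typing.get_origin` check so that both a bare class and its parametrized form are recognized. A standalone sketch of that check using stand-in classes (`Marker` and `Runtime` are hypothetical names for this illustration, not langchain-core APIs):

```python
# Sketch: the typing-level check behind "directly injected" detection.
from typing import Generic, TypeVar, get_origin

T = TypeVar("T")


class Marker:  # plays the role of _DirectlyInjectedToolArg
    pass


class Runtime(Marker, Generic[T]):  # plays the role of ToolRuntime
    pass


def is_directly_injected(annotation: object) -> bool:
    # Bare class (Runtime) or parametrized generic (Runtime[int]) both count.
    if isinstance(annotation, type) and issubclass(annotation, Marker):
        return True
    origin = get_origin(annotation)
    return isinstance(origin, type) and issubclass(origin, Marker)


print(is_directly_injected(Runtime))       # True
print(is_directly_injected(Runtime[int]))  # True
print(is_directly_injected(int))           # False
```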
@@ -81,7 +81,7 @@ def tool(
  parse_docstring: bool = False,
  error_on_invalid_docstring: bool = True,
  ) -> BaseTool | Callable[[Callable | Runnable], BaseTool]:
- """Make tools out of functions, can be used with or without arguments.
+ """Make tools out of Python functions, can be used with or without arguments.

  Args:
  name_or_callable: Optional name of the tool or the callable to be
@@ -93,30 +93,26 @@ def tool(

  - `description` argument
  (used even if docstring and/or `args_schema` are provided)
- - tool function docstring
+ - Tool function docstring
  (used even if `args_schema` is provided)
  - `args_schema` description
  (used only if `description` / docstring are not provided)
  *args: Extra positional arguments. Must be empty.
  return_direct: Whether to return directly from the tool rather
- than continuing the agent loop. Defaults to `False`.
- args_schema: optional argument schema for user to specify.
+ than continuing the agent loop.
+ args_schema: Optional argument schema for user to specify.

  infer_schema: Whether to infer the schema of the arguments from
  the function's signature. This also makes the resultant tool
  accept a dictionary input to its `run()` function.
- Defaults to `True`.
- response_format: The tool response format. If "content" then the output of
- the tool is interpreted as the contents of a ToolMessage. If
- "content_and_artifact" then the output is expected to be a two-tuple
- corresponding to the (content, artifact) of a ToolMessage.
- Defaults to "content".
+ response_format: The tool response format. If `"content"` then the output of
+ the tool is interpreted as the contents of a `ToolMessage`. If
+ `"content_and_artifact"` then the output is expected to be a two-tuple
+ corresponding to the `(content, artifact)` of a `ToolMessage`.
  parse_docstring: if `infer_schema` and `parse_docstring`, will attempt to
  parse parameter descriptions from Google Style function docstrings.
- Defaults to `False`.
  error_on_invalid_docstring: if `parse_docstring` is provided, configure
- whether to raise ValueError on invalid Google Style docstrings.
- Defaults to `True`.
+ whether to raise `ValueError` on invalid Google Style docstrings.

  Raises:
  ValueError: If too many positional arguments are provided.
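The `tool()` docstring hunks above tighten wording around `parse_docstring` and `response_format` without changing behavior. A short, hedged example of the docstring-parsing path (not part of the diff; the function is invented):

```python
# Sketch: Google-style docstring parsing with the @tool decorator.
from langchain_core.tools import tool


@tool(parse_docstring=True)
def multiply(a: int, b: int) -> int:
    """Multiply two integers.

    Args:
        a: The first factor.
        b: The second factor.
    """
    return a * b


# Parsed parameter descriptions end up in the generated args schema.
print(multiply.description)  # "Multiply two integers."
print(multiply.args)         # includes per-argument descriptions
```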
@@ -124,8 +120,8 @@ def tool(
  ValueError: If the first argument is not a string or callable with
  a `__name__` attribute.
  ValueError: If the function does not have a docstring and description
- is not provided and `infer_schema` is False.
- ValueError: If `parse_docstring` is True and the function has an invalid
+ is not provided and `infer_schema` is `False`.
+ ValueError: If `parse_docstring` is `True` and the function has an invalid
  Google-style docstring and `error_on_invalid_docstring` is True.
  ValueError: If a Runnable is provided that does not have an object schema.

@@ -133,7 +129,7 @@ def tool(
  The tool.

  Requires:
- - Function must be of type (str) -> str
+ - Function must be of type `(str) -> str`
  - Function must have a docstring

  Examples:
@@ -197,7 +193,7 @@ def tool(
  Note that parsing by default will raise `ValueError` if the docstring
  is considered invalid. A docstring is considered invalid if it contains
  arguments not in the function signature, or is unable to be parsed into
- a summary and "Args:" blocks. Examples below:
+ a summary and `"Args:"` blocks. Examples below:

  ```python
  # No args section
@@ -82,12 +82,12 @@ def create_retriever_tool(
  description: The description for the tool. This will be passed to the language
  model, so should be descriptive.
  document_prompt: The prompt to use for the document.
- document_separator: The separator to use between documents. Defaults to "\n\n".
- response_format: The tool response format. If "content" then the output of
- the tool is interpreted as the contents of a ToolMessage. If
- "content_and_artifact" then the output is expected to be a two-tuple
- corresponding to the (content, artifact) of a ToolMessage (artifact
- being a list of documents in this case). Defaults to "content".
+ document_separator: The separator to use between documents.
+ response_format: The tool response format. If `"content"` then the output of
+ the tool is interpreted as the contents of a `ToolMessage`. If
+ `"content_and_artifact"` then the output is expected to be a two-tuple
+ corresponding to the `(content, artifact)` of a `ToolMessage` (artifact
+ being a list of documents in this case).

  Returns:
  Tool class to pass to an agent.
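For the `create_retriever_tool` hunk above, a hedged sketch of the `"content_and_artifact"` variant using the in-memory vector store and fake embeddings bundled with langchain-core (illustrative; the document text and tool name are invented):

```python
# Sketch: retriever tool whose artifact is the list of retrieved Documents.
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.tools import create_retriever_tool
from langchain_core.vectorstores import InMemoryVectorStore

store = InMemoryVectorStore.from_texts(
    ["LangChain is a framework for LLM apps."],
    embedding=DeterministicFakeEmbedding(size=8),
)
retriever_tool = create_retriever_tool(
    store.as_retriever(),
    name="search_docs",
    description="Search the project documentation.",
    response_format="content_and_artifact",  # artifact becomes list[Document]
)
result = retriever_tool.invoke(
    {"name": "search_docs", "args": {"query": "LangChain"}, "id": "call_1", "type": "tool_call"}
)
print(result.content)
print(result.artifact)
```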
@@ -176,7 +176,7 @@ class Tool(BaseTool):
  func: The function to create the tool from.
  name: The name of the tool.
  description: The description of the tool.
- return_direct: Whether to return the output directly. Defaults to `False`.
+ return_direct: Whether to return the output directly.
  args_schema: The schema of the tool's input arguments.
  coroutine: The asynchronous version of the function.
  **kwargs: Additional arguments to pass to the tool.
@@ -149,21 +149,16 @@ class StructuredTool(BaseTool):
  description: The description of the tool.
  Defaults to the function docstring.
  return_direct: Whether to return the result directly or as a callback.
- Defaults to `False`.
  args_schema: The schema of the tool's input arguments.
  infer_schema: Whether to infer the schema from the function's signature.
- Defaults to `True`.
- response_format: The tool response format. If "content" then the output of
- the tool is interpreted as the contents of a ToolMessage. If
- "content_and_artifact" then the output is expected to be a two-tuple
- corresponding to the (content, artifact) of a ToolMessage.
- Defaults to "content".
+ response_format: The tool response format. If `"content"` then the output of
+ the tool is interpreted as the contents of a `ToolMessage`. If
+ `"content_and_artifact"` then the output is expected to be a two-tuple
+ corresponding to the `(content, artifact)` of a `ToolMessage`.
  parse_docstring: if `infer_schema` and `parse_docstring`, will attempt
  to parse parameter descriptions from Google Style function docstrings.
- Defaults to `False`.
  error_on_invalid_docstring: if `parse_docstring` is provided, configure
- whether to raise ValueError on invalid Google Style docstrings.
- Defaults to `False`.
+ whether to raise `ValueError` on invalid Google Style docstrings.
  **kwargs: Additional arguments to pass to the tool

  Returns:
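The `StructuredTool.from_function` hunk above likewise only trims default-value notes from the docstring. A minimal usage sketch (not part of the diff; the function is invented):

```python
# Sketch: building a StructuredTool directly from a typed function.
from langchain_core.tools import StructuredTool


def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


adder = StructuredTool.from_function(
    func=add,
    name="add",
    description="Add two integers.",
)
print(adder.invoke({"a": 2, "b": 3}))  # 5
```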
@@ -5,7 +5,7 @@ channel. The writer and reader can be in the same event loop or in different eve
  loops. When they're in different event loops, they will also be in different
  threads.

- This is useful in situations when there's a mix of synchronous and asynchronous
+ Useful in situations when there's a mix of synchronous and asynchronous
  used in the code.
  """

@@ -24,7 +24,7 @@ class RootListenersTracer(BaseTracer):
  """Tracer that calls listeners on run start, end, and error."""

  log_missing_parent = False
- """Whether to log a warning if the parent is missing. Default is False."""
+ """Whether to log a warning if the parent is missing."""

  def __init__(
  self,
@@ -79,7 +79,7 @@ class AsyncRootListenersTracer(AsyncBaseTracer):
  """Async Tracer that calls listeners on run start, end, and error."""

  log_missing_parent = False
- """Whether to log a warning if the parent is missing. Default is False."""
+ """Whether to log a warning if the parent is missing."""

  def __init__(
  self,
@@ -49,8 +49,7 @@ class FunctionCallbackHandler(BaseTracer):
  """Tracer that calls a function with a single str parameter."""

  name: str = "function_callback_handler"
- """The name of the tracer. This is used to identify the tracer in the logs.
- Default is "function_callback_handler"."""
+ """The name of the tracer. This is used to identify the tracer in the logs."""

  def __init__(self, function: Callable[[str], None], **kwargs: Any) -> None:
  """Create a FunctionCallbackHandler.
@@ -1,4 +1,4 @@
- """**Utility functions** for LangChain.
+ """Utility functions for LangChain.

  These functions do not depend on any other LangChain module.
  """
@@ -201,7 +201,7 @@ class Tee(Generic[T]):

  Args:
  iterable: The iterable to split.
- n: The number of iterators to create. Defaults to 2.
+ n: The number of iterators to create.
  lock: The lock to synchronise access to the shared buffers.

  """
@@ -114,7 +114,7 @@ def _convert_json_schema_to_openai_function(
  used.
  description: The description of the function. If not provided, the description
  of the schema will be used.
- rm_titles: Whether to remove titles from the schema. Defaults to `True`.
+ rm_titles: Whether to remove titles from the schema.

  Returns:
  The function description.
@@ -148,7 +148,7 @@ def _convert_pydantic_to_openai_function(
  used.
  description: The description of the function. If not provided, the description
  of the schema will be used.
- rm_titles: Whether to remove titles from the schema. Defaults to `True`.
+ rm_titles: Whether to remove titles from the schema.

  Raises:
  TypeError: If the model is not a Pydantic model.
@@ -334,11 +334,11 @@ def convert_to_openai_function(

  Args:
  function:
- A dictionary, Pydantic BaseModel class, TypedDict class, a LangChain
- Tool object, or a Python function. If a dictionary is passed in, it is
+ A dictionary, Pydantic `BaseModel` class, `TypedDict` class, a LangChain
+ `Tool` object, or a Python function. If a dictionary is passed in, it is
  assumed to already be a valid OpenAI function, a JSON schema with
- top-level 'title' key specified, an Anthropic format
- tool, or an Amazon Bedrock Converse format tool.
+ top-level `title` key specified, an Anthropic format tool, or an Amazon
+ Bedrock Converse format tool.
  strict:
  If `True`, model output is guaranteed to exactly match the JSON Schema
  provided in the function definition. If `None`, `strict` argument will not
@@ -351,17 +351,8 @@ def convert_to_openai_function(
  Raises:
  ValueError: If function is not in a supported format.

- !!! warning "Behavior changed in 0.2.29"
- `strict` arg added.
-
- !!! warning "Behavior changed in 0.3.13"
- Support for Anthropic format tools added.
-
- !!! warning "Behavior changed in 0.3.14"
- Support for Amazon Bedrock Converse format tools added.
-
  !!! warning "Behavior changed in 0.3.16"
- 'description' and 'parameters' keys are now optional. Only 'name' is
+ `description` and `parameters` keys are now optional. Only `name` is
  required and guaranteed to be part of the output.
  """
  # an Anthropic format tool
@@ -459,16 +450,14 @@ def convert_to_openai_tool(
  ) -> dict[str, Any]:
  """Convert a tool-like object to an OpenAI tool schema.

- OpenAI tool schema reference:
- https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools
+ [OpenAI tool schema reference](https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools)

  Args:
  tool:
- Either a dictionary, a pydantic.BaseModel class, Python function, or
- BaseTool. If a dictionary is passed in, it is
- assumed to already be a valid OpenAI function, a JSON schema with
- top-level 'title' key specified, an Anthropic format
- tool, or an Amazon Bedrock Converse format tool.
+ Either a dictionary, a `pydantic.BaseModel` class, Python function, or
+ `BaseTool`. If a dictionary is passed in, it is assumed to already be a
+ valid OpenAI function, a JSON schema with top-level `title` key specified,
+ an Anthropic format tool, or an Amazon Bedrock Converse format tool.
  strict:
  If `True`, model output is guaranteed to exactly match the JSON Schema
  provided in the function definition. If `None`, `strict` argument will not
@@ -478,26 +467,14 @@ def convert_to_openai_tool(
  A dict version of the passed in tool which is compatible with the
  OpenAI tool-calling API.

- !!! warning "Behavior changed in 0.2.29"
- `strict` arg added.
-
- !!! warning "Behavior changed in 0.3.13"
- Support for Anthropic format tools added.
-
- !!! warning "Behavior changed in 0.3.14"
- Support for Amazon Bedrock Converse format tools added.
-
  !!! warning "Behavior changed in 0.3.16"
- 'description' and 'parameters' keys are now optional. Only 'name' is
+ `description` and `parameters` keys are now optional. Only `name` is
  required and guaranteed to be part of the output.

  !!! warning "Behavior changed in 0.3.44"
  Return OpenAI Responses API-style tools unchanged. This includes
- any dict with "type" in "file_search", "function", "computer_use_preview",
- "web_search_preview".
-
- !!! warning "Behavior changed in 0.3.61"
- Added support for OpenAI's built-in code interpreter and remote MCP tools.
+ any dict with `"type"` in `"file_search"`, `"function"`,
+ `"computer_use_preview"`, `"web_search_preview"`.

  !!! warning "Behavior changed in 0.3.63"
  Added support for OpenAI's image generation built-in tool.
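The `convert_to_openai_function` / `convert_to_openai_tool` hunks above are docstring cleanups and drop several old changelog admonitions. A hedged sketch of the conversion itself, using an invented function:

```python
# Sketch: converting a plain Python function to the OpenAI tool schema.
from langchain_core.utils.function_calling import convert_to_openai_tool


def get_weather(city: str, unit: str = "celsius") -> str:
    """Get the current weather for a city."""
    return f"Sunny in {city}"


schema = convert_to_openai_tool(get_weather)
# Roughly: {"type": "function", "function": {"name": "get_weather", ...}}
print(schema["function"]["name"])
```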
@@ -66,7 +66,7 @@ def print_text(
  Args:
  text: The text to print.
  color: The color to use.
- end: The end character to use. Defaults to "".
+ end: The end character to use.
  file: The file to write to.
  """
  text_to_print = get_colored_text(text, color) if color else text
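For `print_text`, a two-line sketch of the arguments documented above (illustrative; the color name is taken from langchain-core's small color map):

```python
# Sketch: colored console output with the small print_text helper.
from langchain_core.utils.input import print_text

print_text("step one", color="blue", end="\n")
print_text("step two")  # `end` defaults to "", so no trailing newline
```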
@@ -137,7 +137,7 @@ class Tee(Generic[T]):

  Args:
  iterable: The iterable to split.
- n: The number of iterators to create. Defaults to 2.
+ n: The number of iterators to create.
  lock: The lock to synchronise access to the shared buffers.

  """
@@ -51,7 +51,7 @@ def parse_partial_json(s: str, *, strict: bool = False) -> Any:

  Args:
  s: The JSON string to parse.
- strict: Whether to use strict parsing. Defaults to `False`.
+ strict: Whether to use strict parsing.

  Returns:
  The parsed JSON object as a Python dictionary.
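For `parse_partial_json`, a short sketch of the typical streaming use case (illustrative input string):

```python
# Sketch: tolerant parsing of the truncated JSON that streamed output produces.
from langchain_core.utils.json import parse_partial_json

chunk = '{"name": "get_weather", "args": {"city": "Par'
print(parse_partial_json(chunk))
# {'name': 'get_weather', 'args': {'city': 'Par'}}
```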
@@ -57,7 +57,7 @@ def sanitize_for_postgres(text: str, replacement: str = "") -> str:

  Args:
  text: The text to sanitize.
- replacement: String to replace NUL bytes with. Defaults to empty string.
+ replacement: String to replace NUL bytes with.

  Returns:
  The sanitized text with NUL bytes removed or replaced.
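And for `sanitize_for_postgres`, a sketch of the NUL-byte handling described above (the expected outputs in the comments are assumptions based on the documented behavior):

```python
# Sketch: stripping NUL bytes before storing text in PostgreSQL.
from langchain_core.utils.strings import sanitize_for_postgres

raw = "hello\x00world"
print(sanitize_for_postgres(raw))       # "helloworld"
print(sanitize_for_postgres(raw, " "))  # "hello world"
```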