langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/__init__.py +1 -1
- langchain_core/_api/__init__.py +3 -4
- langchain_core/_api/beta_decorator.py +45 -70
- langchain_core/_api/deprecation.py +80 -80
- langchain_core/_api/path.py +22 -8
- langchain_core/_import_utils.py +10 -4
- langchain_core/agents.py +25 -21
- langchain_core/caches.py +53 -63
- langchain_core/callbacks/__init__.py +1 -8
- langchain_core/callbacks/base.py +341 -348
- langchain_core/callbacks/file.py +55 -44
- langchain_core/callbacks/manager.py +546 -683
- langchain_core/callbacks/stdout.py +29 -30
- langchain_core/callbacks/streaming_stdout.py +35 -36
- langchain_core/callbacks/usage.py +65 -70
- langchain_core/chat_history.py +48 -55
- langchain_core/document_loaders/base.py +46 -21
- langchain_core/document_loaders/langsmith.py +39 -36
- langchain_core/documents/__init__.py +0 -1
- langchain_core/documents/base.py +96 -74
- langchain_core/documents/compressor.py +12 -9
- langchain_core/documents/transformers.py +29 -28
- langchain_core/embeddings/fake.py +56 -57
- langchain_core/env.py +2 -3
- langchain_core/example_selectors/base.py +12 -0
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/example_selectors/semantic_similarity.py +21 -25
- langchain_core/exceptions.py +15 -9
- langchain_core/globals.py +4 -163
- langchain_core/indexing/api.py +132 -125
- langchain_core/indexing/base.py +64 -67
- langchain_core/indexing/in_memory.py +26 -6
- langchain_core/language_models/__init__.py +15 -27
- langchain_core/language_models/_utils.py +267 -117
- langchain_core/language_models/base.py +92 -177
- langchain_core/language_models/chat_models.py +547 -407
- langchain_core/language_models/fake.py +11 -11
- langchain_core/language_models/fake_chat_models.py +72 -118
- langchain_core/language_models/llms.py +168 -242
- langchain_core/load/dump.py +8 -11
- langchain_core/load/load.py +32 -28
- langchain_core/load/mapping.py +2 -4
- langchain_core/load/serializable.py +50 -56
- langchain_core/messages/__init__.py +36 -51
- langchain_core/messages/ai.py +377 -150
- langchain_core/messages/base.py +239 -47
- langchain_core/messages/block_translators/__init__.py +111 -0
- langchain_core/messages/block_translators/anthropic.py +470 -0
- langchain_core/messages/block_translators/bedrock.py +94 -0
- langchain_core/messages/block_translators/bedrock_converse.py +297 -0
- langchain_core/messages/block_translators/google_genai.py +530 -0
- langchain_core/messages/block_translators/google_vertexai.py +21 -0
- langchain_core/messages/block_translators/groq.py +143 -0
- langchain_core/messages/block_translators/langchain_v0.py +301 -0
- langchain_core/messages/block_translators/openai.py +1010 -0
- langchain_core/messages/chat.py +2 -3
- langchain_core/messages/content.py +1423 -0
- langchain_core/messages/function.py +7 -7
- langchain_core/messages/human.py +44 -38
- langchain_core/messages/modifier.py +3 -2
- langchain_core/messages/system.py +40 -27
- langchain_core/messages/tool.py +160 -58
- langchain_core/messages/utils.py +527 -638
- langchain_core/output_parsers/__init__.py +1 -14
- langchain_core/output_parsers/base.py +68 -104
- langchain_core/output_parsers/json.py +13 -17
- langchain_core/output_parsers/list.py +11 -33
- langchain_core/output_parsers/openai_functions.py +56 -74
- langchain_core/output_parsers/openai_tools.py +68 -109
- langchain_core/output_parsers/pydantic.py +15 -13
- langchain_core/output_parsers/string.py +6 -2
- langchain_core/output_parsers/transform.py +17 -60
- langchain_core/output_parsers/xml.py +34 -44
- langchain_core/outputs/__init__.py +1 -1
- langchain_core/outputs/chat_generation.py +26 -11
- langchain_core/outputs/chat_result.py +1 -3
- langchain_core/outputs/generation.py +17 -6
- langchain_core/outputs/llm_result.py +15 -8
- langchain_core/prompt_values.py +29 -123
- langchain_core/prompts/__init__.py +3 -27
- langchain_core/prompts/base.py +48 -63
- langchain_core/prompts/chat.py +259 -288
- langchain_core/prompts/dict.py +19 -11
- langchain_core/prompts/few_shot.py +84 -90
- langchain_core/prompts/few_shot_with_templates.py +14 -12
- langchain_core/prompts/image.py +19 -14
- langchain_core/prompts/loading.py +6 -8
- langchain_core/prompts/message.py +7 -8
- langchain_core/prompts/prompt.py +42 -43
- langchain_core/prompts/string.py +37 -16
- langchain_core/prompts/structured.py +43 -46
- langchain_core/rate_limiters.py +51 -60
- langchain_core/retrievers.py +52 -192
- langchain_core/runnables/base.py +1727 -1683
- langchain_core/runnables/branch.py +52 -73
- langchain_core/runnables/config.py +89 -103
- langchain_core/runnables/configurable.py +128 -130
- langchain_core/runnables/fallbacks.py +93 -82
- langchain_core/runnables/graph.py +127 -127
- langchain_core/runnables/graph_ascii.py +63 -41
- langchain_core/runnables/graph_mermaid.py +87 -70
- langchain_core/runnables/graph_png.py +31 -36
- langchain_core/runnables/history.py +145 -161
- langchain_core/runnables/passthrough.py +141 -144
- langchain_core/runnables/retry.py +84 -68
- langchain_core/runnables/router.py +33 -37
- langchain_core/runnables/schema.py +79 -72
- langchain_core/runnables/utils.py +95 -139
- langchain_core/stores.py +85 -131
- langchain_core/structured_query.py +11 -15
- langchain_core/sys_info.py +31 -32
- langchain_core/tools/__init__.py +1 -14
- langchain_core/tools/base.py +221 -247
- langchain_core/tools/convert.py +144 -161
- langchain_core/tools/render.py +10 -10
- langchain_core/tools/retriever.py +12 -19
- langchain_core/tools/simple.py +52 -29
- langchain_core/tools/structured.py +56 -60
- langchain_core/tracers/__init__.py +1 -9
- langchain_core/tracers/_streaming.py +6 -7
- langchain_core/tracers/base.py +103 -112
- langchain_core/tracers/context.py +29 -48
- langchain_core/tracers/core.py +142 -105
- langchain_core/tracers/evaluation.py +30 -34
- langchain_core/tracers/event_stream.py +162 -117
- langchain_core/tracers/langchain.py +34 -36
- langchain_core/tracers/log_stream.py +87 -49
- langchain_core/tracers/memory_stream.py +3 -3
- langchain_core/tracers/root_listeners.py +18 -34
- langchain_core/tracers/run_collector.py +8 -20
- langchain_core/tracers/schemas.py +0 -125
- langchain_core/tracers/stdout.py +3 -3
- langchain_core/utils/__init__.py +1 -4
- langchain_core/utils/_merge.py +47 -9
- langchain_core/utils/aiter.py +70 -66
- langchain_core/utils/env.py +12 -9
- langchain_core/utils/function_calling.py +139 -206
- langchain_core/utils/html.py +7 -8
- langchain_core/utils/input.py +6 -6
- langchain_core/utils/interactive_env.py +6 -2
- langchain_core/utils/iter.py +48 -45
- langchain_core/utils/json.py +14 -4
- langchain_core/utils/json_schema.py +159 -43
- langchain_core/utils/mustache.py +32 -25
- langchain_core/utils/pydantic.py +67 -40
- langchain_core/utils/strings.py +5 -5
- langchain_core/utils/usage.py +1 -1
- langchain_core/utils/utils.py +104 -62
- langchain_core/vectorstores/base.py +131 -179
- langchain_core/vectorstores/in_memory.py +113 -182
- langchain_core/vectorstores/utils.py +23 -17
- langchain_core/version.py +1 -1
- langchain_core-1.0.0.dist-info/METADATA +68 -0
- langchain_core-1.0.0.dist-info/RECORD +172 -0
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
- langchain_core/beta/__init__.py +0 -1
- langchain_core/beta/runnables/__init__.py +0 -1
- langchain_core/beta/runnables/context.py +0 -448
- langchain_core/memory.py +0 -116
- langchain_core/messages/content_blocks.py +0 -1435
- langchain_core/prompts/pipeline.py +0 -133
- langchain_core/pydantic_v1/__init__.py +0 -30
- langchain_core/pydantic_v1/dataclasses.py +0 -23
- langchain_core/pydantic_v1/main.py +0 -23
- langchain_core/tracers/langchain_v1.py +0 -23
- langchain_core/utils/loading.py +0 -31
- langchain_core/v1/__init__.py +0 -1
- langchain_core/v1/chat_models.py +0 -1047
- langchain_core/v1/messages.py +0 -755
- langchain_core-0.4.0.dev0.dist-info/METADATA +0 -108
- langchain_core-0.4.0.dev0.dist-info/RECORD +0 -177
- langchain_core-0.4.0.dev0.dist-info/entry_points.txt +0 -4
langchain_core/utils/function_calling.py
CHANGED

@@ -8,22 +8,26 @@ import logging
 import types
 import typing
 import uuid
+from collections.abc import Callable
 from typing import (
     TYPE_CHECKING,
     Annotated,
     Any,
-    Callable,
     Literal,
-    Optional,
     Union,
     cast,
+    get_args,
+    get_origin,
 )
 
 from pydantic import BaseModel
 from pydantic.v1 import BaseModel as BaseModelV1
-from
+from pydantic.v1 import Field as Field_v1
+from pydantic.v1 import create_model as create_model_v1
+from typing_extensions import TypedDict, is_typeddict
 
-
+import langchain_core
+from langchain_core._api import beta
 from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
 from langchain_core.utils.json_schema import dereference_refs
 from langchain_core.utils.pydantic import is_basemodel_subclass
@@ -68,11 +72,11 @@ def _rm_titles(kv: dict, prev_key: str = "") -> dict:
     except when a "title" appears within a property definition under "properties".
 
     Args:
-        kv
-        prev_key
+        kv: The input JSON schema as a dictionary.
+        prev_key: The key from the parent dictionary, used to identify context.
 
     Returns:
-
+        A new dictionary with appropriate "title" fields removed.
     """
     new_kv = {}
 
@@ -98,8 +102,8 @@ def _rm_titles(kv: dict, prev_key: str = "") -> dict:
 def _convert_json_schema_to_openai_function(
     schema: dict,
     *,
-    name:
-    description:
+    name: str | None = None,
+    description: str | None = None,
     rm_titles: bool = True,
 ) -> FunctionDescription:
     """Converts a Pydantic model to a function description for the OpenAI API.
@@ -110,7 +114,7 @@ def _convert_json_schema_to_openai_function(
             used.
         description: The description of the function. If not provided, the description
             of the schema will be used.
-        rm_titles: Whether to remove titles from the schema.
+        rm_titles: Whether to remove titles from the schema.
 
     Returns:
         The function description.
@@ -132,8 +136,8 @@ def _convert_json_schema_to_openai_function(
 def _convert_pydantic_to_openai_function(
     model: type,
     *,
-    name:
-    description:
+    name: str | None = None,
+    description: str | None = None,
     rm_titles: bool = True,
 ) -> FunctionDescription:
     """Converts a Pydantic model to a function description for the OpenAI API.
@@ -144,7 +148,10 @@ def _convert_pydantic_to_openai_function(
             used.
         description: The description of the function. If not provided, the description
             of the schema will be used.
-        rm_titles: Whether to remove titles from the schema.
+        rm_titles: Whether to remove titles from the schema.
+
+    Raises:
+        TypeError: If the model is not a Pydantic model.
 
     Returns:
         The function description.
@@ -161,42 +168,6 @@ def _convert_pydantic_to_openai_function(
     )
 
 
-convert_pydantic_to_openai_function = deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
-    removal="1.0",
-)(_convert_pydantic_to_openai_function)
-
-
-@deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_tool()",
-    removal="1.0",
-)
-def convert_pydantic_to_openai_tool(
-    model: type[BaseModel],
-    *,
-    name: Optional[str] = None,
-    description: Optional[str] = None,
-) -> ToolDescription:
-    """Converts a Pydantic model to a function description for the OpenAI API.
-
-    Args:
-        model: The Pydantic model to convert.
-        name: The name of the function. If not provided, the title of the schema will be
-            used.
-        description: The description of the function. If not provided, the description
-            of the schema will be used.
-
-    Returns:
-        The tool description.
-    """
-    function = _convert_pydantic_to_openai_function(
-        model, name=name, description=description
-    )
-    return {"type": "function", "function": function}
-
-
 def _get_python_function_name(function: Callable) -> str:
     """Get the name of a Python function."""
     return function.__name__
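The deprecation shims removed above (`convert_pydantic_to_openai_function`, `convert_pydantic_to_openai_tool`) already pointed callers at `convert_to_openai_function` / `convert_to_openai_tool`, which stay public in this module. A minimal migration sketch (the `Person` model here is illustrative, not from the diff):

```python
from pydantic import BaseModel, Field

from langchain_core.utils.function_calling import (
    convert_to_openai_function,
    convert_to_openai_tool,
)


class Person(BaseModel):
    """Information about a person."""

    name: str = Field(..., description="The person's name")


# Previously convert_pydantic_to_openai_tool(Person); removed in 1.0.
oai_function = convert_to_openai_function(Person)  # {"name": "Person", ...}
oai_tool = convert_to_openai_tool(Person)  # {"type": "function", "function": {...}}
```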
@@ -217,10 +188,8 @@ def _convert_python_function_to_openai_function(
     Returns:
         The OpenAI function description.
     """
-    from langchain_core.tools.base import create_schema_from_function
-
     func_name = _get_python_function_name(function)
-    model = create_schema_from_function(
+    model = langchain_core.tools.base.create_schema_from_function(
         func_name,
         function,
         filter_args=(),
@@ -235,13 +204,6 @@ def _convert_python_function_to_openai_function(
     )
 
 
-convert_python_function_to_openai_function = deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
-    removal="1.0",
-)(_convert_python_function_to_openai_function)
-
-
 def _convert_typed_dict_to_openai_function(typed_dict: type) -> FunctionDescription:
     visited: dict = {}
 
@@ -261,9 +223,6 @@ def _convert_any_typed_dicts_to_pydantic(
     visited: dict,
     depth: int = 0,
 ) -> type:
-    from pydantic.v1 import Field as Field_v1
-    from pydantic.v1 import create_model as create_model_v1
-
     if type_ in visited:
         return visited[type_]
     if depth >= _MAX_TYPED_DICT_RECURSION:
@@ -277,12 +236,14 @@ def _convert_any_typed_dicts_to_pydantic(
         )
         fields: dict = {}
         for arg, arg_type in annotations_.items():
-            if get_origin(arg_type) is Annotated:
+            if get_origin(arg_type) is Annotated:  # type: ignore[comparison-overlap]
                 annotated_args = get_args(arg_type)
                 new_arg_type = _convert_any_typed_dicts_to_pydantic(
                     annotated_args[0], depth=depth + 1, visited=visited
                 )
-                field_kwargs = dict(
+                field_kwargs = dict(
+                    zip(("default", "description"), annotated_args[1:], strict=False)
+                )
                 if (field_desc := field_kwargs.get("description")) and not isinstance(
                     field_desc, str
                 ):
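The `Annotated` handling above is what lets a plain `TypedDict` act as a tool schema, with the annotation metadata supplying a default and a description. A rough sketch of the intended usage (the `GetWeather` schema is illustrative):

```python
from typing import Annotated

from typing_extensions import TypedDict

from langchain_core.utils.function_calling import convert_to_openai_function


class GetWeather(TypedDict):
    """Get the current weather for a location."""

    location: Annotated[str, ..., "City and country, e.g. 'Paris, France'"]


oai_function = convert_to_openai_function(GetWeather)
# Expected shape: {"name": "GetWeather", "description": ..., "parameters": {...}}
```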
@@ -323,12 +284,15 @@ def _format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription:
     Args:
         tool: The tool to format.
 
+    Raises:
+        ValueError: If the tool call schema is not supported.
+
     Returns:
         The function description.
     """
-
-
-
+    is_simple_oai_tool = (
+        isinstance(tool, langchain_core.tools.simple.Tool) and not tool.args_schema
+    )
     if tool.tool_call_schema and not is_simple_oai_tool:
         if isinstance(tool.tool_call_schema, dict):
             return _convert_json_schema_to_openai_function(
@@ -361,48 +325,23 @@ def _format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription:
     }
 
 
-format_tool_to_openai_function = deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
-    removal="1.0",
-)(_format_tool_to_openai_function)
-
-
-@deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_tool()",
-    removal="1.0",
-)
-def format_tool_to_openai_tool(tool: BaseTool) -> ToolDescription:
-    """Format tool into the OpenAI function API.
-
-    Args:
-        tool: The tool to format.
-
-    Returns:
-        The tool description.
-    """
-    function = _format_tool_to_openai_function(tool)
-    return {"type": "function", "function": function}
-
-
 def convert_to_openai_function(
-    function:
+    function: dict[str, Any] | type | Callable | BaseTool,
     *,
-    strict:
+    strict: bool | None = None,
 ) -> dict[str, Any]:
     """Convert a raw function/class to an OpenAI function.
 
     Args:
         function:
-            A dictionary, Pydantic BaseModel class, TypedDict class, a LangChain
-            Tool object, or a Python function. If a dictionary is passed in, it is
+            A dictionary, Pydantic `BaseModel` class, `TypedDict` class, a LangChain
+            `Tool` object, or a Python function. If a dictionary is passed in, it is
             assumed to already be a valid OpenAI function, a JSON schema with
-            top-level
-
+            top-level `title` key specified, an Anthropic format tool, or an Amazon
+            Bedrock Converse format tool.
         strict:
-            If True
-            provided in the function definition. If None
+            If `True`, model output is guaranteed to exactly match the JSON Schema
+            provided in the function definition. If `None`, `strict` argument will not
             be included in function definition.
 
     Returns:
@@ -412,25 +351,10 @@ def convert_to_openai_function(
     Raises:
         ValueError: If function is not in a supported format.
 
-
-
-        ``strict`` arg added.
-
-    .. versionchanged:: 0.3.13
-
-        Support for Anthropic format tools added.
-
-    .. versionchanged:: 0.3.14
-
-        Support for Amazon Bedrock Converse format tools added.
-
-    .. versionchanged:: 0.3.16
-
-        'description' and 'parameters' keys are now optional. Only 'name' is
+    !!! warning "Behavior changed in 0.3.16"
+        `description` and `parameters` keys are now optional. Only `name` is
         required and guaranteed to be part of the output.
     """
-    from langchain_core.tools import BaseTool
-
     # an Anthropic format tool
     if isinstance(function, dict) and all(
         k in function for k in ("name", "input_schema")
@@ -470,7 +394,7 @@ def convert_to_openai_function(
         oai_function = cast(
             "dict", _convert_typed_dict_to_openai_function(cast("type", function))
         )
-    elif isinstance(function, BaseTool):
+    elif isinstance(function, langchain_core.tools.base.BaseTool):
        oai_function = cast("dict", _format_tool_to_openai_function(function))
     elif callable(function):
         oai_function = cast(
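Putting the dispatch above together: a plain Python function is converted by building a schema from its signature and reading the description and per-argument help from its Google-style docstring (via `_parse_google_docstring` later in this file). A small sketch (the `multiply` function is illustrative):

```python
from langchain_core.utils.function_calling import convert_to_openai_function


def multiply(a: int, b: int) -> int:
    """Multiply two integers.

    Args:
        a: First factor.
        b: Second factor.
    """
    return a * b


oai_function = convert_to_openai_function(multiply)
# oai_function["name"] == "multiply"; the docstring supplies the description
# and the per-argument descriptions.
```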
@@ -515,82 +439,87 @@ _WellKnownOpenAITools = (
     "mcp",
     "image_generation",
     "web_search_preview",
+    "web_search",
 )
 
 
 def convert_to_openai_tool(
-    tool:
+    tool: dict[str, Any] | type[BaseModel] | Callable | BaseTool,
     *,
-    strict:
+    strict: bool | None = None,
 ) -> dict[str, Any]:
     """Convert a tool-like object to an OpenAI tool schema.
 
-    OpenAI tool schema reference
-    https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools
+    [OpenAI tool schema reference](https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools)
 
     Args:
         tool:
-            Either a dictionary, a pydantic.BaseModel class, Python function, or
-            BaseTool
-
-
-            tool, or an Amazon Bedrock Converse format tool.
+            Either a dictionary, a `pydantic.BaseModel` class, Python function, or
+            `BaseTool`. If a dictionary is passed in, it is assumed to already be a
+            valid OpenAI function, a JSON schema with top-level `title` key specified,
+            an Anthropic format tool, or an Amazon Bedrock Converse format tool.
         strict:
-            If True
-            provided in the function definition. If None
+            If `True`, model output is guaranteed to exactly match the JSON Schema
+            provided in the function definition. If `None`, `strict` argument will not
             be included in tool definition.
 
     Returns:
         A dict version of the passed in tool which is compatible with the
         OpenAI tool-calling API.
 
-
-
-        ``strict`` arg added.
-
-    .. versionchanged:: 0.3.13
-
-        Support for Anthropic format tools added.
-
-    .. versionchanged:: 0.3.14
-
-        Support for Amazon Bedrock Converse format tools added.
-
-    .. versionchanged:: 0.3.16
-
-        'description' and 'parameters' keys are now optional. Only 'name' is
+    !!! warning "Behavior changed in 0.3.16"
+        `description` and `parameters` keys are now optional. Only `name` is
         required and guaranteed to be part of the output.
 
-
-
+    !!! warning "Behavior changed in 0.3.44"
         Return OpenAI Responses API-style tools unchanged. This includes
-        any dict with "type" in "file_search"
-        "web_search_preview"
-
-    .. versionchanged:: 0.3.61
-
-        Added support for OpenAI's built-in code interpreter and remote MCP tools.
-
-    .. versionchanged:: 0.3.63
+        any dict with `"type"` in `"file_search"`, `"function"`,
+        `"computer_use_preview"`, `"web_search_preview"`.
 
+    !!! warning "Behavior changed in 0.3.63"
         Added support for OpenAI's image generation built-in tool.
     """
+    # Import locally to prevent circular import
+    from langchain_core.tools import Tool  # noqa: PLC0415
+
     if isinstance(tool, dict):
         if tool.get("type") in _WellKnownOpenAITools:
             return tool
         # As of 03.12.25 can be "web_search_preview" or "web_search_preview_2025_03_11"
         if (tool.get("type") or "").startswith("web_search_preview"):
             return tool
+    if isinstance(tool, Tool) and (tool.metadata or {}).get("type") == "custom_tool":
+        oai_tool = {
+            "type": "custom",
+            "name": tool.name,
+            "description": tool.description,
+        }
+        if tool.metadata is not None and "format" in tool.metadata:
+            oai_tool["format"] = tool.metadata["format"]
+        return oai_tool
     oai_function = convert_to_openai_function(tool, strict=strict)
     return {"type": "function", "function": oai_function}
 
 
 def convert_to_json_schema(
-    schema:
+    schema: dict[str, Any] | type[BaseModel] | Callable | BaseTool,
     *,
-    strict:
+    strict: bool | None = None,
 ) -> dict[str, Any]:
-    """Convert a schema representation to a JSON schema.
+    """Convert a schema representation to a JSON schema.
+
+    Args:
+        schema: The schema to convert.
+        strict: If `True`, model output is guaranteed to exactly match the JSON Schema
+            provided in the function definition. If `None`, `strict` argument will not
+            be included in function definition.
+
+    Raises:
+        ValueError: If the input is not a valid OpenAI-format tool.
+
+    Returns:
+        A JSON schema representation of the input schema.
+    """
     openai_tool = convert_to_openai_tool(schema, strict=strict)
     if (
         not isinstance(openai_tool, dict)
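Two consequences of the dict handling above: well-known OpenAI built-in tool dicts (now including `"web_search"`) pass through unchanged, while everything else is wrapped in the `{"type": "function", ...}` envelope. A quick sketch (the `Multiply` model is illustrative):

```python
from pydantic import BaseModel, Field

from langchain_core.utils.function_calling import convert_to_openai_tool

# Responses API built-in tools are returned as-is.
assert convert_to_openai_tool({"type": "web_search"}) == {"type": "web_search"}


class Multiply(BaseModel):
    """Multiply two integers."""

    a: int = Field(..., description="First factor")
    b: int = Field(..., description="Second factor")


wrapped = convert_to_openai_tool(Multiply)
assert wrapped["type"] == "function"
assert wrapped["function"]["name"] == "Multiply"
```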
@@ -618,9 +547,9 @@ def convert_to_json_schema(
 def tool_example_to_messages(
     input: str,
     tool_calls: list[BaseModel],
-    tool_outputs:
+    tool_outputs: list[str] | None = None,
     *,
-    ai_response:
+    ai_response: str | None = None,
 ) -> list[BaseMessage]:
     """Convert an example into a list of messages that can be fed into an LLM.
 
@@ -629,65 +558,62 @@ def tool_example_to_messages(
 
     The list of messages per example by default corresponds to:
 
-    1
-    2
-    3
-
+    1. `HumanMessage`: contains the content from which content should be extracted.
+    2. `AIMessage`: contains the extracted information from the model
+    3. `ToolMessage`: contains confirmation to the model that the model requested a
+       tool correctly.
 
-    If `ai_response` is specified, there will be a final AIMessage with that
+    If `ai_response` is specified, there will be a final `AIMessage` with that
+    response.
 
-    The ToolMessage is required because some chat models are hyper-optimized for
-    rather than for an extraction use case.
+    The `ToolMessage` is required because some chat models are hyper-optimized for
+    agents rather than for an extraction use case.
 
-
-        input:
-        tool_calls:
-
-        tool_outputs: Optional[list[str]], a list of tool call outputs.
+    Args:
+        input: The user input
+        tool_calls: Tool calls represented as Pydantic BaseModels
+        tool_outputs: Tool call outputs.
             Does not need to be provided. If not provided, a placeholder value
-        will be inserted.
-        ai_response:
+            will be inserted.
+        ai_response: If provided, content for a final `AIMessage`.
 
     Returns:
         A list of messages
 
     Examples:
+        ```python
+        from typing import Optional
+        from pydantic import BaseModel, Field
+        from langchain_openai import ChatOpenAI
 
-        .. code-block:: python
 
-
-
-            from langchain_openai import ChatOpenAI
+        class Person(BaseModel):
+            '''Information about a person.'''
 
-
-
-
-
-
-                )
-                height_in_meters: Optional[str] = Field(
-                    ..., description="Height in METERS"
-                )
+            name: str | None = Field(..., description="The name of the person")
+            hair_color: str | None = Field(
+                ..., description="The color of the person's hair if known"
+            )
+            height_in_meters: str | None = Field(..., description="Height in METERS")
 
-            examples = [
-                (
-                    "The ocean is vast and blue. It's more than 20,000 feet deep.",
-                    Person(name=None, height_in_meters=None, hair_color=None),
-                ),
-                (
-                    "Fiona traveled far from France to Spain.",
-                    Person(name="Fiona", height_in_meters=None, hair_color=None),
-                ),
-            ]
 
+        examples = [
+            (
+                "The ocean is vast and blue. It's more than 20,000 feet deep.",
+                Person(name=None, height_in_meters=None, hair_color=None),
+            ),
+            (
+                "Fiona traveled far from France to Spain.",
+                Person(name="Fiona", height_in_meters=None, hair_color=None),
+            ),
+        ]
 
-            messages = []
 
-
-                messages.extend(
-                    tool_example_to_messages(txt, [tool_call])
-                )
+        messages = []
 
+        for txt, tool_call in examples:
+            messages.extend(tool_example_to_messages(txt, [tool_call]))
+        ```
     """
     messages: list[BaseMessage] = [HumanMessage(content=input)]
     openai_tool_calls = [
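For reference, the default message shape produced by `tool_example_to_messages` (no `tool_outputs`, no `ai_response`) is human, AI (carrying the tool call), then a placeholder tool confirmation; a short sketch assuming a trivial Pydantic model:

```python
from pydantic import BaseModel

from langchain_core.utils.function_calling import tool_example_to_messages


class Person(BaseModel):
    """Information about a person."""

    name: str | None = None


msgs = tool_example_to_messages("Fiona traveled to Spain.", [Person(name="Fiona")])
# Default shape per example: HumanMessage, AIMessage with the tool call,
# and a placeholder ToolMessage confirming the call.
assert [m.type for m in msgs] == ["human", "ai", "tool"]
```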
@@ -696,7 +622,7 @@ def tool_example_to_messages(
             "type": "function",
             "function": {
                 # The name of the function right now corresponds to the name
-                # of the
+                # of the Pydantic model. This is implicit in the API right now,
                 # and will be improved over time.
                 "name": tool_call.__class__.__name__,
                 "arguments": tool_call.model_dump_json(),
@@ -711,7 +637,7 @@ def tool_example_to_messages(
     tool_outputs = tool_outputs or ["You have correctly called this tool."] * len(
         openai_tool_calls
     )
-    for output, tool_call_dict in zip(tool_outputs, openai_tool_calls):
+    for output, tool_call_dict in zip(tool_outputs, openai_tool_calls, strict=False):
         messages.append(ToolMessage(content=output, tool_call_id=tool_call_dict["id"]))
 
     if ai_response:
@@ -720,7 +646,7 @@ def tool_example_to_messages(
 
 
 def _parse_google_docstring(
-    docstring:
+    docstring: str | None,
     args: list[str],
     *,
     error_on_invalid_docstring: bool = False,
@@ -728,6 +654,7 @@ def _parse_google_docstring(
     """Parse the function and argument descriptions from the docstring of a function.
 
     Assumes the function docstring follows Google Python style guide.
+
     """
     if docstring:
         docstring_blocks = docstring.split("\n\n")
@@ -803,8 +730,14 @@ def _recursive_set_additional_properties_false(
     if isinstance(schema, dict):
         # Check if 'required' is a key at the current level or if the schema is empty,
         # in which case additionalProperties still needs to be specified.
-        if
-            "
+        if (
+            "required" in schema
+            or ("properties" in schema and not schema["properties"])
+            # Since Pydantic 2.11, it will always add `additionalProperties: True`
+            # for arbitrary dictionary schemas
+            # See: https://pydantic.dev/articles/pydantic-v2-11-release#changes
+            # If it is already set to True, we need override it to False
+            or "additionalProperties" in schema
        ):
             schema["additionalProperties"] = False
 
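A sketch of what the `strict` path is for, assuming (as in previous releases) that `convert_to_openai_function(..., strict=True)` attaches `"strict": True` and runs the helper above over the generated parameters schema; the `Add` model is illustrative:

```python
from pydantic import BaseModel, Field

from langchain_core.utils.function_calling import convert_to_openai_tool


class Add(BaseModel):
    """Add two integers."""

    a: int = Field(..., description="First addend")
    b: int = Field(..., description="Second addend")


tool = convert_to_openai_tool(Add, strict=True)
# The parameters schema is post-processed so each object level sets
# additionalProperties to False, overriding a pre-existing value such as the
# `additionalProperties: True` emitted by Pydantic 2.11+ for dict fields.
assert tool["function"]["strict"] is True
assert tool["function"]["parameters"]["additionalProperties"] is False
```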
langchain_core/utils/html.py
CHANGED

@@ -3,7 +3,6 @@
 import logging
 import re
 from collections.abc import Sequence
-from typing import Optional, Union
 from urllib.parse import urljoin, urlparse
 
 logger = logging.getLogger(__name__)
@@ -35,7 +34,7 @@ DEFAULT_LINK_REGEX = (
 
 
 def find_all_links(
-    raw_html: str, *, pattern:
+    raw_html: str, *, pattern: str | re.Pattern | None = None
 ) -> list[str]:
     """Extract all links from a raw HTML string.
 
@@ -44,7 +43,7 @@ def find_all_links(
         pattern: Regex to use for extracting links from raw HTML.
 
     Returns:
-
+        all links
     """
     pattern = pattern or DEFAULT_LINK_REGEX
     return list(set(re.findall(pattern, raw_html)))
@@ -54,8 +53,8 @@ def extract_sub_links(
     raw_html: str,
     url: str,
     *,
-    base_url:
-    pattern:
+    base_url: str | None = None,
+    pattern: str | re.Pattern | None = None,
     prevent_outside: bool = True,
     exclude_prefixes: Sequence[str] = (),
     continue_on_failure: bool = False,
@@ -67,14 +66,14 @@ def extract_sub_links(
         url: the url of the HTML.
         base_url: the base URL to check for outside links against.
         pattern: Regex to use for extracting links from raw HTML.
-        prevent_outside: If True
+        prevent_outside: If `True`, ignore external links which are not children
             of the base URL.
         exclude_prefixes: Exclude any URLs that start with one of these prefixes.
-        continue_on_failure: If True
+        continue_on_failure: If `True`, continue if parsing a specific link raises an
             exception. Otherwise, raise the exception.
 
     Returns:
-
+        sub links.
     """
     base_url_to_use = base_url if base_url is not None else url
     parsed_base_url = urlparse(base_url_to_use)
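For orientation, the two public helpers in this module are used roughly like this (the URLs and HTML snippet are illustrative):

```python
from langchain_core.utils.html import extract_sub_links, find_all_links

raw_html = '<a href="/docs/intro">Intro</a> <a href="https://example.org/x">X</a>'

all_links = find_all_links(raw_html)

# With prevent_outside=True (the default), links that do not live under the
# base URL are dropped; relative links are resolved against `url`.
sub_links = extract_sub_links(
    raw_html,
    "https://example.com/docs/index.html",
    base_url="https://example.com/docs",
    prevent_outside=True,
)
```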
langchain_core/utils/input.py
CHANGED

@@ -1,6 +1,6 @@
 """Handle chained inputs."""
 
-from typing import
+from typing import TextIO
 
 _TEXT_COLOR_MAPPING = {
     "blue": "36;1",
@@ -12,7 +12,7 @@ _TEXT_COLOR_MAPPING = {
 
 
 def get_color_mapping(
-    items: list[str], excluded_colors:
+    items: list[str], excluded_colors: list | None = None
 ) -> dict[str, str]:
     """Get mapping for items to a support color.
 
@@ -56,7 +56,7 @@ def get_bolded_text(text: str) -> str:
 
 
 def print_text(
-    text: str, color:
+    text: str, color: str | None = None, end: str = "", file: TextIO | None = None
 ) -> None:
     """Print text with highlighting and no end characters.
 
@@ -65,9 +65,9 @@ def print_text(
 
     Args:
         text: The text to print.
-        color: The color to use.
-        end: The end character to use.
-        file: The file to write to.
+        color: The color to use.
+        end: The end character to use.
+        file: The file to write to.
     """
     text_to_print = get_colored_text(text, color) if color else text
     print(text_to_print, end=end, file=file)
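And a short sketch of the two helpers whose signatures changed here, used together (the step names are illustrative):

```python
from langchain_core.utils.input import get_color_mapping, print_text

# Assign each step a distinct ANSI color name understood by print_text.
color_mapping = get_color_mapping(["retrieve", "summarize"])

for step, color in color_mapping.items():
    print_text(step, color=color, end="\n")
```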
|