langchain-core 0.3.79__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/__init__.py +1 -1
- langchain_core/_api/__init__.py +3 -4
- langchain_core/_api/beta_decorator.py +23 -26
- langchain_core/_api/deprecation.py +52 -65
- langchain_core/_api/path.py +3 -6
- langchain_core/_import_utils.py +3 -4
- langchain_core/agents.py +19 -19
- langchain_core/caches.py +53 -63
- langchain_core/callbacks/__init__.py +1 -8
- langchain_core/callbacks/base.py +323 -334
- langchain_core/callbacks/file.py +44 -44
- langchain_core/callbacks/manager.py +441 -507
- langchain_core/callbacks/stdout.py +29 -30
- langchain_core/callbacks/streaming_stdout.py +32 -32
- langchain_core/callbacks/usage.py +60 -57
- langchain_core/chat_history.py +48 -63
- langchain_core/document_loaders/base.py +23 -23
- langchain_core/document_loaders/langsmith.py +37 -37
- langchain_core/documents/__init__.py +0 -1
- langchain_core/documents/base.py +62 -65
- langchain_core/documents/compressor.py +4 -4
- langchain_core/documents/transformers.py +28 -29
- langchain_core/embeddings/fake.py +50 -54
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/example_selectors/semantic_similarity.py +21 -25
- langchain_core/exceptions.py +10 -11
- langchain_core/globals.py +3 -151
- langchain_core/indexing/api.py +61 -66
- langchain_core/indexing/base.py +58 -58
- langchain_core/indexing/in_memory.py +3 -3
- langchain_core/language_models/__init__.py +14 -27
- langchain_core/language_models/_utils.py +270 -84
- langchain_core/language_models/base.py +55 -162
- langchain_core/language_models/chat_models.py +442 -402
- langchain_core/language_models/fake.py +11 -11
- langchain_core/language_models/fake_chat_models.py +61 -39
- langchain_core/language_models/llms.py +123 -231
- langchain_core/load/dump.py +4 -5
- langchain_core/load/load.py +18 -28
- langchain_core/load/mapping.py +2 -4
- langchain_core/load/serializable.py +39 -40
- langchain_core/messages/__init__.py +61 -22
- langchain_core/messages/ai.py +368 -163
- langchain_core/messages/base.py +214 -43
- langchain_core/messages/block_translators/__init__.py +111 -0
- langchain_core/messages/block_translators/anthropic.py +470 -0
- langchain_core/messages/block_translators/bedrock.py +94 -0
- langchain_core/messages/block_translators/bedrock_converse.py +297 -0
- langchain_core/messages/block_translators/google_genai.py +530 -0
- langchain_core/messages/block_translators/google_vertexai.py +21 -0
- langchain_core/messages/block_translators/groq.py +143 -0
- langchain_core/messages/block_translators/langchain_v0.py +301 -0
- langchain_core/messages/block_translators/openai.py +1010 -0
- langchain_core/messages/chat.py +2 -6
- langchain_core/messages/content.py +1423 -0
- langchain_core/messages/function.py +6 -10
- langchain_core/messages/human.py +41 -38
- langchain_core/messages/modifier.py +2 -2
- langchain_core/messages/system.py +38 -28
- langchain_core/messages/tool.py +96 -103
- langchain_core/messages/utils.py +478 -504
- langchain_core/output_parsers/__init__.py +1 -14
- langchain_core/output_parsers/base.py +58 -61
- langchain_core/output_parsers/json.py +7 -8
- langchain_core/output_parsers/list.py +5 -7
- langchain_core/output_parsers/openai_functions.py +49 -47
- langchain_core/output_parsers/openai_tools.py +14 -19
- langchain_core/output_parsers/pydantic.py +12 -13
- langchain_core/output_parsers/string.py +2 -2
- langchain_core/output_parsers/transform.py +15 -17
- langchain_core/output_parsers/xml.py +8 -10
- langchain_core/outputs/__init__.py +1 -1
- langchain_core/outputs/chat_generation.py +18 -18
- langchain_core/outputs/chat_result.py +1 -3
- langchain_core/outputs/generation.py +8 -8
- langchain_core/outputs/llm_result.py +10 -10
- langchain_core/prompt_values.py +12 -12
- langchain_core/prompts/__init__.py +3 -27
- langchain_core/prompts/base.py +45 -55
- langchain_core/prompts/chat.py +254 -313
- langchain_core/prompts/dict.py +5 -5
- langchain_core/prompts/few_shot.py +81 -88
- langchain_core/prompts/few_shot_with_templates.py +11 -13
- langchain_core/prompts/image.py +12 -14
- langchain_core/prompts/loading.py +6 -8
- langchain_core/prompts/message.py +3 -3
- langchain_core/prompts/prompt.py +24 -39
- langchain_core/prompts/string.py +4 -4
- langchain_core/prompts/structured.py +42 -50
- langchain_core/rate_limiters.py +51 -60
- langchain_core/retrievers.py +49 -190
- langchain_core/runnables/base.py +1484 -1709
- langchain_core/runnables/branch.py +45 -61
- langchain_core/runnables/config.py +80 -88
- langchain_core/runnables/configurable.py +117 -134
- langchain_core/runnables/fallbacks.py +83 -79
- langchain_core/runnables/graph.py +85 -95
- langchain_core/runnables/graph_ascii.py +27 -28
- langchain_core/runnables/graph_mermaid.py +38 -50
- langchain_core/runnables/graph_png.py +15 -16
- langchain_core/runnables/history.py +135 -148
- langchain_core/runnables/passthrough.py +124 -150
- langchain_core/runnables/retry.py +46 -51
- langchain_core/runnables/router.py +25 -30
- langchain_core/runnables/schema.py +79 -74
- langchain_core/runnables/utils.py +62 -68
- langchain_core/stores.py +81 -115
- langchain_core/structured_query.py +8 -8
- langchain_core/sys_info.py +27 -29
- langchain_core/tools/__init__.py +1 -14
- langchain_core/tools/base.py +179 -187
- langchain_core/tools/convert.py +131 -139
- langchain_core/tools/render.py +10 -10
- langchain_core/tools/retriever.py +11 -11
- langchain_core/tools/simple.py +19 -24
- langchain_core/tools/structured.py +30 -39
- langchain_core/tracers/__init__.py +1 -9
- langchain_core/tracers/base.py +97 -99
- langchain_core/tracers/context.py +29 -52
- langchain_core/tracers/core.py +50 -60
- langchain_core/tracers/evaluation.py +11 -11
- langchain_core/tracers/event_stream.py +115 -70
- langchain_core/tracers/langchain.py +21 -21
- langchain_core/tracers/log_stream.py +43 -43
- langchain_core/tracers/memory_stream.py +3 -3
- langchain_core/tracers/root_listeners.py +16 -16
- langchain_core/tracers/run_collector.py +2 -4
- langchain_core/tracers/schemas.py +0 -129
- langchain_core/tracers/stdout.py +3 -3
- langchain_core/utils/__init__.py +1 -4
- langchain_core/utils/_merge.py +46 -8
- langchain_core/utils/aiter.py +57 -61
- langchain_core/utils/env.py +9 -9
- langchain_core/utils/function_calling.py +89 -191
- langchain_core/utils/html.py +7 -8
- langchain_core/utils/input.py +6 -6
- langchain_core/utils/interactive_env.py +1 -1
- langchain_core/utils/iter.py +37 -42
- langchain_core/utils/json.py +4 -3
- langchain_core/utils/json_schema.py +8 -8
- langchain_core/utils/mustache.py +9 -11
- langchain_core/utils/pydantic.py +33 -35
- langchain_core/utils/strings.py +5 -5
- langchain_core/utils/usage.py +1 -1
- langchain_core/utils/utils.py +80 -54
- langchain_core/vectorstores/base.py +129 -164
- langchain_core/vectorstores/in_memory.py +99 -174
- langchain_core/vectorstores/utils.py +5 -5
- langchain_core/version.py +1 -1
- {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/METADATA +28 -27
- langchain_core-1.0.0.dist-info/RECORD +172 -0
- {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
- langchain_core/beta/__init__.py +0 -1
- langchain_core/beta/runnables/__init__.py +0 -1
- langchain_core/beta/runnables/context.py +0 -447
- langchain_core/memory.py +0 -120
- langchain_core/messages/content_blocks.py +0 -176
- langchain_core/prompts/pipeline.py +0 -138
- langchain_core/pydantic_v1/__init__.py +0 -30
- langchain_core/pydantic_v1/dataclasses.py +0 -23
- langchain_core/pydantic_v1/main.py +0 -23
- langchain_core/tracers/langchain_v1.py +0 -31
- langchain_core/utils/loading.py +0 -35
- langchain_core-0.3.79.dist-info/RECORD +0 -174
- langchain_core-0.3.79.dist-info/entry_points.txt +0 -4
langchain_core/utils/function_calling.py CHANGED

@@ -8,24 +8,26 @@ import logging
 import types
 import typing
 import uuid
+from collections.abc import Callable
 from typing import (
     TYPE_CHECKING,
     Annotated,
     Any,
-    Callable,
     Literal,
-    Optional,
     Union,
     cast,
+    get_args,
+    get_origin,
 )
 
 from pydantic import BaseModel
 from pydantic.v1 import BaseModel as BaseModelV1
-from pydantic.v1 import Field
-from
+from pydantic.v1 import Field as Field_v1
+from pydantic.v1 import create_model as create_model_v1
+from typing_extensions import TypedDict, is_typeddict
 
 import langchain_core
-from langchain_core._api import beta
+from langchain_core._api import beta
 from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
 from langchain_core.utils.json_schema import dereference_refs
 from langchain_core.utils.pydantic import is_basemodel_subclass

@@ -70,11 +72,11 @@ def _rm_titles(kv: dict, prev_key: str = "") -> dict:
     except when a "title" appears within a property definition under "properties".
 
     Args:
-        kv
-        prev_key
+        kv: The input JSON schema as a dictionary.
+        prev_key: The key from the parent dictionary, used to identify context.
 
     Returns:
-
+        A new dictionary with appropriate "title" fields removed.
     """
     new_kv = {}
 

@@ -100,8 +102,8 @@ def _rm_titles(kv: dict, prev_key: str = "") -> dict:
 def _convert_json_schema_to_openai_function(
     schema: dict,
     *,
-    name:
-    description:
+    name: str | None = None,
+    description: str | None = None,
     rm_titles: bool = True,
 ) -> FunctionDescription:
     """Converts a Pydantic model to a function description for the OpenAI API.

@@ -112,7 +114,7 @@ def _convert_json_schema_to_openai_function(
             used.
         description: The description of the function. If not provided, the description
             of the schema will be used.
-        rm_titles: Whether to remove titles from the schema.
+        rm_titles: Whether to remove titles from the schema.
 
     Returns:
         The function description.

@@ -134,8 +136,8 @@ def _convert_json_schema_to_openai_function(
 def _convert_pydantic_to_openai_function(
     model: type,
     *,
-    name:
-    description:
+    name: str | None = None,
+    description: str | None = None,
     rm_titles: bool = True,
 ) -> FunctionDescription:
     """Converts a Pydantic model to a function description for the OpenAI API.

@@ -146,7 +148,7 @@ def _convert_pydantic_to_openai_function(
             used.
         description: The description of the function. If not provided, the description
             of the schema will be used.
-        rm_titles: Whether to remove titles from the schema.
+        rm_titles: Whether to remove titles from the schema.
 
     Raises:
         TypeError: If the model is not a Pydantic model.

@@ -166,42 +168,6 @@ def _convert_pydantic_to_openai_function(
     )
 
 
-convert_pydantic_to_openai_function = deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
-    removal="1.0",
-)(_convert_pydantic_to_openai_function)
-
-
-@deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_tool()",
-    removal="1.0",
-)
-def convert_pydantic_to_openai_tool(
-    model: type[BaseModel],
-    *,
-    name: Optional[str] = None,
-    description: Optional[str] = None,
-) -> ToolDescription:
-    """Converts a Pydantic model to a function description for the OpenAI API.
-
-    Args:
-        model: The Pydantic model to convert.
-        name: The name of the function. If not provided, the title of the schema will be
-            used.
-        description: The description of the function. If not provided, the description
-            of the schema will be used.
-
-    Returns:
-        The tool description.
-    """
-    function = _convert_pydantic_to_openai_function(
-        model, name=name, description=description
-    )
-    return {"type": "function", "function": function}
-
-
 def _get_python_function_name(function: Callable) -> str:
     """Get the name of a Python function."""
     return function.__name__

@@ -238,13 +204,6 @@ def _convert_python_function_to_openai_function(
     )
 
 
-convert_python_function_to_openai_function = deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
-    removal="1.0",
-)(_convert_python_function_to_openai_function)
-
-
 def _convert_typed_dict_to_openai_function(typed_dict: type) -> FunctionDescription:
     visited: dict = {}
 

@@ -282,7 +241,9 @@ def _convert_any_typed_dicts_to_pydantic(
             new_arg_type = _convert_any_typed_dicts_to_pydantic(
                 annotated_args[0], depth=depth + 1, visited=visited
             )
-            field_kwargs = dict(
+            field_kwargs = dict(
+                zip(("default", "description"), annotated_args[1:], strict=False)
+            )
             if (field_desc := field_kwargs.get("description")) and not isinstance(
                 field_desc, str
             ):

@@ -294,7 +255,7 @@ def _convert_any_typed_dicts_to_pydantic(
                 raise ValueError(msg)
             if arg_desc := arg_descriptions.get(arg):
                 field_kwargs["description"] = arg_desc
-            fields[arg] = (new_arg_type,
+            fields[arg] = (new_arg_type, Field_v1(**field_kwargs))
         else:
             new_arg_type = _convert_any_typed_dicts_to_pydantic(
                 arg_type, depth=depth + 1, visited=visited

@@ -302,8 +263,8 @@ def _convert_any_typed_dicts_to_pydantic(
             field_kwargs = {"default": ...}
             if arg_desc := arg_descriptions.get(arg):
                 field_kwargs["description"] = arg_desc
-            fields[arg] = (new_arg_type,
-        model =
+            fields[arg] = (new_arg_type, Field_v1(**field_kwargs))
+        model = create_model_v1(typed_dict.__name__, **fields)
         model.__doc__ = description
         visited[typed_dict] = model
         return model

@@ -364,48 +325,23 @@ def _format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription:
     }
 
 
-format_tool_to_openai_function = deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
-    removal="1.0",
-)(_format_tool_to_openai_function)
-
-
-@deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_tool()",
-    removal="1.0",
-)
-def format_tool_to_openai_tool(tool: BaseTool) -> ToolDescription:
-    """Format tool into the OpenAI function API.
-
-    Args:
-        tool: The tool to format.
-
-    Returns:
-        The tool description.
-    """
-    function = _format_tool_to_openai_function(tool)
-    return {"type": "function", "function": function}
-
-
 def convert_to_openai_function(
-    function:
+    function: dict[str, Any] | type | Callable | BaseTool,
     *,
-    strict:
+    strict: bool | None = None,
 ) -> dict[str, Any]:
     """Convert a raw function/class to an OpenAI function.
 
     Args:
         function:
-            A dictionary, Pydantic BaseModel class, TypedDict class, a LangChain
-            Tool object, or a Python function. If a dictionary is passed in, it is
+            A dictionary, Pydantic `BaseModel` class, `TypedDict` class, a LangChain
+            `Tool` object, or a Python function. If a dictionary is passed in, it is
             assumed to already be a valid OpenAI function, a JSON schema with
-            top-level
-
+            top-level `title` key specified, an Anthropic format tool, or an Amazon
+            Bedrock Converse format tool.
         strict:
-            If True
-            provided in the function definition. If None
+            If `True`, model output is guaranteed to exactly match the JSON Schema
+            provided in the function definition. If `None`, `strict` argument will not
             be included in function definition.
 
     Returns:

@@ -415,21 +351,8 @@ def convert_to_openai_function(
     Raises:
         ValueError: If function is not in a supported format.
 
-
-
-        ``strict`` arg added.
-
-    .. versionchanged:: 0.3.13
-
-        Support for Anthropic format tools added.
-
-    .. versionchanged:: 0.3.14
-
-        Support for Amazon Bedrock Converse format tools added.
-
-    .. versionchanged:: 0.3.16
-
-        'description' and 'parameters' keys are now optional. Only 'name' is
+    !!! warning "Behavior changed in 0.3.16"
+        `description` and `parameters` keys are now optional. Only `name` is
         required and guaranteed to be part of the output.
     """
     # an Anthropic format tool

@@ -521,60 +444,39 @@ _WellKnownOpenAITools = (
 
 
 def convert_to_openai_tool(
-    tool:
+    tool: dict[str, Any] | type[BaseModel] | Callable | BaseTool,
     *,
-    strict:
+    strict: bool | None = None,
 ) -> dict[str, Any]:
     """Convert a tool-like object to an OpenAI tool schema.
 
-    OpenAI tool schema reference
-    https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools
+    [OpenAI tool schema reference](https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools)
 
     Args:
         tool:
-            Either a dictionary, a pydantic.BaseModel class, Python function, or
-            BaseTool
-
-
-            tool, or an Amazon Bedrock Converse format tool.
+            Either a dictionary, a `pydantic.BaseModel` class, Python function, or
+            `BaseTool`. If a dictionary is passed in, it is assumed to already be a
+            valid OpenAI function, a JSON schema with top-level `title` key specified,
+            an Anthropic format tool, or an Amazon Bedrock Converse format tool.
         strict:
-            If True
-            provided in the function definition. If None
+            If `True`, model output is guaranteed to exactly match the JSON Schema
+            provided in the function definition. If `None`, `strict` argument will not
             be included in tool definition.
 
     Returns:
         A dict version of the passed in tool which is compatible with the
         OpenAI tool-calling API.
 
-
-
-        ``strict`` arg added.
-
-    .. versionchanged:: 0.3.13
-
-        Support for Anthropic format tools added.
-
-    .. versionchanged:: 0.3.14
-
-        Support for Amazon Bedrock Converse format tools added.
-
-    .. versionchanged:: 0.3.16
-
-        'description' and 'parameters' keys are now optional. Only 'name' is
+    !!! warning "Behavior changed in 0.3.16"
+        `description` and `parameters` keys are now optional. Only `name` is
         required and guaranteed to be part of the output.
 
-
-
+    !!! warning "Behavior changed in 0.3.44"
         Return OpenAI Responses API-style tools unchanged. This includes
-        any dict with "type" in "file_search"
-        "web_search_preview"
-
-    .. versionchanged:: 0.3.61
-
-        Added support for OpenAI's built-in code interpreter and remote MCP tools.
-
-    .. versionchanged:: 0.3.63
+        any dict with `"type"` in `"file_search"`, `"function"`,
+        `"computer_use_preview"`, `"web_search_preview"`.
 
+    !!! warning "Behavior changed in 0.3.63"
         Added support for OpenAI's image generation built-in tool.
     """
     # Import locally to prevent circular import

@@ -600,16 +502,16 @@ def convert_to_openai_tool(
 
 
 def convert_to_json_schema(
-    schema:
+    schema: dict[str, Any] | type[BaseModel] | Callable | BaseTool,
     *,
-    strict:
+    strict: bool | None = None,
 ) -> dict[str, Any]:
     """Convert a schema representation to a JSON schema.
 
     Args:
         schema: The schema to convert.
-        strict: If True
-            provided in the function definition. If None
+        strict: If `True`, model output is guaranteed to exactly match the JSON Schema
+            provided in the function definition. If `None`, `strict` argument will not
             be included in function definition.
 
     Raises:

@@ -645,9 +547,9 @@ def convert_to_json_schema(
 def tool_example_to_messages(
     input: str,
     tool_calls: list[BaseModel],
-    tool_outputs:
+    tool_outputs: list[str] | None = None,
     *,
-    ai_response:
+    ai_response: str | None = None,
 ) -> list[BaseMessage]:
     """Convert an example into a list of messages that can be fed into an LLM.
 

@@ -656,15 +558,15 @@ def tool_example_to_messages(
 
     The list of messages per example by default corresponds to:
 
-    1.
-    2.
-    3.
-
+    1. `HumanMessage`: contains the content from which content should be extracted.
+    2. `AIMessage`: contains the extracted information from the model
+    3. `ToolMessage`: contains confirmation to the model that the model requested a
+       tool correctly.
 
-    If
+    If `ai_response` is specified, there will be a final `AIMessage` with that
     response.
 
-    The
+    The `ToolMessage` is required because some chat models are hyper-optimized for
     agents rather than for an extraction use case.
 
     Args:

@@ -672,50 +574,46 @@ def tool_example_to_messages(
         tool_calls: Tool calls represented as Pydantic BaseModels
         tool_outputs: Tool call outputs.
             Does not need to be provided. If not provided, a placeholder value
-            will be inserted.
-        ai_response: If provided, content for a final
+            will be inserted.
+        ai_response: If provided, content for a final `AIMessage`.
 
     Returns:
         A list of messages
 
     Examples:
+        ```python
+        from typing import Optional
+        from pydantic import BaseModel, Field
+        from langchain_openai import ChatOpenAI
 
-        .. code-block:: python
-
-            from typing import Optional
-            from pydantic import BaseModel, Field
-            from langchain_openai import ChatOpenAI
 
+        class Person(BaseModel):
+            '''Information about a person.'''
 
-
-
-
-
-
-                ..., description="The color of the person's hair if known"
-            )
-            height_in_meters: Optional[str] = Field(
-                ..., description="Height in METERS"
-            )
-
+            name: str | None = Field(..., description="The name of the person")
+            hair_color: str | None = Field(
+                ..., description="The color of the person's hair if known"
+            )
+            height_in_meters: str | None = Field(..., description="Height in METERS")
 
-            examples = [
-                (
-                    "The ocean is vast and blue. It's more than 20,000 feet deep.",
-                    Person(name=None, height_in_meters=None, hair_color=None),
-                ),
-                (
-                    "Fiona traveled far from France to Spain.",
-                    Person(name="Fiona", height_in_meters=None, hair_color=None),
-                ),
-            ]
 
+        examples = [
+            (
+                "The ocean is vast and blue. It's more than 20,000 feet deep.",
+                Person(name=None, height_in_meters=None, hair_color=None),
+            ),
+            (
+                "Fiona traveled far from France to Spain.",
+                Person(name="Fiona", height_in_meters=None, hair_color=None),
+            ),
+        ]
 
-        messages = []
 
-
-            messages.extend(tool_example_to_messages(txt, [tool_call]))
+        messages = []
 
+        for txt, tool_call in examples:
+            messages.extend(tool_example_to_messages(txt, [tool_call]))
+        ```
     """
     messages: list[BaseMessage] = [HumanMessage(content=input)]
     openai_tool_calls = [

@@ -724,7 +622,7 @@ def tool_example_to_messages(
             "type": "function",
             "function": {
                 # The name of the function right now corresponds to the name
-                # of the
+                # of the Pydantic model. This is implicit in the API right now,
                 # and will be improved over time.
                 "name": tool_call.__class__.__name__,
                 "arguments": tool_call.model_dump_json(),

@@ -739,7 +637,7 @@ def tool_example_to_messages(
     tool_outputs = tool_outputs or ["You have correctly called this tool."] * len(
         openai_tool_calls
    )
-    for output, tool_call_dict in zip(tool_outputs, openai_tool_calls):
+    for output, tool_call_dict in zip(tool_outputs, openai_tool_calls, strict=False):
         messages.append(ToolMessage(content=output, tool_call_id=tool_call_dict["id"]))
 
     if ai_response:

@@ -748,7 +646,7 @@ def tool_example_to_messages(
 
 
 def _parse_google_docstring(
-    docstring:
+    docstring: str | None,
     args: list[str],
     *,
     error_on_invalid_docstring: bool = False,
langchain_core/utils/html.py CHANGED

@@ -3,7 +3,6 @@
 import logging
 import re
 from collections.abc import Sequence
-from typing import Optional, Union
 from urllib.parse import urljoin, urlparse
 
 logger = logging.getLogger(__name__)

@@ -35,7 +34,7 @@ DEFAULT_LINK_REGEX = (
 
 
 def find_all_links(
-    raw_html: str, *, pattern:
+    raw_html: str, *, pattern: str | re.Pattern | None = None
 ) -> list[str]:
     """Extract all links from a raw HTML string.
 

@@ -44,7 +43,7 @@ def find_all_links(
         pattern: Regex to use for extracting links from raw HTML.
 
     Returns:
-
+        all links
     """
     pattern = pattern or DEFAULT_LINK_REGEX
     return list(set(re.findall(pattern, raw_html)))

@@ -54,8 +53,8 @@ def extract_sub_links(
     raw_html: str,
     url: str,
     *,
-    base_url:
-    pattern:
+    base_url: str | None = None,
+    pattern: str | re.Pattern | None = None,
     prevent_outside: bool = True,
     exclude_prefixes: Sequence[str] = (),
     continue_on_failure: bool = False,

@@ -67,14 +66,14 @@ def extract_sub_links(
         url: the url of the HTML.
         base_url: the base URL to check for outside links against.
         pattern: Regex to use for extracting links from raw HTML.
-        prevent_outside: If True
+        prevent_outside: If `True`, ignore external links which are not children
            of the base URL.
        exclude_prefixes: Exclude any URLs that start with one of these prefixes.
-        continue_on_failure: If True
+        continue_on_failure: If `True`, continue if parsing a specific link raises an
            exception. Otherwise, raise the exception.
 
    Returns:
-
+        sub links.
     """
     base_url_to_use = base_url if base_url is not None else url
     parsed_base_url = urlparse(base_url_to_use)
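
A short, hedged sketch of the two helpers with the keyword-only signatures shown above. The HTML snippet and URLs are placeholders; `find_all_links` deduplicates via a set, so output order is not guaranteed.

```python
from langchain_core.utils.html import extract_sub_links, find_all_links

raw_html = '<a href="/docs/intro">Intro</a> <a href="https://other.example/x">Ext</a>'

# All href targets matched by the default regex (order not guaranteed).
print(find_all_links(raw_html))

# Relative links are resolved against the page URL; with prevent_outside=True,
# links that do not live under the base URL are dropped.
print(extract_sub_links(raw_html, "https://example.com/docs/", prevent_outside=True))
# typically: ['https://example.com/docs/intro']
```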
langchain_core/utils/input.py CHANGED

@@ -1,6 +1,6 @@
 """Handle chained inputs."""
 
-from typing import
+from typing import TextIO
 
 _TEXT_COLOR_MAPPING = {
     "blue": "36;1",

@@ -12,7 +12,7 @@ _TEXT_COLOR_MAPPING = {
 
 
 def get_color_mapping(
-    items: list[str], excluded_colors:
+    items: list[str], excluded_colors: list | None = None
 ) -> dict[str, str]:
     """Get mapping for items to a support color.
 

@@ -56,7 +56,7 @@ def get_bolded_text(text: str) -> str:
 
 
 def print_text(
-    text: str, color:
+    text: str, color: str | None = None, end: str = "", file: TextIO | None = None
 ) -> None:
     """Print text with highlighting and no end characters.
 

@@ -65,9 +65,9 @@ def print_text(
 
     Args:
         text: The text to print.
-        color: The color to use.
-        end: The end character to use.
-        file: The file to write to.
+        color: The color to use.
+        end: The end character to use.
+        file: The file to write to.
     """
     text_to_print = get_colored_text(text, color) if color else text
     print(text_to_print, end=end, file=file)
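
For reference, a small sketch using the updated signatures. The item names are placeholders; color names come from the `_TEXT_COLOR_MAPPING` keys shown above.

```python
from langchain_core.utils.input import get_color_mapping, print_text

# Assign each item one of the supported ANSI color names, skipping "green".
mapping = get_color_mapping(["search", "calculator"], excluded_colors=["green"])

for name, color in mapping.items():
    # print_text highlights the text and uses end="" by default, so add "\n" manually.
    print_text(f"{name}\n", color=color)
```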
langchain_core/utils/interactive_env.py CHANGED

@@ -7,6 +7,6 @@ def is_interactive_env() -> bool:
     """Determine if running within IPython or Jupyter.
 
     Returns:
-        True if running in an interactive environment, False otherwise.
+        True if running in an interactive environment, `False` otherwise.
     """
     return hasattr(sys, "ps2")