langchain-core 1.0.0a5__py3-none-any.whl → 1.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/__init__.py +1 -1
- langchain_core/_api/__init__.py +3 -4
- langchain_core/_api/beta_decorator.py +23 -26
- langchain_core/_api/deprecation.py +51 -64
- langchain_core/_api/path.py +3 -6
- langchain_core/_import_utils.py +3 -4
- langchain_core/agents.py +20 -22
- langchain_core/caches.py +65 -66
- langchain_core/callbacks/__init__.py +1 -8
- langchain_core/callbacks/base.py +321 -336
- langchain_core/callbacks/file.py +44 -44
- langchain_core/callbacks/manager.py +436 -513
- langchain_core/callbacks/stdout.py +29 -30
- langchain_core/callbacks/streaming_stdout.py +32 -32
- langchain_core/callbacks/usage.py +60 -57
- langchain_core/chat_history.py +53 -68
- langchain_core/document_loaders/base.py +27 -25
- langchain_core/document_loaders/blob_loaders.py +1 -1
- langchain_core/document_loaders/langsmith.py +44 -48
- langchain_core/documents/__init__.py +23 -3
- langchain_core/documents/base.py +98 -90
- langchain_core/documents/compressor.py +10 -10
- langchain_core/documents/transformers.py +34 -35
- langchain_core/embeddings/fake.py +50 -54
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/example_selectors/semantic_similarity.py +28 -32
- langchain_core/exceptions.py +21 -20
- langchain_core/globals.py +3 -151
- langchain_core/indexing/__init__.py +1 -1
- langchain_core/indexing/api.py +121 -126
- langchain_core/indexing/base.py +73 -75
- langchain_core/indexing/in_memory.py +4 -6
- langchain_core/language_models/__init__.py +14 -29
- langchain_core/language_models/_utils.py +58 -61
- langchain_core/language_models/base.py +53 -162
- langchain_core/language_models/chat_models.py +298 -387
- langchain_core/language_models/fake.py +11 -11
- langchain_core/language_models/fake_chat_models.py +42 -36
- langchain_core/language_models/llms.py +125 -235
- langchain_core/load/dump.py +9 -12
- langchain_core/load/load.py +18 -28
- langchain_core/load/mapping.py +2 -4
- langchain_core/load/serializable.py +42 -40
- langchain_core/messages/__init__.py +10 -16
- langchain_core/messages/ai.py +148 -148
- langchain_core/messages/base.py +58 -52
- langchain_core/messages/block_translators/__init__.py +27 -17
- langchain_core/messages/block_translators/anthropic.py +6 -6
- langchain_core/messages/block_translators/bedrock_converse.py +5 -5
- langchain_core/messages/block_translators/google_genai.py +505 -20
- langchain_core/messages/block_translators/google_vertexai.py +4 -32
- langchain_core/messages/block_translators/groq.py +117 -21
- langchain_core/messages/block_translators/langchain_v0.py +5 -5
- langchain_core/messages/block_translators/openai.py +11 -11
- langchain_core/messages/chat.py +2 -6
- langchain_core/messages/content.py +337 -328
- langchain_core/messages/function.py +6 -10
- langchain_core/messages/human.py +24 -31
- langchain_core/messages/modifier.py +2 -2
- langchain_core/messages/system.py +19 -29
- langchain_core/messages/tool.py +74 -90
- langchain_core/messages/utils.py +474 -504
- langchain_core/output_parsers/__init__.py +13 -10
- langchain_core/output_parsers/base.py +61 -61
- langchain_core/output_parsers/format_instructions.py +9 -4
- langchain_core/output_parsers/json.py +12 -10
- langchain_core/output_parsers/list.py +21 -23
- langchain_core/output_parsers/openai_functions.py +49 -47
- langchain_core/output_parsers/openai_tools.py +16 -21
- langchain_core/output_parsers/pydantic.py +13 -14
- langchain_core/output_parsers/string.py +5 -5
- langchain_core/output_parsers/transform.py +15 -17
- langchain_core/output_parsers/xml.py +35 -34
- langchain_core/outputs/__init__.py +1 -1
- langchain_core/outputs/chat_generation.py +18 -18
- langchain_core/outputs/chat_result.py +1 -3
- langchain_core/outputs/generation.py +10 -11
- langchain_core/outputs/llm_result.py +10 -10
- langchain_core/prompt_values.py +11 -17
- langchain_core/prompts/__init__.py +3 -27
- langchain_core/prompts/base.py +48 -56
- langchain_core/prompts/chat.py +275 -325
- langchain_core/prompts/dict.py +5 -5
- langchain_core/prompts/few_shot.py +81 -88
- langchain_core/prompts/few_shot_with_templates.py +11 -13
- langchain_core/prompts/image.py +12 -14
- langchain_core/prompts/loading.py +4 -6
- langchain_core/prompts/message.py +3 -3
- langchain_core/prompts/prompt.py +24 -39
- langchain_core/prompts/string.py +26 -10
- langchain_core/prompts/structured.py +49 -53
- langchain_core/rate_limiters.py +51 -60
- langchain_core/retrievers.py +61 -198
- langchain_core/runnables/base.py +1478 -1630
- langchain_core/runnables/branch.py +53 -57
- langchain_core/runnables/config.py +72 -89
- langchain_core/runnables/configurable.py +120 -137
- langchain_core/runnables/fallbacks.py +83 -79
- langchain_core/runnables/graph.py +91 -97
- langchain_core/runnables/graph_ascii.py +27 -28
- langchain_core/runnables/graph_mermaid.py +38 -50
- langchain_core/runnables/graph_png.py +15 -16
- langchain_core/runnables/history.py +135 -148
- langchain_core/runnables/passthrough.py +124 -150
- langchain_core/runnables/retry.py +46 -51
- langchain_core/runnables/router.py +25 -30
- langchain_core/runnables/schema.py +75 -80
- langchain_core/runnables/utils.py +60 -67
- langchain_core/stores.py +85 -121
- langchain_core/structured_query.py +8 -8
- langchain_core/sys_info.py +27 -29
- langchain_core/tools/__init__.py +1 -14
- langchain_core/tools/base.py +285 -229
- langchain_core/tools/convert.py +160 -155
- langchain_core/tools/render.py +10 -10
- langchain_core/tools/retriever.py +12 -11
- langchain_core/tools/simple.py +19 -24
- langchain_core/tools/structured.py +32 -39
- langchain_core/tracers/__init__.py +1 -9
- langchain_core/tracers/base.py +97 -99
- langchain_core/tracers/context.py +29 -52
- langchain_core/tracers/core.py +49 -53
- langchain_core/tracers/evaluation.py +11 -11
- langchain_core/tracers/event_stream.py +65 -64
- langchain_core/tracers/langchain.py +21 -21
- langchain_core/tracers/log_stream.py +45 -45
- langchain_core/tracers/memory_stream.py +3 -3
- langchain_core/tracers/root_listeners.py +16 -16
- langchain_core/tracers/run_collector.py +2 -4
- langchain_core/tracers/schemas.py +0 -129
- langchain_core/tracers/stdout.py +3 -3
- langchain_core/utils/__init__.py +1 -4
- langchain_core/utils/_merge.py +2 -2
- langchain_core/utils/aiter.py +57 -61
- langchain_core/utils/env.py +9 -9
- langchain_core/utils/function_calling.py +89 -186
- langchain_core/utils/html.py +7 -8
- langchain_core/utils/input.py +6 -6
- langchain_core/utils/interactive_env.py +1 -1
- langchain_core/utils/iter.py +36 -40
- langchain_core/utils/json.py +4 -3
- langchain_core/utils/json_schema.py +9 -9
- langchain_core/utils/mustache.py +8 -10
- langchain_core/utils/pydantic.py +33 -35
- langchain_core/utils/strings.py +6 -9
- langchain_core/utils/usage.py +1 -1
- langchain_core/utils/utils.py +66 -62
- langchain_core/vectorstores/base.py +182 -216
- langchain_core/vectorstores/in_memory.py +101 -176
- langchain_core/vectorstores/utils.py +5 -5
- langchain_core/version.py +1 -1
- langchain_core-1.0.3.dist-info/METADATA +69 -0
- langchain_core-1.0.3.dist-info/RECORD +172 -0
- {langchain_core-1.0.0a5.dist-info → langchain_core-1.0.3.dist-info}/WHEEL +1 -1
- langchain_core/memory.py +0 -120
- langchain_core/messages/block_translators/ollama.py +0 -47
- langchain_core/prompts/pipeline.py +0 -138
- langchain_core/pydantic_v1/__init__.py +0 -30
- langchain_core/pydantic_v1/dataclasses.py +0 -23
- langchain_core/pydantic_v1/main.py +0 -23
- langchain_core/tracers/langchain_v1.py +0 -31
- langchain_core/utils/loading.py +0 -35
- langchain_core-1.0.0a5.dist-info/METADATA +0 -77
- langchain_core-1.0.0a5.dist-info/RECORD +0 -181
- langchain_core-1.0.0a5.dist-info/entry_points.txt +0 -4
langchain_core/utils/function_calling.py CHANGED

@@ -8,13 +8,12 @@ import logging
 import types
 import typing
 import uuid
+from collections.abc import Callable
 from typing import (
     TYPE_CHECKING,
     Annotated,
     Any,
-    Callable,
     Literal,
-    Optional,
     Union,
     cast,
     get_args,
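A pattern that repeats across nearly every hunk in this release is the move from `typing.Optional`/`typing.Union` (and `typing.Callable`) to PEP 604 unions and `collections.abc.Callable`. A minimal before/after sketch of that style change (illustrative code, not taken from the package):

```python
# 1.0.0a5 style: typing.Optional / typing.Callable
from typing import Callable, Optional


def describe(name: Optional[str] = None, fmt: Optional[Callable[[str], str]] = None) -> str:
    return (fmt or str)(name or "anonymous")


# 1.0.3 style: PEP 604 unions and collections.abc.Callable
from collections.abc import Callable  # re-imported here only to contrast the two styles


def describe_new(name: str | None = None, fmt: Callable[[str], str] | None = None) -> str:
    return (fmt or str)(name or "anonymous")


print(describe("Ada"), describe_new("Ada"))
```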
@@ -28,7 +27,7 @@ from pydantic.v1 import create_model as create_model_v1
 from typing_extensions import TypedDict, is_typeddict
 
 import langchain_core
-from langchain_core._api import beta
+from langchain_core._api import beta
 from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
 from langchain_core.utils.json_schema import dereference_refs
 from langchain_core.utils.pydantic import is_basemodel_subclass
@@ -73,11 +72,11 @@ def _rm_titles(kv: dict, prev_key: str = "") -> dict:
     except when a "title" appears within a property definition under "properties".
 
     Args:
-        kv
-        prev_key
+        kv: The input JSON schema as a dictionary.
+        prev_key: The key from the parent dictionary, used to identify context.
 
     Returns:
-
+        A new dictionary with appropriate "title" fields removed.
     """
     new_kv = {}
 
@@ -103,8 +102,8 @@ def _rm_titles(kv: dict, prev_key: str = "") -> dict:
 def _convert_json_schema_to_openai_function(
     schema: dict,
     *,
-    name:
-    description:
+    name: str | None = None,
+    description: str | None = None,
     rm_titles: bool = True,
 ) -> FunctionDescription:
     """Converts a Pydantic model to a function description for the OpenAI API.
@@ -115,7 +114,7 @@ def _convert_json_schema_to_openai_function(
             used.
         description: The description of the function. If not provided, the description
             of the schema will be used.
-        rm_titles: Whether to remove titles from the schema.
+        rm_titles: Whether to remove titles from the schema.
 
     Returns:
         The function description.
@@ -137,8 +136,8 @@ def _convert_json_schema_to_openai_function(
 def _convert_pydantic_to_openai_function(
     model: type,
     *,
-    name:
-    description:
+    name: str | None = None,
+    description: str | None = None,
     rm_titles: bool = True,
 ) -> FunctionDescription:
     """Converts a Pydantic model to a function description for the OpenAI API.
@@ -149,7 +148,7 @@ def _convert_pydantic_to_openai_function(
             used.
         description: The description of the function. If not provided, the description
             of the schema will be used.
-        rm_titles: Whether to remove titles from the schema.
+        rm_titles: Whether to remove titles from the schema.
 
     Raises:
         TypeError: If the model is not a Pydantic model.
@@ -169,42 +168,6 @@ def _convert_pydantic_to_openai_function(
     )
 
 
-convert_pydantic_to_openai_function = deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
-    removal="1.0",
-)(_convert_pydantic_to_openai_function)
-
-
-@deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_tool()",
-    removal="1.0",
-)
-def convert_pydantic_to_openai_tool(
-    model: type[BaseModel],
-    *,
-    name: Optional[str] = None,
-    description: Optional[str] = None,
-) -> ToolDescription:
-    """Converts a Pydantic model to a function description for the OpenAI API.
-
-    Args:
-        model: The Pydantic model to convert.
-        name: The name of the function. If not provided, the title of the schema will be
-            used.
-        description: The description of the function. If not provided, the description
-            of the schema will be used.
-
-    Returns:
-        The tool description.
-    """
-    function = _convert_pydantic_to_openai_function(
-        model, name=name, description=description
-    )
-    return {"type": "function", "function": function}
-
-
 def _get_python_function_name(function: Callable) -> str:
     """Get the name of a Python function."""
     return function.__name__
@@ -241,13 +204,6 @@ def _convert_python_function_to_openai_function(
     )
 
 
-convert_python_function_to_openai_function = deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
-    removal="1.0",
-)(_convert_python_function_to_openai_function)
-
-
 def _convert_typed_dict_to_openai_function(typed_dict: type) -> FunctionDescription:
     visited: dict = {}
 
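The aliases removed above had been deprecated since 0.1.16, and their `alternative=` arguments point at the generic converters that remain public (`convert_to_openai_function` and `convert_to_openai_tool`; a later hunk removes `format_tool_to_openai_function`/`format_tool_to_openai_tool` the same way). A minimal migration sketch, with an illustrative `Person` model:

```python
from pydantic import BaseModel, Field
from langchain_core.utils.function_calling import (
    convert_to_openai_function,
    convert_to_openai_tool,
)


class Person(BaseModel):
    """Information about a person."""

    name: str = Field(description="The person's name")


# Removed in 1.0: convert_pydantic_to_openai_function(Person) / convert_pydantic_to_openai_tool(Person)
function_spec = convert_to_openai_function(Person)  # {"name": "Person", ...}
tool_spec = convert_to_openai_tool(Person)  # {"type": "function", "function": {...}}
print(tool_spec["type"], function_spec["name"])
```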
@@ -285,7 +241,9 @@ def _convert_any_typed_dicts_to_pydantic(
             new_arg_type = _convert_any_typed_dicts_to_pydantic(
                 annotated_args[0], depth=depth + 1, visited=visited
             )
-            field_kwargs = dict(
+            field_kwargs = dict(
+                zip(("default", "description"), annotated_args[1:], strict=False)
+            )
             if (field_desc := field_kwargs.get("description")) and not isinstance(
                 field_desc, str
             ):
@@ -367,48 +325,23 @@ def _format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription:
     }
 
 
-format_tool_to_openai_function = deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
-    removal="1.0",
-)(_format_tool_to_openai_function)
-
-
-@deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_tool()",
-    removal="1.0",
-)
-def format_tool_to_openai_tool(tool: BaseTool) -> ToolDescription:
-    """Format tool into the OpenAI function API.
-
-    Args:
-        tool: The tool to format.
-
-    Returns:
-        The tool description.
-    """
-    function = _format_tool_to_openai_function(tool)
-    return {"type": "function", "function": function}
-
-
 def convert_to_openai_function(
-    function:
+    function: dict[str, Any] | type | Callable | BaseTool,
     *,
-    strict:
+    strict: bool | None = None,
 ) -> dict[str, Any]:
     """Convert a raw function/class to an OpenAI function.
 
     Args:
         function:
-            A dictionary, Pydantic BaseModel class, TypedDict class, a LangChain
-            Tool object, or a Python function. If a dictionary is passed in, it is
+            A dictionary, Pydantic `BaseModel` class, `TypedDict` class, a LangChain
+            `Tool` object, or a Python function. If a dictionary is passed in, it is
             assumed to already be a valid OpenAI function, a JSON schema with
-            top-level
-
+            top-level `title` key specified, an Anthropic format tool, or an Amazon
+            Bedrock Converse format tool.
         strict:
-            If True
-            provided in the function definition. If None
+            If `True`, model output is guaranteed to exactly match the JSON Schema
+            provided in the function definition. If `None`, `strict` argument will not
             be included in function definition.
 
     Returns:
@@ -418,21 +351,8 @@ def convert_to_openai_function(
     Raises:
         ValueError: If function is not in a supported format.
 
-
-
-        ``strict`` arg added.
-
-    .. versionchanged:: 0.3.13
-
-        Support for Anthropic format tools added.
-
-    .. versionchanged:: 0.3.14
-
-        Support for Amazon Bedrock Converse format tools added.
-
-    .. versionchanged:: 0.3.16
-
-        'description' and 'parameters' keys are now optional. Only 'name' is
+    !!! warning "Behavior changed in 0.3.16"
+        `description` and `parameters` keys are now optional. Only `name` is
         required and guaranteed to be part of the output.
     """
     # an Anthropic format tool
@@ -505,6 +425,14 @@ def convert_to_openai_function(
         oai_function["parameters"] = _recursive_set_additional_properties_false(
             oai_function["parameters"]
         )
+        # All fields must be `required`
+        parameters = oai_function.get("parameters")
+        if isinstance(parameters, dict):
+            fields = parameters.get("properties")
+            if isinstance(fields, dict) and fields:
+                parameters = dict(parameters)
+                parameters["required"] = list(fields.keys())
+                oai_function["parameters"] = parameters
     return oai_function
 
 
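The newly added block above tightens `strict` handling: in addition to forcing `additionalProperties` to `False`, 1.0.3 now lists every declared property under `required`. A small sketch of the expected effect, assuming an illustrative `Person` model (printed values are indicative, not a transcript of the package's tests):

```python
from pydantic import BaseModel, Field
from langchain_core.utils.function_calling import convert_to_openai_function


class Person(BaseModel):
    """Information about a person."""

    name: str = Field(description="The person's name")
    age: int | None = Field(default=None, description="Age in years, if known")


spec = convert_to_openai_function(Person, strict=True)
params = spec["parameters"]
# With strict=True the schema is tightened for structured outputs (expected, sketch):
print(spec.get("strict"))                  # True
print(params.get("additionalProperties"))  # False
print(sorted(params["required"]))          # ['age', 'name'] (all fields, even optional ones)
```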
@@ -524,60 +452,39 @@ _WellKnownOpenAITools = (
 
 
 def convert_to_openai_tool(
-    tool:
+    tool: dict[str, Any] | type[BaseModel] | Callable | BaseTool,
     *,
-    strict:
+    strict: bool | None = None,
 ) -> dict[str, Any]:
     """Convert a tool-like object to an OpenAI tool schema.
 
-    OpenAI tool schema reference
-    https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools
+    [OpenAI tool schema reference](https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools)
 
     Args:
         tool:
-            Either a dictionary, a pydantic.BaseModel class, Python function, or
-            BaseTool
-
-
-            tool, or an Amazon Bedrock Converse format tool.
+            Either a dictionary, a `pydantic.BaseModel` class, Python function, or
+            `BaseTool`. If a dictionary is passed in, it is assumed to already be a
+            valid OpenAI function, a JSON schema with top-level `title` key specified,
+            an Anthropic format tool, or an Amazon Bedrock Converse format tool.
         strict:
-            If True
-            provided in the function definition. If None
+            If `True`, model output is guaranteed to exactly match the JSON Schema
+            provided in the function definition. If `None`, `strict` argument will not
             be included in tool definition.
 
     Returns:
         A dict version of the passed in tool which is compatible with the
         OpenAI tool-calling API.
 
-
-
-        ``strict`` arg added.
-
-    .. versionchanged:: 0.3.13
-
-        Support for Anthropic format tools added.
-
-    .. versionchanged:: 0.3.14
-
-        Support for Amazon Bedrock Converse format tools added.
-
-    .. versionchanged:: 0.3.16
-
-        'description' and 'parameters' keys are now optional. Only 'name' is
+    !!! warning "Behavior changed in 0.3.16"
+        `description` and `parameters` keys are now optional. Only `name` is
         required and guaranteed to be part of the output.
 
-
-
+    !!! warning "Behavior changed in 0.3.44"
         Return OpenAI Responses API-style tools unchanged. This includes
-        any dict with "type" in "file_search"
-        "web_search_preview"
-
-    .. versionchanged:: 0.3.61
-
-        Added support for OpenAI's built-in code interpreter and remote MCP tools.
-
-    .. versionchanged:: 0.3.63
+        any dict with `"type"` in `"file_search"`, `"function"`,
+        `"computer_use_preview"`, `"web_search_preview"`.
 
+    !!! warning "Behavior changed in 0.3.63"
         Added support for OpenAI's image generation built-in tool.
     """
     # Import locally to prevent circular import
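For context on the reworked `convert_to_openai_tool` docstring, a typical call turns a plain Python function into an OpenAI tool schema. The `multiply` function below is illustrative:

```python
from langchain_core.utils.function_calling import convert_to_openai_tool


def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b


tool = convert_to_openai_tool(multiply)
# Expected shape (sketch): {"type": "function", "function": {"name": "multiply", ...}}
print(tool["type"], tool["function"]["name"])
```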
@@ -603,16 +510,16 @@ def convert_to_openai_tool(
 
 
 def convert_to_json_schema(
-    schema:
+    schema: dict[str, Any] | type[BaseModel] | Callable | BaseTool,
     *,
-    strict:
+    strict: bool | None = None,
 ) -> dict[str, Any]:
     """Convert a schema representation to a JSON schema.
 
     Args:
         schema: The schema to convert.
-        strict: If True
-            provided in the function definition. If None
+        strict: If `True`, model output is guaranteed to exactly match the JSON Schema
+            provided in the function definition. If `None`, `strict` argument will not
             be included in function definition.
 
     Raises:
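`convert_to_json_schema` accepts the same inputs as the converters above but returns a bare JSON schema rather than an OpenAI function/tool wrapper. A minimal, hedged sketch (the `Search` model is illustrative):

```python
from pydantic import BaseModel
from langchain_core.utils.function_calling import convert_to_json_schema


class Search(BaseModel):
    """Search for information on a topic."""

    query: str


schema = convert_to_json_schema(Search)
print(schema)  # a plain JSON Schema dict describing the Search model
```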
@@ -648,9 +555,9 @@ def convert_to_json_schema(
 def tool_example_to_messages(
     input: str,
     tool_calls: list[BaseModel],
-    tool_outputs:
+    tool_outputs: list[str] | None = None,
     *,
-    ai_response:
+    ai_response: str | None = None,
 ) -> list[BaseMessage]:
     """Convert an example into a list of messages that can be fed into an LLM.
 
@@ -659,15 +566,15 @@ def tool_example_to_messages(
 
     The list of messages per example by default corresponds to:
 
-    1.
-    2.
-    3.
-
+    1. `HumanMessage`: contains the content from which content should be extracted.
+    2. `AIMessage`: contains the extracted information from the model
+    3. `ToolMessage`: contains confirmation to the model that the model requested a
+        tool correctly.
 
-    If
+    If `ai_response` is specified, there will be a final `AIMessage` with that
     response.
 
-    The
+    The `ToolMessage` is required because some chat models are hyper-optimized for
     agents rather than for an extraction use case.
 
     Args:
@@ -675,50 +582,46 @@ def tool_example_to_messages(
         tool_calls: Tool calls represented as Pydantic BaseModels
         tool_outputs: Tool call outputs.
             Does not need to be provided. If not provided, a placeholder value
-            will be inserted.
-        ai_response: If provided, content for a final
+            will be inserted.
+        ai_response: If provided, content for a final `AIMessage`.
 
     Returns:
         A list of messages
 
     Examples:
+        ```python
+        from typing import Optional
+        from pydantic import BaseModel, Field
+        from langchain_openai import ChatOpenAI
 
-        .. code-block:: python
-
-            from typing import Optional
-            from pydantic import BaseModel, Field
-            from langchain_openai import ChatOpenAI
 
+        class Person(BaseModel):
+            '''Information about a person.'''
 
-
-
-
-
-
-                ..., description="The color of the person's hair if known"
-            )
-            height_in_meters: Optional[str] = Field(
-                ..., description="Height in METERS"
-            )
-
+            name: str | None = Field(..., description="The name of the person")
+            hair_color: str | None = Field(
+                ..., description="The color of the person's hair if known"
+            )
+            height_in_meters: str | None = Field(..., description="Height in METERS")
 
-            examples = [
-                (
-                    "The ocean is vast and blue. It's more than 20,000 feet deep.",
-                    Person(name=None, height_in_meters=None, hair_color=None),
-                ),
-                (
-                    "Fiona traveled far from France to Spain.",
-                    Person(name="Fiona", height_in_meters=None, hair_color=None),
-                ),
-            ]
 
+        examples = [
+            (
+                "The ocean is vast and blue. It's more than 20,000 feet deep.",
+                Person(name=None, height_in_meters=None, hair_color=None),
+            ),
+            (
+                "Fiona traveled far from France to Spain.",
+                Person(name="Fiona", height_in_meters=None, hair_color=None),
+            ),
+        ]
 
-            messages = []
 
-
-            messages.extend(tool_example_to_messages(txt, [tool_call]))
+        messages = []
 
+        for txt, tool_call in examples:
+            messages.extend(tool_example_to_messages(txt, [tool_call]))
+        ```
     """
     messages: list[BaseMessage] = [HumanMessage(content=input)]
     openai_tool_calls = [
@@ -727,7 +630,7 @@ def tool_example_to_messages(
             "type": "function",
             "function": {
                 # The name of the function right now corresponds to the name
-                # of the
+                # of the Pydantic model. This is implicit in the API right now,
                 # and will be improved over time.
                 "name": tool_call.__class__.__name__,
                 "arguments": tool_call.model_dump_json(),
@@ -742,7 +645,7 @@ def tool_example_to_messages(
     tool_outputs = tool_outputs or ["You have correctly called this tool."] * len(
        openai_tool_calls
    )
-    for output, tool_call_dict in zip(tool_outputs, openai_tool_calls):
+    for output, tool_call_dict in zip(tool_outputs, openai_tool_calls, strict=False):
         messages.append(ToolMessage(content=output, tool_call_id=tool_call_dict["id"]))
 
     if ai_response:
@@ -751,7 +654,7 @@ def tool_example_to_messages(
 
 
 def _parse_google_docstring(
-    docstring:
+    docstring: str | None,
     args: list[str],
     *,
     error_on_invalid_docstring: bool = False,
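The docstring example above shows how to build few-shot messages but not what comes back. A short sketch of the resulting message sequence, assuming an illustrative `Add` model:

```python
from pydantic import BaseModel
from langchain_core.utils.function_calling import tool_example_to_messages


class Add(BaseModel):
    """Add two integers."""

    a: int
    b: int


msgs = tool_example_to_messages("What is 1 + 2?", [Add(a=1, b=2)], ai_response="3")
print([type(m).__name__ for m in msgs])
# Expected (sketch): ['HumanMessage', 'AIMessage', 'ToolMessage', 'AIMessage']
```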
langchain_core/utils/html.py CHANGED

@@ -3,7 +3,6 @@
 import logging
 import re
 from collections.abc import Sequence
-from typing import Optional, Union
 from urllib.parse import urljoin, urlparse
 
 logger = logging.getLogger(__name__)
@@ -35,7 +34,7 @@ DEFAULT_LINK_REGEX = (
 
 
 def find_all_links(
-    raw_html: str, *, pattern:
+    raw_html: str, *, pattern: str | re.Pattern | None = None
 ) -> list[str]:
     """Extract all links from a raw HTML string.
 
@@ -44,7 +43,7 @@ def find_all_links(
         pattern: Regex to use for extracting links from raw HTML.
 
     Returns:
-
+        all links
     """
     pattern = pattern or DEFAULT_LINK_REGEX
     return list(set(re.findall(pattern, raw_html)))
@@ -54,8 +53,8 @@ def extract_sub_links(
     raw_html: str,
     url: str,
     *,
-    base_url:
-    pattern:
+    base_url: str | None = None,
+    pattern: str | re.Pattern | None = None,
     prevent_outside: bool = True,
     exclude_prefixes: Sequence[str] = (),
     continue_on_failure: bool = False,
@@ -67,14 +66,14 @@ def extract_sub_links(
         url: the url of the HTML.
         base_url: the base URL to check for outside links against.
         pattern: Regex to use for extracting links from raw HTML.
-        prevent_outside: If True
+        prevent_outside: If `True`, ignore external links which are not children
            of the base URL.
        exclude_prefixes: Exclude any URLs that start with one of these prefixes.
-        continue_on_failure: If True
+        continue_on_failure: If `True`, continue if parsing a specific link raises an
            exception. Otherwise, raise the exception.
 
     Returns:
-
+        sub links.
     """
     base_url_to_use = base_url if base_url is not None else url
     parsed_base_url = urlparse(base_url_to_use)
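For reference, the two public helpers touched in this file are used roughly as follows (the HTML snippet and URLs are illustrative):

```python
from langchain_core.utils.html import extract_sub_links, find_all_links

html = '<a href="/docs/intro">Intro</a> <a href="https://example.org/away">Away</a>'

print(find_all_links(html))
# e.g. ['/docs/intro', 'https://example.org/away'] (order not guaranteed)

print(extract_sub_links(html, "https://example.com/docs/"))
# With the default prevent_outside=True, only links under the base URL remain,
# e.g. ['https://example.com/docs/intro']
```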
langchain_core/utils/input.py CHANGED

@@ -1,6 +1,6 @@
 """Handle chained inputs."""
 
-from typing import
+from typing import TextIO
 
 _TEXT_COLOR_MAPPING = {
     "blue": "36;1",
@@ -12,7 +12,7 @@ _TEXT_COLOR_MAPPING = {
 
 
 def get_color_mapping(
-    items: list[str], excluded_colors:
+    items: list[str], excluded_colors: list | None = None
 ) -> dict[str, str]:
     """Get mapping for items to a support color.
 
@@ -56,7 +56,7 @@ def get_bolded_text(text: str) -> str:
 
 
 def print_text(
-    text: str, color:
+    text: str, color: str | None = None, end: str = "", file: TextIO | None = None
 ) -> None:
     """Print text with highlighting and no end characters.
 
@@ -65,9 +65,9 @@ def print_text(
 
     Args:
         text: The text to print.
-        color: The color to use.
-        end: The end character to use.
-        file: The file to write to.
+        color: The color to use.
+        end: The end character to use.
+        file: The file to write to.
     """
     text_to_print = get_colored_text(text, color) if color else text
     print(text_to_print, end=end, file=file)
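A short usage sketch for the helpers whose signatures changed above (the step names are illustrative):

```python
from langchain_core.utils.input import get_color_mapping, print_text

steps = ["retrieve", "summarize", "answer"]
colors = get_color_mapping(steps)  # e.g. {'retrieve': 'blue', 'summarize': 'yellow', ...}

for step in steps:
    print_text(f"{step}\n", color=colors[step])
```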
langchain_core/utils/interactive_env.py CHANGED

@@ -7,6 +7,6 @@ def is_interactive_env() -> bool:
     """Determine if running within IPython or Jupyter.
 
     Returns:
-        True if running in an interactive environment, False otherwise.
+        True if running in an interactive environment, `False` otherwise.
     """
     return hasattr(sys, "ps2")
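A minimal usage sketch for the helper touched here:

```python
from langchain_core.utils.interactive_env import is_interactive_env

if is_interactive_env():
    print("Running under IPython/Jupyter")
else:
    print("Running as a plain script")
```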
|