langchain-core 1.0.0rc1__py3-none-any.whl → 1.0.0rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/agents.py +3 -3
- langchain_core/caches.py +44 -48
- langchain_core/callbacks/base.py +5 -5
- langchain_core/callbacks/file.py +2 -2
- langchain_core/callbacks/stdout.py +1 -1
- langchain_core/chat_history.py +1 -1
- langchain_core/document_loaders/base.py +21 -21
- langchain_core/document_loaders/langsmith.py +2 -2
- langchain_core/documents/base.py +39 -39
- langchain_core/embeddings/fake.py +4 -2
- langchain_core/example_selectors/semantic_similarity.py +4 -6
- langchain_core/exceptions.py +3 -4
- langchain_core/indexing/api.py +8 -14
- langchain_core/language_models/__init__.py +11 -25
- langchain_core/language_models/_utils.py +2 -1
- langchain_core/language_models/base.py +7 -0
- langchain_core/language_models/chat_models.py +14 -16
- langchain_core/language_models/fake_chat_models.py +3 -3
- langchain_core/language_models/llms.py +4 -4
- langchain_core/load/dump.py +3 -4
- langchain_core/load/load.py +0 -9
- langchain_core/load/serializable.py +3 -3
- langchain_core/messages/ai.py +20 -22
- langchain_core/messages/base.py +8 -8
- langchain_core/messages/block_translators/__init__.py +1 -1
- langchain_core/messages/block_translators/anthropic.py +1 -1
- langchain_core/messages/block_translators/bedrock_converse.py +1 -1
- langchain_core/messages/block_translators/google_genai.py +3 -2
- langchain_core/messages/block_translators/google_vertexai.py +4 -32
- langchain_core/messages/block_translators/langchain_v0.py +1 -1
- langchain_core/messages/block_translators/openai.py +1 -1
- langchain_core/messages/chat.py +2 -6
- langchain_core/messages/content.py +34 -17
- langchain_core/messages/function.py +3 -7
- langchain_core/messages/human.py +4 -9
- langchain_core/messages/modifier.py +1 -1
- langchain_core/messages/system.py +2 -10
- langchain_core/messages/tool.py +30 -42
- langchain_core/messages/utils.py +24 -30
- langchain_core/output_parsers/base.py +24 -24
- langchain_core/output_parsers/json.py +0 -1
- langchain_core/output_parsers/list.py +1 -1
- langchain_core/output_parsers/openai_functions.py +2 -2
- langchain_core/output_parsers/openai_tools.py +4 -9
- langchain_core/output_parsers/string.py +1 -1
- langchain_core/outputs/generation.py +1 -1
- langchain_core/prompt_values.py +7 -7
- langchain_core/prompts/base.py +1 -1
- langchain_core/prompts/chat.py +12 -13
- langchain_core/prompts/dict.py +2 -2
- langchain_core/prompts/few_shot_with_templates.py +1 -1
- langchain_core/prompts/image.py +1 -1
- langchain_core/prompts/message.py +2 -2
- langchain_core/prompts/prompt.py +7 -8
- langchain_core/prompts/string.py +1 -1
- langchain_core/prompts/structured.py +2 -2
- langchain_core/rate_limiters.py +23 -29
- langchain_core/retrievers.py +29 -29
- langchain_core/runnables/base.py +9 -16
- langchain_core/runnables/branch.py +1 -1
- langchain_core/runnables/config.py +1 -1
- langchain_core/runnables/configurable.py +2 -2
- langchain_core/runnables/fallbacks.py +1 -1
- langchain_core/runnables/graph.py +23 -28
- langchain_core/runnables/graph_mermaid.py +9 -9
- langchain_core/runnables/graph_png.py +1 -1
- langchain_core/runnables/history.py +2 -2
- langchain_core/runnables/passthrough.py +3 -3
- langchain_core/runnables/router.py +1 -1
- langchain_core/runnables/utils.py +5 -5
- langchain_core/tools/base.py +9 -10
- langchain_core/tools/convert.py +13 -17
- langchain_core/tools/retriever.py +6 -6
- langchain_core/tools/simple.py +1 -1
- langchain_core/tools/structured.py +5 -10
- langchain_core/tracers/memory_stream.py +1 -1
- langchain_core/tracers/root_listeners.py +2 -2
- langchain_core/tracers/stdout.py +1 -2
- langchain_core/utils/__init__.py +1 -1
- langchain_core/utils/aiter.py +1 -1
- langchain_core/utils/function_calling.py +15 -38
- langchain_core/utils/input.py +1 -1
- langchain_core/utils/iter.py +1 -1
- langchain_core/utils/json.py +1 -1
- langchain_core/utils/strings.py +1 -1
- langchain_core/vectorstores/base.py +14 -25
- langchain_core/vectorstores/utils.py +2 -2
- langchain_core/version.py +1 -1
- {langchain_core-1.0.0rc1.dist-info → langchain_core-1.0.0rc2.dist-info}/METADATA +1 -1
- langchain_core-1.0.0rc2.dist-info/RECORD +172 -0
- langchain_core-1.0.0rc1.dist-info/RECORD +0 -172
- {langchain_core-1.0.0rc1.dist-info → langchain_core-1.0.0rc2.dist-info}/WHEEL +0 -0
langchain_core/runnables/config.py
CHANGED

@@ -136,7 +136,7 @@ def coro_with_context(
     Args:
         coro: The coroutine to await.
         context: The context to use.
-        create_task: Whether to create a task.
+        create_task: Whether to create a task.

     Returns:
         The coroutine with the context.

langchain_core/runnables/utils.py
CHANGED

@@ -558,7 +558,7 @@ class ConfigurableField(NamedTuple):
     annotation: Any | None = None
     """The annotation of the field. """
     is_shared: bool = False
-    """Whether the field is shared.
+    """Whether the field is shared."""

     @override
     def __hash__(self) -> int:

@@ -579,7 +579,7 @@ class ConfigurableFieldSingleOption(NamedTuple):
     description: str | None = None
     """The description of the field. """
     is_shared: bool = False
-    """Whether the field is shared.
+    """Whether the field is shared."""

     @override
     def __hash__(self) -> int:

@@ -600,7 +600,7 @@ class ConfigurableFieldMultiOption(NamedTuple):
     description: str | None = None
     """The description of the field. """
     is_shared: bool = False
-    """Whether the field is shared.
+    """Whether the field is shared."""

     @override
     def __hash__(self) -> int:

@@ -626,7 +626,7 @@ class ConfigurableFieldSpec(NamedTuple):
     default: Any = None
     """The default value for the field. """
     is_shared: bool = False
-    """Whether the field is shared.
+    """Whether the field is shared."""
     dependencies: list[str] | None = None
     """The dependencies of the field. """
langchain_core/tools/base.py
CHANGED
@@ -293,10 +293,9 @@ def create_schema_from_function(
         filter_args: Optional list of arguments to exclude from the schema.
             Defaults to `FILTERED_ARGS`.
         parse_docstring: Whether to parse the function's docstring for descriptions
-            for each argument.
+            for each argument.
         error_on_invalid_docstring: if `parse_docstring` is provided, configure
             whether to raise `ValueError` on invalid Google Style docstrings.
-            Defaults to `False`.
         include_injected: Whether to include injected arguments in the schema.
             Defaults to `True`, since we want to include them in the schema
             when *validating* tool inputs.
@@ -481,11 +480,11 @@ class ChildTool(BaseTool):
        """Handle the content of the ValidationError thrown."""

    response_format: Literal["content", "content_and_artifact"] = "content"
-    """The tool response format.
+    """The tool response format.

-    If "content" then the output of the tool is interpreted as the contents of a
-    ToolMessage. If "content_and_artifact" then the output is expected to be a
-    two-tuple corresponding to the (content, artifact) of a ToolMessage
+    If `"content"` then the output of the tool is interpreted as the contents of a
+    ToolMessage. If `"content_and_artifact"` then the output is expected to be a
+    two-tuple corresponding to the (content, artifact) of a `ToolMessage`.
    """

    def __init__(self, **kwargs: Any) -> None:
@@ -768,8 +767,8 @@ class ChildTool(BaseTool):
        Args:
            tool_input: The input to the tool.
            verbose: Whether to log the tool's progress.
-            start_color: The color to use when starting the tool.
-            color: The color to use when ending the tool.
+            start_color: The color to use when starting the tool.
+            color: The color to use when ending the tool.
            callbacks: Callbacks to be called during tool execution.
            tags: Optional list of tags associated with the tool.
            metadata: Optional metadata associated with the tool.

@@ -880,8 +879,8 @@ class ChildTool(BaseTool):
        Args:
            tool_input: The input to the tool.
            verbose: Whether to log the tool's progress.
-            start_color: The color to use when starting the tool.
-            color: The color to use when ending the tool.
+            start_color: The color to use when starting the tool.
+            color: The color to use when ending the tool.
            callbacks: Callbacks to be called during tool execution.
            tags: Optional list of tags associated with the tool.
            metadata: Optional metadata associated with the tool.
langchain_core/tools/convert.py
CHANGED
@@ -81,7 +81,7 @@ def tool(
    parse_docstring: bool = False,
    error_on_invalid_docstring: bool = True,
 ) -> BaseTool | Callable[[Callable | Runnable], BaseTool]:
-    """Make tools out of functions, can be used with or without arguments.
+    """Make tools out of Python functions, can be used with or without arguments.

    Args:
        name_or_callable: Optional name of the tool or the callable to be

@@ -93,30 +93,26 @@ def tool(

            - `description` argument
                (used even if docstring and/or `args_schema` are provided)
-            -
+            - Tool function docstring
                (used even if `args_schema` is provided)
            - `args_schema` description
                (used only if `description` / docstring are not provided)
        *args: Extra positional arguments. Must be empty.
        return_direct: Whether to return directly from the tool rather
-            than continuing the agent loop.
-        args_schema:
+            than continuing the agent loop.
+        args_schema: Optional argument schema for user to specify.

        infer_schema: Whether to infer the schema of the arguments from
            the function's signature. This also makes the resultant tool
            accept a dictionary input to its `run()` function.
-
-
-            the
-
-            corresponding to the (content, artifact) of a ToolMessage.
-            Defaults to "content".
+        response_format: The tool response format. If `"content"` then the output of
+            the tool is interpreted as the contents of a `ToolMessage`. If
+            `"content_and_artifact"` then the output is expected to be a two-tuple
+            corresponding to the `(content, artifact)` of a `ToolMessage`.
        parse_docstring: if `infer_schema` and `parse_docstring`, will attempt to
            parse parameter descriptions from Google Style function docstrings.
-            Defaults to `False`.
        error_on_invalid_docstring: if `parse_docstring` is provided, configure
-            whether to raise ValueError on invalid Google Style docstrings.
-            Defaults to `True`.
+            whether to raise `ValueError` on invalid Google Style docstrings.

    Raises:
        ValueError: If too many positional arguments are provided.
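For context on the `response_format` options documented above, here is a minimal sketch (assuming the current tool-call invocation behavior; the function and values are illustrative):

```python
from langchain_core.messages import ToolMessage
from langchain_core.tools import tool


@tool(response_format="content_and_artifact")
def search_numbers(query: str) -> tuple[str, list[int]]:
    """Return numbers matching the query."""
    numbers = [1, 2, 3]
    # First element becomes the ToolMessage content, second becomes the artifact.
    return f"Found {len(numbers)} numbers for {query!r}", numbers


# Invoking with a tool-call dict should yield a ToolMessage carrying the artifact.
msg = search_numbers.invoke(
    {"type": "tool_call", "name": "search_numbers", "args": {"query": "odd"}, "id": "1"}
)
assert isinstance(msg, ToolMessage)
print(msg.content)   # "Found 3 numbers for 'odd'"
print(msg.artifact)  # [1, 2, 3]
```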
@@ -124,8 +120,8 @@ def tool(
        ValueError: If the first argument is not a string or callable with
            a `__name__` attribute.
        ValueError: If the function does not have a docstring and description
-            is not provided and `infer_schema` is False
-        ValueError: If `parse_docstring` is True and the function has an invalid
+            is not provided and `infer_schema` is `False`.
+        ValueError: If `parse_docstring` is `True` and the function has an invalid
            Google-style docstring and `error_on_invalid_docstring` is True.
        ValueError: If a Runnable is provided that does not have an object schema.

@@ -133,7 +129,7 @@ def tool(
        The tool.

    Requires:
-        - Function must be of type (str) -> str
+        - Function must be of type `(str) -> str`
        - Function must have a docstring

    Examples:

@@ -197,7 +193,7 @@ def tool(
    Note that parsing by default will raise `ValueError` if the docstring
    is considered invalid. A docstring is considered invalid if it contains
    arguments not in the function signature, or is unable to be parsed into
-    a summary and "Args:" blocks. Examples below:
+    a summary and `"Args:"` blocks. Examples below:

    ```python
    # No args section
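As a quick, hedged illustration of the docstring parsing described above:

```python
from langchain_core.tools import tool


@tool(parse_docstring=True)
def add(a: int, b: int) -> int:
    """Add two integers.

    Args:
        a: The first integer.
        b: The second integer.
    """
    return a + b


print(add.description)  # summary line: "Add two integers."
print(add.args)         # argument schema; should carry the parsed per-argument descriptions
```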
langchain_core/tools/retriever.py
CHANGED

@@ -82,12 +82,12 @@ def create_retriever_tool(
        description: The description for the tool. This will be passed to the language
            model, so should be descriptive.
        document_prompt: The prompt to use for the document.
-        document_separator: The separator to use between documents.
-        response_format: The tool response format. If "content" then the output of
-            the tool is interpreted as the contents of a ToolMessage
-            "content_and_artifact" then the output is expected to be a two-tuple
-            corresponding to the (content, artifact) of a ToolMessage (artifact
-            being a list of documents in this case).
+        document_separator: The separator to use between documents.
+        response_format: The tool response format. If `"content"` then the output of
+            the tool is interpreted as the contents of a `ToolMessage`. If
+            `"content_and_artifact"` then the output is expected to be a two-tuple
+            corresponding to the `(content, artifact)` of a `ToolMessage` (artifact
+            being a list of documents in this case).

    Returns:
        Tool class to pass to an agent.
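A minimal, self-contained sketch of `create_retriever_tool` with the artifact-carrying response format (the in-memory store and fake embeddings are stand-ins for a real retriever setup):

```python
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.tools import create_retriever_tool
from langchain_core.vectorstores import InMemoryVectorStore

store = InMemoryVectorStore(DeterministicFakeEmbedding(size=16))
store.add_texts(["LangChain is a framework for LLM apps.", "Retrievers fetch documents."])

docs_tool = create_retriever_tool(
    store.as_retriever(search_kwargs={"k": 1}),
    name="search_notes",
    description="Search the project notes for relevant passages.",
    # With "content_and_artifact", the resulting ToolMessage.artifact is the Document list.
    response_format="content_and_artifact",
)
```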
langchain_core/tools/simple.py
CHANGED
@@ -176,7 +176,7 @@ class Tool(BaseTool):
            func: The function to create the tool from.
            name: The name of the tool.
            description: The description of the tool.
-            return_direct: Whether to return the output directly.
+            return_direct: Whether to return the output directly.
            args_schema: The schema of the tool's input arguments.
            coroutine: The asynchronous version of the function.
            **kwargs: Additional arguments to pass to the tool.
langchain_core/tools/structured.py
CHANGED

@@ -149,21 +149,16 @@ class StructuredTool(BaseTool):
            description: The description of the tool.
                Defaults to the function docstring.
            return_direct: Whether to return the result directly or as a callback.
-                Defaults to `False`.
            args_schema: The schema of the tool's input arguments.
            infer_schema: Whether to infer the schema from the function's signature.
-
-
-                the
-
-                corresponding to the (content, artifact) of a ToolMessage.
-                Defaults to "content".
+            response_format: The tool response format. If `"content"` then the output of
+                the tool is interpreted as the contents of a `ToolMessage`. If
+                `"content_and_artifact"` then the output is expected to be a two-tuple
+                corresponding to the `(content, artifact)` of a `ToolMessage`.
            parse_docstring: if `infer_schema` and `parse_docstring`, will attempt
                to parse parameter descriptions from Google Style function docstrings.
-                Defaults to `False`.
            error_on_invalid_docstring: if `parse_docstring` is provided, configure
-                whether to raise ValueError on invalid Google Style docstrings.
-                Defaults to `False`.
+                whether to raise `ValueError` on invalid Google Style docstrings.
            **kwargs: Additional arguments to pass to the tool

        Returns:
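The same options apply when building a tool explicitly with `StructuredTool.from_function`; a short sketch:

```python
from langchain_core.tools import StructuredTool


def multiply(a: int, b: int) -> int:
    """Multiply two integers.

    Args:
        a: The first factor.
        b: The second factor.
    """
    return a * b


calculator = StructuredTool.from_function(
    func=multiply,
    parse_docstring=True,            # pull per-argument descriptions from the docstring
    error_on_invalid_docstring=True,
)
print(calculator.invoke({"a": 6, "b": 7}))  # 42
```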
langchain_core/tracers/memory_stream.py
CHANGED

@@ -5,7 +5,7 @@ channel. The writer and reader can be in the same event loop or in different eve
 loops. When they're in different event loops, they will also be in different
 threads.

-
+Useful in situations when there's a mix of synchronous and asynchronous
 used in the code.
 """

langchain_core/tracers/root_listeners.py
CHANGED

@@ -24,7 +24,7 @@ class RootListenersTracer(BaseTracer):
    """Tracer that calls listeners on run start, end, and error."""

    log_missing_parent = False
-    """Whether to log a warning if the parent is missing.
+    """Whether to log a warning if the parent is missing."""

    def __init__(
        self,

@@ -79,7 +79,7 @@ class AsyncRootListenersTracer(AsyncBaseTracer):
    """Async Tracer that calls listeners on run start, end, and error."""

    log_missing_parent = False
-    """Whether to log a warning if the parent is missing.
+    """Whether to log a warning if the parent is missing."""

    def __init__(
        self,
langchain_core/tracers/stdout.py
CHANGED
@@ -49,8 +49,7 @@ class FunctionCallbackHandler(BaseTracer):
    """Tracer that calls a function with a single str parameter."""

    name: str = "function_callback_handler"
-    """The name of the tracer. This is used to identify the tracer in the logs.
-    Default is "function_callback_handler"."""
+    """The name of the tracer. This is used to identify the tracer in the logs."""

    def __init__(self, function: Callable[[str], None], **kwargs: Any) -> None:
        """Create a FunctionCallbackHandler.
langchain_core/utils/__init__.py
CHANGED
langchain_core/utils/aiter.py
CHANGED
langchain_core/utils/function_calling.py
CHANGED

@@ -114,7 +114,7 @@ def _convert_json_schema_to_openai_function(
            used.
        description: The description of the function. If not provided, the description
            of the schema will be used.
-        rm_titles: Whether to remove titles from the schema.
+        rm_titles: Whether to remove titles from the schema.

    Returns:
        The function description.

@@ -148,7 +148,7 @@ def _convert_pydantic_to_openai_function(
            used.
        description: The description of the function. If not provided, the description
            of the schema will be used.
-        rm_titles: Whether to remove titles from the schema.
+        rm_titles: Whether to remove titles from the schema.

    Raises:
        TypeError: If the model is not a Pydantic model.
@@ -334,11 +334,11 @@ def convert_to_openai_function(

    Args:
        function:
-            A dictionary, Pydantic BaseModel class, TypedDict class, a LangChain
-            Tool object, or a Python function. If a dictionary is passed in, it is
+            A dictionary, Pydantic `BaseModel` class, `TypedDict` class, a LangChain
+            `Tool` object, or a Python function. If a dictionary is passed in, it is
            assumed to already be a valid OpenAI function, a JSON schema with
-            top-level
-
+            top-level `title` key specified, an Anthropic format tool, or an Amazon
+            Bedrock Converse format tool.
        strict:
            If `True`, model output is guaranteed to exactly match the JSON Schema
            provided in the function definition. If `None`, `strict` argument will not

@@ -351,17 +351,8 @@ def convert_to_openai_function(
    Raises:
        ValueError: If function is not in a supported format.

-    !!! warning "Behavior changed in 0.2.29"
-        `strict` arg added.
-
-    !!! warning "Behavior changed in 0.3.13"
-        Support for Anthropic format tools added.
-
-    !!! warning "Behavior changed in 0.3.14"
-        Support for Amazon Bedrock Converse format tools added.
-
    !!! warning "Behavior changed in 0.3.16"
-
+        `description` and `parameters` keys are now optional. Only `name` is
        required and guaranteed to be part of the output.
    """
    # an Anthropic format tool
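For reference, a small sketch of `convert_to_openai_function` on a Pydantic model (the model itself is illustrative):

```python
from pydantic import BaseModel, Field

from langchain_core.utils.function_calling import convert_to_openai_function


class GetWeather(BaseModel):
    """Look up the current weather for a location."""

    location: str = Field(..., description="City and country, e.g. 'Paris, FR'")


fn = convert_to_openai_function(GetWeather, strict=True)
# A plain dict with at least a "name" key; "description" and "parameters" when available.
print(fn["name"])  # "GetWeather"
```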
@@ -459,16 +450,14 @@ def convert_to_openai_tool(
 ) -> dict[str, Any]:
    """Convert a tool-like object to an OpenAI tool schema.

-    OpenAI tool schema reference
-    https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools
+    [OpenAI tool schema reference](https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools)

    Args:
        tool:
-            Either a dictionary, a pydantic.BaseModel class, Python function, or
-            BaseTool
-
-
-            tool, or an Amazon Bedrock Converse format tool.
+            Either a dictionary, a `pydantic.BaseModel` class, Python function, or
+            `BaseTool`. If a dictionary is passed in, it is assumed to already be a
+            valid OpenAI function, a JSON schema with top-level `title` key specified,
+            an Anthropic format tool, or an Amazon Bedrock Converse format tool.
        strict:
            If `True`, model output is guaranteed to exactly match the JSON Schema
            provided in the function definition. If `None`, `strict` argument will not

@@ -478,26 +467,14 @@ def convert_to_openai_tool(
        A dict version of the passed in tool which is compatible with the
        OpenAI tool-calling API.

-    !!! warning "Behavior changed in 0.2.29"
-        `strict` arg added.
-
-    !!! warning "Behavior changed in 0.3.13"
-        Support for Anthropic format tools added.
-
-    !!! warning "Behavior changed in 0.3.14"
-        Support for Amazon Bedrock Converse format tools added.
-
    !!! warning "Behavior changed in 0.3.16"
-
+        `description` and `parameters` keys are now optional. Only `name` is
        required and guaranteed to be part of the output.

    !!! warning "Behavior changed in 0.3.44"
        Return OpenAI Responses API-style tools unchanged. This includes
-        any dict with "type" in "file_search"
-        "web_search_preview"
-
-    !!! warning "Behavior changed in 0.3.61"
-        Added support for OpenAI's built-in code interpreter and remote MCP tools.
+        any dict with `"type"` in `"file_search"`, `"function"`,
+        `"computer_use_preview"`, `"web_search_preview"`.

    !!! warning "Behavior changed in 0.3.63"
        Added support for OpenAI's image generation built-in tool.
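The tool-schema variant wraps the same output under a `{"type": "function", ...}` envelope; a hedged sketch:

```python
from langchain_core.tools import tool
from langchain_core.utils.function_calling import convert_to_openai_tool


@tool
def get_weather(location: str) -> str:
    """Look up the current weather for a location."""
    return f"Sunny in {location}"


oai_tool = convert_to_openai_tool(get_weather)
print(oai_tool["type"])              # "function"
print(oai_tool["function"]["name"])  # "get_weather"
```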
langchain_core/utils/input.py
CHANGED
@@ -66,7 +66,7 @@ def print_text(
    Args:
        text: The text to print.
        color: The color to use.
-        end: The end character to use.
+        end: The end character to use.
        file: The file to write to.
    """
    text_to_print = get_colored_text(text, color) if color else text
langchain_core/utils/iter.py
CHANGED
langchain_core/utils/json.py
CHANGED
@@ -51,7 +51,7 @@ def parse_partial_json(s: str, *, strict: bool = False) -> Any:

    Args:
        s: The JSON string to parse.
-        strict: Whether to use strict parsing.
+        strict: Whether to use strict parsing.

    Returns:
        The parsed JSON object as a Python dictionary.
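A quick sketch of `parse_partial_json` on a truncated fragment (the completed value shown is approximate):

```python
from langchain_core.utils.json import parse_partial_json

# A JSON fragment cut off mid-stream, e.g. partial model output.
fragment = '{"name": "langchain-core", "tags": ["llm", "runn'
print(parse_partial_json(fragment))
# Roughly: {'name': 'langchain-core', 'tags': ['llm', 'runn']}
```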
langchain_core/utils/strings.py
CHANGED
@@ -57,7 +57,7 @@ def sanitize_for_postgres(text: str, replacement: str = "") -> str:

    Args:
        text: The text to sanitize.
-        replacement: String to replace NUL bytes with.
+        replacement: String to replace NUL bytes with.

    Returns:
        The sanitized text with NUL bytes removed or replaced.
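And a one-line sketch of `sanitize_for_postgres`:

```python
from langchain_core.utils.strings import sanitize_for_postgres

raw = "before\x00after"
print(sanitize_for_postgres(raw))       # "beforeafter"  (NUL bytes stripped)
print(sanitize_for_postgres(raw, " "))  # "before after" (NUL bytes replaced)
```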
langchain_core/vectorstores/base.py
CHANGED

@@ -109,7 +109,7 @@ class VectorStore(ABC):
        """Delete by vector ID or other criteria.

        Args:
-            ids: List of ids to delete. If `None`, delete all.
+            ids: List of ids to delete. If `None`, delete all.
            **kwargs: Other keyword arguments that subclasses might use.

        Returns:

@@ -176,7 +176,7 @@ class VectorStore(ABC):
        """Async delete by vector ID or other criteria.

        Args:
-            ids: List of ids to delete. If `None`, delete all.
+            ids: List of ids to delete. If `None`, delete all.
            **kwargs: Other keyword arguments that subclasses might use.

        Returns:

@@ -197,7 +197,6 @@ class VectorStore(ABC):
        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
-                Default is None.
            ids: Optional list
            **kwargs: vectorstore specific parameters.

@@ -365,7 +364,7 @@ class VectorStore(ABC):

        Args:
            query: Input text.
-            k: Number of Documents to return.
+            k: Number of Documents to return.
            **kwargs: Arguments to pass to the search method.

        Returns:

@@ -462,7 +461,7 @@ class VectorStore(ABC):

        Args:
            query: Input text.
-            k: Number of Documents to return.
+            k: Number of Documents to return.
            **kwargs: kwargs to be passed to similarity search. Should include:
                score_threshold: Optional, a floating point value between 0 to 1 to
                    filter the resulting set of retrieved docs

@@ -489,7 +488,7 @@ class VectorStore(ABC):

        Args:
            query: Input text.
-            k: Number of Documents to return.
+            k: Number of Documents to return.
            **kwargs: kwargs to be passed to similarity search. Should include:
                score_threshold: Optional, a floating point value between 0 to 1 to
                    filter the resulting set of retrieved docs

@@ -513,7 +512,7 @@ class VectorStore(ABC):

        Args:
            query: Input text.
-            k: Number of Documents to return.
+            k: Number of Documents to return.
            **kwargs: kwargs to be passed to similarity search. Should include:
                score_threshold: Optional, a floating point value between 0 to 1 to
                    filter the resulting set of retrieved docs.

@@ -562,7 +561,7 @@ class VectorStore(ABC):

        Args:
            query: Input text.
-            k: Number of Documents to return.
+            k: Number of Documents to return.
            **kwargs: kwargs to be passed to similarity search. Should include:
                score_threshold: Optional, a floating point value between 0 to 1 to
                    filter the resulting set of retrieved docs

@@ -606,7 +605,7 @@ class VectorStore(ABC):

        Args:
            query: Input text.
-            k: Number of Documents to return.
+            k: Number of Documents to return.
            **kwargs: Arguments to pass to the search method.

        Returns:

@@ -624,7 +623,7 @@ class VectorStore(ABC):

        Args:
            embedding: Embedding to look up documents similar to.
-            k: Number of Documents to return.
+            k: Number of Documents to return.
            **kwargs: Arguments to pass to the search method.

        Returns:

@@ -639,7 +638,7 @@ class VectorStore(ABC):

        Args:
            embedding: Embedding to look up documents similar to.
-            k: Number of Documents to return.
+            k: Number of Documents to return.
            **kwargs: Arguments to pass to the search method.

        Returns:
@@ -667,13 +666,11 @@ class VectorStore(ABC):

        Args:
            query: Text to look up documents similar to.
-            k: Number of Documents to return.
+            k: Number of Documents to return.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
-                Default is 20.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
-                Defaults to 0.5.
            **kwargs: Arguments to pass to the search method.

        Returns:

@@ -696,13 +693,11 @@ class VectorStore(ABC):

        Args:
            query: Text to look up documents similar to.
-            k: Number of Documents to return.
+            k: Number of Documents to return.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
-                Default is 20.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
-                Defaults to 0.5.
            **kwargs: Arguments to pass to the search method.

        Returns:

@@ -736,13 +731,11 @@ class VectorStore(ABC):

        Args:
            embedding: Embedding to look up documents similar to.
-            k: Number of Documents to return.
+            k: Number of Documents to return.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
-                Default is 20.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
-                Defaults to 0.5.
            **kwargs: Arguments to pass to the search method.

        Returns:

@@ -765,13 +758,11 @@ class VectorStore(ABC):

        Args:
            embedding: Embedding to look up documents similar to.
-            k: Number of Documents to return.
+            k: Number of Documents to return.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
-                Default is 20.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
-                Defaults to 0.5.
            **kwargs: Arguments to pass to the search method.

        Returns:
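To make the `k` / `fetch_k` / `lambda_mult` knobs above concrete, a minimal sketch using the in-memory store and fake embeddings as stand-ins:

```python
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

store = InMemoryVectorStore(DeterministicFakeEmbedding(size=32))
store.add_texts(["cats purr", "cats meow", "dogs bark", "parrots talk"])

# Plain similarity search: the k closest documents.
closest = store.similarity_search("cats", k=2)

# MMR: re-rank fetch_k candidates, trading relevance against diversity.
# lambda_mult=1.0 is pure relevance, 0.0 is maximum diversity.
diverse = store.max_marginal_relevance_search("cats", k=2, fetch_k=4, lambda_mult=0.5)
```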
@@ -864,7 +855,6 @@ class VectorStore(ABC):
            texts: Texts to add to the vectorstore.
            embedding: Embedding function to use.
            metadatas: Optional list of metadatas associated with the texts.
-                Default is None.
            ids: Optional list of IDs associated with the texts.
            **kwargs: Additional keyword arguments.

@@ -888,7 +878,6 @@ class VectorStore(ABC):
            texts: Texts to add to the vectorstore.
            embedding: Embedding function to use.
            metadatas: Optional list of metadatas associated with the texts.
-                Default is None.
            ids: Optional list of IDs associated with the texts.
            **kwargs: Additional keyword arguments.
langchain_core/vectorstores/utils.py
CHANGED

@@ -112,8 +112,8 @@ def maximal_marginal_relevance(
    Args:
        query_embedding: The query embedding.
        embedding_list: A list of embeddings.
-        lambda_mult: The lambda parameter for MMR.
-        k: The number of embeddings to return.
+        lambda_mult: The lambda parameter for MMR.
+        k: The number of embeddings to return.

    Returns:
        A list of indices of the embeddings to return.
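A hedged sketch of calling `maximal_marginal_relevance` directly (it is normally used internally by vector stores; a 1-D query vector and a list of 1-D candidate vectors are assumed):

```python
import numpy as np

from langchain_core.vectorstores.utils import maximal_marginal_relevance

query = np.array([1.0, 0.0])
candidates = [
    np.array([1.0, 0.0]),  # identical to the query
    np.array([0.9, 0.1]),  # near-duplicate
    np.array([0.0, 1.0]),  # very different
]
# Returns indices of the selected embeddings, balancing relevance and diversity.
print(maximal_marginal_relevance(query, candidates, lambda_mult=0.5, k=2))
```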
langchain_core/version.py
CHANGED
{langchain_core-1.0.0rc1.dist-info → langchain_core-1.0.0rc2.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langchain-core
-Version: 1.0.0rc1
+Version: 1.0.0rc2
 Summary: Building applications with LLMs through composability
 Project-URL: homepage, https://docs.langchain.com/
 Project-URL: repository, https://github.com/langchain-ai/langchain/tree/master/libs/core