langchain-core 1.0.0a7__py3-none-any.whl → 1.0.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of langchain-core might be problematic.
- langchain_core/__init__.py +1 -1
- langchain_core/_api/__init__.py +0 -1
- langchain_core/_api/beta_decorator.py +17 -20
- langchain_core/_api/deprecation.py +30 -35
- langchain_core/_import_utils.py +1 -1
- langchain_core/agents.py +7 -6
- langchain_core/caches.py +4 -10
- langchain_core/callbacks/__init__.py +1 -8
- langchain_core/callbacks/base.py +232 -243
- langchain_core/callbacks/file.py +33 -33
- langchain_core/callbacks/manager.py +353 -416
- langchain_core/callbacks/stdout.py +21 -22
- langchain_core/callbacks/streaming_stdout.py +32 -32
- langchain_core/callbacks/usage.py +54 -51
- langchain_core/chat_history.py +76 -55
- langchain_core/document_loaders/langsmith.py +21 -21
- langchain_core/documents/__init__.py +0 -1
- langchain_core/documents/base.py +37 -40
- langchain_core/documents/transformers.py +28 -29
- langchain_core/embeddings/fake.py +46 -52
- langchain_core/exceptions.py +5 -5
- langchain_core/indexing/api.py +11 -11
- langchain_core/indexing/base.py +24 -24
- langchain_core/language_models/__init__.py +0 -2
- langchain_core/language_models/_utils.py +51 -53
- langchain_core/language_models/base.py +23 -24
- langchain_core/language_models/chat_models.py +121 -144
- langchain_core/language_models/fake_chat_models.py +5 -5
- langchain_core/language_models/llms.py +10 -12
- langchain_core/load/dump.py +1 -1
- langchain_core/load/load.py +16 -16
- langchain_core/load/serializable.py +35 -34
- langchain_core/messages/__init__.py +1 -16
- langchain_core/messages/ai.py +105 -104
- langchain_core/messages/base.py +26 -26
- langchain_core/messages/block_translators/__init__.py +17 -17
- langchain_core/messages/block_translators/anthropic.py +2 -2
- langchain_core/messages/block_translators/bedrock_converse.py +2 -2
- langchain_core/messages/block_translators/google_genai.py +2 -2
- langchain_core/messages/block_translators/groq.py +117 -21
- langchain_core/messages/block_translators/langchain_v0.py +2 -2
- langchain_core/messages/block_translators/openai.py +4 -4
- langchain_core/messages/chat.py +1 -1
- langchain_core/messages/content.py +189 -193
- langchain_core/messages/function.py +5 -5
- langchain_core/messages/human.py +15 -17
- langchain_core/messages/modifier.py +1 -1
- langchain_core/messages/system.py +12 -14
- langchain_core/messages/tool.py +45 -49
- langchain_core/messages/utils.py +384 -396
- langchain_core/output_parsers/__init__.py +1 -14
- langchain_core/output_parsers/base.py +22 -23
- langchain_core/output_parsers/json.py +3 -3
- langchain_core/output_parsers/list.py +1 -1
- langchain_core/output_parsers/openai_functions.py +46 -44
- langchain_core/output_parsers/openai_tools.py +7 -7
- langchain_core/output_parsers/pydantic.py +10 -11
- langchain_core/output_parsers/string.py +1 -1
- langchain_core/output_parsers/transform.py +2 -2
- langchain_core/output_parsers/xml.py +1 -1
- langchain_core/outputs/__init__.py +1 -1
- langchain_core/outputs/chat_generation.py +14 -14
- langchain_core/outputs/generation.py +5 -5
- langchain_core/outputs/llm_result.py +5 -5
- langchain_core/prompt_values.py +5 -5
- langchain_core/prompts/__init__.py +3 -23
- langchain_core/prompts/base.py +32 -37
- langchain_core/prompts/chat.py +216 -222
- langchain_core/prompts/dict.py +2 -2
- langchain_core/prompts/few_shot.py +76 -83
- langchain_core/prompts/few_shot_with_templates.py +6 -8
- langchain_core/prompts/image.py +11 -13
- langchain_core/prompts/loading.py +1 -1
- langchain_core/prompts/message.py +2 -2
- langchain_core/prompts/prompt.py +14 -16
- langchain_core/prompts/string.py +19 -7
- langchain_core/prompts/structured.py +24 -25
- langchain_core/rate_limiters.py +36 -38
- langchain_core/retrievers.py +41 -182
- langchain_core/runnables/base.py +565 -590
- langchain_core/runnables/branch.py +7 -7
- langchain_core/runnables/config.py +37 -44
- langchain_core/runnables/configurable.py +8 -9
- langchain_core/runnables/fallbacks.py +8 -8
- langchain_core/runnables/graph.py +28 -27
- langchain_core/runnables/graph_ascii.py +19 -18
- langchain_core/runnables/graph_mermaid.py +20 -31
- langchain_core/runnables/graph_png.py +7 -7
- langchain_core/runnables/history.py +20 -20
- langchain_core/runnables/passthrough.py +8 -8
- langchain_core/runnables/retry.py +3 -3
- langchain_core/runnables/router.py +1 -1
- langchain_core/runnables/schema.py +33 -33
- langchain_core/runnables/utils.py +30 -34
- langchain_core/stores.py +72 -102
- langchain_core/sys_info.py +27 -29
- langchain_core/tools/__init__.py +1 -14
- langchain_core/tools/base.py +63 -63
- langchain_core/tools/convert.py +92 -92
- langchain_core/tools/render.py +9 -9
- langchain_core/tools/retriever.py +1 -1
- langchain_core/tools/simple.py +6 -7
- langchain_core/tools/structured.py +17 -18
- langchain_core/tracers/__init__.py +1 -9
- langchain_core/tracers/base.py +35 -35
- langchain_core/tracers/context.py +12 -17
- langchain_core/tracers/event_stream.py +3 -3
- langchain_core/tracers/langchain.py +8 -8
- langchain_core/tracers/log_stream.py +17 -18
- langchain_core/tracers/memory_stream.py +2 -2
- langchain_core/tracers/schemas.py +0 -129
- langchain_core/utils/aiter.py +31 -31
- langchain_core/utils/env.py +5 -5
- langchain_core/utils/function_calling.py +48 -120
- langchain_core/utils/html.py +4 -4
- langchain_core/utils/input.py +2 -2
- langchain_core/utils/interactive_env.py +1 -1
- langchain_core/utils/iter.py +19 -19
- langchain_core/utils/json.py +1 -1
- langchain_core/utils/json_schema.py +2 -2
- langchain_core/utils/mustache.py +5 -5
- langchain_core/utils/pydantic.py +17 -17
- langchain_core/utils/strings.py +4 -4
- langchain_core/utils/utils.py +25 -28
- langchain_core/vectorstores/base.py +43 -64
- langchain_core/vectorstores/in_memory.py +83 -85
- langchain_core/version.py +1 -1
- {langchain_core-1.0.0a7.dist-info → langchain_core-1.0.0rc1.dist-info}/METADATA +23 -11
- langchain_core-1.0.0rc1.dist-info/RECORD +172 -0
- langchain_core/memory.py +0 -120
- langchain_core/pydantic_v1/__init__.py +0 -30
- langchain_core/pydantic_v1/dataclasses.py +0 -23
- langchain_core/pydantic_v1/main.py +0 -23
- langchain_core-1.0.0a7.dist-info/RECORD +0 -176
- {langchain_core-1.0.0a7.dist-info → langchain_core-1.0.0rc1.dist-info}/WHEEL +0 -0
langchain_core/utils/aiter.py
CHANGED
@@ -50,7 +50,7 @@ def py_anext(
 
     Returns:
         The next value from the iterator, or the default value
-
+        if the iterator is exhausted.
 
     Raises:
         TypeError: If the iterator is not an async iterator.
@@ -107,7 +107,7 @@ async def tee_peer(
     """An individual iterator of a `tee`.
 
     This function is a generator that yields items from the shared iterator
-
+    `iterator`. It buffers items until the least advanced iterator has
     yielded them as well. The buffer is shared with all other peers.
 
     Args:
@@ -153,38 +153,38 @@ async def tee_peer(
 
 
 class Tee(Generic[T]):
-    """Create
+    """Create `n` separate asynchronous iterators over `iterable`.
 
-    This splits a single
+    This splits a single `iterable` into multiple iterators, each providing
     the same items in the same order.
     All child iterators may advance separately but share the same items
-    from
+    from `iterable` -- when the most advanced iterator retrieves an item,
     it is buffered until the least advanced iterator has yielded it as well.
-    A
+    A `tee` works lazily and can handle an infinite `iterable`, provided
     that all iterators advance.
 
-
-
-
-
-
-
+    ```python
+    async def derivative(sensor_data):
+        previous, current = a.tee(sensor_data, n=2)
+        await a.anext(previous)  # advance one iterator
+        return a.map(operator.sub, previous, current)
+    ```
 
     Unlike `itertools.tee`, `.tee` returns a custom type instead
     of a :py`tuple`. Like a tuple, it can be indexed, iterated and unpacked
     to get the child iterators. In addition, its `.tee.aclose` method
-    immediately closes all children, and it can be used in an
+    immediately closes all children, and it can be used in an `async with` context
     for the same effect.
 
-    If
-    provide these items. Also,
+    If `iterable` is an iterator and read elsewhere, `tee` will *not*
+    provide these items. Also, `tee` must internally buffer each item until the
     last iterator has yielded it; if the most and least advanced iterator differ
     by most data, using a :py`list` is more efficient (but not lazy).
 
-    If the underlying iterable is concurrency safe (
+    If the underlying iterable is concurrency safe (`anext` may be awaited
     concurrently) the resulting iterators are concurrency safe as well. Otherwise,
     the iterators are safe if there is only ever one single "most advanced" iterator.
-    To enforce sequential use of
+    To enforce sequential use of `anext`, provide a `lock`
     - e.g. an :py`asyncio.Lock` instance in an :py:mod:`asyncio` application -
     and access is automatically synchronised.
 
@@ -197,13 +197,13 @@ class Tee(Generic[T]):
         *,
         lock: AbstractAsyncContextManager[Any] | None = None,
     ):
-        """Create a
+        """Create a `tee`.
 
         Args:
             iterable: The iterable to split.
             n: The number of iterators to create. Defaults to 2.
             lock: The lock to synchronise access to the shared buffers.
-
+
         """
         self._iterator = iterable.__aiter__()  # before 3.10 aiter() doesn't exist
         self._buffers: list[deque[T]] = [deque() for _ in range(n)]
@@ -269,25 +269,25 @@ atee = Tee
 
 
 class aclosing(AbstractAsyncContextManager):  # noqa: N801
-    """Async context manager to wrap an AsyncGenerator that has a
+    """Async context manager to wrap an AsyncGenerator that has a `aclose()` method.
 
     Code like this:
 
-
-
-
-
+    ```python
+    async with aclosing(<module>.fetch(<arguments>)) as agen:
+        <block>
+    ```
 
     is equivalent to this:
 
-
-
-
-
-
-
-        await agen.aclose()
+    ```python
+    agen = <module>.fetch(<arguments>)
+    try:
+        <block>
+    finally:
+        await agen.aclose()
 
+    ```
     """
 
     def __init__(self, thing: AsyncGenerator[Any, Any] | AsyncIterator[Any]) -> None:
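For reference, a minimal usage sketch of the `atee` (alias of `Tee`) and `aclosing` helpers whose docstrings change above; the import path follows this file, and the sample async generator is invented for illustration.

```python
import asyncio

# Assumes the public names shown in this file: atee (alias of Tee) and aclosing.
from langchain_core.utils.aiter import aclosing, atee


async def numbers():
    for i in range(5):
        yield i


async def main() -> None:
    # atee splits one async iterable into n iterators yielding the same items.
    first, second = atee(numbers(), 2)
    async with aclosing(first) as left, aclosing(second) as right:
        print([x async for x in left])   # [0, 1, 2, 3, 4]
        print([x async for x in right])  # [0, 1, 2, 3, 4]


asyncio.run(main())
```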
langchain_core/utils/env.py
CHANGED
@@ -10,10 +10,10 @@ def env_var_is_set(env_var: str) -> bool:
     """Check if an environment variable is set.
 
     Args:
-        env_var
+        env_var: The name of the environment variable.
 
     Returns:
-
+        `True` if the environment variable is set, `False` otherwise.
     """
     return env_var in os.environ and os.environ[env_var] not in {
         "",
@@ -38,7 +38,7 @@ def get_from_dict_or_env(
         env_key: The environment variable to look up if the key is not
             in the dictionary.
         default: The default value to return if the key is not in the dictionary
-            or the environment.
+            or the environment.
 
     Returns:
         The dict value or the environment variable value.
@@ -64,10 +64,10 @@ def get_from_env(key: str, env_key: str, default: str | None = None) -> str:
         env_key: The environment variable to look up if the key is not
             in the dictionary.
         default: The default value to return if the key is not in the dictionary
-            or the environment.
+            or the environment.
 
     Returns:
-
+        The value of the key.
 
     Raises:
         ValueError: If the key is not in the dictionary and no default value is
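A short sketch of how the helpers documented in these hunks are typically called; the key and environment-variable names are made up for this example.

```python
import os

from langchain_core.utils.env import env_var_is_set, get_from_dict_or_env

# Hypothetical variable name used only for illustration.
os.environ["EXAMPLE_API_KEY"] = "secret-from-env"

print(env_var_is_set("EXAMPLE_API_KEY"))  # True

# Prefer the value from the dict, fall back to the environment variable.
print(get_from_dict_or_env({"example_api_key": "from-dict"}, "example_api_key", "EXAMPLE_API_KEY"))
print(get_from_dict_or_env({}, "example_api_key", "EXAMPLE_API_KEY"))  # "secret-from-env"
```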
langchain_core/utils/function_calling.py
CHANGED
@@ -27,7 +27,7 @@ from pydantic.v1 import create_model as create_model_v1
 from typing_extensions import TypedDict, is_typeddict
 
 import langchain_core
-from langchain_core._api import beta
+from langchain_core._api import beta
 from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
 from langchain_core.utils.json_schema import dereference_refs
 from langchain_core.utils.pydantic import is_basemodel_subclass
@@ -72,11 +72,11 @@ def _rm_titles(kv: dict, prev_key: str = "") -> dict:
     except when a "title" appears within a property definition under "properties".
 
     Args:
-        kv
-        prev_key
+        kv: The input JSON schema as a dictionary.
+        prev_key: The key from the parent dictionary, used to identify context.
 
     Returns:
-
+        A new dictionary with appropriate "title" fields removed.
     """
     new_kv = {}
 
@@ -114,7 +114,7 @@ def _convert_json_schema_to_openai_function(
             used.
         description: The description of the function. If not provided, the description
             of the schema will be used.
-        rm_titles: Whether to remove titles from the schema. Defaults to True
+        rm_titles: Whether to remove titles from the schema. Defaults to `True`.
 
     Returns:
         The function description.
@@ -148,7 +148,7 @@ def _convert_pydantic_to_openai_function(
             used.
         description: The description of the function. If not provided, the description
             of the schema will be used.
-        rm_titles: Whether to remove titles from the schema. Defaults to True
+        rm_titles: Whether to remove titles from the schema. Defaults to `True`.
 
     Raises:
         TypeError: If the model is not a Pydantic model.
@@ -168,42 +168,6 @@ def _convert_pydantic_to_openai_function(
     )
 
 
-convert_pydantic_to_openai_function = deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
-    removal="1.0",
-)(_convert_pydantic_to_openai_function)
-
-
-@deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_tool()",
-    removal="1.0",
-)
-def convert_pydantic_to_openai_tool(
-    model: type[BaseModel],
-    *,
-    name: str | None = None,
-    description: str | None = None,
-) -> ToolDescription:
-    """Converts a Pydantic model to a function description for the OpenAI API.
-
-    Args:
-        model: The Pydantic model to convert.
-        name: The name of the function. If not provided, the title of the schema will be
-            used.
-        description: The description of the function. If not provided, the description
-            of the schema will be used.
-
-    Returns:
-        The tool description.
-    """
-    function = _convert_pydantic_to_openai_function(
-        model, name=name, description=description
-    )
-    return {"type": "function", "function": function}
-
-
 def _get_python_function_name(function: Callable) -> str:
     """Get the name of a Python function."""
     return function.__name__
@@ -240,13 +204,6 @@ def _convert_python_function_to_openai_function(
     )
 
 
-convert_python_function_to_openai_function = deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
-    removal="1.0",
-)(_convert_python_function_to_openai_function)
-
-
 def _convert_typed_dict_to_openai_function(typed_dict: type) -> FunctionDescription:
     visited: dict = {}
 
@@ -368,31 +325,6 @@ def _format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription:
     }
 
 
-format_tool_to_openai_function = deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
-    removal="1.0",
-)(_format_tool_to_openai_function)
-
-
-@deprecated(
-    "0.1.16",
-    alternative="langchain_core.utils.function_calling.convert_to_openai_tool()",
-    removal="1.0",
-)
-def format_tool_to_openai_tool(tool: BaseTool) -> ToolDescription:
-    """Format tool into the OpenAI function API.
-
-    Args:
-        tool: The tool to format.
-
-    Returns:
-        The tool description.
-    """
-    function = _format_tool_to_openai_function(tool)
-    return {"type": "function", "function": function}
-
-
 def convert_to_openai_function(
     function: dict[str, Any] | type | Callable | BaseTool,
     *,
@@ -408,8 +340,8 @@ def convert_to_openai_function(
             top-level 'title' key specified, an Anthropic format
             tool, or an Amazon Bedrock Converse format tool.
         strict:
-            If True
-            provided in the function definition. If None
+            If `True`, model output is guaranteed to exactly match the JSON Schema
+            provided in the function definition. If `None`, `strict` argument will not
             be included in function definition.
 
     Returns:
@@ -420,7 +352,7 @@ def convert_to_openai_function(
         ValueError: If function is not in a supported format.
 
     !!! warning "Behavior changed in 0.2.29"
-
+        `strict` arg added.
 
     !!! warning "Behavior changed in 0.3.13"
         Support for Anthropic format tools added.
@@ -538,8 +470,8 @@ def convert_to_openai_tool(
             top-level 'title' key specified, an Anthropic format
             tool, or an Amazon Bedrock Converse format tool.
         strict:
-            If True
-            provided in the function definition. If None
+            If `True`, model output is guaranteed to exactly match the JSON Schema
+            provided in the function definition. If `None`, `strict` argument will not
             be included in tool definition.
 
     Returns:
@@ -547,7 +479,7 @@ def convert_to_openai_tool(
             OpenAI tool-calling API.
 
     !!! warning "Behavior changed in 0.2.29"
-
+        `strict` arg added.
 
     !!! warning "Behavior changed in 0.3.13"
         Support for Anthropic format tools added.
@@ -601,8 +533,8 @@ def convert_to_json_schema(
 
     Args:
         schema: The schema to convert.
-        strict: If True
-            provided in the function definition. If None
+        strict: If `True`, model output is guaranteed to exactly match the JSON Schema
+            provided in the function definition. If `None`, `strict` argument will not
             be included in function definition.
 
     Raises:
@@ -649,15 +581,15 @@ def tool_example_to_messages(
 
     The list of messages per example by default corresponds to:
 
-    1.
-    2.
-    3.
-
+    1. `HumanMessage`: contains the content from which content should be extracted.
+    2. `AIMessage`: contains the extracted information from the model
+    3. `ToolMessage`: contains confirmation to the model that the model requested a
+       tool correctly.
 
-    If
+    If `ai_response` is specified, there will be a final `AIMessage` with that
     response.
 
-    The
+    The `ToolMessage` is required because some chat models are hyper-optimized for
     agents rather than for an extraction use case.
 
     Args:
@@ -665,50 +597,46 @@ def tool_example_to_messages(
         tool_calls: Tool calls represented as Pydantic BaseModels
         tool_outputs: Tool call outputs.
             Does not need to be provided. If not provided, a placeholder value
-            will be inserted.
-        ai_response: If provided, content for a final
+            will be inserted.
+        ai_response: If provided, content for a final `AIMessage`.
 
     Returns:
         A list of messages
 
     Examples:
+        ```python
+        from typing import Optional
+        from pydantic import BaseModel, Field
+        from langchain_openai import ChatOpenAI
 
-        .. code-block:: python
-
-            from typing import Optional
-            from pydantic import BaseModel, Field
-            from langchain_openai import ChatOpenAI
 
+        class Person(BaseModel):
+            '''Information about a person.'''
 
-
-
-
-
-
-                ..., description="The color of the person's hair if known"
-            )
-            height_in_meters: Optional[str] = Field(
-                ..., description="Height in METERS"
-            )
-
+            name: str | None = Field(..., description="The name of the person")
+            hair_color: str | None = Field(
+                ..., description="The color of the person's hair if known"
+            )
+            height_in_meters: str | None = Field(..., description="Height in METERS")
 
-        examples = [
-            (
-                "The ocean is vast and blue. It's more than 20,000 feet deep.",
-                Person(name=None, height_in_meters=None, hair_color=None),
-            ),
-            (
-                "Fiona traveled far from France to Spain.",
-                Person(name="Fiona", height_in_meters=None, hair_color=None),
-            ),
-        ]
 
+        examples = [
+            (
+                "The ocean is vast and blue. It's more than 20,000 feet deep.",
+                Person(name=None, height_in_meters=None, hair_color=None),
+            ),
+            (
+                "Fiona traveled far from France to Spain.",
+                Person(name="Fiona", height_in_meters=None, hair_color=None),
+            ),
+        ]
 
-        messages = []
 
-
-            messages.extend(tool_example_to_messages(txt, [tool_call]))
+        messages = []
 
+        for txt, tool_call in examples:
+            messages.extend(tool_example_to_messages(txt, [tool_call]))
+        ```
     """
     messages: list[BaseMessage] = [HumanMessage(content=input)]
     openai_tool_calls = [
@@ -717,7 +645,7 @@ def tool_example_to_messages(
             "type": "function",
             "function": {
                 # The name of the function right now corresponds to the name
-                # of the
+                # of the Pydantic model. This is implicit in the API right now,
                 # and will be improved over time.
                 "name": tool_call.__class__.__name__,
                 "arguments": tool_call.model_dump_json(),
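With the deprecated `convert_pydantic_to_openai_*` and `format_tool_to_openai_*` wrappers removed in this release, callers go through `convert_to_openai_function` / `convert_to_openai_tool`. A minimal sketch, using a made-up Pydantic schema:

```python
from pydantic import BaseModel, Field

from langchain_core.utils.function_calling import (
    convert_to_openai_function,
    convert_to_openai_tool,
)


class GetWeather(BaseModel):
    """Get the current weather for a city."""

    city: str = Field(..., description="City name")
    unit: str = Field("celsius", description="Temperature unit")


function = convert_to_openai_function(GetWeather)
print(function["name"])  # "GetWeather"

# strict=True includes the `strict` flag in the generated definition (see docstring above).
tool = convert_to_openai_tool(GetWeather, strict=True)
print(tool["type"])  # "function"
```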
langchain_core/utils/html.py
CHANGED
@@ -43,7 +43,7 @@ def find_all_links(
         pattern: Regex to use for extracting links from raw HTML.
 
     Returns:
-
+        all links
     """
     pattern = pattern or DEFAULT_LINK_REGEX
     return list(set(re.findall(pattern, raw_html)))
@@ -66,14 +66,14 @@ def extract_sub_links(
         url: the url of the HTML.
         base_url: the base URL to check for outside links against.
         pattern: Regex to use for extracting links from raw HTML.
-        prevent_outside: If True
+        prevent_outside: If `True`, ignore external links which are not children
            of the base URL.
         exclude_prefixes: Exclude any URLs that start with one of these prefixes.
-        continue_on_failure: If True
+        continue_on_failure: If `True`, continue if parsing a specific link raises an
            exception. Otherwise, raise the exception.
 
     Returns:
-
+        sub links.
     """
     base_url_to_use = base_url if base_url is not None else url
     parsed_base_url = urlparse(base_url_to_use)
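An illustrative call to the two link helpers documented above; the HTML snippet and URLs are invented, and output order is not guaranteed since a set is used internally.

```python
from langchain_core.utils.html import extract_sub_links, find_all_links

raw_html = '<a href="/docs/intro">Intro</a> <a href="https://example.org/x">X</a>'

print(find_all_links(raw_html))
# e.g. ['/docs/intro', 'https://example.org/x']

print(
    extract_sub_links(
        raw_html,
        "https://example.com/docs/",
        prevent_outside=True,  # drop links that are not children of the base URL
    )
)
# e.g. ['https://example.com/docs/intro']
```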
langchain_core/utils/input.py
CHANGED
@@ -65,9 +65,9 @@ def print_text(
 
     Args:
         text: The text to print.
-        color: The color to use.
+        color: The color to use.
         end: The end character to use. Defaults to "".
-        file: The file to write to.
+        file: The file to write to.
     """
     text_to_print = get_colored_text(text, color) if color else text
     print(text_to_print, end=end, file=file)
langchain_core/utils/interactive_env.py
CHANGED
@@ -7,6 +7,6 @@ def is_interactive_env() -> bool:
     """Determine if running within IPython or Jupyter.
 
     Returns:
-        True if running in an interactive environment, False otherwise.
+        True if running in an interactive environment, `False` otherwise.
     """
     return hasattr(sys, "ps2")
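The two touched helpers are small; a quick sketch of how they are called (the color name is one of the values accepted by `get_colored_text`, per the context line above):

```python
import sys

from langchain_core.utils.input import print_text
from langchain_core.utils.interactive_env import is_interactive_env

print_text("hello", color="green", end="\n", file=sys.stdout)
print(is_interactive_env())  # False when run as a plain script
```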
langchain_core/utils/iter.py
CHANGED
@@ -43,7 +43,7 @@ def tee_peer(
     """An individual iterator of a `.tee`.
 
     This function is a generator that yields items from the shared iterator
-
+    `iterator`. It buffers items until the least advanced iterator has
     yielded them as well. The buffer is shared with all other peers.
 
     Args:
@@ -89,38 +89,38 @@ def tee_peer(
 
 
 class Tee(Generic[T]):
-    """Create
+    """Create `n` separate asynchronous iterators over `iterable`.
 
-    This splits a single
+    This splits a single `iterable` into multiple iterators, each providing
     the same items in the same order.
     All child iterators may advance separately but share the same items
-    from
+    from `iterable` -- when the most advanced iterator retrieves an item,
     it is buffered until the least advanced iterator has yielded it as well.
-    A
+    A `tee` works lazily and can handle an infinite `iterable`, provided
     that all iterators advance.
 
-
-
-
-
-
-
+    ```python
+    async def derivative(sensor_data):
+        previous, current = a.tee(sensor_data, n=2)
+        await a.anext(previous)  # advance one iterator
+        return a.map(operator.sub, previous, current)
+    ```
 
     Unlike `itertools.tee`, `.tee` returns a custom type instead
     of a :py`tuple`. Like a tuple, it can be indexed, iterated and unpacked
     to get the child iterators. In addition, its `.tee.aclose` method
-    immediately closes all children, and it can be used in an
+    immediately closes all children, and it can be used in an `async with` context
     for the same effect.
 
-    If
-    provide these items. Also,
+    If `iterable` is an iterator and read elsewhere, `tee` will *not*
+    provide these items. Also, `tee` must internally buffer each item until the
     last iterator has yielded it; if the most and least advanced iterator differ
     by most data, using a :py`list` is more efficient (but not lazy).
 
-    If the underlying iterable is concurrency safe (
+    If the underlying iterable is concurrency safe (`anext` may be awaited
     concurrently) the resulting iterators are concurrency safe as well. Otherwise,
     the iterators are safe if there is only ever one single "most advanced" iterator.
-    To enforce sequential use of
+    To enforce sequential use of `anext`, provide a `lock`
     - e.g. an :py`asyncio.Lock` instance in an :py:mod:`asyncio` application -
     and access is automatically synchronised.
 
@@ -133,13 +133,13 @@ class Tee(Generic[T]):
         *,
         lock: AbstractContextManager[Any] | None = None,
     ):
-        """Create a
+        """Create a `tee`.
 
         Args:
             iterable: The iterable to split.
             n: The number of iterators to create. Defaults to 2.
             lock: The lock to synchronise access to the shared buffers.
-
+
         """
        self._iterator = iter(iterable)
        self._buffers: list[deque[T]] = [deque() for _ in range(n)]
@@ -207,7 +207,7 @@ def batch_iterate(size: int | None, iterable: Iterable[T]) -> Iterator[list[T]]:
     """Utility batching function.
 
     Args:
-        size: The size of the batch. If None
+        size: The size of the batch. If `None`, returns a single batch.
         iterable: The iterable to batch.
 
     Yields:
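For comparison with the async version, a small sketch of the synchronous `Tee` and `batch_iterate` documented above; the input values are arbitrary.

```python
from langchain_core.utils.iter import Tee, batch_iterate

first, second = Tee(range(5), 2)  # two iterators over the same items
print(list(first))   # [0, 1, 2, 3, 4]
print(list(second))  # [0, 1, 2, 3, 4]

print(list(batch_iterate(2, ["a", "b", "c", "d", "e"])))
# [['a', 'b'], ['c', 'd'], ['e']]
```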
langchain_core/utils/json.py
CHANGED
@@ -51,7 +51,7 @@ def parse_partial_json(s: str, *, strict: bool = False) -> Any:
 
     Args:
         s: The JSON string to parse.
-        strict: Whether to use strict parsing. Defaults to False
+        strict: Whether to use strict parsing. Defaults to `False`.
 
     Returns:
         The parsed JSON object as a Python dictionary.
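A quick illustration of `parse_partial_json`, which tolerates truncated JSON such as partially streamed model output; the input string is made up.

```python
from langchain_core.utils.json import parse_partial_json

print(parse_partial_json('{"name": "Ada", "tags": ["math", "log'))
# {'name': 'Ada', 'tags': ['math', 'log']}
```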
langchain_core/utils/json_schema.py
CHANGED
@@ -85,7 +85,7 @@ def _dereference_refs_helper(
         full_schema: The complete schema containing all definitions
         processed_refs: Set tracking currently processing refs (for cycle detection)
         skip_keys: Keys under which to skip recursion
-        shallow_refs: If True
+        shallow_refs: If `True`, only break cycles; if False, deep-inline all refs
 
     Returns:
         The object with $ref properties resolved and merged with other properties.
@@ -184,7 +184,7 @@ def dereference_refs(
             point to. If not provided, defaults to schema_obj (useful when the
             schema is self-contained).
         skip_keys: Controls recursion behavior and reference resolution depth:
-            - If None (
+            - If `None` (Default): Only recurse under '$defs' and use shallow reference
              resolution (break cycles but don't deep-inline nested refs)
            - If provided (even as []): Recurse under all keys and use deep reference
              resolution (fully inline all nested references)
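A minimal sketch of `dereference_refs` with the default `skip_keys=None` behavior described above; the schema is illustrative.

```python
from langchain_core.utils.json_schema import dereference_refs

schema = {
    "type": "object",
    "properties": {"person": {"$ref": "#/$defs/Person"}},
    "$defs": {
        "Person": {
            "type": "object",
            "properties": {"name": {"type": "string"}},
        }
    },
}

resolved = dereference_refs(schema)
print(resolved["properties"]["person"]["type"])  # "object" -- the $ref is inlined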
langchain_core/utils/mustache.py
CHANGED
@@ -46,7 +46,7 @@ def grab_literal(template: str, l_del: str) -> tuple[str, str]:
         l_del: The left delimiter.
 
     Returns:
-
+        The literal and the template.
     """
     global _CURRENT_LINE
 
@@ -76,7 +76,7 @@ def l_sa_check(
         is_standalone: Whether the tag is standalone.
 
     Returns:
-
+        Whether the tag could be a standalone.
     """
     # If there is a newline, or the previous tag was a standalone
     if literal.find("\n") != -1 or is_standalone:
@@ -102,7 +102,7 @@ def r_sa_check(
         is_standalone: Whether the tag is standalone.
 
     Returns:
-
+        Whether the tag could be a standalone.
     """
     # Check right side if we might be a standalone
     if is_standalone and tag_type not in {"variable", "no escape"}:
@@ -124,7 +124,7 @@ def parse_tag(template: str, l_del: str, r_del: str) -> tuple[tuple[str, str], s
         r_del: The right delimiter.
 
     Returns:
-
+        The tag and the template.
 
     Raises:
         ChevronError: If the tag is unclosed.
@@ -329,7 +329,7 @@ def tokenize(
 
 
 def _html_escape(string: str) -> str:
-    """Return the HTML-escaped string with these characters escaped:
+    """Return the HTML-escaped string with these characters escaped: `" & < >`."""
     html_codes = {
         '"': "&quot;",
         "<": "&lt;",