langchain-core 1.0.0a6__py3-none-any.whl → 1.0.0a7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: the registry flags this version of langchain-core as possibly problematic.
- langchain_core/_api/__init__.py +3 -3
- langchain_core/_api/beta_decorator.py +6 -6
- langchain_core/_api/deprecation.py +21 -29
- langchain_core/_api/path.py +3 -6
- langchain_core/_import_utils.py +2 -3
- langchain_core/agents.py +10 -11
- langchain_core/caches.py +7 -7
- langchain_core/callbacks/base.py +91 -91
- langchain_core/callbacks/file.py +11 -11
- langchain_core/callbacks/manager.py +86 -89
- langchain_core/callbacks/stdout.py +8 -8
- langchain_core/callbacks/usage.py +4 -4
- langchain_core/chat_history.py +1 -37
- langchain_core/document_loaders/base.py +2 -2
- langchain_core/document_loaders/langsmith.py +15 -15
- langchain_core/documents/base.py +16 -16
- langchain_core/documents/compressor.py +4 -4
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/example_selectors/semantic_similarity.py +17 -19
- langchain_core/exceptions.py +3 -3
- langchain_core/globals.py +3 -151
- langchain_core/indexing/api.py +44 -43
- langchain_core/indexing/base.py +30 -30
- langchain_core/indexing/in_memory.py +3 -3
- langchain_core/language_models/_utils.py +5 -7
- langchain_core/language_models/base.py +18 -132
- langchain_core/language_models/chat_models.py +118 -227
- langchain_core/language_models/fake.py +11 -11
- langchain_core/language_models/fake_chat_models.py +35 -29
- langchain_core/language_models/llms.py +91 -201
- langchain_core/load/dump.py +1 -1
- langchain_core/load/load.py +11 -12
- langchain_core/load/mapping.py +2 -4
- langchain_core/load/serializable.py +2 -4
- langchain_core/messages/ai.py +17 -20
- langchain_core/messages/base.py +23 -25
- langchain_core/messages/block_translators/__init__.py +2 -5
- langchain_core/messages/block_translators/anthropic.py +3 -3
- langchain_core/messages/block_translators/bedrock_converse.py +2 -2
- langchain_core/messages/block_translators/langchain_v0.py +2 -2
- langchain_core/messages/block_translators/openai.py +6 -6
- langchain_core/messages/content.py +120 -124
- langchain_core/messages/human.py +7 -7
- langchain_core/messages/system.py +7 -7
- langchain_core/messages/tool.py +24 -24
- langchain_core/messages/utils.py +67 -79
- langchain_core/output_parsers/base.py +12 -14
- langchain_core/output_parsers/json.py +4 -4
- langchain_core/output_parsers/list.py +3 -5
- langchain_core/output_parsers/openai_functions.py +3 -3
- langchain_core/output_parsers/openai_tools.py +3 -3
- langchain_core/output_parsers/pydantic.py +2 -2
- langchain_core/output_parsers/transform.py +13 -15
- langchain_core/output_parsers/xml.py +7 -9
- langchain_core/outputs/chat_generation.py +4 -4
- langchain_core/outputs/chat_result.py +1 -3
- langchain_core/outputs/generation.py +2 -2
- langchain_core/outputs/llm_result.py +5 -5
- langchain_core/prompts/__init__.py +1 -5
- langchain_core/prompts/base.py +10 -15
- langchain_core/prompts/chat.py +31 -82
- langchain_core/prompts/dict.py +2 -2
- langchain_core/prompts/few_shot.py +5 -5
- langchain_core/prompts/few_shot_with_templates.py +4 -4
- langchain_core/prompts/loading.py +3 -5
- langchain_core/prompts/prompt.py +4 -16
- langchain_core/prompts/string.py +2 -1
- langchain_core/prompts/structured.py +16 -23
- langchain_core/rate_limiters.py +3 -4
- langchain_core/retrievers.py +14 -14
- langchain_core/runnables/base.py +928 -1042
- langchain_core/runnables/branch.py +36 -40
- langchain_core/runnables/config.py +27 -35
- langchain_core/runnables/configurable.py +108 -124
- langchain_core/runnables/fallbacks.py +76 -72
- langchain_core/runnables/graph.py +39 -45
- langchain_core/runnables/graph_ascii.py +9 -11
- langchain_core/runnables/graph_mermaid.py +18 -19
- langchain_core/runnables/graph_png.py +8 -9
- langchain_core/runnables/history.py +114 -127
- langchain_core/runnables/passthrough.py +113 -139
- langchain_core/runnables/retry.py +43 -48
- langchain_core/runnables/router.py +23 -28
- langchain_core/runnables/schema.py +42 -44
- langchain_core/runnables/utils.py +28 -31
- langchain_core/stores.py +9 -13
- langchain_core/structured_query.py +8 -8
- langchain_core/tools/base.py +62 -115
- langchain_core/tools/convert.py +31 -35
- langchain_core/tools/render.py +1 -1
- langchain_core/tools/retriever.py +4 -4
- langchain_core/tools/simple.py +13 -17
- langchain_core/tools/structured.py +12 -15
- langchain_core/tracers/base.py +62 -64
- langchain_core/tracers/context.py +17 -35
- langchain_core/tracers/core.py +49 -53
- langchain_core/tracers/evaluation.py +11 -11
- langchain_core/tracers/event_stream.py +58 -60
- langchain_core/tracers/langchain.py +13 -13
- langchain_core/tracers/log_stream.py +22 -24
- langchain_core/tracers/root_listeners.py +14 -14
- langchain_core/tracers/run_collector.py +2 -4
- langchain_core/tracers/schemas.py +8 -8
- langchain_core/tracers/stdout.py +2 -1
- langchain_core/utils/__init__.py +0 -3
- langchain_core/utils/_merge.py +2 -2
- langchain_core/utils/aiter.py +24 -28
- langchain_core/utils/env.py +4 -4
- langchain_core/utils/function_calling.py +31 -41
- langchain_core/utils/html.py +3 -4
- langchain_core/utils/input.py +3 -3
- langchain_core/utils/iter.py +15 -19
- langchain_core/utils/json.py +3 -2
- langchain_core/utils/json_schema.py +6 -6
- langchain_core/utils/mustache.py +3 -5
- langchain_core/utils/pydantic.py +16 -18
- langchain_core/utils/usage.py +1 -1
- langchain_core/utils/utils.py +29 -29
- langchain_core/vectorstores/base.py +18 -21
- langchain_core/vectorstores/in_memory.py +14 -87
- langchain_core/vectorstores/utils.py +2 -2
- langchain_core/version.py +1 -1
- {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.0a7.dist-info}/METADATA +10 -21
- langchain_core-1.0.0a7.dist-info/RECORD +176 -0
- {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.0a7.dist-info}/WHEEL +1 -1
- langchain_core/messages/block_translators/ollama.py +0 -47
- langchain_core/prompts/pipeline.py +0 -138
- langchain_core/tracers/langchain_v1.py +0 -31
- langchain_core/utils/loading.py +0 -35
- langchain_core-1.0.0a6.dist-info/RECORD +0 -181
- langchain_core-1.0.0a6.dist-info/entry_points.txt +0 -4
langchain_core/messages/system.py
CHANGED

@@ -1,6 +1,6 @@
 """System message."""

-from typing import Any, Literal, Optional, Union, cast, overload
+from typing import Any, Literal, cast, overload

 from langchain_core.messages import content as types
 from langchain_core.messages.base import BaseMessage, BaseMessageChunk
@@ -38,28 +38,28 @@ class SystemMessage(BaseMessage):
     @overload
     def __init__(
         self,
-        content: Union[str, list[Union[str, dict]]],
+        content: str | list[str | dict],
         **kwargs: Any,
     ) -> None: ...

     @overload
     def __init__(
         self,
-        content: Optional[Union[str, list[Union[str, dict]]]] = None,
-        content_blocks: Optional[list[types.ContentBlock]] = None,
+        content: str | list[str | dict] | None = None,
+        content_blocks: list[types.ContentBlock] | None = None,
         **kwargs: Any,
     ) -> None: ...

     def __init__(
         self,
-        content: Optional[Union[str, list[Union[str, dict]]]] = None,
-        content_blocks: Optional[list[types.ContentBlock]] = None,
+        content: str | list[str | dict] | None = None,
+        content_blocks: list[types.ContentBlock] | None = None,
         **kwargs: Any,
     ) -> None:
         """Specify ``content`` as positional arg or ``content_blocks`` for typing."""
         if content_blocks is not None:
             super().__init__(
-                content=cast("Union[str, list[Union[str, dict]]]", content_blocks),
+                content=cast("str | list[str | dict]", content_blocks),
                 **kwargs,
             )
         else:
langchain_core/messages/tool.py
CHANGED

@@ -1,7 +1,7 @@
 """Messages for tools."""

 import json
-from typing import Any, Literal, Optional, Union, cast, overload
+from typing import Any, Literal, cast, overload
 from uuid import UUID

 from pydantic import Field, model_validator
@@ -41,7 +41,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
     Example: A ``ToolMessage`` where only part of the tool output is sent to the model
         and the full output is passed in to artifact.

-        .. versionadded:: 0.2.17
+        !!! version-added "Added in version 0.2.17"

         .. code-block:: python

@@ -83,14 +83,14 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
     a subset of the full tool output is being passed as message content but the full
     output is needed in other parts of the code.

-    .. versionadded:: 0.2.17
+    !!! version-added "Added in version 0.2.17"

     """

     status: Literal["success", "error"] = "success"
     """Status of the tool invocation.

-    .. versionadded:: 0.2.24
+    !!! version-added "Added in version 0.2.24"

     """

@@ -147,22 +147,22 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
     @overload
     def __init__(
         self,
-        content: Union[str, list[Union[str, dict]]],
+        content: str | list[str | dict],
         **kwargs: Any,
     ) -> None: ...

     @overload
     def __init__(
         self,
-        content: Optional[Union[str, list[Union[str, dict]]]] = None,
-        content_blocks: Optional[list[types.ContentBlock]] = None,
+        content: str | list[str | dict] | None = None,
+        content_blocks: list[types.ContentBlock] | None = None,
         **kwargs: Any,
     ) -> None: ...

     def __init__(
         self,
-        content: Optional[Union[str, list[Union[str, dict]]]] = None,
-        content_blocks: Optional[list[types.ContentBlock]] = None,
+        content: str | list[str | dict] | None = None,
+        content_blocks: list[types.ContentBlock] | None = None,
         **kwargs: Any,
     ) -> None:
         """Initialize ``ToolMessage``.
@@ -176,7 +176,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
         """
         if content_blocks is not None:
             super().__init__(
-                content=cast("Union[str, list[Union[str, dict]]]", content_blocks),
+                content=cast("str | list[str | dict]", content_blocks),
                 **kwargs,
             )
         else:
@@ -233,7 +233,7 @@ class ToolCall(TypedDict):
     """The name of the tool to be called."""
     args: dict[str, Any]
     """The arguments to the tool call."""
-    id: Optional[str]
+    id: str | None
     """An identifier associated with the tool call.

     An identifier is needed to associate a tool call request with a tool
@@ -247,7 +247,7 @@ def tool_call(
     *,
     name: str,
     args: dict[str, Any],
-    id: Optional[str],
+    id: str | None,
 ) -> ToolCall:
     """Create a tool call.

@@ -283,23 +283,23 @@ class ToolCallChunk(TypedDict):

     """

-    name: Optional[str]
+    name: str | None
     """The name of the tool to be called."""
-    args: Optional[str]
+    args: str | None
     """The arguments to the tool call."""
-    id: Optional[str]
+    id: str | None
     """An identifier associated with the tool call."""
-    index: Optional[int]
+    index: int | None
     """The index of the tool call in a sequence."""
     type: NotRequired[Literal["tool_call_chunk"]]


 def tool_call_chunk(
     *,
-    name: Optional[str] = None,
-    args: Optional[str] = None,
-    id: Optional[str] = None,
-    index: Optional[int] = None,
+    name: str | None = None,
+    args: str | None = None,
+    id: str | None = None,
+    index: int | None = None,
 ) -> ToolCallChunk:
     """Create a tool call chunk.

@@ -319,10 +319,10 @@ def tool_call_chunk(

 def invalid_tool_call(
     *,
-    name: Optional[str] = None,
-    args: Optional[str] = None,
-    id: Optional[str] = None,
-    error: Optional[str] = None,
+    name: str | None = None,
+    args: str | None = None,
+    id: str | None = None,
+    error: str | None = None,
 ) -> InvalidToolCall:
     """Create an invalid tool call.

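These hunks apply the same Union-to-pipe migration to ``ToolMessage`` and the tool-call TypedDicts, alongside the docstring directive changes. A short sketch of the patterns the annotations describe; the payload values here are illustrative only:

    from langchain_core.messages import ToolMessage
    from langchain_core.messages.tool import tool_call

    # content carries the model-facing subset; artifact keeps the full output.
    msg = ToolMessage(
        "Top result: 42",
        tool_call_id="call_abc123",
        artifact={"all_results": [42, 17, 8]},
        status="success",
    )

    # id is typed str | None, so an explicit None is accepted.
    tc = tool_call(name="search", args={"query": "weather"}, id=None)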
langchain_core/messages/utils.py
CHANGED

@@ -15,16 +15,13 @@ import inspect
 import json
 import logging
 import math
-from collections.abc import Iterable, Sequence
+from collections.abc import Callable, Iterable, Sequence
 from functools import partial
 from typing import (
     TYPE_CHECKING,
     Annotated,
     Any,
-    Callable,
     Literal,
-    Optional,
-    Union,
     cast,
     overload,
 )
@@ -76,20 +73,18 @@ def _get_type(v: Any) -> str:


 AnyMessage = Annotated[
-    Union[
-        Annotated[AIMessage, Tag(tag="ai")],
-        Annotated[HumanMessage, Tag(tag="human")],
-        Annotated[ChatMessage, Tag(tag="chat")],
-        Annotated[SystemMessage, Tag(tag="system")],
-        Annotated[FunctionMessage, Tag(tag="function")],
-        Annotated[ToolMessage, Tag(tag="tool")],
-        Annotated[AIMessageChunk, Tag(tag="AIMessageChunk")],
-        Annotated[HumanMessageChunk, Tag(tag="HumanMessageChunk")],
-        Annotated[ChatMessageChunk, Tag(tag="ChatMessageChunk")],
-        Annotated[SystemMessageChunk, Tag(tag="SystemMessageChunk")],
-        Annotated[FunctionMessageChunk, Tag(tag="FunctionMessageChunk")],
-        Annotated[ToolMessageChunk, Tag(tag="ToolMessageChunk")],
-    ],
+    Annotated[AIMessage, Tag(tag="ai")]
+    | Annotated[HumanMessage, Tag(tag="human")]
+    | Annotated[ChatMessage, Tag(tag="chat")]
+    | Annotated[SystemMessage, Tag(tag="system")]
+    | Annotated[FunctionMessage, Tag(tag="function")]
+    | Annotated[ToolMessage, Tag(tag="tool")]
+    | Annotated[AIMessageChunk, Tag(tag="AIMessageChunk")]
+    | Annotated[HumanMessageChunk, Tag(tag="HumanMessageChunk")]
+    | Annotated[ChatMessageChunk, Tag(tag="ChatMessageChunk")]
+    | Annotated[SystemMessageChunk, Tag(tag="SystemMessageChunk")]
+    | Annotated[FunctionMessageChunk, Tag(tag="FunctionMessageChunk")]
+    | Annotated[ToolMessageChunk, Tag(tag="ToolMessageChunk")],
     Field(discriminator=Discriminator(_get_type)),
 ]

@@ -215,18 +210,18 @@ def message_chunk_to_message(chunk: BaseMessage) -> BaseMessage:
     )


-MessageLikeRepresentation = Union[
-    BaseMessage, list[str], tuple[str, str], str, dict[str, Any]
-]
+MessageLikeRepresentation = (
+    BaseMessage | list[str] | tuple[str, str] | str | dict[str, Any]
+)


 def _create_message_from_message_type(
     message_type: str,
     content: str,
-    name: Optional[str] = None,
-    tool_call_id: Optional[str] = None,
-    tool_calls: Optional[list[dict[str, Any]]] = None,
-    id: Optional[str] = None,
+    name: str | None = None,
+    tool_call_id: str | None = None,
+    tool_calls: list[dict[str, Any]] | None = None,
+    id: str | None = None,
     **additional_kwargs: Any,
 ) -> BaseMessage:
     """Create a message from a ``Message`` type and content string.
@@ -368,7 +363,7 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:


 def convert_to_messages(
-    messages: Union[Iterable[MessageLikeRepresentation], PromptValue],
+    messages: Iterable[MessageLikeRepresentation] | PromptValue,
 ) -> list[BaseMessage]:
     """Convert a sequence of messages to a list of messages.

@@ -399,12 +394,12 @@ def _runnable_support(func: Callable) -> Callable:
     ) -> list[BaseMessage]: ...

     def wrapped(
-        messages: Optional[Sequence[MessageLikeRepresentation]] = None,
+        messages: Sequence[MessageLikeRepresentation] | None = None,
         **kwargs: Any,
-    ) -> Union[
-        list[BaseMessage],
-        Runnable[Sequence[MessageLikeRepresentation], list[BaseMessage]],
-    ]:
+    ) -> (
+        list[BaseMessage]
+        | Runnable[Sequence[MessageLikeRepresentation], list[BaseMessage]]
+    ):
         # Import locally to prevent circular import.
         from langchain_core.runnables.base import RunnableLambda  # noqa: PLC0415

@@ -418,15 +413,15 @@ def _runnable_support(func: Callable) -> Callable:

 @_runnable_support
 def filter_messages(
-    messages: Union[Iterable[MessageLikeRepresentation], PromptValue],
+    messages: Iterable[MessageLikeRepresentation] | PromptValue,
     *,
-    include_names: Optional[Sequence[str]] = None,
-    exclude_names: Optional[Sequence[str]] = None,
-    include_types: Optional[Sequence[Union[str, type[BaseMessage]]]] = None,
-    exclude_types: Optional[Sequence[Union[str, type[BaseMessage]]]] = None,
-    include_ids: Optional[Sequence[str]] = None,
-    exclude_ids: Optional[Sequence[str]] = None,
-    exclude_tool_calls: Optional[Union[Sequence[str], bool]] = None,
+    include_names: Sequence[str] | None = None,
+    exclude_names: Sequence[str] | None = None,
+    include_types: Sequence[str | type[BaseMessage]] | None = None,
+    exclude_types: Sequence[str | type[BaseMessage]] | None = None,
+    include_ids: Sequence[str] | None = None,
+    exclude_ids: Sequence[str] | None = None,
+    exclude_tool_calls: Sequence[str] | bool | None = None,
 ) -> list[BaseMessage]:
     """Filter messages based on ``name``, ``type`` or ``id``.

@@ -461,7 +456,7 @@ def filter_messages(
             anything that is not explicitly excluded will be included.

     Raises:
-        ValueError
+        ValueError: If two incompatible arguments are provided.

     Example:
         .. code-block:: python
@@ -563,20 +558,20 @@ def filter_messages(

 @_runnable_support
 def merge_message_runs(
-    messages: Union[Iterable[MessageLikeRepresentation], PromptValue],
+    messages: Iterable[MessageLikeRepresentation] | PromptValue,
     *,
     chunk_separator: str = "\n",
 ) -> list[BaseMessage]:
     r"""Merge consecutive Messages of the same type.

-    .. note::
+    !!! note
         ToolMessages are not merged, as each has a distinct tool call id that can't be
         merged.

     Args:
         messages: Sequence Message-like objects to merge.
         chunk_separator: Specify the string to be inserted between message chunks.
-
+            Defaults to ``'\n'``.

     Returns:
         list of BaseMessages with consecutive runs of message types merged into single
@@ -696,24 +691,18 @@ def merge_message_runs(
     # init not at runtime.
 @_runnable_support
 def trim_messages(
-    messages: Union[Iterable[MessageLikeRepresentation], PromptValue],
+    messages: Iterable[MessageLikeRepresentation] | PromptValue,
     *,
     max_tokens: int,
-    token_counter: Union[
-        Callable[[list[BaseMessage]], int],
-        Callable[[BaseMessage], int],
-        BaseLanguageModel,
-    ],
+    token_counter: Callable[[list[BaseMessage]], int]
+    | Callable[[BaseMessage], int]
+    | BaseLanguageModel,
     strategy: Literal["first", "last"] = "last",
     allow_partial: bool = False,
-    end_on: Optional[
-        Union[str, type[BaseMessage], Sequence[Union[str, type[BaseMessage]]]]
-    ] = None,
-    start_on: Optional[
-        Union[str, type[BaseMessage], Sequence[Union[str, type[BaseMessage]]]]
-    ] = None,
+    end_on: str | type[BaseMessage] | Sequence[str | type[BaseMessage]] | None = None,
+    start_on: str | type[BaseMessage] | Sequence[str | type[BaseMessage]] | None = None,
     include_system: bool = False,
-    text_splitter: Optional[Union[Callable[[str], list[str]], TextSplitter]] = None,
+    text_splitter: Callable[[str], list[str]] | TextSplitter | None = None,
 ) -> list[BaseMessage]:
     r"""Trim messages to be below a token count.

@@ -739,7 +728,7 @@ def trim_messages(
         the first message in the history if present. To achieve this set the
         ``include_system=True``.

-    .. note::
+    !!! note
         The examples below show how to configure ``trim_messages`` to achieve a behavior
         consistent with the above properties.

@@ -751,7 +740,7 @@ def trim_messages(
             ``BaseLanguageModel.get_num_tokens_from_messages()`` will be used.
             Set to ``len`` to count the number of **messages** in the chat history.

-            .. note::
+            !!! note
                 Use ``count_tokens_approximately`` to get fast, approximate token
                 counts.
                 This is recommended for using ``trim_messages`` on the hot path, where
@@ -1042,10 +1031,11 @@ def trim_messages(


 def convert_to_openai_messages(
-    messages: Union[MessageLikeRepresentation, Sequence[MessageLikeRepresentation]],
+    messages: MessageLikeRepresentation | Sequence[MessageLikeRepresentation],
     *,
     text_format: Literal["string", "block"] = "string",
-) -> Union[dict, list[dict]]:
+    include_id: bool = False,
+) -> dict | list[dict]:
     """Convert LangChain messages into OpenAI message dicts.

     Args:
@@ -1062,6 +1052,8 @@ def convert_to_openai_messages(
             If a message has a string content, this is turned into a list
             with a single content block of type ``'text'``. If a message has
             content blocks these are left as is.
+        include_id: Whether to include message ids in the openai messages, if they
+            are present in the source messages.

     Raises:
         ValueError: if an unrecognized ``text_format`` is specified, or if a message
@@ -1071,11 +1063,11 @@ def convert_to_openai_messages(
     The return type depends on the input type:

     - dict:
-
-
+        If a single message-like object is passed in, a single OpenAI message
+        dict is returned.
     - list[dict]:
-
-
+        If a sequence of message-like objects are passed in, a list of OpenAI
+        message dicts is returned.

     Example:

@@ -1123,7 +1115,7 @@ def convert_to_openai_messages(
     #     {'role': 'assistant', 'content': 'thats nice'}
     # ]

-    .. versionadded:: 0.3.11
+    !!! version-added "Added in version 0.3.11"

     """  # noqa: E501
     if text_format not in {"string", "block"}:
@@ -1140,7 +1132,7 @@ def convert_to_openai_messages(
     for i, message in enumerate(messages):
         oai_msg: dict = {"role": _get_message_openai_role(message)}
         tool_messages: list = []
-        content: Union[str, list[dict]]
+        content: str | list[dict]

         if message.name:
             oai_msg["name"] = message.name
@@ -1150,6 +1142,8 @@ def convert_to_openai_messages(
             oai_msg["refusal"] = message.additional_kwargs["refusal"]
         if isinstance(message, ToolMessage):
             oai_msg["tool_call_id"] = message.tool_call_id
+        if include_id and message.id:
+            oai_msg["id"] = message.id

         if not message.content:
             content = "" if text_format == "string" else []
@@ -1421,10 +1415,8 @@ def _first_max_tokens(
     max_tokens: int,
     token_counter: Callable[[list[BaseMessage]], int],
     text_splitter: Callable[[str], list[str]],
-    partial_strategy: Optional[Literal["first", "last"]] = None,
-    end_on: Optional[
-        Union[str, type[BaseMessage], Sequence[Union[str, type[BaseMessage]]]]
-    ] = None,
+    partial_strategy: Literal["first", "last"] | None = None,
+    end_on: str | type[BaseMessage] | Sequence[str | type[BaseMessage]] | None = None,
 ) -> list[BaseMessage]:
     messages = list(messages)
     if not messages:
@@ -1541,12 +1533,8 @@ def _last_max_tokens(
     text_splitter: Callable[[str], list[str]],
     allow_partial: bool = False,
     include_system: bool = False,
-    start_on: Optional[
-        Union[str, type[BaseMessage], Sequence[Union[str, type[BaseMessage]]]]
-    ] = None,
-    end_on: Optional[
-        Union[str, type[BaseMessage], Sequence[Union[str, type[BaseMessage]]]]
-    ] = None,
+    start_on: str | type[BaseMessage] | Sequence[str | type[BaseMessage]] | None = None,
+    end_on: str | type[BaseMessage] | Sequence[str | type[BaseMessage]] | None = None,
 ) -> list[BaseMessage]:
     messages = list(messages)
     if len(messages) == 0:
@@ -1647,7 +1635,7 @@ def _default_text_splitter(text: str) -> list[str]:

 def _is_message_type(
     message: BaseMessage,
-    type_: Union[str, type[BaseMessage], Sequence[Union[str, type[BaseMessage]]]],
+    type_: str | type[BaseMessage] | Sequence[str | type[BaseMessage]],
 ) -> bool:
     types = [type_] if isinstance(type_, (str, type)) else type_
     types_str = [t for t in types if isinstance(t, str)]
@@ -1720,14 +1708,14 @@ def count_tokens_approximately(
     Returns:
         Approximate number of tokens in the messages.

-    .. note::
+    !!! note
         This is a simple approximation that may not match the exact token count used by
         specific models. For accurate counts, use model-specific tokenizers.

     Warning:
         This function does not currently support counting image tokens.

-    .. versionadded:: 0.3.46
+    !!! version-added "Added in version 0.3.46"

     """
     token_count = 0.0
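Beyond the syntax migration, the one functional change in this file is the new ``include_id`` flag on ``convert_to_openai_messages``. A hedged sketch of that flag and of the plain-callable ``token_counter`` form typed above (ids and the token budget are made up):

    from langchain_core.messages import AIMessage, HumanMessage
    from langchain_core.messages.utils import (
        convert_to_openai_messages,
        count_tokens_approximately,
        trim_messages,
    )

    messages = [HumanMessage("hi", id="msg-1"), AIMessage("hello!", id="msg-2")]

    # include_id=True copies message ids into the OpenAI dicts when present.
    oai = convert_to_openai_messages(messages, include_id=True)

    # token_counter accepts a plain callable; the docstring above recommends
    # count_tokens_approximately for fast, approximate counts on the hot path.
    trimmed = trim_messages(
        messages,
        max_tokens=100,
        token_counter=count_tokens_approximately,
        strategy="last",
    )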
langchain_core/output_parsers/base.py
CHANGED

@@ -8,9 +8,7 @@ from typing import (
     TYPE_CHECKING,
     Any,
     Generic,
-    Optional,
     TypeVar,
-    Union,
 )

 from typing_extensions import override
@@ -71,7 +69,7 @@ class BaseGenerationOutputParser(
     @override
     def InputType(self) -> Any:
         """Return the input type for the parser."""
-        return Union[str, AnyMessage]
+        return str | AnyMessage

     @property
     @override
@@ -84,8 +82,8 @@ class BaseGenerationOutputParser(
     @override
     def invoke(
         self,
-        input: Union[str, BaseMessage],
-        config: Optional[RunnableConfig] = None,
+        input: str | BaseMessage,
+        config: RunnableConfig | None = None,
         **kwargs: Any,
     ) -> T:
         if isinstance(input, BaseMessage):
@@ -107,9 +105,9 @@ class BaseGenerationOutputParser(
     @override
     async def ainvoke(
         self,
-        input: Union[str, BaseMessage],
-        config: Optional[RunnableConfig] = None,
-        **kwargs: Optional[Any],
+        input: str | BaseMessage,
+        config: RunnableConfig | None = None,
+        **kwargs: Any | None,
     ) -> T:
         if isinstance(input, BaseMessage):
             return await self._acall_with_config(
@@ -165,7 +163,7 @@ class BaseOutputParser(
     @override
     def InputType(self) -> Any:
         """Return the input type for the parser."""
-        return Union[str, AnyMessage]
+        return str | AnyMessage

     @property
     @override
@@ -192,8 +190,8 @@ class BaseOutputParser(
     @override
     def invoke(
         self,
-        input: Union[str, BaseMessage],
-        config: Optional[RunnableConfig] = None,
+        input: str | BaseMessage,
+        config: RunnableConfig | None = None,
         **kwargs: Any,
     ) -> T:
         if isinstance(input, BaseMessage):
@@ -215,9 +213,9 @@ class BaseOutputParser(
     @override
     async def ainvoke(
         self,
-        input: Union[str, BaseMessage],
-        config: Optional[RunnableConfig] = None,
-        **kwargs: Optional[Any],
+        input: str | BaseMessage,
+        config: RunnableConfig | None = None,
+        **kwargs: Any | None,
     ) -> T:
         if isinstance(input, BaseMessage):
             return await self._acall_with_config(
langchain_core/output_parsers/json.py
CHANGED

@@ -4,7 +4,7 @@ from __future__ import annotations

 import json
 from json import JSONDecodeError
-from typing import Annotated, Any, Optional, TypeVar, Union
+from typing import Annotated, Any, TypeVar

 import jsonpatch  # type: ignore[import-untyped]
 import pydantic
@@ -23,7 +23,7 @@ from langchain_core.utils.json import (
 )

 # Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
-PydanticBaseModel = Union[BaseModel, pydantic.BaseModel]
+PydanticBaseModel = BaseModel | pydantic.BaseModel

 TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)

@@ -38,12 +38,12 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
         describing the difference between the previous and the current object.
     """

-    pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None  # type: ignore[valid-type]
+    pydantic_object: Annotated[type[TBaseModel] | None, SkipValidation()] = None  # type: ignore[valid-type]
     """The Pydantic object to use for validation.
     If None, no validation is performed."""

     @override
-    def _diff(self, prev: Optional[Any], next: Any) -> Any:
+    def _diff(self, prev: Any | None, next: Any) -> Any:
         return jsonpatch.make_patch(prev, next).patch

     @staticmethod
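``pydantic_object`` remains optional (``type[TBaseModel] | None``). A minimal sketch; per the docstring above, the schema is used for validation, and no validation happens when it is None (the Joke model is made up):

    from pydantic import BaseModel
    from langchain_core.output_parsers import JsonOutputParser

    class Joke(BaseModel):
        setup: str
        punchline: str

    parser = JsonOutputParser(pydantic_object=Joke)
    result = parser.parse('{"setup": "Why?", "punchline": "Because."}')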
langchain_core/output_parsers/list.py
CHANGED

@@ -7,7 +7,7 @@ import re
 from abc import abstractmethod
 from collections import deque
 from io import StringIO
-from typing import TYPE_CHECKING, TypeVar, Union
+from typing import TYPE_CHECKING, TypeVar

 from typing_extensions import override

@@ -70,9 +70,7 @@ class ListOutputParser(BaseTransformOutputParser[list[str]]):
         raise NotImplementedError

     @override
-    def _transform(
-        self, input: Iterator[Union[str, BaseMessage]]
-    ) -> Iterator[list[str]]:
+    def _transform(self, input: Iterator[str | BaseMessage]) -> Iterator[list[str]]:
         buffer = ""
         for chunk in input:
             if isinstance(chunk, BaseMessage):
@@ -105,7 +103,7 @@ class ListOutputParser(BaseTransformOutputParser[list[str]]):

     @override
     async def _atransform(
-        self, input: AsyncIterator[Union[str, BaseMessage]]
+        self, input: AsyncIterator[str | BaseMessage]
     ) -> AsyncIterator[list[str]]:
         buffer = ""
         async for chunk in input:
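``_transform`` consumes an iterator of ``str | BaseMessage`` chunks, which is what lets list parsers emit items incrementally under streaming. A sketch with the comma-separated subclass:

    from langchain_core.output_parsers import CommaSeparatedListOutputParser

    parser = CommaSeparatedListOutputParser()
    parser.invoke("red, green, blue")  # -> ['red', 'green', 'blue']

    # In a streaming chain, _transform buffers partial chunks and yields
    # items as they complete rather than waiting for the full string.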
langchain_core/output_parsers/openai_functions.py
CHANGED

@@ -3,7 +3,7 @@

 import copy
 import json
 from types import GenericAlias
-from typing import Any, Optional, Union
+from typing import Any

 import jsonpatch  # type: ignore[import-untyped]
 from pydantic import BaseModel, model_validator
@@ -74,7 +74,7 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
         return "json_functions"

     @override
-    def _diff(self, prev: Optional[Any], next: Any) -> Any:
+    def _diff(self, prev: Any | None, next: Any) -> Any:
         return jsonpatch.make_patch(prev, next).patch

     def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
@@ -217,7 +217,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):

     """

-    pydantic_schema: Union[type[BaseModel], dict[str, type[BaseModel]]]
+    pydantic_schema: type[BaseModel] | dict[str, type[BaseModel]]
     """The pydantic schema to parse the output with.

     If multiple schemas are provided, then the function name will be used to