langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/_api/beta_decorator.py +2 -2
- langchain_core/_api/deprecation.py +1 -1
- langchain_core/beta/runnables/context.py +1 -1
- langchain_core/callbacks/base.py +14 -23
- langchain_core/callbacks/file.py +13 -2
- langchain_core/callbacks/manager.py +74 -157
- langchain_core/callbacks/streaming_stdout.py +3 -4
- langchain_core/callbacks/usage.py +2 -12
- langchain_core/chat_history.py +6 -6
- langchain_core/documents/base.py +1 -1
- langchain_core/documents/compressor.py +9 -6
- langchain_core/indexing/base.py +2 -2
- langchain_core/language_models/_utils.py +230 -101
- langchain_core/language_models/base.py +35 -23
- langchain_core/language_models/chat_models.py +245 -53
- langchain_core/language_models/fake_chat_models.py +28 -81
- langchain_core/load/dump.py +3 -4
- langchain_core/messages/__init__.py +38 -22
- langchain_core/messages/ai.py +188 -30
- langchain_core/messages/base.py +164 -25
- langchain_core/messages/block_translators/__init__.py +89 -0
- langchain_core/messages/block_translators/anthropic.py +451 -0
- langchain_core/messages/block_translators/bedrock.py +45 -0
- langchain_core/messages/block_translators/bedrock_converse.py +47 -0
- langchain_core/messages/block_translators/google_genai.py +45 -0
- langchain_core/messages/block_translators/google_vertexai.py +47 -0
- langchain_core/messages/block_translators/groq.py +45 -0
- langchain_core/messages/block_translators/langchain_v0.py +297 -0
- langchain_core/messages/block_translators/ollama.py +45 -0
- langchain_core/messages/block_translators/openai.py +586 -0
- langchain_core/messages/{content_blocks.py → content.py} +346 -213
- langchain_core/messages/human.py +29 -9
- langchain_core/messages/system.py +29 -9
- langchain_core/messages/tool.py +94 -13
- langchain_core/messages/utils.py +32 -234
- langchain_core/output_parsers/base.py +14 -50
- langchain_core/output_parsers/json.py +2 -5
- langchain_core/output_parsers/list.py +2 -7
- langchain_core/output_parsers/openai_functions.py +5 -28
- langchain_core/output_parsers/openai_tools.py +49 -90
- langchain_core/output_parsers/pydantic.py +2 -3
- langchain_core/output_parsers/transform.py +12 -53
- langchain_core/output_parsers/xml.py +9 -17
- langchain_core/prompt_values.py +8 -112
- langchain_core/prompts/chat.py +1 -3
- langchain_core/runnables/base.py +500 -451
- langchain_core/runnables/branch.py +1 -1
- langchain_core/runnables/fallbacks.py +4 -4
- langchain_core/runnables/history.py +1 -1
- langchain_core/runnables/passthrough.py +3 -3
- langchain_core/runnables/retry.py +1 -1
- langchain_core/runnables/router.py +1 -1
- langchain_core/structured_query.py +3 -7
- langchain_core/tools/base.py +14 -41
- langchain_core/tools/convert.py +2 -22
- langchain_core/tools/retriever.py +1 -8
- langchain_core/tools/structured.py +2 -10
- langchain_core/tracers/_streaming.py +6 -7
- langchain_core/tracers/base.py +7 -14
- langchain_core/tracers/core.py +4 -27
- langchain_core/tracers/event_stream.py +4 -15
- langchain_core/tracers/langchain.py +3 -14
- langchain_core/tracers/log_stream.py +2 -3
- langchain_core/utils/_merge.py +45 -7
- langchain_core/utils/function_calling.py +22 -9
- langchain_core/utils/utils.py +29 -0
- langchain_core/version.py +1 -1
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/METADATA +7 -9
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/RECORD +71 -64
- langchain_core/v1/__init__.py +0 -1
- langchain_core/v1/chat_models.py +0 -1047
- langchain_core/v1/messages.py +0 -755
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/WHEEL +0 -0
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/entry_points.txt +0 -0
langchain_core/language_models/fake_chat_models.py
CHANGED

@@ -3,8 +3,8 @@
 import asyncio
 import re
 import time
-from collections.abc import AsyncIterator, Iterable, Iterator
-from typing import Any, Optional, Union, cast
+from collections.abc import AsyncIterator, Iterator
+from typing import Any, Literal, Optional, Union, cast
 
 from typing_extensions import override
@@ -16,10 +16,6 @@ from langchain_core.language_models.chat_models import BaseChatModel, SimpleChatModel
 from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
 from langchain_core.runnables import RunnableConfig
-from langchain_core.v1.chat_models import BaseChatModel as BaseChatModelV1
-from langchain_core.v1.messages import AIMessage as AIMessageV1
-from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
-from langchain_core.v1.messages import MessageV1
 
 
 class FakeMessagesListChatModel(BaseChatModel):
@@ -116,7 +112,12 @@ class FakeListChatModel(SimpleChatModel):
             ):
                 raise FakeListChatModelError
 
-            yield ChatGenerationChunk(message=AIMessageChunk(content=c))
+            chunk_position: Optional[Literal["last"]] = (
+                "last" if i_c == len(response) - 1 else None
+            )
+            yield ChatGenerationChunk(
+                message=AIMessageChunk(content=c, chunk_position=chunk_position)
+            )
 
     @override
     async def _astream(
@@ -139,7 +140,12 @@ class FakeListChatModel(SimpleChatModel):
                 and i_c == self.error_on_chunk_number
             ):
                 raise FakeListChatModelError
-            yield ChatGenerationChunk(message=AIMessageChunk(content=c))
+            chunk_position: Optional[Literal["last"]] = (
+                "last" if i_c == len(response) - 1 else None
+            )
+            yield ChatGenerationChunk(
+                message=AIMessageChunk(content=c, chunk_position=chunk_position)
+            )
 
     @property
     @override
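Both the sync and async fake streams now tag their final chunk. A minimal sketch of what this enables downstream, assuming the 1.0.0a1 aggregation behavior shown later in this diff (add_ai_message_chunks propagates chunk_position):

    from langchain_core.language_models.fake_chat_models import FakeListChatModel

    model = FakeListChatModel(responses=["hi"])

    aggregate = None
    for chunk in model.stream("anything"):
        # chunks sum with __add__; only the final chunk carries chunk_position="last"
        aggregate = chunk if aggregate is None else aggregate + chunk

    # the merged chunk inherits "last", signalling the stream is complete
    print(aggregate.chunk_position)  # "last"
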
@@ -155,7 +161,7 @@ class FakeListChatModel(SimpleChatModel):
         *,
         return_exceptions: bool = False,
         **kwargs: Any,
-    ) -> list[BaseMessage]:
+    ) -> list[AIMessage]:
         if isinstance(config, list):
             return [self.invoke(m, c, **kwargs) for m, c in zip(inputs, config)]
         return [self.invoke(m, config, **kwargs) for m in inputs]
@@ -168,7 +174,7 @@ class FakeListChatModel(SimpleChatModel):
         *,
         return_exceptions: bool = False,
         **kwargs: Any,
-    ) -> list[BaseMessage]:
+    ) -> list[AIMessage]:
         if isinstance(config, list):
             # do Not use an async iterator here because need explicit ordering
             return [await self.ainvoke(m, c, **kwargs) for m, c in zip(inputs, config)]
@@ -227,11 +233,12 @@ class GenericFakeChatModel(BaseChatModel):
     This can be expanded to accept other types like Callables / dicts / strings
     to make the interface more generic if needed.
 
-    Note: if you want to pass a list, you can use ``iter`` to convert it to an iterator.
+    .. note::
+        if you want to pass a list, you can use ``iter`` to convert it to an iterator.
 
-    Please note that streaming is not implemented yet. We should try to implement it
-    in the future by delegating to invoke and then breaking the resulting output
-    into message chunks.
+    .. warning::
+        Streaming is not implemented yet. We should try to implement it in the future by
+        delegating to invoke and then breaking the resulting output into message chunks.
     """
 
     @override
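The ``iter`` note exists because the ``messages`` field is consumed as an iterator, one message per call (the same pattern the removed V1 classes below used). A small usage sketch, with illustrative message text:

    from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
    from langchain_core.messages import AIMessage

    # messages must be an iterator, so wrap the list with iter()
    model = GenericFakeChatModel(messages=iter([AIMessage(content="hello"), "world"]))
    print(model.invoke("first").content)   # "hello"
    print(model.invoke("second").content)  # "world" (bare strings are wrapped)
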
@@ -286,10 +293,16 @@ class GenericFakeChatModel(BaseChatModel):
 
         content_chunks = cast("list[str]", re.split(r"(\s)", content))
 
-        for token in content_chunks:
+        for idx, token in enumerate(content_chunks):
             chunk = ChatGenerationChunk(
                 message=AIMessageChunk(content=token, id=message.id)
             )
+            if (
+                idx == len(content_chunks) - 1
+                and isinstance(chunk.message, AIMessageChunk)
+                and not message.additional_kwargs
+            ):
+                chunk.message.chunk_position = "last"
             if run_manager:
                 run_manager.on_llm_new_token(token, chunk=chunk)
             yield chunk
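For context on the loop above: re.split(r"(\s)", content) keeps each whitespace separator as its own token, so the fake stream emits word and space chunks; the new enumerate is what identifies the final token to tag. For example:

    import re

    content = "hello world"
    tokens = re.split(r"(\s)", content)
    print(tokens)  # ['hello', ' ', 'world'] -- the separator survives as a token
    # the token at index len(tokens) - 1 is the one tagged chunk_position="last"
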
@@ -371,69 +384,3 @@ class ParrotFakeChatModel(BaseChatModel):
     @property
     def _llm_type(self) -> str:
         return "parrot-fake-chat-model"
-
-
-class GenericFakeChatModelV1(BaseChatModelV1):
-    """Generic fake chat model that can be used to test the chat model interface."""
-
-    messages: Optional[Iterator[Union[AIMessageV1, str]]] = None
-    message_chunks: Optional[Iterable[Union[AIMessageChunkV1, str]]] = None
-
-    @override
-    def _invoke(
-        self,
-        messages: list[MessageV1],
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> AIMessageV1:
-        """Top Level call."""
-        if self.messages is None:
-            error_msg = "Messages iterator is not set."
-            raise ValueError(error_msg)
-        message = next(self.messages)
-        return AIMessageV1(content=message) if isinstance(message, str) else message
-
-    @override
-    def _stream(
-        self,
-        messages: list[MessageV1],
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> Iterator[AIMessageChunkV1]:
-        """Top Level call."""
-        if self.message_chunks is None:
-            error_msg = "Message chunks iterator is not set."
-            raise ValueError(error_msg)
-        for chunk in self.message_chunks:
-            if isinstance(chunk, str):
-                yield AIMessageChunkV1(chunk)
-            else:
-                yield chunk
-
-    @property
-    def _llm_type(self) -> str:
-        return "generic-fake-chat-model"
-
-
-class ParrotFakeChatModelV1(BaseChatModelV1):
-    """Generic fake chat model that can be used to test the chat model interface.
-
-    * Chat model should be usable in both sync and async tests
-    """
-
-    @override
-    def _invoke(
-        self,
-        messages: list[MessageV1],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> AIMessageV1:
-        """Top Level call."""
-        if isinstance(messages[-1], AIMessageV1):
-            return messages[-1]
-        return AIMessageV1(content=messages[-1].content)
-
-    @property
-    def _llm_type(self) -> str:
-        return "parrot-fake-chat-model"
langchain_core/load/dump.py
CHANGED

@@ -73,10 +73,9 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
 def dumpd(obj: Any) -> Any:
     """Return a dict representation of an object.
 
-    Note:
-        Unfortunately this function is not as efficient as it could be
-        because it first dumps the object to a json string and then loads it
-        back into a dictionary.
+    .. note::
+        Unfortunately this function is not as efficient as it could be because it first
+        dumps the object to a json string and then loads it back into a dictionary.
 
     Args:
         obj: The object to dump.
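The rewritten note describes a serialize-then-parse round trip. A rough equivalence sketch (not necessarily the literal implementation):

    import json

    from langchain_core.load.dump import dumpd, dumps
    from langchain_core.messages import AIMessage

    msg = AIMessage(content="hello")
    # per the docstring, dumpd behaves like dumping to a JSON string, then loading it back
    assert dumpd(msg) == json.loads(dumps(msg))
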
langchain_core/messages/__init__.py
CHANGED

@@ -18,6 +18,7 @@
 from typing import TYPE_CHECKING
 
 from langchain_core._import_utils import import_attr
+from langchain_core.utils.utils import LC_AUTO_PREFIX, LC_ID_PREFIX, ensure_id
 
 if TYPE_CHECKING:
     from langchain_core.messages.ai import (
@@ -32,7 +33,7 @@ if TYPE_CHECKING:
         messages_to_dict,
     )
     from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
-    from langchain_core.messages.content_blocks import (
+    from langchain_core.messages.content import (
         Annotation,
         AudioContentBlock,
         Citation,
@@ -54,6 +55,10 @@ if TYPE_CHECKING:
         convert_to_openai_data_block,
         convert_to_openai_image_block,
         is_data_content_block,
+        is_reasoning_block,
+        is_text_block,
+        is_tool_call_block,
+        is_tool_call_chunk,
     )
     from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk
     from langchain_core.messages.human import HumanMessage, HumanMessageChunk
@@ -81,6 +86,8 @@ if TYPE_CHECKING:
     )
 
 __all__ = (
+    "LC_AUTO_PREFIX",
+    "LC_ID_PREFIX",
     "AIMessage",
     "AIMessageChunk",
     "Annotation",
@@ -124,9 +131,14 @@ __all__ = (
     "convert_to_openai_data_block",
     "convert_to_openai_image_block",
     "convert_to_openai_messages",
+    "ensure_id",
     "filter_messages",
     "get_buffer_string",
     "is_data_content_block",
+    "is_reasoning_block",
+    "is_text_block",
+    "is_tool_call_block",
+    "is_tool_call_chunk",
     "merge_content",
     "merge_message_runs",
     "message_chunk_to_message",
@@ -139,53 +151,57 @@ __all__ = (
 _dynamic_imports = {
     "AIMessage": "ai",
     "AIMessageChunk": "ai",
-    "Annotation": "content_blocks",
-    "AudioContentBlock": "content_blocks",
+    "Annotation": "content",
+    "AudioContentBlock": "content",
     "BaseMessage": "base",
     "BaseMessageChunk": "base",
     "merge_content": "base",
     "message_to_dict": "base",
     "messages_to_dict": "base",
-    "Citation": "content_blocks",
-    "ContentBlock": "content_blocks",
+    "Citation": "content",
+    "ContentBlock": "content",
     "ChatMessage": "chat",
     "ChatMessageChunk": "chat",
-    "CodeInterpreterCall": "content_blocks",
-    "CodeInterpreterOutput": "content_blocks",
-    "CodeInterpreterResult": "content_blocks",
-    "DataContentBlock": "content_blocks",
-    "FileContentBlock": "content_blocks",
+    "CodeInterpreterCall": "content",
+    "CodeInterpreterOutput": "content",
+    "CodeInterpreterResult": "content",
+    "DataContentBlock": "content",
+    "FileContentBlock": "content",
     "FunctionMessage": "function",
     "FunctionMessageChunk": "function",
     "HumanMessage": "human",
     "HumanMessageChunk": "human",
-    "NonStandardAnnotation": "content_blocks",
-    "NonStandardContentBlock": "content_blocks",
-    "PlainTextContentBlock": "content_blocks",
-    "ReasoningContentBlock": "content_blocks",
+    "NonStandardAnnotation": "content",
+    "NonStandardContentBlock": "content",
+    "PlainTextContentBlock": "content",
+    "ReasoningContentBlock": "content",
     "RemoveMessage": "modifier",
     "SystemMessage": "system",
     "SystemMessageChunk": "system",
-    "WebSearchCall": "content_blocks",
-    "WebSearchResult": "content_blocks",
-    "ImageContentBlock": "content_blocks",
+    "WebSearchCall": "content",
+    "WebSearchResult": "content",
+    "ImageContentBlock": "content",
     "InvalidToolCall": "tool",
-    "TextContentBlock": "content_blocks",
+    "TextContentBlock": "content",
     "ToolCall": "tool",
     "ToolCallChunk": "tool",
     "ToolMessage": "tool",
     "ToolMessageChunk": "tool",
-    "VideoContentBlock": "content_blocks",
+    "VideoContentBlock": "content",
     "AnyMessage": "utils",
     "MessageLikeRepresentation": "utils",
     "_message_from_dict": "utils",
     "convert_to_messages": "utils",
-    "convert_to_openai_data_block": "content_blocks",
-    "convert_to_openai_image_block": "content_blocks",
+    "convert_to_openai_data_block": "content",
+    "convert_to_openai_image_block": "content",
     "convert_to_openai_messages": "utils",
     "filter_messages": "utils",
     "get_buffer_string": "utils",
-    "is_data_content_block": "content_blocks",
+    "is_data_content_block": "content",
+    "is_reasoning_block": "content",
+    "is_text_block": "content",
+    "is_tool_call_block": "content",
+    "is_tool_call_chunk": "content",
     "merge_message_runs": "utils",
     "message_chunk_to_message": "utils",
     "messages_from_dict": "utils",
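The practical upshot of these __init__.py hunks: the standard-content helpers now resolve lazily from the renamed ``content`` module (formerly ``content_blocks``), while the public import path stays ``langchain_core.messages``. A sketch using the newly exported type guards; the block shapes and guard semantics here are assumptions inferred from the ``types.ToolCall`` usage later in this diff:

    from langchain_core.messages import is_text_block, is_tool_call_block

    blocks = [
        {"type": "text", "text": "hi"},
        {"type": "tool_call", "id": "call_1", "name": "search", "args": {"q": "x"}},
    ]
    print([is_text_block(b) for b in blocks])       # [True, False] (assumed semantics)
    print([is_tool_call_block(b) for b in blocks])  # [False, True]
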
langchain_core/messages/ai.py
CHANGED

@@ -3,12 +3,18 @@
 import json
 import logging
 import operator
+from collections.abc import Sequence
 from typing import Any, Literal, Optional, Union, cast
 
 from pydantic import model_validator
-from typing_extensions import NotRequired, Self, TypedDict, override
+from typing_extensions import NotRequired, Self, TypedDict, overload, override
 
-from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content
+from langchain_core.messages import content as types
+from langchain_core.messages.base import (
+    BaseMessage,
+    BaseMessageChunk,
+    merge_content,
+)
 from langchain_core.messages.tool import (
     InvalidToolCall,
     ToolCall,
@@ -22,23 +28,11 @@ from langchain_core.messages.tool import tool_call_chunk as create_tool_call_chunk
 from langchain_core.utils._merge import merge_dicts, merge_lists
 from langchain_core.utils.json import parse_partial_json
 from langchain_core.utils.usage import _dict_int_op
+from langchain_core.utils.utils import LC_AUTO_PREFIX, LC_ID_PREFIX
 
 logger = logging.getLogger(__name__)
 
 
-_LC_ID_PREFIX = "run-"
-"""Internal tracing/callback system identifier.
-
-Used for:
-- Tracing. Every LangChain operation (LLM call, chain execution, tool use, etc.)
-  gets a unique run_id (UUID)
-- Enables tracking parent-child relationships between operations
-"""
-
-_LC_AUTO_PREFIX = "lc_"
-"""LangChain auto-generated ID prefix for messages and content blocks."""
-
-
 class InputTokenDetails(TypedDict, total=False):
     """Breakdown of input token counts.
 
@@ -180,16 +174,42 @@ class AIMessage(BaseMessage):
     type: Literal["ai"] = "ai"
     """The type of the message (used for deserialization). Defaults to "ai"."""
 
+    @overload
+    def __init__(
+        self,
+        content: Union[str, list[Union[str, dict]]],
+        **kwargs: Any,
+    ) -> None: ...
+
+    @overload
     def __init__(
-        self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
+        self,
+        content: Optional[Union[str, list[Union[str, dict]]]] = None,
+        content_blocks: Optional[list[types.ContentBlock]] = None,
+        **kwargs: Any,
+    ) -> None: ...
+
+    def __init__(
+        self,
+        content: Optional[Union[str, list[Union[str, dict]]]] = None,
+        content_blocks: Optional[list[types.ContentBlock]] = None,
+        **kwargs: Any,
     ) -> None:
-        """Pass in content as positional arg.
+        """Specify ``content`` as positional arg or ``content_blocks`` for typing."""
+        if content_blocks is not None:
+            # If there are tool calls in content_blocks, but not in tool_calls, add them
+            content_tool_calls = [
+                block for block in content_blocks if block.get("type") == "tool_call"
+            ]
+            if content_tool_calls and "tool_calls" not in kwargs:
+                kwargs["tool_calls"] = content_tool_calls
 
-        Args:
-            content: The content of the message.
-            kwargs: Additional arguments to pass to the parent class.
-        """
-        super().__init__(content=content, **kwargs)
+            super().__init__(
+                content=cast("Union[str, list[Union[str, dict]]]", content_blocks),
+                **kwargs,
+            )
+        else:
+            super().__init__(content=content, **kwargs)
 
     @property
     def lc_attributes(self) -> dict:
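With the new keyword, tool calls embedded in ``content_blocks`` are mirrored into ``tool_calls`` automatically unless the caller passes ``tool_calls`` explicitly. A sketch, with block fields following the ``types.ToolCall`` shape used in this file:

    from langchain_core.messages import AIMessage

    msg = AIMessage(
        content_blocks=[
            {"type": "text", "text": "Let me check."},
            {"type": "tool_call", "id": "call_1", "name": "search", "args": {"q": "x"}},
        ]
    )
    # __init__ copied the tool_call block into tool_calls
    print(msg.tool_calls[0]["name"])  # "search"
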
@@ -199,6 +219,49 @@ class AIMessage(BaseMessage):
             "invalid_tool_calls": self.invalid_tool_calls,
         }
 
+    @property
+    def content_blocks(self) -> list[types.ContentBlock]:
+        """Return content blocks of the message."""
+        if self.response_metadata.get("output_version") == "v1":
+            return cast("list[types.ContentBlock]", self.content)
+
+        model_provider = self.response_metadata.get("model_provider")
+        if model_provider:
+            from langchain_core.messages.block_translators import get_translator
+
+            translator = get_translator(model_provider)
+            if translator:
+                try:
+                    return translator["translate_content"](self)
+                except NotImplementedError:
+                    pass
+
+        # Otherwise, use best-effort parsing
+        blocks = super().content_blocks
+
+        if self.tool_calls:
+            # Add from tool_calls if missing from content
+            content_tool_call_ids = {
+                block.get("id")
+                for block in self.content
+                if isinstance(block, dict) and block.get("type") == "tool_call"
+            }
+            for tool_call in self.tool_calls:
+                if (id_ := tool_call.get("id")) and id_ not in content_tool_call_ids:
+                    tool_call_block: types.ToolCall = {
+                        "type": "tool_call",
+                        "id": id_,
+                        "name": tool_call["name"],
+                        "args": tool_call["args"],
+                    }
+                    if "index" in tool_call:
+                        tool_call_block["index"] = tool_call["index"]  # type: ignore[typeddict-item]
+                    if "extras" in tool_call:
+                        tool_call_block["extras"] = tool_call["extras"]  # type: ignore[typeddict-item]
+                    blocks.append(tool_call_block)
+
+        return blocks
+
     # TODO: remove this logic if possible, reducing breaking nature of changes
     @model_validator(mode="before")
     @classmethod
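The property's resolution order is: trust ``content`` verbatim when ``output_version == "v1"``; otherwise try a provider-specific translator from ``block_translators``; otherwise fall back to best-effort parsing, back-filling blocks from ``tool_calls``. A sketch of the first and last branches (the best-effort handling of string content is assumed from the base-class behavior):

    from langchain_core.messages import AIMessage

    # v1 output: content already holds standard blocks and is returned as-is
    v1_msg = AIMessage(
        content=[{"type": "text", "text": "hi"}],
        response_metadata={"output_version": "v1"},
    )
    print(v1_msg.content_blocks)  # [{'type': 'text', 'text': 'hi'}]

    # fallback: a tool call absent from content is appended as a tool_call block
    legacy_msg = AIMessage(
        content="calling a tool",
        tool_calls=[{"type": "tool_call", "id": "call_1", "name": "f", "args": {}}],
    )
    print(legacy_msg.content_blocks[-1]["type"])  # "tool_call"
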
@@ -227,7 +290,9 @@ class AIMessage(BaseMessage):
         # Ensure "type" is properly set on all tool call-like dicts.
         if tool_calls := values.get("tool_calls"):
             values["tool_calls"] = [
-                create_tool_call(**{k: v for k, v in tc.items() if k != "type"})
+                create_tool_call(
+                    **{k: v for k, v in tc.items() if k not in ("type", "extras")}
+                )
                 for tc in tool_calls
             ]
         if invalid_tool_calls := values.get("invalid_tool_calls"):
@@ -298,6 +363,13 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
     tool_call_chunks: list[ToolCallChunk] = []
     """If provided, tool call chunks associated with the message."""
 
+    chunk_position: Optional[Literal["last"]] = None
+    """Optional span represented by an aggregated AIMessageChunk.
+
+    If a chunk with ``chunk_position="last"`` is aggregated into a stream,
+    ``tool_call_chunks`` in message content will be parsed into ``tool_calls``.
+    """
+
     @property
     def lc_attributes(self) -> dict:
         """Attrs to be serialized even if they are derived from other init args."""
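The field's docstring is best illustrated by aggregation: once a chunk tagged ``"last"`` is merged in, completed ``tool_call_chunks`` are parsed into ``tool_calls``. A sketch assuming the chunk-merging semantics (args fragments concatenated by index) carried over from earlier releases:

    from langchain_core.messages import AIMessageChunk

    first = AIMessageChunk(
        content="",
        tool_call_chunks=[
            {"type": "tool_call_chunk", "id": "call_1", "name": "f", "args": '{"x": ', "index": 0}
        ],
    )
    last = AIMessageChunk(
        content="",
        tool_call_chunks=[
            {"type": "tool_call_chunk", "id": None, "name": None, "args": "1}", "index": 0}
        ],
        chunk_position="last",
    )
    merged = first + last  # args fragments concatenate by index: '{"x": 1}'
    print(merged.chunk_position)         # "last"
    print(merged.tool_calls[0]["args"])  # {'x': 1}
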
@@ -306,6 +378,49 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
             "invalid_tool_calls": self.invalid_tool_calls,
         }
 
+    @property
+    def content_blocks(self) -> list[types.ContentBlock]:
+        """Return content blocks of the message."""
+        if self.response_metadata.get("output_version") == "v1":
+            return cast("list[types.ContentBlock]", self.content)
+
+        model_provider = self.response_metadata.get("model_provider")
+        if model_provider:
+            from langchain_core.messages.block_translators import get_translator
+
+            translator = get_translator(model_provider)
+            if translator:
+                try:
+                    return translator["translate_content_chunk"](self)
+                except NotImplementedError:
+                    pass
+
+        # Otherwise, use best-effort parsing
+        blocks = super().content_blocks
+
+        if (
+            self.tool_call_chunks
+            and not self.content
+            and self.chunk_position != "last"  # keep tool_calls if aggregated
+        ):
+            blocks = [
+                block
+                for block in blocks
+                if block["type"] not in ("tool_call", "invalid_tool_call")
+            ]
+            for tool_call_chunk in self.tool_call_chunks:
+                tc: types.ToolCallChunk = {
+                    "type": "tool_call_chunk",
+                    "id": tool_call_chunk.get("id"),
+                    "name": tool_call_chunk.get("name"),
+                    "args": tool_call_chunk.get("args"),
+                }
+                if (idx := tool_call_chunk.get("index")) is not None:
+                    tc["index"] = idx
+                blocks.append(tc)
+
+        return blocks
+
     @model_validator(mode="after")
     def init_tool_calls(self) -> Self:
         """Initialize tool calls from tool call chunks.
@@ -358,7 +473,10 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
 
         for chunk in self.tool_call_chunks:
             try:
-                args_ = parse_partial_json(chunk["args"]) if chunk["args"] != "" else {}
+                if chunk["args"] is not None and chunk["args"] != "":
+                    args_ = parse_partial_json(chunk["args"])
+                else:
+                    args_ = {}
                 if isinstance(args_, dict):
                     tool_calls.append(
                         create_tool_call(
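The rewrite makes the ``None`` case explicit: both ``None`` and ``""`` now short-circuit to ``{}`` instead of reaching the parser. For context, ``parse_partial_json`` is lenient about incomplete fragments, which is what makes streamed tool args parseable mid-stream:

    from langchain_core.utils.json import parse_partial_json

    print(parse_partial_json('{"query": "wea'))  # {'query': 'wea'} -- closes the fragment
    print(parse_partial_json("{}"))              # {}
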
@@ -373,10 +491,45 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
                 add_chunk_to_invalid_tool_calls(chunk)
         self.tool_calls = tool_calls
         self.invalid_tool_calls = invalid_tool_calls
+
+        if (
+            self.chunk_position == "last"
+            and self.tool_call_chunks
+            and self.response_metadata.get("output_version") == "v1"
+            and isinstance(self.content, list)
+        ):
+            id_to_tc: dict[str, types.ToolCall] = {
+                cast("str", tc.get("id")): {
+                    "type": "tool_call",
+                    "name": tc["name"],
+                    "args": tc["args"],
+                    "id": tc.get("id"),
+                }
+                for tc in self.tool_calls
+                if "id" in tc
+            }
+            for idx, block in enumerate(self.content):
+                if (
+                    isinstance(block, dict)
+                    and block.get("type") == "tool_call_chunk"
+                    and (call_id := block.get("id"))
+                    and call_id in id_to_tc
+                ):
+                    self.content[idx] = cast("dict[str, Any]", id_to_tc[call_id])
+
         return self
 
+    @overload  # type: ignore[override]  # summing BaseMessages gives ChatPromptTemplate
+    def __add__(self, other: "AIMessageChunk") -> "AIMessageChunk": ...
+
+    @overload
+    def __add__(self, other: Sequence["AIMessageChunk"]) -> "AIMessageChunk": ...
+
+    @overload
+    def __add__(self, other: Any) -> BaseMessageChunk: ...
+
     @override
-    def __add__(self, other: Any) -> BaseMessageChunk:
+    def __add__(self, other: Any) -> BaseMessageChunk:
         if isinstance(other, AIMessageChunk):
             return add_ai_message_chunks(self, other)
         if isinstance(other, (list, tuple)) and all(
@@ -432,24 +585,28 @@ def add_ai_message_chunks(
         for id_ in candidates:
             if (
                 id_
-                and not id_.startswith(_LC_ID_PREFIX)
-                and not id_.startswith(_LC_AUTO_PREFIX)
+                and not id_.startswith(LC_ID_PREFIX)
+                and not id_.startswith(LC_AUTO_PREFIX)
             ):
                 chunk_id = id_
                 break
         else:
-            # second pass: prefer run-* ids over lc_* ids
+            # second pass: prefer lc_run-* ids over lc_* ids
             for id_ in candidates:
-                if id_ and id_.startswith(_LC_ID_PREFIX):
+                if id_ and id_.startswith(LC_ID_PREFIX):
                     chunk_id = id_
                     break
             else:
-                # third pass: take any remaining id (
+                # third pass: take any remaining id (auto-generated lc_* ids)
                 for id_ in candidates:
                     if id_:
                         chunk_id = id_
                        break
 
+    chunk_position: Optional[Literal["last"]] = (
+        "last" if any(x.chunk_position == "last" for x in [left, *others]) else None
+    )
+
     return left.__class__(
         example=left.example,
         content=content,
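Putting the three passes together: provider-assigned ids beat ``lc_run-*`` ids, which beat auto-generated ``lc_*`` ids, and ``chunk_position="last"`` on any member marks the merge complete. A sketch (the exact prefix string values are assumptions inferred from the comments above):

    from langchain_core.messages import AIMessageChunk
    from langchain_core.messages.ai import add_ai_message_chunks

    a = AIMessageChunk(content="Hello ", id="lc_run-123")
    b = AIMessageChunk(content="world", id="chatcmpl-9", chunk_position="last")

    merged = add_ai_message_chunks(a, b)
    print(merged.id)              # "chatcmpl-9" -- provider id wins the first pass
    print(merged.chunk_position)  # "last"
    print(merged.content)         # "Hello world"
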
@@ -458,6 +615,7 @@ def add_ai_message_chunks(
         response_metadata=response_metadata,
         usage_metadata=usage_metadata,
         id=chunk_id,
+        chunk_position=chunk_position,
     )
 