langchain-core 0.3.75__py3-none-any.whl → 0.3.77__py3-none-any.whl
This diff shows the content changes between publicly available package versions released to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the differences between the package versions as they appear in their respective public registries.
Potentially problematic release.
This version of langchain-core has been flagged as potentially problematic. Click here for more details.
- langchain_core/_api/beta_decorator.py +22 -44
- langchain_core/_api/deprecation.py +30 -17
- langchain_core/_api/path.py +19 -2
- langchain_core/_import_utils.py +7 -0
- langchain_core/agents.py +10 -6
- langchain_core/beta/runnables/context.py +1 -2
- langchain_core/callbacks/base.py +28 -15
- langchain_core/callbacks/manager.py +83 -71
- langchain_core/callbacks/usage.py +6 -4
- langchain_core/chat_history.py +29 -21
- langchain_core/document_loaders/base.py +34 -9
- langchain_core/document_loaders/langsmith.py +4 -1
- langchain_core/documents/base.py +35 -10
- langchain_core/documents/transformers.py +4 -2
- langchain_core/embeddings/fake.py +8 -5
- langchain_core/env.py +2 -3
- langchain_core/example_selectors/base.py +12 -0
- langchain_core/exceptions.py +7 -0
- langchain_core/globals.py +17 -28
- langchain_core/indexing/api.py +88 -76
- langchain_core/indexing/base.py +5 -8
- langchain_core/indexing/in_memory.py +23 -3
- langchain_core/language_models/__init__.py +3 -2
- langchain_core/language_models/base.py +31 -20
- langchain_core/language_models/chat_models.py +98 -27
- langchain_core/language_models/fake_chat_models.py +10 -9
- langchain_core/language_models/llms.py +52 -18
- langchain_core/load/dump.py +2 -3
- langchain_core/load/load.py +15 -1
- langchain_core/load/serializable.py +39 -44
- langchain_core/memory.py +7 -3
- langchain_core/messages/ai.py +53 -24
- langchain_core/messages/base.py +43 -22
- langchain_core/messages/chat.py +4 -1
- langchain_core/messages/content_blocks.py +23 -2
- langchain_core/messages/function.py +9 -5
- langchain_core/messages/human.py +13 -10
- langchain_core/messages/modifier.py +1 -0
- langchain_core/messages/system.py +11 -8
- langchain_core/messages/tool.py +60 -29
- langchain_core/messages/utils.py +250 -131
- langchain_core/output_parsers/base.py +5 -2
- langchain_core/output_parsers/json.py +4 -4
- langchain_core/output_parsers/list.py +7 -22
- langchain_core/output_parsers/openai_functions.py +3 -0
- langchain_core/output_parsers/openai_tools.py +6 -1
- langchain_core/output_parsers/pydantic.py +4 -0
- langchain_core/output_parsers/string.py +5 -1
- langchain_core/output_parsers/xml.py +19 -19
- langchain_core/outputs/chat_generation.py +25 -10
- langchain_core/outputs/generation.py +14 -3
- langchain_core/outputs/llm_result.py +8 -1
- langchain_core/prompt_values.py +16 -6
- langchain_core/prompts/base.py +4 -9
- langchain_core/prompts/chat.py +89 -57
- langchain_core/prompts/dict.py +16 -8
- langchain_core/prompts/few_shot.py +12 -11
- langchain_core/prompts/few_shot_with_templates.py +5 -1
- langchain_core/prompts/image.py +12 -5
- langchain_core/prompts/message.py +5 -6
- langchain_core/prompts/pipeline.py +13 -8
- langchain_core/prompts/prompt.py +22 -8
- langchain_core/prompts/string.py +18 -10
- langchain_core/prompts/structured.py +7 -2
- langchain_core/rate_limiters.py +2 -2
- langchain_core/retrievers.py +7 -6
- langchain_core/runnables/base.py +406 -186
- langchain_core/runnables/branch.py +14 -19
- langchain_core/runnables/config.py +9 -15
- langchain_core/runnables/configurable.py +34 -19
- langchain_core/runnables/fallbacks.py +20 -13
- langchain_core/runnables/graph.py +48 -38
- langchain_core/runnables/graph_ascii.py +41 -18
- langchain_core/runnables/graph_mermaid.py +54 -25
- langchain_core/runnables/graph_png.py +27 -31
- langchain_core/runnables/history.py +55 -58
- langchain_core/runnables/passthrough.py +44 -21
- langchain_core/runnables/retry.py +44 -23
- langchain_core/runnables/router.py +9 -8
- langchain_core/runnables/schema.py +2 -0
- langchain_core/runnables/utils.py +51 -89
- langchain_core/stores.py +19 -31
- langchain_core/sys_info.py +9 -8
- langchain_core/tools/base.py +37 -28
- langchain_core/tools/convert.py +26 -15
- langchain_core/tools/simple.py +36 -8
- langchain_core/tools/structured.py +25 -12
- langchain_core/tracers/base.py +2 -2
- langchain_core/tracers/context.py +5 -1
- langchain_core/tracers/core.py +109 -39
- langchain_core/tracers/evaluation.py +22 -26
- langchain_core/tracers/event_stream.py +45 -34
- langchain_core/tracers/langchain.py +12 -3
- langchain_core/tracers/langchain_v1.py +10 -2
- langchain_core/tracers/log_stream.py +56 -17
- langchain_core/tracers/root_listeners.py +4 -20
- langchain_core/tracers/run_collector.py +6 -16
- langchain_core/tracers/schemas.py +5 -1
- langchain_core/utils/aiter.py +15 -7
- langchain_core/utils/env.py +3 -0
- langchain_core/utils/function_calling.py +50 -28
- langchain_core/utils/interactive_env.py +6 -2
- langchain_core/utils/iter.py +12 -4
- langchain_core/utils/json.py +12 -3
- langchain_core/utils/json_schema.py +156 -40
- langchain_core/utils/loading.py +5 -1
- langchain_core/utils/mustache.py +24 -15
- langchain_core/utils/pydantic.py +38 -9
- langchain_core/utils/utils.py +25 -9
- langchain_core/vectorstores/base.py +7 -20
- langchain_core/vectorstores/in_memory.py +23 -17
- langchain_core/vectorstores/utils.py +18 -12
- langchain_core/version.py +1 -1
- langchain_core-0.3.77.dist-info/METADATA +67 -0
- langchain_core-0.3.77.dist-info/RECORD +174 -0
- langchain_core-0.3.75.dist-info/METADATA +0 -106
- langchain_core-0.3.75.dist-info/RECORD +0 -174
- {langchain_core-0.3.75.dist-info → langchain_core-0.3.77.dist-info}/WHEEL +0 -0
- {langchain_core-0.3.75.dist-info → langchain_core-0.3.77.dist-info}/entry_points.txt +0 -0
langchain_core/messages/base.py
CHANGED
|
@@ -20,7 +20,7 @@ if TYPE_CHECKING:
|
|
|
20
20
|
class BaseMessage(Serializable):
|
|
21
21
|
"""Base abstract message class.
|
|
22
22
|
|
|
23
|
-
Messages are the inputs and outputs of
|
|
23
|
+
Messages are the inputs and outputs of ``ChatModel``s.
|
|
24
24
|
"""
|
|
25
25
|
|
|
26
26
|
content: Union[str, list[Union[str, dict]]]
|
|
@@ -31,17 +31,18 @@ class BaseMessage(Serializable):
|
|
|
31
31
|
|
|
32
32
|
For example, for a message from an AI, this could include tool calls as
|
|
33
33
|
encoded by the model provider.
|
|
34
|
+
|
|
34
35
|
"""
|
|
35
36
|
|
|
36
37
|
response_metadata: dict = Field(default_factory=dict)
|
|
37
|
-
"""
|
|
38
|
-
name."""
|
|
38
|
+
"""Examples: response headers, logprobs, token counts, model name."""
|
|
39
39
|
|
|
40
40
|
type: str
|
|
41
41
|
"""The type of the message. Must be a string that is unique to the message type.
|
|
42
42
|
|
|
43
43
|
The purpose of this field is to allow for easy identification of the message type
|
|
44
44
|
when deserializing messages.
|
|
45
|
+
|
|
45
46
|
"""
|
|
46
47
|
|
|
47
48
|
name: Optional[str] = None
|
|
@@ -51,20 +52,26 @@ class BaseMessage(Serializable):
|
|
|
51
52
|
|
|
52
53
|
Usage of this field is optional, and whether it's used or not is up to the
|
|
53
54
|
model implementation.
|
|
55
|
+
|
|
54
56
|
"""
|
|
55
57
|
|
|
56
58
|
id: Optional[str] = Field(default=None, coerce_numbers_to_str=True)
|
|
57
|
-
"""An optional unique identifier for the message.
|
|
58
|
-
|
|
59
|
+
"""An optional unique identifier for the message.
|
|
60
|
+
|
|
61
|
+
This should ideally be provided by the provider/model which created the message.
|
|
62
|
+
|
|
63
|
+
"""
|
|
59
64
|
|
|
60
65
|
model_config = ConfigDict(
|
|
61
66
|
extra="allow",
|
|
62
67
|
)
|
|
63
68
|
|
|
64
69
|
def __init__(
|
|
65
|
-
self,
|
|
70
|
+
self,
|
|
71
|
+
content: Union[str, list[Union[str, dict]]],
|
|
72
|
+
**kwargs: Any,
|
|
66
73
|
) -> None:
|
|
67
|
-
"""
|
|
74
|
+
"""Initialize ``BaseMessage``.
|
|
68
75
|
|
|
69
76
|
Args:
|
|
70
77
|
content: The string contents of the message.
|
|
@@ -73,7 +80,7 @@ class BaseMessage(Serializable):
|
|
|
73
80
|
|
|
74
81
|
@classmethod
|
|
75
82
|
def is_lc_serializable(cls) -> bool:
|
|
76
|
-
"""BaseMessage is serializable.
|
|
83
|
+
"""``BaseMessage`` is serializable.
|
|
77
84
|
|
|
78
85
|
Returns:
|
|
79
86
|
True
|
|
@@ -84,15 +91,17 @@ class BaseMessage(Serializable):
|
|
|
84
91
|
def get_lc_namespace(cls) -> list[str]:
|
|
85
92
|
"""Get the namespace of the langchain object.
|
|
86
93
|
|
|
87
|
-
|
|
94
|
+
Returns:
|
|
95
|
+
``["langchain", "schema", "messages"]``
|
|
88
96
|
"""
|
|
89
97
|
return ["langchain", "schema", "messages"]
|
|
90
98
|
|
|
91
99
|
def text(self) -> str:
|
|
92
|
-
"""Get the text content of the message.
|
|
100
|
+
"""Get the text ``content`` of the message.
|
|
93
101
|
|
|
94
102
|
Returns:
|
|
95
103
|
The text content of the message.
|
|
104
|
+
|
|
96
105
|
"""
|
|
97
106
|
if isinstance(self.content, str):
|
|
98
107
|
return self.content
|
|
@@ -109,8 +118,16 @@ class BaseMessage(Serializable):
|
|
|
109
118
|
)
|
|
110
119
|
|
|
111
120
|
def __add__(self, other: Any) -> ChatPromptTemplate:
|
|
112
|
-
"""Concatenate this message with another message.
|
|
113
|
-
|
|
121
|
+
"""Concatenate this message with another message.
|
|
122
|
+
|
|
123
|
+
Args:
|
|
124
|
+
other: Another message to concatenate with this one.
|
|
125
|
+
|
|
126
|
+
Returns:
|
|
127
|
+
A ChatPromptTemplate containing both messages.
|
|
128
|
+
"""
|
|
129
|
+
# Import locally to prevent circular imports.
|
|
130
|
+
from langchain_core.prompts.chat import ChatPromptTemplate # noqa: PLC0415
|
|
114
131
|
|
|
115
132
|
prompt = ChatPromptTemplate(messages=[self])
|
|
116
133
|
return prompt + other
|
|
@@ -127,6 +144,7 @@ class BaseMessage(Serializable):
|
|
|
127
144
|
|
|
128
145
|
Returns:
|
|
129
146
|
A pretty representation of the message.
|
|
147
|
+
|
|
130
148
|
"""
|
|
131
149
|
title = get_msg_title_repr(self.type.title() + " Message", bold=html)
|
|
132
150
|
# TODO: handle non-string content.
|
|
@@ -146,11 +164,12 @@ def merge_content(
|
|
|
146
164
|
"""Merge multiple message contents.
|
|
147
165
|
|
|
148
166
|
Args:
|
|
149
|
-
first_content: The first content
|
|
150
|
-
contents: The other
|
|
167
|
+
first_content: The first ``content``. Can be a string or a list.
|
|
168
|
+
contents: The other ``content``s. Can be a string or a list.
|
|
151
169
|
|
|
152
170
|
Returns:
|
|
153
171
|
The merged content.
|
|
172
|
+
|
|
154
173
|
"""
|
|
155
174
|
merged = first_content
|
|
156
175
|
for content in contents:
|
|
@@ -171,9 +190,7 @@ def merge_content(
|
|
|
171
190
|
elif merged and isinstance(merged[-1], str):
|
|
172
191
|
merged[-1] += content
|
|
173
192
|
# If second content is an empty string, treat as a no-op
|
|
174
|
-
elif content
|
|
175
|
-
pass
|
|
176
|
-
else:
|
|
193
|
+
elif content:
|
|
177
194
|
# Otherwise, add the second content as a new element of the list
|
|
178
195
|
merged.append(content)
|
|
179
196
|
return merged
|
|
@@ -200,9 +217,10 @@ class BaseMessageChunk(BaseMessage):
|
|
|
200
217
|
|
|
201
218
|
For example,
|
|
202
219
|
|
|
203
|
-
|
|
220
|
+
``AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")``
|
|
221
|
+
|
|
222
|
+
will give ``AIMessageChunk(content="Hello World")``
|
|
204
223
|
|
|
205
|
-
will give `AIMessageChunk(content="Hello World")`
|
|
206
224
|
"""
|
|
207
225
|
if isinstance(other, BaseMessageChunk):
|
|
208
226
|
# If both are (subclasses of) BaseMessageChunk,
|
|
@@ -250,8 +268,9 @@ def message_to_dict(message: BaseMessage) -> dict:
|
|
|
250
268
|
message: Message to convert.
|
|
251
269
|
|
|
252
270
|
Returns:
|
|
253
|
-
Message as a dict. The dict will have a
|
|
254
|
-
and a
|
|
271
|
+
Message as a dict. The dict will have a ``type`` key with the message type
|
|
272
|
+
and a ``data`` key with the message data as a dict.
|
|
273
|
+
|
|
255
274
|
"""
|
|
256
275
|
return {"type": message.type, "data": message.model_dump()}
|
|
257
276
|
|
|
@@ -260,10 +279,11 @@ def messages_to_dict(messages: Sequence[BaseMessage]) -> list[dict]:
|
|
|
260
279
|
"""Convert a sequence of Messages to a list of dictionaries.
|
|
261
280
|
|
|
262
281
|
Args:
|
|
263
|
-
messages: Sequence of messages (as
|
|
282
|
+
messages: Sequence of messages (as ``BaseMessage``s) to convert.
|
|
264
283
|
|
|
265
284
|
Returns:
|
|
266
285
|
List of messages as dicts.
|
|
286
|
+
|
|
267
287
|
"""
|
|
268
288
|
return [message_to_dict(m) for m in messages]
|
|
269
289
|
|
|
@@ -277,6 +297,7 @@ def get_msg_title_repr(title: str, *, bold: bool = False) -> str:
|
|
|
277
297
|
|
|
278
298
|
Returns:
|
|
279
299
|
The title representation.
|
|
300
|
+
|
|
280
301
|
"""
|
|
281
302
|
padded = " " + title + " "
|
|
282
303
|
sep_len = (80 - len(padded)) // 2
|
langchain_core/messages/chat.py
CHANGED
|
@@ -30,7 +30,10 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
|
|
|
30
30
|
# non-chunk variant.
|
|
31
31
|
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore[assignment]
|
|
32
32
|
"""The type of the message (used during serialization).
|
|
33
|
-
|
|
33
|
+
|
|
34
|
+
Defaults to ``'ChatMessageChunk'``.
|
|
35
|
+
|
|
36
|
+
"""
|
|
34
37
|
|
|
35
38
|
@override
|
|
36
39
|
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]
|
|
@@ -88,7 +88,18 @@ def is_data_content_block(
|
|
|
88
88
|
|
|
89
89
|
|
|
90
90
|
def convert_to_openai_image_block(content_block: dict[str, Any]) -> dict:
|
|
91
|
-
"""Convert image content block to format expected by OpenAI Chat Completions API.
|
|
91
|
+
"""Convert image content block to format expected by OpenAI Chat Completions API.
|
|
92
|
+
|
|
93
|
+
Args:
|
|
94
|
+
content_block: The content block to convert.
|
|
95
|
+
|
|
96
|
+
Raises:
|
|
97
|
+
ValueError: If the source type is not supported or if ``mime_type`` is missing
|
|
98
|
+
for base64 data.
|
|
99
|
+
|
|
100
|
+
Returns:
|
|
101
|
+
A dictionary formatted for OpenAI's API.
|
|
102
|
+
"""
|
|
92
103
|
if content_block["source_type"] == "url":
|
|
93
104
|
return {
|
|
94
105
|
"type": "image_url",
|
|
@@ -112,7 +123,17 @@ def convert_to_openai_image_block(content_block: dict[str, Any]) -> dict:
|
|
|
112
123
|
|
|
113
124
|
|
|
114
125
|
def convert_to_openai_data_block(block: dict) -> dict:
|
|
115
|
-
"""Format standard data content block to format expected by OpenAI.
|
|
126
|
+
"""Format standard data content block to format expected by OpenAI.
|
|
127
|
+
|
|
128
|
+
Args:
|
|
129
|
+
block: A data content block.
|
|
130
|
+
|
|
131
|
+
Raises:
|
|
132
|
+
ValueError: If the block type or source type is not supported.
|
|
133
|
+
|
|
134
|
+
Returns:
|
|
135
|
+
A dictionary formatted for OpenAI's API.
|
|
136
|
+
"""
|
|
116
137
|
if block["type"] == "image":
|
|
117
138
|
formatted_block = convert_to_openai_image_block(block)
|
|
118
139
|
|
|
@@ -15,19 +15,20 @@ from langchain_core.utils._merge import merge_dicts
|
|
|
15
15
|
class FunctionMessage(BaseMessage):
|
|
16
16
|
"""Message for passing the result of executing a tool back to a model.
|
|
17
17
|
|
|
18
|
-
FunctionMessage are an older version of the ToolMessage schema, and
|
|
19
|
-
do not contain the tool_call_id field.
|
|
18
|
+
``FunctionMessage`` are an older version of the ``ToolMessage`` schema, and
|
|
19
|
+
do not contain the ``tool_call_id`` field.
|
|
20
20
|
|
|
21
|
-
The tool_call_id field is used to associate the tool call request with the
|
|
21
|
+
The ``tool_call_id`` field is used to associate the tool call request with the
|
|
22
22
|
tool call response. This is useful in situations where a chat model is able
|
|
23
23
|
to request multiple tool calls in parallel.
|
|
24
|
+
|
|
24
25
|
"""
|
|
25
26
|
|
|
26
27
|
name: str
|
|
27
28
|
"""The name of the function that was executed."""
|
|
28
29
|
|
|
29
30
|
type: Literal["function"] = "function"
|
|
30
|
-
"""The type of the message (used for serialization). Defaults to
|
|
31
|
+
"""The type of the message (used for serialization). Defaults to ``'function'``."""
|
|
31
32
|
|
|
32
33
|
|
|
33
34
|
class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
|
|
@@ -38,7 +39,10 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
|
|
|
38
39
|
# non-chunk variant.
|
|
39
40
|
type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk" # type: ignore[assignment]
|
|
40
41
|
"""The type of the message (used for serialization).
|
|
41
|
-
|
|
42
|
+
|
|
43
|
+
Defaults to ``'FunctionMessageChunk'``.
|
|
44
|
+
|
|
45
|
+
"""
|
|
42
46
|
|
|
43
47
|
@override
|
|
44
48
|
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]
|
langchain_core/messages/human.py
CHANGED
|
@@ -8,7 +8,7 @@ from langchain_core.messages.base import BaseMessage, BaseMessageChunk
|
|
|
8
8
|
class HumanMessage(BaseMessage):
|
|
9
9
|
"""Message from a human.
|
|
10
10
|
|
|
11
|
-
|
|
11
|
+
``HumanMessage``s are messages that are passed in from a human to the model.
|
|
12
12
|
|
|
13
13
|
Example:
|
|
14
14
|
|
|
@@ -17,12 +17,8 @@ class HumanMessage(BaseMessage):
|
|
|
17
17
|
from langchain_core.messages import HumanMessage, SystemMessage
|
|
18
18
|
|
|
19
19
|
messages = [
|
|
20
|
-
SystemMessage(
|
|
21
|
-
|
|
22
|
-
),
|
|
23
|
-
HumanMessage(
|
|
24
|
-
content="What is your name?"
|
|
25
|
-
)
|
|
20
|
+
SystemMessage(content="You are a helpful assistant! Your name is Bob."),
|
|
21
|
+
HumanMessage(content="What is your name?"),
|
|
26
22
|
]
|
|
27
23
|
|
|
28
24
|
# Instantiate a chat model and invoke it with the messages
|
|
@@ -36,15 +32,22 @@ class HumanMessage(BaseMessage):
|
|
|
36
32
|
|
|
37
33
|
At the moment, this is ignored by most models. Usage is discouraged.
|
|
38
34
|
Defaults to False.
|
|
35
|
+
|
|
39
36
|
"""
|
|
40
37
|
|
|
41
38
|
type: Literal["human"] = "human"
|
|
42
|
-
"""The type of the message (used for serialization).
|
|
39
|
+
"""The type of the message (used for serialization).
|
|
40
|
+
|
|
41
|
+
Defaults to ``'human'``.
|
|
42
|
+
|
|
43
|
+
"""
|
|
43
44
|
|
|
44
45
|
def __init__(
|
|
45
|
-
self,
|
|
46
|
+
self,
|
|
47
|
+
content: Union[str, list[Union[str, dict]]],
|
|
48
|
+
**kwargs: Any,
|
|
46
49
|
) -> None:
|
|
47
|
-
"""
|
|
50
|
+
"""Initialize ``HumanMessage``.
|
|
48
51
|
|
|
49
52
|
Args:
|
|
50
53
|
content: The string contents of the message.
|
|
@@ -18,12 +18,8 @@ class SystemMessage(BaseMessage):
|
|
|
18
18
|
from langchain_core.messages import HumanMessage, SystemMessage
|
|
19
19
|
|
|
20
20
|
messages = [
|
|
21
|
-
SystemMessage(
|
|
22
|
-
|
|
23
|
-
),
|
|
24
|
-
HumanMessage(
|
|
25
|
-
content="What is your name?"
|
|
26
|
-
)
|
|
21
|
+
SystemMessage(content="You are a helpful assistant! Your name is Bob."),
|
|
22
|
+
HumanMessage(content="What is your name?"),
|
|
27
23
|
]
|
|
28
24
|
|
|
29
25
|
# Define a chat model and invoke it with the messages
|
|
@@ -32,7 +28,11 @@ class SystemMessage(BaseMessage):
|
|
|
32
28
|
"""
|
|
33
29
|
|
|
34
30
|
type: Literal["system"] = "system"
|
|
35
|
-
"""The type of the message (used for serialization).
|
|
31
|
+
"""The type of the message (used for serialization).
|
|
32
|
+
|
|
33
|
+
Defaults to ``'system'``.
|
|
34
|
+
|
|
35
|
+
"""
|
|
36
36
|
|
|
37
37
|
def __init__(
|
|
38
38
|
self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
|
|
@@ -54,4 +54,7 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk):
|
|
|
54
54
|
# non-chunk variant.
|
|
55
55
|
type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment]
|
|
56
56
|
"""The type of the message (used for serialization).
|
|
57
|
-
|
|
57
|
+
|
|
58
|
+
Defaults to ``'SystemMessageChunk'``.
|
|
59
|
+
|
|
60
|
+
"""
|
langchain_core/messages/tool.py
CHANGED
|
@@ -14,28 +14,29 @@ from langchain_core.utils._merge import merge_dicts, merge_obj
|
|
|
14
14
|
class ToolOutputMixin:
|
|
15
15
|
"""Mixin for objects that tools can return directly.
|
|
16
16
|
|
|
17
|
-
If a custom BaseTool is invoked with a ToolCall and the output of custom code is
|
|
18
|
-
not an instance of ToolOutputMixin
|
|
19
|
-
string and wrapped in a ToolMessage
|
|
17
|
+
If a custom BaseTool is invoked with a ``ToolCall`` and the output of custom code is
|
|
18
|
+
not an instance of ``ToolOutputMixin``, the output will automatically be coerced to
|
|
19
|
+
a string and wrapped in a ``ToolMessage``.
|
|
20
|
+
|
|
20
21
|
"""
|
|
21
22
|
|
|
22
23
|
|
|
23
24
|
class ToolMessage(BaseMessage, ToolOutputMixin):
|
|
24
25
|
"""Message for passing the result of executing a tool back to a model.
|
|
25
26
|
|
|
26
|
-
|
|
27
|
-
is encoded inside the
|
|
27
|
+
``ToolMessage``s contain the result of a tool invocation. Typically, the result
|
|
28
|
+
is encoded inside the ``content`` field.
|
|
28
29
|
|
|
29
|
-
Example: A ToolMessage representing a result of 42 from a tool call with id
|
|
30
|
+
Example: A ``ToolMessage`` representing a result of ``42`` from a tool call with id
|
|
30
31
|
|
|
31
32
|
.. code-block:: python
|
|
32
33
|
|
|
33
34
|
from langchain_core.messages import ToolMessage
|
|
34
35
|
|
|
35
|
-
ToolMessage(content=
|
|
36
|
+
ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
|
|
36
37
|
|
|
37
38
|
|
|
38
|
-
Example: A ToolMessage where only part of the tool output is sent to the model
|
|
39
|
+
Example: A ``ToolMessage`` where only part of the tool output is sent to the model
|
|
39
40
|
and the full output is passed in to artifact.
|
|
40
41
|
|
|
41
42
|
.. versionadded:: 0.2.17
|
|
@@ -45,7 +46,8 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
|
|
|
45
46
|
from langchain_core.messages import ToolMessage
|
|
46
47
|
|
|
47
48
|
tool_output = {
|
|
48
|
-
"stdout": "From the graph we can see that the correlation between
|
|
49
|
+
"stdout": "From the graph we can see that the correlation between "
|
|
50
|
+
"x and y is ...",
|
|
49
51
|
"stderr": None,
|
|
50
52
|
"artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
|
|
51
53
|
}
|
|
@@ -53,20 +55,24 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
|
|
|
53
55
|
ToolMessage(
|
|
54
56
|
content=tool_output["stdout"],
|
|
55
57
|
artifact=tool_output,
|
|
56
|
-
tool_call_id=
|
|
58
|
+
tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
|
|
57
59
|
)
|
|
58
60
|
|
|
59
|
-
The tool_call_id field is used to associate the tool call request with the
|
|
61
|
+
The ``tool_call_id`` field is used to associate the tool call request with the
|
|
60
62
|
tool call response. This is useful in situations where a chat model is able
|
|
61
63
|
to request multiple tool calls in parallel.
|
|
62
64
|
|
|
63
|
-
"""
|
|
65
|
+
"""
|
|
64
66
|
|
|
65
67
|
tool_call_id: str
|
|
66
68
|
"""Tool call that this message is responding to."""
|
|
67
69
|
|
|
68
70
|
type: Literal["tool"] = "tool"
|
|
69
|
-
"""The type of the message (used for serialization).
|
|
71
|
+
"""The type of the message (used for serialization).
|
|
72
|
+
|
|
73
|
+
Defaults to ``'tool'``.
|
|
74
|
+
|
|
75
|
+
"""
|
|
70
76
|
|
|
71
77
|
artifact: Any = None
|
|
72
78
|
"""Artifact of the Tool execution which is not meant to be sent to the model.
|
|
@@ -76,12 +82,14 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
|
|
|
76
82
|
output is needed in other parts of the code.
|
|
77
83
|
|
|
78
84
|
.. versionadded:: 0.2.17
|
|
85
|
+
|
|
79
86
|
"""
|
|
80
87
|
|
|
81
88
|
status: Literal["success", "error"] = "success"
|
|
82
89
|
"""Status of the tool invocation.
|
|
83
90
|
|
|
84
91
|
.. versionadded:: 0.2.24
|
|
92
|
+
|
|
85
93
|
"""
|
|
86
94
|
|
|
87
95
|
additional_kwargs: dict = Field(default_factory=dict, repr=False)
|
|
@@ -96,6 +104,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
|
|
|
96
104
|
|
|
97
105
|
Args:
|
|
98
106
|
values: The model arguments.
|
|
107
|
+
|
|
99
108
|
"""
|
|
100
109
|
content = values["content"]
|
|
101
110
|
if isinstance(content, tuple):
|
|
@@ -134,9 +143,11 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
|
|
|
134
143
|
return values
|
|
135
144
|
|
|
136
145
|
def __init__(
|
|
137
|
-
self,
|
|
146
|
+
self,
|
|
147
|
+
content: Union[str, list[Union[str, dict]]],
|
|
148
|
+
**kwargs: Any,
|
|
138
149
|
) -> None:
|
|
139
|
-
"""
|
|
150
|
+
"""Initialize ``ToolMessage``.
|
|
140
151
|
|
|
141
152
|
Args:
|
|
142
153
|
content: The string contents of the message.
|
|
@@ -184,14 +195,10 @@ class ToolCall(TypedDict):
|
|
|
184
195
|
|
|
185
196
|
.. code-block:: python
|
|
186
197
|
|
|
187
|
-
{
|
|
188
|
-
"name": "foo",
|
|
189
|
-
"args": {"a": 1},
|
|
190
|
-
"id": "123"
|
|
191
|
-
}
|
|
198
|
+
{"name": "foo", "args": {"a": 1}, "id": "123"}
|
|
192
199
|
|
|
193
|
-
This represents a request to call the tool named
|
|
194
|
-
and an identifier of
|
|
200
|
+
This represents a request to call the tool named ``'foo'`` with arguments
|
|
201
|
+
``{"a": 1}`` and an identifier of ``'123'``.
|
|
195
202
|
|
|
196
203
|
"""
|
|
197
204
|
|
|
@@ -204,6 +211,7 @@ class ToolCall(TypedDict):
|
|
|
204
211
|
|
|
205
212
|
An identifier is needed to associate a tool call request with a tool
|
|
206
213
|
call result in events when multiple concurrent tool calls are made.
|
|
214
|
+
|
|
207
215
|
"""
|
|
208
216
|
type: NotRequired[Literal["tool_call"]]
|
|
209
217
|
|
|
@@ -220,6 +228,9 @@ def tool_call(
|
|
|
220
228
|
name: The name of the tool to be called.
|
|
221
229
|
args: The arguments to the tool call.
|
|
222
230
|
id: An identifier associated with the tool call.
|
|
231
|
+
|
|
232
|
+
Returns:
|
|
233
|
+
The created tool call.
|
|
223
234
|
"""
|
|
224
235
|
return ToolCall(name=name, args=args, id=id, type="tool_call")
|
|
225
236
|
|
|
@@ -227,21 +238,21 @@ def tool_call(
|
|
|
227
238
|
class ToolCallChunk(TypedDict):
|
|
228
239
|
"""A chunk of a tool call (e.g., as part of a stream).
|
|
229
240
|
|
|
230
|
-
When merging
|
|
241
|
+
When merging ``ToolCallChunk``s (e.g., via ``AIMessageChunk.__add__``),
|
|
231
242
|
all string attributes are concatenated. Chunks are only merged if their
|
|
232
|
-
values of
|
|
243
|
+
values of ``index`` are equal and not None.
|
|
233
244
|
|
|
234
245
|
Example:
|
|
235
246
|
|
|
236
247
|
.. code-block:: python
|
|
237
248
|
|
|
238
249
|
left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
|
|
239
|
-
right_chunks = [ToolCallChunk(name=None, args=
|
|
250
|
+
right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
|
|
240
251
|
|
|
241
252
|
(
|
|
242
253
|
AIMessageChunk(content="", tool_call_chunks=left_chunks)
|
|
243
254
|
+ AIMessageChunk(content="", tool_call_chunks=right_chunks)
|
|
244
|
-
).tool_call_chunks == [ToolCallChunk(name=
|
|
255
|
+
).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
|
|
245
256
|
|
|
246
257
|
"""
|
|
247
258
|
|
|
@@ -270,6 +281,9 @@ def tool_call_chunk(
|
|
|
270
281
|
args: The arguments to the tool call.
|
|
271
282
|
id: An identifier associated with the tool call.
|
|
272
283
|
index: The index of the tool call in a sequence.
|
|
284
|
+
|
|
285
|
+
Returns:
|
|
286
|
+
The created tool call chunk.
|
|
273
287
|
"""
|
|
274
288
|
return ToolCallChunk(
|
|
275
289
|
name=name, args=args, id=id, index=index, type="tool_call_chunk"
|
|
@@ -279,7 +293,7 @@ def tool_call_chunk(
|
|
|
279
293
|
class InvalidToolCall(TypedDict):
|
|
280
294
|
"""Allowance for errors made by LLM.
|
|
281
295
|
|
|
282
|
-
Here we add an
|
|
296
|
+
Here we add an ``error`` key to surface errors made during generation
|
|
283
297
|
(e.g., invalid JSON arguments.)
|
|
284
298
|
"""
|
|
285
299
|
|
|
@@ -308,6 +322,9 @@ def invalid_tool_call(
|
|
|
308
322
|
args: The arguments to the tool call.
|
|
309
323
|
id: An identifier associated with the tool call.
|
|
310
324
|
error: An error message associated with the tool call.
|
|
325
|
+
|
|
326
|
+
Returns:
|
|
327
|
+
The created invalid tool call.
|
|
311
328
|
"""
|
|
312
329
|
return InvalidToolCall(
|
|
313
330
|
name=name, args=args, id=id, error=error, type="invalid_tool_call"
|
|
@@ -317,7 +334,14 @@ def invalid_tool_call(
|
|
|
317
334
|
def default_tool_parser(
|
|
318
335
|
raw_tool_calls: list[dict],
|
|
319
336
|
) -> tuple[list[ToolCall], list[InvalidToolCall]]:
|
|
320
|
-
"""Best-effort parsing of tools.
|
|
337
|
+
"""Best-effort parsing of tools.
|
|
338
|
+
|
|
339
|
+
Args:
|
|
340
|
+
raw_tool_calls: List of raw tool call dicts to parse.
|
|
341
|
+
|
|
342
|
+
Returns:
|
|
343
|
+
A list of tool calls and invalid tool calls.
|
|
344
|
+
"""
|
|
321
345
|
tool_calls = []
|
|
322
346
|
invalid_tool_calls = []
|
|
323
347
|
for raw_tool_call in raw_tool_calls:
|
|
@@ -345,7 +369,14 @@ def default_tool_parser(
|
|
|
345
369
|
|
|
346
370
|
|
|
347
371
|
def default_tool_chunk_parser(raw_tool_calls: list[dict]) -> list[ToolCallChunk]:
|
|
348
|
-
"""Best-effort parsing of tool chunks.
|
|
372
|
+
"""Best-effort parsing of tool chunks.
|
|
373
|
+
|
|
374
|
+
Args:
|
|
375
|
+
raw_tool_calls: List of raw tool call dicts to parse.
|
|
376
|
+
|
|
377
|
+
Returns:
|
|
378
|
+
List of parsed ToolCallChunk objects.
|
|
379
|
+
"""
|
|
349
380
|
tool_call_chunks = []
|
|
350
381
|
for tool_call in raw_tool_calls:
|
|
351
382
|
if "function" not in tool_call:
|