langchain-core 1.0.0a8__py3-none-any.whl → 1.0.0rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/__init__.py +1 -1
- langchain_core/_api/__init__.py +0 -1
- langchain_core/_api/beta_decorator.py +17 -20
- langchain_core/_api/deprecation.py +30 -35
- langchain_core/_import_utils.py +1 -1
- langchain_core/agents.py +10 -9
- langchain_core/caches.py +46 -56
- langchain_core/callbacks/__init__.py +1 -8
- langchain_core/callbacks/base.py +232 -243
- langchain_core/callbacks/file.py +33 -33
- langchain_core/callbacks/manager.py +353 -416
- langchain_core/callbacks/stdout.py +21 -22
- langchain_core/callbacks/streaming_stdout.py +32 -32
- langchain_core/callbacks/usage.py +54 -51
- langchain_core/chat_history.py +43 -58
- langchain_core/document_loaders/base.py +21 -21
- langchain_core/document_loaders/langsmith.py +22 -22
- langchain_core/documents/__init__.py +0 -1
- langchain_core/documents/base.py +46 -49
- langchain_core/documents/transformers.py +28 -29
- langchain_core/embeddings/fake.py +50 -54
- langchain_core/example_selectors/semantic_similarity.py +4 -6
- langchain_core/exceptions.py +7 -8
- langchain_core/indexing/api.py +19 -25
- langchain_core/indexing/base.py +24 -24
- langchain_core/language_models/__init__.py +11 -27
- langchain_core/language_models/_utils.py +53 -54
- langchain_core/language_models/base.py +30 -24
- langchain_core/language_models/chat_models.py +123 -148
- langchain_core/language_models/fake_chat_models.py +7 -7
- langchain_core/language_models/llms.py +14 -16
- langchain_core/load/dump.py +3 -4
- langchain_core/load/load.py +7 -16
- langchain_core/load/serializable.py +37 -36
- langchain_core/messages/__init__.py +1 -16
- langchain_core/messages/ai.py +122 -123
- langchain_core/messages/base.py +31 -31
- langchain_core/messages/block_translators/__init__.py +17 -17
- langchain_core/messages/block_translators/anthropic.py +3 -3
- langchain_core/messages/block_translators/bedrock_converse.py +3 -3
- langchain_core/messages/block_translators/google_genai.py +5 -4
- langchain_core/messages/block_translators/google_vertexai.py +4 -32
- langchain_core/messages/block_translators/groq.py +117 -21
- langchain_core/messages/block_translators/langchain_v0.py +3 -3
- langchain_core/messages/block_translators/openai.py +5 -5
- langchain_core/messages/chat.py +2 -6
- langchain_core/messages/content.py +222 -209
- langchain_core/messages/function.py +6 -10
- langchain_core/messages/human.py +17 -24
- langchain_core/messages/modifier.py +2 -2
- langchain_core/messages/system.py +12 -22
- langchain_core/messages/tool.py +53 -69
- langchain_core/messages/utils.py +399 -417
- langchain_core/output_parsers/__init__.py +1 -14
- langchain_core/output_parsers/base.py +46 -47
- langchain_core/output_parsers/json.py +3 -4
- langchain_core/output_parsers/list.py +2 -2
- langchain_core/output_parsers/openai_functions.py +46 -44
- langchain_core/output_parsers/openai_tools.py +11 -16
- langchain_core/output_parsers/pydantic.py +10 -11
- langchain_core/output_parsers/string.py +2 -2
- langchain_core/output_parsers/transform.py +2 -2
- langchain_core/output_parsers/xml.py +1 -1
- langchain_core/outputs/__init__.py +1 -1
- langchain_core/outputs/chat_generation.py +14 -14
- langchain_core/outputs/generation.py +6 -6
- langchain_core/outputs/llm_result.py +5 -5
- langchain_core/prompt_values.py +11 -11
- langchain_core/prompts/__init__.py +3 -23
- langchain_core/prompts/base.py +33 -38
- langchain_core/prompts/chat.py +222 -229
- langchain_core/prompts/dict.py +3 -3
- langchain_core/prompts/few_shot.py +76 -83
- langchain_core/prompts/few_shot_with_templates.py +7 -9
- langchain_core/prompts/image.py +12 -14
- langchain_core/prompts/loading.py +1 -1
- langchain_core/prompts/message.py +3 -3
- langchain_core/prompts/prompt.py +20 -23
- langchain_core/prompts/string.py +20 -8
- langchain_core/prompts/structured.py +26 -27
- langchain_core/rate_limiters.py +50 -58
- langchain_core/retrievers.py +41 -182
- langchain_core/runnables/base.py +565 -597
- langchain_core/runnables/branch.py +8 -8
- langchain_core/runnables/config.py +37 -44
- langchain_core/runnables/configurable.py +9 -10
- langchain_core/runnables/fallbacks.py +9 -9
- langchain_core/runnables/graph.py +46 -50
- langchain_core/runnables/graph_ascii.py +19 -18
- langchain_core/runnables/graph_mermaid.py +20 -31
- langchain_core/runnables/graph_png.py +7 -7
- langchain_core/runnables/history.py +22 -22
- langchain_core/runnables/passthrough.py +11 -11
- langchain_core/runnables/retry.py +3 -3
- langchain_core/runnables/router.py +2 -2
- langchain_core/runnables/schema.py +33 -33
- langchain_core/runnables/utils.py +30 -34
- langchain_core/stores.py +72 -102
- langchain_core/sys_info.py +27 -29
- langchain_core/tools/__init__.py +1 -14
- langchain_core/tools/base.py +70 -71
- langchain_core/tools/convert.py +100 -104
- langchain_core/tools/render.py +9 -9
- langchain_core/tools/retriever.py +7 -7
- langchain_core/tools/simple.py +6 -7
- langchain_core/tools/structured.py +18 -24
- langchain_core/tracers/__init__.py +1 -9
- langchain_core/tracers/base.py +35 -35
- langchain_core/tracers/context.py +12 -17
- langchain_core/tracers/event_stream.py +3 -3
- langchain_core/tracers/langchain.py +8 -8
- langchain_core/tracers/log_stream.py +17 -18
- langchain_core/tracers/memory_stream.py +3 -3
- langchain_core/tracers/root_listeners.py +2 -2
- langchain_core/tracers/schemas.py +0 -129
- langchain_core/tracers/stdout.py +1 -2
- langchain_core/utils/__init__.py +1 -1
- langchain_core/utils/aiter.py +32 -32
- langchain_core/utils/env.py +5 -5
- langchain_core/utils/function_calling.py +59 -154
- langchain_core/utils/html.py +4 -4
- langchain_core/utils/input.py +3 -3
- langchain_core/utils/interactive_env.py +1 -1
- langchain_core/utils/iter.py +20 -20
- langchain_core/utils/json.py +1 -1
- langchain_core/utils/json_schema.py +2 -2
- langchain_core/utils/mustache.py +5 -5
- langchain_core/utils/pydantic.py +17 -17
- langchain_core/utils/strings.py +5 -5
- langchain_core/utils/utils.py +25 -28
- langchain_core/vectorstores/base.py +55 -87
- langchain_core/vectorstores/in_memory.py +83 -85
- langchain_core/vectorstores/utils.py +2 -2
- langchain_core/version.py +1 -1
- {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc2.dist-info}/METADATA +23 -11
- langchain_core-1.0.0rc2.dist-info/RECORD +172 -0
- langchain_core/memory.py +0 -120
- langchain_core/pydantic_v1/__init__.py +0 -30
- langchain_core/pydantic_v1/dataclasses.py +0 -23
- langchain_core/pydantic_v1/main.py +0 -23
- langchain_core-1.0.0a8.dist-info/RECORD +0 -176
- {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc2.dist-info}/WHEEL +0 -0
@@ -15,11 +15,11 @@ from langchain_core.utils._merge import merge_dicts
 class FunctionMessage(BaseMessage):
     """Message for passing the result of executing a tool back to a model.

-
-    do not contain the
+    `FunctionMessage` are an older version of the `ToolMessage` schema, and
+    do not contain the `tool_call_id` field.

-    The
-    tool call response.
+    The `tool_call_id` field is used to associate the tool call request with the
+    tool call response. Useful in situations where a chat model is able
     to request multiple tool calls in parallel.

     """
@@ -28,7 +28,7 @@ class FunctionMessage(BaseMessage):
     """The name of the function that was executed."""

     type: Literal["function"] = "function"
-    """The type of the message (used for serialization).
+    """The type of the message (used for serialization)."""


 class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
@@ -38,11 +38,7 @@ class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used for serialization).
-
-    Defaults to ``'FunctionMessageChunk'``.
-
-    """
+    """The type of the message (used for serialization)."""

     @override
     def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore[override]
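For orientation, a minimal sketch of the legacy schema next to the current one that this docstring describes (the tool name, content, and id below are illustrative, not taken from the diff):

```python
from langchain_core.messages import FunctionMessage, ToolMessage

# Legacy schema: the result is keyed only by the function's name.
legacy = FunctionMessage(name="get_weather", content="72 and sunny")

# Current schema: `tool_call_id` ties the result back to the specific request,
# which matters when a model issues several tool calls in parallel.
current = ToolMessage(content="72 and sunny", tool_call_id="call_abc123")

print(legacy.type, current.type)  # function tool
```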
langchain_core/messages/human.py CHANGED
@@ -7,33 +7,27 @@ from langchain_core.messages.base import BaseMessage, BaseMessageChunk


 class HumanMessage(BaseMessage):
-    """Message from
+    """Message from the user.

-
+    A `HumanMessage` is a message that is passed in from a user to the model.

     Example:
-
-
-
-
-
-
-
-
-
-
-
-
-            print(model.invoke(messages))
-
+        ```python
+        from langchain_core.messages import HumanMessage, SystemMessage
+
+        messages = [
+            SystemMessage(content="You are a helpful assistant! Your name is Bob."),
+            HumanMessage(content="What is your name?"),
+        ]
+
+        # Instantiate a chat model and invoke it with the messages
+        model = ...
+        print(model.invoke(messages))
+        ```
     """

     type: Literal["human"] = "human"
-    """The type of the message (used for serialization).
-
-    Defaults to ``'human'``.
-
-    """
+    """The type of the message (used for serialization)."""

     @overload
     def __init__(
@@ -56,7 +50,7 @@ class HumanMessage(BaseMessage):
         content_blocks: list[types.ContentBlock] | None = None,
         **kwargs: Any,
     ) -> None:
-        """Specify
+        """Specify `content` as positional arg or `content_blocks` for typing."""
         if content_blocks is not None:
             super().__init__(
                 content=cast("str | list[str | dict]", content_blocks),
@@ -73,5 +67,4 @@ class HumanMessageChunk(HumanMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["HumanMessageChunk"] = "HumanMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used for serialization).
-    Defaults to "HumanMessageChunk"."""
+    """The type of the message (used for serialization)."""
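A small usage sketch of the two constructor styles the `__init__` docstring above points at (the text-block shape follows the standard content-block convention and is illustrative):

```python
from langchain_core.messages import HumanMessage

# `content` as the positional argument
plain = HumanMessage("What is your name?")

# `content_blocks` for typed standard content
typed = HumanMessage(content_blocks=[{"type": "text", "text": "What is your name?"}])
```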
langchain_core/messages/modifier.py CHANGED
@@ -9,7 +9,7 @@ class RemoveMessage(BaseMessage):
     """Message responsible for deleting other messages."""

     type: Literal["remove"] = "remove"
-    """The type of the message (used for serialization).
+    """The type of the message (used for serialization)."""

     def __init__(
         self,
@@ -20,7 +20,7 @@ class RemoveMessage(BaseMessage):

         Args:
             id: The ID of the message to remove.
-            kwargs: Additional fields to pass to the message.
+            **kwargs: Additional fields to pass to the message.

         Raises:
             ValueError: If the 'content' field is passed in kwargs.
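A minimal sketch of the constructor documented in this hunk (the message id is illustrative):

```python
from langchain_core.messages import RemoveMessage

remove = RemoveMessage(id="run-123-message-2")
print(remove.type)  # "remove"

# Passing `content` is rejected, per the Raises note above:
# RemoveMessage(id="x", content="hi")  -> ValueError
```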
langchain_core/messages/system.py CHANGED
@@ -13,27 +13,21 @@ class SystemMessage(BaseMessage):
     of input messages.

     Example:
+        ```python
+        from langchain_core.messages import HumanMessage, SystemMessage

-
-
-
-
-            messages = [
-                SystemMessage(content="You are a helpful assistant! Your name is Bob."),
-                HumanMessage(content="What is your name?"),
-            ]
-
-            # Define a chat model and invoke it with the messages
-            print(model.invoke(messages))
+        messages = [
+            SystemMessage(content="You are a helpful assistant! Your name is Bob."),
+            HumanMessage(content="What is your name?"),
+        ]

+        # Define a chat model and invoke it with the messages
+        print(model.invoke(messages))
+        ```
     """

     type: Literal["system"] = "system"
-    """The type of the message (used for serialization).
-
-    Defaults to ``'system'``.
-
-    """
+    """The type of the message (used for serialization)."""

     @overload
     def __init__(
@@ -56,7 +50,7 @@ class SystemMessage(BaseMessage):
         content_blocks: list[types.ContentBlock] | None = None,
         **kwargs: Any,
     ) -> None:
-        """Specify
+        """Specify `content` as positional arg or `content_blocks` for typing."""
         if content_blocks is not None:
             super().__init__(
                 content=cast("str | list[str | dict]", content_blocks),
@@ -73,8 +67,4 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["SystemMessageChunk"] = "SystemMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used for serialization).
-
-    Defaults to ``'SystemMessageChunk'``.
-
-    """
+    """The type of the message (used for serialization)."""
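The `type` literals edited throughout these message classes are the discriminators used for serialization; a quick sketch of how they surface (output abbreviated):

```python
from langchain_core.messages import HumanMessage, SystemMessage

messages = [
    SystemMessage("You are a helpful assistant! Your name is Bob."),
    HumanMessage("What is your name?"),
]

# Each serialized message carries its `type` discriminator.
print([m.model_dump()["type"] for m in messages])  # ['system', 'human']
```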
langchain_core/messages/tool.py CHANGED
@@ -16,9 +16,9 @@ from langchain_core.utils._merge import merge_dicts, merge_obj
 class ToolOutputMixin:
     """Mixin for objects that tools can return directly.

-    If a custom BaseTool is invoked with a
-    not an instance of
-    a string and wrapped in a
+    If a custom BaseTool is invoked with a `ToolCall` and the output of custom code is
+    not an instance of `ToolOutputMixin`, the output will automatically be coerced to
+    a string and wrapped in a `ToolMessage`.

     """

@@ -26,42 +26,39 @@ class ToolOutputMixin:
 class ToolMessage(BaseMessage, ToolOutputMixin):
     """Message for passing the result of executing a tool back to a model.

-
-    is encoded inside the
+    `ToolMessage` objects contain the result of a tool invocation. Typically, the result
+    is encoded inside the `content` field.

-    Example: A
+    Example: A `ToolMessage` representing a result of `42` from a tool call with id

-
+        ```python
+        from langchain_core.messages import ToolMessage

-
+        ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
+        ```

-
+    Example: A `ToolMessage` where only part of the tool output is sent to the model
+    and the full output is passed in to artifact.

+        ```python
+        from langchain_core.messages import ToolMessage

-
-
+        tool_output = {
+            "stdout": "From the graph we can see that the correlation between "
+            "x and y is ...",
+            "stderr": None,
+            "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
+        }

-
-
-
-
-
-
-            tool_output = {
-                "stdout": "From the graph we can see that the correlation between "
-                "x and y is ...",
-                "stderr": None,
-                "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
-            }
-
-            ToolMessage(
-                content=tool_output["stdout"],
-                artifact=tool_output,
-                tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
-            )
+        ToolMessage(
+            content=tool_output["stdout"],
+            artifact=tool_output,
+            tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
+        )
+        ```

-    The
-    tool call response.
+    The `tool_call_id` field is used to associate the tool call request with the
+    tool call response. Useful in situations where a chat model is able
     to request multiple tool calls in parallel.

     """
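To make the `tool_call_id` pairing concrete, a hedged sketch of one request/response round trip (tool names, args, and ids are illustrative):

```python
from langchain_core.messages import AIMessage, ToolMessage

# The model requests two tool calls in parallel...
request = AIMessage(
    content="",
    tool_calls=[
        {"name": "add", "args": {"a": 1, "b": 2}, "id": "call_1"},
        {"name": "mul", "args": {"a": 3, "b": 4}, "id": "call_2"},
    ],
)

# ...and each result is routed back via the matching tool_call_id.
responses = [
    ToolMessage(content="3", tool_call_id="call_1"),
    ToolMessage(content="12", tool_call_id="call_2"),
]
```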
@@ -70,11 +67,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
     """Tool call that this message is responding to."""

     type: Literal["tool"] = "tool"
-    """The type of the message (used for serialization).
-
-    Defaults to ``'tool'``.
-
-    """
+    """The type of the message (used for serialization)."""

     artifact: Any = None
     """Artifact of the Tool execution which is not meant to be sent to the model.
@@ -83,21 +76,15 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
     a subset of the full tool output is being passed as message content but the full
     output is needed in other parts of the code.

-    !!! version-added "Added in version 0.2.17"
-
     """

     status: Literal["success", "error"] = "success"
-    """Status of the tool invocation.
-
-    !!! version-added "Added in version 0.2.24"
-
-    """
+    """Status of the tool invocation."""

     additional_kwargs: dict = Field(default_factory=dict, repr=False)
-    """Currently inherited from BaseMessage
+    """Currently inherited from `BaseMessage`, but not used."""
     response_metadata: dict = Field(default_factory=dict, repr=False)
-    """Currently inherited from BaseMessage
+    """Currently inherited from `BaseMessage`, but not used."""

     @model_validator(mode="before")
     @classmethod
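A short sketch of the `artifact` and `status` fields this hunk touches (all values are illustrative):

```python
from langchain_core.messages import ToolMessage

# Only a summary goes to the model; the full output rides along in `artifact`.
ok = ToolMessage(
    content="correlation computed",
    artifact={"rows": 10_000, "figure_base64": "/9j/4gIcSU..."},
    tool_call_id="call_1",
)

# Failed invocations can be flagged so the model can recover.
failed = ToolMessage(
    content="Error: division by zero",
    tool_call_id="call_2",
    status="error",
)
```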
@@ -165,12 +152,12 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
         content_blocks: list[types.ContentBlock] | None = None,
         **kwargs: Any,
     ) -> None:
-        """Initialize
+        """Initialize a `ToolMessage`.

-        Specify
+        Specify `content` as positional arg or `content_blocks` for typing.

         Args:
-            content: The
+            content: The contents of the message.
             content_blocks: Typed standard content.
             **kwargs: Additional fields.
         """
@@ -216,16 +203,15 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):


 class ToolCall(TypedDict):
-    """Represents
+    """Represents an AI's request to call a tool.

     Example:
+        ```python
+        {"name": "foo", "args": {"a": 1}, "id": "123"}
+        ```

-
-
-        {"name": "foo", "args": {"a": 1}, "id": "123"}
-
-    This represents a request to call the tool named ``'foo'`` with arguments
-    ``{"a": 1}`` and an identifier of ``'123'``.
+    This represents a request to call the tool named `'foo'` with arguments
+    `{"a": 1}` and an identifier of `'123'`.

     """

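The `tool_call()` helper named in the next hunk header builds this same dict; a minimal sketch (the name, args, and id are illustrative, and the keyword-only signature is assumed from the module):

```python
from langchain_core.messages.tool import tool_call

call = tool_call(name="foo", args={"a": 1}, id="123")
print(call["name"], call["args"], call["id"])  # foo {'a': 1} 123
```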
@@ -263,24 +249,22 @@ def tool_call(


 class ToolCallChunk(TypedDict):
-    """A chunk of a tool call (
+    """A chunk of a tool call (yielded when streaming).

-    When merging
+    When merging `ToolCallChunk`s (e.g., via `AIMessageChunk.__add__`),
     all string attributes are concatenated. Chunks are only merged if their
-    values of
+    values of `index` are equal and not None.

     Example:
-
-
-
-
-
-
-        (
-
-
-        ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
-
+        ```python
+        left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
+        right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
+
+        (
+            AIMessageChunk(content="", tool_call_chunks=left_chunks)
+            + AIMessageChunk(content="", tool_call_chunks=right_chunks)
+        ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
+        ```
     """

     name: str | None
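Building on that example, a hedged sketch of the index rule: chunks with the same `index` merge into one, while chunks with different indexes stay separate (behaviour inferred from the docstring above, not re-verified here):

```python
from langchain_core.messages import AIMessageChunk
from langchain_core.messages.tool import ToolCallChunk

same = AIMessageChunk(
    content="", tool_call_chunks=[ToolCallChunk(name="foo", args='{"a":', index=0)]
) + AIMessageChunk(content="", tool_call_chunks=[ToolCallChunk(name=None, args="1}", index=0)])
print(len(same.tool_call_chunks))  # 1 merged chunk, args '{"a":1}'

different = AIMessageChunk(
    content="", tool_call_chunks=[ToolCallChunk(name="foo", args="{}", index=0)]
) + AIMessageChunk(content="", tool_call_chunks=[ToolCallChunk(name="bar", args="{}", index=1)])
print(len(different.tool_call_chunks))  # 2 separate chunks
```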