langchain-core 0.3.79__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of langchain-core might be problematic.
- langchain_core/__init__.py +1 -1
- langchain_core/_api/__init__.py +3 -4
- langchain_core/_api/beta_decorator.py +23 -26
- langchain_core/_api/deprecation.py +52 -65
- langchain_core/_api/path.py +3 -6
- langchain_core/_import_utils.py +3 -4
- langchain_core/agents.py +19 -19
- langchain_core/caches.py +53 -63
- langchain_core/callbacks/__init__.py +1 -8
- langchain_core/callbacks/base.py +323 -334
- langchain_core/callbacks/file.py +44 -44
- langchain_core/callbacks/manager.py +441 -507
- langchain_core/callbacks/stdout.py +29 -30
- langchain_core/callbacks/streaming_stdout.py +32 -32
- langchain_core/callbacks/usage.py +60 -57
- langchain_core/chat_history.py +48 -63
- langchain_core/document_loaders/base.py +23 -23
- langchain_core/document_loaders/langsmith.py +37 -37
- langchain_core/documents/__init__.py +0 -1
- langchain_core/documents/base.py +62 -65
- langchain_core/documents/compressor.py +4 -4
- langchain_core/documents/transformers.py +28 -29
- langchain_core/embeddings/fake.py +50 -54
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/example_selectors/semantic_similarity.py +21 -25
- langchain_core/exceptions.py +10 -11
- langchain_core/globals.py +3 -151
- langchain_core/indexing/api.py +61 -66
- langchain_core/indexing/base.py +58 -58
- langchain_core/indexing/in_memory.py +3 -3
- langchain_core/language_models/__init__.py +14 -27
- langchain_core/language_models/_utils.py +270 -84
- langchain_core/language_models/base.py +55 -162
- langchain_core/language_models/chat_models.py +442 -402
- langchain_core/language_models/fake.py +11 -11
- langchain_core/language_models/fake_chat_models.py +61 -39
- langchain_core/language_models/llms.py +123 -231
- langchain_core/load/dump.py +4 -5
- langchain_core/load/load.py +18 -28
- langchain_core/load/mapping.py +2 -4
- langchain_core/load/serializable.py +39 -40
- langchain_core/messages/__init__.py +61 -22
- langchain_core/messages/ai.py +368 -163
- langchain_core/messages/base.py +214 -43
- langchain_core/messages/block_translators/__init__.py +111 -0
- langchain_core/messages/block_translators/anthropic.py +470 -0
- langchain_core/messages/block_translators/bedrock.py +94 -0
- langchain_core/messages/block_translators/bedrock_converse.py +297 -0
- langchain_core/messages/block_translators/google_genai.py +530 -0
- langchain_core/messages/block_translators/google_vertexai.py +21 -0
- langchain_core/messages/block_translators/groq.py +143 -0
- langchain_core/messages/block_translators/langchain_v0.py +301 -0
- langchain_core/messages/block_translators/openai.py +1010 -0
- langchain_core/messages/chat.py +2 -6
- langchain_core/messages/content.py +1423 -0
- langchain_core/messages/function.py +6 -10
- langchain_core/messages/human.py +41 -38
- langchain_core/messages/modifier.py +2 -2
- langchain_core/messages/system.py +38 -28
- langchain_core/messages/tool.py +96 -103
- langchain_core/messages/utils.py +478 -504
- langchain_core/output_parsers/__init__.py +1 -14
- langchain_core/output_parsers/base.py +58 -61
- langchain_core/output_parsers/json.py +7 -8
- langchain_core/output_parsers/list.py +5 -7
- langchain_core/output_parsers/openai_functions.py +49 -47
- langchain_core/output_parsers/openai_tools.py +14 -19
- langchain_core/output_parsers/pydantic.py +12 -13
- langchain_core/output_parsers/string.py +2 -2
- langchain_core/output_parsers/transform.py +15 -17
- langchain_core/output_parsers/xml.py +8 -10
- langchain_core/outputs/__init__.py +1 -1
- langchain_core/outputs/chat_generation.py +18 -18
- langchain_core/outputs/chat_result.py +1 -3
- langchain_core/outputs/generation.py +8 -8
- langchain_core/outputs/llm_result.py +10 -10
- langchain_core/prompt_values.py +12 -12
- langchain_core/prompts/__init__.py +3 -27
- langchain_core/prompts/base.py +45 -55
- langchain_core/prompts/chat.py +254 -313
- langchain_core/prompts/dict.py +5 -5
- langchain_core/prompts/few_shot.py +81 -88
- langchain_core/prompts/few_shot_with_templates.py +11 -13
- langchain_core/prompts/image.py +12 -14
- langchain_core/prompts/loading.py +6 -8
- langchain_core/prompts/message.py +3 -3
- langchain_core/prompts/prompt.py +24 -39
- langchain_core/prompts/string.py +4 -4
- langchain_core/prompts/structured.py +42 -50
- langchain_core/rate_limiters.py +51 -60
- langchain_core/retrievers.py +49 -190
- langchain_core/runnables/base.py +1484 -1709
- langchain_core/runnables/branch.py +45 -61
- langchain_core/runnables/config.py +80 -88
- langchain_core/runnables/configurable.py +117 -134
- langchain_core/runnables/fallbacks.py +83 -79
- langchain_core/runnables/graph.py +85 -95
- langchain_core/runnables/graph_ascii.py +27 -28
- langchain_core/runnables/graph_mermaid.py +38 -50
- langchain_core/runnables/graph_png.py +15 -16
- langchain_core/runnables/history.py +135 -148
- langchain_core/runnables/passthrough.py +124 -150
- langchain_core/runnables/retry.py +46 -51
- langchain_core/runnables/router.py +25 -30
- langchain_core/runnables/schema.py +79 -74
- langchain_core/runnables/utils.py +62 -68
- langchain_core/stores.py +81 -115
- langchain_core/structured_query.py +8 -8
- langchain_core/sys_info.py +27 -29
- langchain_core/tools/__init__.py +1 -14
- langchain_core/tools/base.py +179 -187
- langchain_core/tools/convert.py +131 -139
- langchain_core/tools/render.py +10 -10
- langchain_core/tools/retriever.py +11 -11
- langchain_core/tools/simple.py +19 -24
- langchain_core/tools/structured.py +30 -39
- langchain_core/tracers/__init__.py +1 -9
- langchain_core/tracers/base.py +97 -99
- langchain_core/tracers/context.py +29 -52
- langchain_core/tracers/core.py +50 -60
- langchain_core/tracers/evaluation.py +11 -11
- langchain_core/tracers/event_stream.py +115 -70
- langchain_core/tracers/langchain.py +21 -21
- langchain_core/tracers/log_stream.py +43 -43
- langchain_core/tracers/memory_stream.py +3 -3
- langchain_core/tracers/root_listeners.py +16 -16
- langchain_core/tracers/run_collector.py +2 -4
- langchain_core/tracers/schemas.py +0 -129
- langchain_core/tracers/stdout.py +3 -3
- langchain_core/utils/__init__.py +1 -4
- langchain_core/utils/_merge.py +46 -8
- langchain_core/utils/aiter.py +57 -61
- langchain_core/utils/env.py +9 -9
- langchain_core/utils/function_calling.py +89 -191
- langchain_core/utils/html.py +7 -8
- langchain_core/utils/input.py +6 -6
- langchain_core/utils/interactive_env.py +1 -1
- langchain_core/utils/iter.py +37 -42
- langchain_core/utils/json.py +4 -3
- langchain_core/utils/json_schema.py +8 -8
- langchain_core/utils/mustache.py +9 -11
- langchain_core/utils/pydantic.py +33 -35
- langchain_core/utils/strings.py +5 -5
- langchain_core/utils/usage.py +1 -1
- langchain_core/utils/utils.py +80 -54
- langchain_core/vectorstores/base.py +129 -164
- langchain_core/vectorstores/in_memory.py +99 -174
- langchain_core/vectorstores/utils.py +5 -5
- langchain_core/version.py +1 -1
- {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/METADATA +28 -27
- langchain_core-1.0.0.dist-info/RECORD +172 -0
- {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
- langchain_core/beta/__init__.py +0 -1
- langchain_core/beta/runnables/__init__.py +0 -1
- langchain_core/beta/runnables/context.py +0 -447
- langchain_core/memory.py +0 -120
- langchain_core/messages/content_blocks.py +0 -176
- langchain_core/prompts/pipeline.py +0 -138
- langchain_core/pydantic_v1/__init__.py +0 -30
- langchain_core/pydantic_v1/dataclasses.py +0 -23
- langchain_core/pydantic_v1/main.py +0 -23
- langchain_core/tracers/langchain_v1.py +0 -31
- langchain_core/utils/loading.py +0 -35
- langchain_core-0.3.79.dist-info/RECORD +0 -174
- langchain_core-0.3.79.dist-info/entry_points.txt +0 -4
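
The bulk of this release is the new standard content-block system: `langchain_core/messages/content.py` defines the v1 block types, and the new `langchain_core/messages/block_translators/` package derives those blocks from provider-native content. The two largest new translator modules, for Anthropic and Bedrock, are shown in full in the hunks below. As a minimal, hedged sketch of the block shapes those translators emit (plain dicts; every concrete value here is invented for illustration):

```python
# Illustrative only: these dict shapes mirror what the translators in the hunks
# below yield ("text" blocks with citation annotations, "tool_call" blocks, etc.).
text_block = {
    "type": "text",
    "text": "The sky is blue.",
    "annotations": [
        {
            "type": "citation",
            "cited_text": "sky is blue",
            "url": "https://example.com",  # hypothetical source
        },
    ],
}
tool_call_block = {
    "type": "tool_call",
    "name": "get_weather",  # hypothetical tool name
    "args": {"city": "Paris"},
    "id": "toolu_123",  # hypothetical call id
}
```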
langchain_core/messages/block_translators/anthropic.py
@@ -0,0 +1,470 @@
+"""Derivations of standard content blocks from Anthropic content."""
+
+import json
+from collections.abc import Iterable
+from typing import Any, cast
+
+from langchain_core.messages import AIMessage, AIMessageChunk
+from langchain_core.messages import content as types
+
+
+def _populate_extras(
+    standard_block: types.ContentBlock, block: dict[str, Any], known_fields: set[str]
+) -> types.ContentBlock:
+    """Mutate a block, populating extras."""
+    if standard_block.get("type") == "non_standard":
+        return standard_block
+
+    for key, value in block.items():
+        if key not in known_fields:
+            if "extras" not in standard_block:
+                # Below type-ignores are because mypy thinks a non-standard block can
+                # get here, although we exclude them above.
+                standard_block["extras"] = {}  # type: ignore[typeddict-unknown-key]
+            standard_block["extras"][key] = value  # type: ignore[typeddict-item]
+
+    return standard_block
+
+
+def _convert_to_v1_from_anthropic_input(
+    content: list[types.ContentBlock],
+) -> list[types.ContentBlock]:
+    """Convert Anthropic format blocks to v1 format.
+
+    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
+    block as a `'non_standard'` block with the original block stored in the `value`
+    field. This function attempts to unpack those blocks and convert any blocks that
+    might be Anthropic format to v1 ContentBlocks.
+
+    If conversion fails, the block is left as a `'non_standard'` block.
+
+    Args:
+        content: List of content blocks to process.
+
+    Returns:
+        Updated list with Anthropic blocks converted to v1 format.
+    """
+
+    def _iter_blocks() -> Iterable[types.ContentBlock]:
+        blocks: list[dict[str, Any]] = [
+            cast("dict[str, Any]", block)
+            if block.get("type") != "non_standard"
+            else block["value"]  # type: ignore[typeddict-item] # this is only non-standard blocks
+            for block in content
+        ]
+        for block in blocks:
+            block_type = block.get("type")
+
+            if (
+                block_type == "document"
+                and "source" in block
+                and "type" in block["source"]
+            ):
+                if block["source"]["type"] == "base64":
+                    file_block: types.FileContentBlock = {
+                        "type": "file",
+                        "base64": block["source"]["data"],
+                        "mime_type": block["source"]["media_type"],
+                    }
+                    _populate_extras(file_block, block, {"type", "source"})
+                    yield file_block
+
+                elif block["source"]["type"] == "url":
+                    file_block = {
+                        "type": "file",
+                        "url": block["source"]["url"],
+                    }
+                    _populate_extras(file_block, block, {"type", "source"})
+                    yield file_block
+
+                elif block["source"]["type"] == "file":
+                    file_block = {
+                        "type": "file",
+                        "id": block["source"]["file_id"],
+                    }
+                    _populate_extras(file_block, block, {"type", "source"})
+                    yield file_block
+
+                elif block["source"]["type"] == "text":
+                    plain_text_block: types.PlainTextContentBlock = {
+                        "type": "text-plain",
+                        "text": block["source"]["data"],
+                        "mime_type": block.get("media_type", "text/plain"),
+                    }
+                    _populate_extras(plain_text_block, block, {"type", "source"})
+                    yield plain_text_block
+
+                else:
+                    yield {"type": "non_standard", "value": block}
+
+            elif (
+                block_type == "image"
+                and "source" in block
+                and "type" in block["source"]
+            ):
+                if block["source"]["type"] == "base64":
+                    image_block: types.ImageContentBlock = {
+                        "type": "image",
+                        "base64": block["source"]["data"],
+                        "mime_type": block["source"]["media_type"],
+                    }
+                    _populate_extras(image_block, block, {"type", "source"})
+                    yield image_block
+
+                elif block["source"]["type"] == "url":
+                    image_block = {
+                        "type": "image",
+                        "url": block["source"]["url"],
+                    }
+                    _populate_extras(image_block, block, {"type", "source"})
+                    yield image_block
+
+                elif block["source"]["type"] == "file":
+                    image_block = {
+                        "type": "image",
+                        "id": block["source"]["file_id"],
+                    }
+                    _populate_extras(image_block, block, {"type", "source"})
+                    yield image_block
+
+                else:
+                    yield {"type": "non_standard", "value": block}
+
+            elif block_type in types.KNOWN_BLOCK_TYPES:
+                yield cast("types.ContentBlock", block)
+
+            else:
+                yield {"type": "non_standard", "value": block}
+
+    return list(_iter_blocks())
+
+
+def _convert_citation_to_v1(citation: dict[str, Any]) -> types.Annotation:
+    citation_type = citation.get("type")
+
+    if citation_type == "web_search_result_location":
+        url_citation: types.Citation = {
+            "type": "citation",
+            "cited_text": citation["cited_text"],
+            "url": citation["url"],
+        }
+        if title := citation.get("title"):
+            url_citation["title"] = title
+        known_fields = {"type", "cited_text", "url", "title", "index", "extras"}
+        for key, value in citation.items():
+            if key not in known_fields:
+                if "extras" not in url_citation:
+                    url_citation["extras"] = {}
+                url_citation["extras"][key] = value
+
+        return url_citation
+
+    if citation_type in (
+        "char_location",
+        "content_block_location",
+        "page_location",
+        "search_result_location",
+    ):
+        document_citation: types.Citation = {
+            "type": "citation",
+            "cited_text": citation["cited_text"],
+        }
+        if "document_title" in citation:
+            document_citation["title"] = citation["document_title"]
+        elif title := citation.get("title"):
+            document_citation["title"] = title
+        else:
+            pass
+        known_fields = {
+            "type",
+            "cited_text",
+            "document_title",
+            "title",
+            "index",
+            "extras",
+        }
+        for key, value in citation.items():
+            if key not in known_fields:
+                if "extras" not in document_citation:
+                    document_citation["extras"] = {}
+                document_citation["extras"][key] = value
+
+        return document_citation
+
+    return {
+        "type": "non_standard_annotation",
+        "value": citation,
+    }
+
+
+def _convert_to_v1_from_anthropic(message: AIMessage) -> list[types.ContentBlock]:
+    """Convert Anthropic message content to v1 format."""
+    if isinstance(message.content, str):
+        content: list[str | dict] = [{"type": "text", "text": message.content}]
+    else:
+        content = message.content
+
+    def _iter_blocks() -> Iterable[types.ContentBlock]:
+        for block in content:
+            if not isinstance(block, dict):
+                continue
+            block_type = block.get("type")
+
+            if block_type == "text":
+                if citations := block.get("citations"):
+                    text_block: types.TextContentBlock = {
+                        "type": "text",
+                        "text": block.get("text", ""),
+                        "annotations": [_convert_citation_to_v1(a) for a in citations],
+                    }
+                else:
+                    text_block = {"type": "text", "text": block["text"]}
+                if "index" in block:
+                    text_block["index"] = block["index"]
+                yield text_block
+
+            elif block_type == "thinking":
+                reasoning_block: types.ReasoningContentBlock = {
+                    "type": "reasoning",
+                    "reasoning": block.get("thinking", ""),
+                }
+                if "index" in block:
+                    reasoning_block["index"] = block["index"]
+                known_fields = {"type", "thinking", "index", "extras"}
+                for key in block:
+                    if key not in known_fields:
+                        if "extras" not in reasoning_block:
+                            reasoning_block["extras"] = {}
+                        reasoning_block["extras"][key] = block[key]
+                yield reasoning_block
+
+            elif block_type == "tool_use":
+                if (
+                    isinstance(message, AIMessageChunk)
+                    and len(message.tool_call_chunks) == 1
+                    and message.chunk_position != "last"
+                ):
+                    # Isolated chunk
+                    tool_call_chunk: types.ToolCallChunk = (
+                        message.tool_call_chunks[0].copy()  # type: ignore[assignment]
+                    )
+                    if "type" not in tool_call_chunk:
+                        tool_call_chunk["type"] = "tool_call_chunk"
+                    yield tool_call_chunk
+                else:
+                    tool_call_block: types.ToolCall | None = None
+                    # Non-streaming or gathered chunk
+                    if len(message.tool_calls) == 1:
+                        tool_call_block = {
+                            "type": "tool_call",
+                            "name": message.tool_calls[0]["name"],
+                            "args": message.tool_calls[0]["args"],
+                            "id": message.tool_calls[0].get("id"),
+                        }
+                    elif call_id := block.get("id"):
+                        for tc in message.tool_calls:
+                            if tc.get("id") == call_id:
+                                tool_call_block = {
+                                    "type": "tool_call",
+                                    "name": tc["name"],
+                                    "args": tc["args"],
+                                    "id": tc.get("id"),
+                                }
+                                break
+                    else:
+                        pass
+                    if not tool_call_block:
+                        tool_call_block = {
+                            "type": "tool_call",
+                            "name": block.get("name", ""),
+                            "args": block.get("input", {}),
+                            "id": block.get("id", ""),
+                        }
+                    if "index" in block:
+                        tool_call_block["index"] = block["index"]
+                    yield tool_call_block
+
+            elif block_type == "input_json_delta" and isinstance(
+                message, AIMessageChunk
+            ):
+                if len(message.tool_call_chunks) == 1:
+                    tool_call_chunk = (
+                        message.tool_call_chunks[0].copy()  # type: ignore[assignment]
+                    )
+                    if "type" not in tool_call_chunk:
+                        tool_call_chunk["type"] = "tool_call_chunk"
+                    yield tool_call_chunk
+
+                else:
+                    server_tool_call_chunk: types.ServerToolCallChunk = {
+                        "type": "server_tool_call_chunk",
+                        "args": block.get("partial_json", ""),
+                    }
+                    if "index" in block:
+                        server_tool_call_chunk["index"] = block["index"]
+                    yield server_tool_call_chunk
+
+            elif block_type == "server_tool_use":
+                if block.get("name") == "code_execution":
+                    server_tool_use_name = "code_interpreter"
+                else:
+                    server_tool_use_name = block.get("name", "")
+                if (
+                    isinstance(message, AIMessageChunk)
+                    and block.get("input") == {}
+                    and "partial_json" not in block
+                    and message.chunk_position != "last"
+                ):
+                    # First chunk in a stream
+                    server_tool_call_chunk = {
+                        "type": "server_tool_call_chunk",
+                        "name": server_tool_use_name,
+                        "args": "",
+                        "id": block.get("id", ""),
+                    }
+                    if "index" in block:
+                        server_tool_call_chunk["index"] = block["index"]
+                    known_fields = {"type", "name", "input", "id", "index"}
+                    _populate_extras(server_tool_call_chunk, block, known_fields)
+                    yield server_tool_call_chunk
+                else:
+                    server_tool_call: types.ServerToolCall = {
+                        "type": "server_tool_call",
+                        "name": server_tool_use_name,
+                        "args": block.get("input", {}),
+                        "id": block.get("id", ""),
+                    }
+
+                    if block.get("input") == {} and "partial_json" in block:
+                        try:
+                            input_ = json.loads(block["partial_json"])
+                            if isinstance(input_, dict):
+                                server_tool_call["args"] = input_
+                        except json.JSONDecodeError:
+                            pass
+
+                    if "index" in block:
+                        server_tool_call["index"] = block["index"]
+                    known_fields = {
+                        "type",
+                        "name",
+                        "input",
+                        "partial_json",
+                        "id",
+                        "index",
+                    }
+                    _populate_extras(server_tool_call, block, known_fields)
+
+                    yield server_tool_call
+
+            elif block_type == "mcp_tool_use":
+                if (
+                    isinstance(message, AIMessageChunk)
+                    and block.get("input") == {}
+                    and "partial_json" not in block
+                    and message.chunk_position != "last"
+                ):
+                    # First chunk in a stream
+                    server_tool_call_chunk = {
+                        "type": "server_tool_call_chunk",
+                        "name": "remote_mcp",
+                        "args": "",
+                        "id": block.get("id", ""),
+                    }
+                    if "name" in block:
+                        server_tool_call_chunk["extras"] = {"tool_name": block["name"]}
+                    known_fields = {"type", "name", "input", "id", "index"}
+                    _populate_extras(server_tool_call_chunk, block, known_fields)
+                    if "index" in block:
+                        server_tool_call_chunk["index"] = block["index"]
+                    yield server_tool_call_chunk
+                else:
+                    server_tool_call = {
+                        "type": "server_tool_call",
+                        "name": "remote_mcp",
+                        "args": block.get("input", {}),
+                        "id": block.get("id", ""),
+                    }
+
+                    if block.get("input") == {} and "partial_json" in block:
+                        try:
+                            input_ = json.loads(block["partial_json"])
+                            if isinstance(input_, dict):
+                                server_tool_call["args"] = input_
+                        except json.JSONDecodeError:
+                            pass
+
+                    if "name" in block:
+                        server_tool_call["extras"] = {"tool_name": block["name"]}
+                    known_fields = {
+                        "type",
+                        "name",
+                        "input",
+                        "partial_json",
+                        "id",
+                        "index",
+                    }
+                    _populate_extras(server_tool_call, block, known_fields)
+                    if "index" in block:
+                        server_tool_call["index"] = block["index"]
+
+                    yield server_tool_call
+
+            elif block_type and block_type.endswith("_tool_result"):
+                server_tool_result: types.ServerToolResult = {
+                    "type": "server_tool_result",
+                    "tool_call_id": block.get("tool_use_id", ""),
+                    "status": "success",
+                    "extras": {"block_type": block_type},
+                }
+                if output := block.get("content", []):
+                    server_tool_result["output"] = output
+                if isinstance(output, dict) and output.get(
+                    "error_code"  # web_search, code_interpreter
+                ):
+                    server_tool_result["status"] = "error"
+                if block.get("is_error"):  # mcp_tool_result
+                    server_tool_result["status"] = "error"
+                if "index" in block:
+                    server_tool_result["index"] = block["index"]
+
+                known_fields = {"type", "tool_use_id", "content", "is_error", "index"}
+                _populate_extras(server_tool_result, block, known_fields)
+
+                yield server_tool_result
+
+            else:
+                new_block: types.NonStandardContentBlock = {
+                    "type": "non_standard",
+                    "value": block,
+                }
+                if "index" in new_block["value"]:
+                    new_block["index"] = new_block["value"].pop("index")
+                yield new_block
+
+    return list(_iter_blocks())
+
+
+def translate_content(message: AIMessage) -> list[types.ContentBlock]:
+    """Derive standard content blocks from a message with Anthropic content."""
+    return _convert_to_v1_from_anthropic(message)
+
+
+def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
+    """Derive standard content blocks from a message chunk with Anthropic content."""
+    return _convert_to_v1_from_anthropic(message)
+
+
+def _register_anthropic_translator() -> None:
+    """Register the Anthropic translator with the central registry.
+
+    Run automatically when the module is imported.
+    """
+    from langchain_core.messages.block_translators import (  # noqa: PLC0415
+        register_translator,
+    )
+
+    register_translator("anthropic", translate_content, translate_content_chunk)
+
+
+_register_anthropic_translator()
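
The module above registers itself with the block-translator registry at import time and exposes `translate_content` / `translate_content_chunk` for deriving v1 blocks from Anthropic-native content. A minimal usage sketch, assuming langchain-core 1.0.0 is installed; the Anthropic-style content dicts are invented, and calling the translator directly is only for illustration (normally it is reached through the registry):

```python
from langchain_core.messages import AIMessage
from langchain_core.messages.block_translators.anthropic import translate_content

# Hypothetical Anthropic-native content; the shapes match the branches handled above.
msg = AIMessage(
    content=[
        {"type": "thinking", "thinking": "Considering the question..."},
        {
            "type": "text",
            "text": "The sky is blue.",
            "citations": [
                {
                    "type": "web_search_result_location",
                    "cited_text": "sky is blue",
                    "url": "https://example.com",
                    "title": "Example page",
                }
            ],
        },
    ]
)

blocks = translate_content(msg)
for block in blocks:
    print(block["type"])
# Expected per the code above: "reasoning", then "text" (whose "annotations"
# list carries a standard "citation" entry built by _convert_citation_to_v1).
```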
langchain_core/messages/block_translators/bedrock.py
@@ -0,0 +1,94 @@
+"""Derivations of standard content blocks from Bedrock content."""
+
+from langchain_core.messages import AIMessage, AIMessageChunk
+from langchain_core.messages import content as types
+from langchain_core.messages.block_translators.anthropic import (
+    _convert_to_v1_from_anthropic,
+)
+
+
+def _convert_to_v1_from_bedrock(message: AIMessage) -> list[types.ContentBlock]:
+    """Convert bedrock message content to v1 format."""
+    out = _convert_to_v1_from_anthropic(message)
+
+    content_tool_call_ids = {
+        block.get("id")
+        for block in out
+        if isinstance(block, dict) and block.get("type") == "tool_call"
+    }
+    for tool_call in message.tool_calls:
+        if (id_ := tool_call.get("id")) and id_ not in content_tool_call_ids:
+            tool_call_block: types.ToolCall = {
+                "type": "tool_call",
+                "id": id_,
+                "name": tool_call["name"],
+                "args": tool_call["args"],
+            }
+            if "index" in tool_call:
+                tool_call_block["index"] = tool_call["index"]  # type: ignore[typeddict-item]
+            if "extras" in tool_call:
+                tool_call_block["extras"] = tool_call["extras"]  # type: ignore[typeddict-item]
+            out.append(tool_call_block)
+    return out
+
+
+def _convert_to_v1_from_bedrock_chunk(
+    message: AIMessageChunk,
+) -> list[types.ContentBlock]:
+    """Convert bedrock message chunk content to v1 format."""
+    if (
+        message.content == ""
+        and not message.additional_kwargs
+        and not message.tool_calls
+    ):
+        # Bedrock outputs multiple chunks containing response metadata
+        return []
+
+    out = _convert_to_v1_from_anthropic(message)
+
+    if (
+        message.tool_call_chunks
+        and not message.content
+        and message.chunk_position != "last"  # keep tool_calls if aggregated
+    ):
+        for tool_call_chunk in message.tool_call_chunks:
+            tc: types.ToolCallChunk = {
+                "type": "tool_call_chunk",
+                "id": tool_call_chunk.get("id"),
+                "name": tool_call_chunk.get("name"),
+                "args": tool_call_chunk.get("args"),
+            }
+            if (idx := tool_call_chunk.get("index")) is not None:
+                tc["index"] = idx
+            out.append(tc)
+    return out
+
+
+def translate_content(message: AIMessage) -> list[types.ContentBlock]:
+    """Derive standard content blocks from a message with Bedrock content."""
+    if "claude" not in message.response_metadata.get("model_name", "").lower():
+        raise NotImplementedError  # fall back to best-effort parsing
+    return _convert_to_v1_from_bedrock(message)
+
+
+def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
+    """Derive standard content blocks from a message chunk with Bedrock content."""
+    # TODO: add model_name to all Bedrock chunks and update core merging logic
+    # to not append during aggregation. Then raise NotImplementedError here if
+    # not an Anthropic model to fall back to best-effort parsing.
+    return _convert_to_v1_from_bedrock_chunk(message)
+
+
+def _register_bedrock_translator() -> None:
+    """Register the bedrock translator with the central registry.
+
+    Run automatically when the module is imported.
+    """
+    from langchain_core.messages.block_translators import (  # noqa: PLC0415
+        register_translator,
+    )
+
+    register_translator("bedrock", translate_content, translate_content_chunk)
+
+
+_register_bedrock_translator()