langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/_api/beta_decorator.py +2 -2
- langchain_core/_api/deprecation.py +1 -1
- langchain_core/beta/runnables/context.py +1 -1
- langchain_core/callbacks/base.py +14 -23
- langchain_core/callbacks/file.py +13 -2
- langchain_core/callbacks/manager.py +74 -157
- langchain_core/callbacks/streaming_stdout.py +3 -4
- langchain_core/callbacks/usage.py +2 -12
- langchain_core/chat_history.py +6 -6
- langchain_core/documents/base.py +1 -1
- langchain_core/documents/compressor.py +9 -6
- langchain_core/indexing/base.py +2 -2
- langchain_core/language_models/_utils.py +232 -101
- langchain_core/language_models/base.py +35 -23
- langchain_core/language_models/chat_models.py +248 -54
- langchain_core/language_models/fake_chat_models.py +28 -81
- langchain_core/load/dump.py +3 -4
- langchain_core/messages/__init__.py +30 -24
- langchain_core/messages/ai.py +188 -30
- langchain_core/messages/base.py +164 -25
- langchain_core/messages/block_translators/__init__.py +89 -0
- langchain_core/messages/block_translators/anthropic.py +451 -0
- langchain_core/messages/block_translators/bedrock.py +45 -0
- langchain_core/messages/block_translators/bedrock_converse.py +47 -0
- langchain_core/messages/block_translators/google_genai.py +45 -0
- langchain_core/messages/block_translators/google_vertexai.py +47 -0
- langchain_core/messages/block_translators/groq.py +45 -0
- langchain_core/messages/block_translators/langchain_v0.py +164 -0
- langchain_core/messages/block_translators/ollama.py +45 -0
- langchain_core/messages/block_translators/openai.py +798 -0
- langchain_core/messages/{content_blocks.py → content.py} +303 -278
- langchain_core/messages/human.py +29 -9
- langchain_core/messages/system.py +29 -9
- langchain_core/messages/tool.py +94 -13
- langchain_core/messages/utils.py +34 -234
- langchain_core/output_parsers/base.py +14 -50
- langchain_core/output_parsers/json.py +2 -5
- langchain_core/output_parsers/list.py +2 -7
- langchain_core/output_parsers/openai_functions.py +5 -28
- langchain_core/output_parsers/openai_tools.py +49 -90
- langchain_core/output_parsers/pydantic.py +2 -3
- langchain_core/output_parsers/transform.py +12 -53
- langchain_core/output_parsers/xml.py +9 -17
- langchain_core/prompt_values.py +8 -112
- langchain_core/prompts/chat.py +1 -3
- langchain_core/runnables/base.py +500 -451
- langchain_core/runnables/branch.py +1 -1
- langchain_core/runnables/fallbacks.py +4 -4
- langchain_core/runnables/history.py +1 -1
- langchain_core/runnables/passthrough.py +3 -3
- langchain_core/runnables/retry.py +1 -1
- langchain_core/runnables/router.py +1 -1
- langchain_core/structured_query.py +3 -7
- langchain_core/tools/base.py +14 -41
- langchain_core/tools/convert.py +2 -22
- langchain_core/tools/retriever.py +1 -8
- langchain_core/tools/structured.py +2 -10
- langchain_core/tracers/_streaming.py +6 -7
- langchain_core/tracers/base.py +7 -14
- langchain_core/tracers/core.py +4 -27
- langchain_core/tracers/event_stream.py +4 -15
- langchain_core/tracers/langchain.py +3 -14
- langchain_core/tracers/log_stream.py +2 -3
- langchain_core/utils/_merge.py +45 -7
- langchain_core/utils/function_calling.py +22 -9
- langchain_core/utils/utils.py +29 -0
- langchain_core/version.py +1 -1
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a2.dist-info}/METADATA +7 -9
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a2.dist-info}/RECORD +71 -64
- langchain_core/v1/__init__.py +0 -1
- langchain_core/v1/chat_models.py +0 -1047
- langchain_core/v1/messages.py +0 -755
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a2.dist-info}/WHEEL +0 -0
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a2.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,451 @@
|
|
|
1
|
+
"""Derivations of standard content blocks from Anthropic content."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from collections.abc import Iterable
|
|
5
|
+
from typing import Any, Optional, cast
|
|
6
|
+
|
|
7
|
+
from langchain_core.messages import AIMessage, AIMessageChunk
|
|
8
|
+
from langchain_core.messages import content as types
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def _populate_extras(
|
|
12
|
+
standard_block: types.ContentBlock, block: dict[str, Any], known_fields: set[str]
|
|
13
|
+
) -> types.ContentBlock:
|
|
14
|
+
"""Mutate a block, populating extras."""
|
|
15
|
+
if standard_block.get("type") == "non_standard":
|
|
16
|
+
return standard_block
|
|
17
|
+
|
|
18
|
+
for key, value in block.items():
|
|
19
|
+
if key not in known_fields:
|
|
20
|
+
if "extras" not in block:
|
|
21
|
+
# Below type-ignores are because mypy thinks a non-standard block can
|
|
22
|
+
# get here, although we exclude them above.
|
|
23
|
+
standard_block["extras"] = {} # type: ignore[typeddict-unknown-key]
|
|
24
|
+
standard_block["extras"][key] = value # type: ignore[typeddict-item]
|
|
25
|
+
|
|
26
|
+
return standard_block
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def _convert_to_v1_from_anthropic_input(
    content: list[types.ContentBlock],
) -> list[types.ContentBlock]:
    """Attempt to unpack non-standard blocks.

    Unwraps ``non_standard`` wrappers back to their provider-native payloads,
    maps Anthropic ``document``/``image`` blocks onto standard file, image, and
    plain-text blocks, passes through already-standard block types, and
    re-wraps anything unrecognized as ``non_standard``.

    Args:
        content: Content blocks, possibly containing provider-native payloads.

    Returns:
        Standardized content blocks (one output block per input block).
    """

    def _iter_blocks() -> Iterable[types.ContentBlock]:
        # Unwrap non_standard wrappers to their raw provider payloads so they
        # can be re-examined below.
        blocks: list[dict[str, Any]] = [
            cast("dict[str, Any]", block)
            if block.get("type") != "non_standard"
            else block["value"]  # type: ignore[typeddict-item] # this is only non-standard blocks
            for block in content
        ]
        for block in blocks:
            block_type = block.get("type")

            # Anthropic "document" blocks -> standard file / plain-text blocks,
            # dispatched on the nested source type.
            if (
                block_type == "document"
                and "source" in block
                and "type" in block["source"]
            ):
                if block["source"]["type"] == "base64":
                    file_block: types.FileContentBlock = {
                        "type": "file",
                        "base64": block["source"]["data"],
                        "mime_type": block["source"]["media_type"],
                    }
                    _populate_extras(file_block, block, {"type", "source"})
                    yield file_block

                elif block["source"]["type"] == "url":
                    file_block = {
                        "type": "file",
                        "url": block["source"]["url"],
                    }
                    _populate_extras(file_block, block, {"type", "source"})
                    yield file_block

                elif block["source"]["type"] == "file":
                    file_block = {
                        "type": "file",
                        "id": block["source"]["file_id"],
                    }
                    _populate_extras(file_block, block, {"type", "source"})
                    yield file_block

                elif block["source"]["type"] == "text":
                    plain_text_block: types.PlainTextContentBlock = {
                        "type": "text-plain",
                        "text": block["source"]["data"],
                        # NOTE(review): media_type is read from the top-level
                        # block here, not block["source"] — confirm intended.
                        "mime_type": block.get("media_type", "text/plain"),
                    }
                    _populate_extras(plain_text_block, block, {"type", "source"})
                    yield plain_text_block

                else:
                    # Unknown document source type: keep the raw payload.
                    yield {"type": "non_standard", "value": block}

            # Anthropic "image" blocks -> standard image blocks, same dispatch.
            elif (
                block_type == "image"
                and "source" in block
                and "type" in block["source"]
            ):
                if block["source"]["type"] == "base64":
                    image_block: types.ImageContentBlock = {
                        "type": "image",
                        "base64": block["source"]["data"],
                        "mime_type": block["source"]["media_type"],
                    }
                    _populate_extras(image_block, block, {"type", "source"})
                    yield image_block

                elif block["source"]["type"] == "url":
                    image_block = {
                        "type": "image",
                        "url": block["source"]["url"],
                    }
                    _populate_extras(image_block, block, {"type", "source"})
                    yield image_block

                elif block["source"]["type"] == "file":
                    image_block = {
                        "type": "image",
                        "id": block["source"]["file_id"],
                    }
                    _populate_extras(image_block, block, {"type", "source"})
                    yield image_block

                else:
                    yield {"type": "non_standard", "value": block}

            # Already a standard block type: pass through unchanged.
            elif block_type in types.KNOWN_BLOCK_TYPES:
                yield cast("types.ContentBlock", block)

            # Anything else is preserved verbatim as non_standard.
            else:
                yield {"type": "non_standard", "value": block}

    return list(_iter_blocks())
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def _convert_citation_to_v1(citation: dict[str, Any]) -> types.Annotation:
|
|
129
|
+
citation_type = citation.get("type")
|
|
130
|
+
|
|
131
|
+
if citation_type == "web_search_result_location":
|
|
132
|
+
url_citation: types.Citation = {
|
|
133
|
+
"type": "citation",
|
|
134
|
+
"cited_text": citation["cited_text"],
|
|
135
|
+
"url": citation["url"],
|
|
136
|
+
}
|
|
137
|
+
if title := citation.get("title"):
|
|
138
|
+
url_citation["title"] = title
|
|
139
|
+
known_fields = {"type", "cited_text", "url", "title", "index", "extras"}
|
|
140
|
+
for key, value in citation.items():
|
|
141
|
+
if key not in known_fields:
|
|
142
|
+
if "extras" not in url_citation:
|
|
143
|
+
url_citation["extras"] = {}
|
|
144
|
+
url_citation["extras"][key] = value
|
|
145
|
+
|
|
146
|
+
return url_citation
|
|
147
|
+
|
|
148
|
+
if citation_type in (
|
|
149
|
+
"char_location",
|
|
150
|
+
"content_block_location",
|
|
151
|
+
"page_location",
|
|
152
|
+
"search_result_location",
|
|
153
|
+
):
|
|
154
|
+
document_citation: types.Citation = {
|
|
155
|
+
"type": "citation",
|
|
156
|
+
"cited_text": citation["cited_text"],
|
|
157
|
+
}
|
|
158
|
+
if "document_title" in citation:
|
|
159
|
+
document_citation["title"] = citation["document_title"]
|
|
160
|
+
elif title := citation.get("title"):
|
|
161
|
+
document_citation["title"] = title
|
|
162
|
+
else:
|
|
163
|
+
pass
|
|
164
|
+
known_fields = {
|
|
165
|
+
"type",
|
|
166
|
+
"cited_text",
|
|
167
|
+
"document_title",
|
|
168
|
+
"title",
|
|
169
|
+
"index",
|
|
170
|
+
"extras",
|
|
171
|
+
}
|
|
172
|
+
for key, value in citation.items():
|
|
173
|
+
if key not in known_fields:
|
|
174
|
+
if "extras" not in document_citation:
|
|
175
|
+
document_citation["extras"] = {}
|
|
176
|
+
document_citation["extras"][key] = value
|
|
177
|
+
|
|
178
|
+
return document_citation
|
|
179
|
+
|
|
180
|
+
return {
|
|
181
|
+
"type": "non_standard_annotation",
|
|
182
|
+
"value": citation,
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def _convert_to_v1_from_anthropic(message: AIMessage) -> list[types.ContentBlock]:
    """Convert Anthropic message content to v1 format.

    Handles text (with citations), thinking, tool-use (streaming and
    non-streaming), server tool use (web search, code execution), and the
    corresponding tool results; everything else is wrapped as
    ``non_standard``.
    """
    if isinstance(message.content, str):
        # NOTE(review): mutates the input message in place — confirm callers
        # do not rely on the original string content.
        message.content = [{"type": "text", "text": message.content}]

    def _iter_blocks() -> Iterable[types.ContentBlock]:
        for block in message.content:
            # Bare strings in mixed content are skipped entirely.
            if not isinstance(block, dict):
                continue
            block_type = block.get("type")

            if block_type == "text":
                # Citations, when present, become standard annotations.
                if citations := block.get("citations"):
                    text_block: types.TextContentBlock = {
                        "type": "text",
                        "text": block.get("text", ""),
                        "annotations": [_convert_citation_to_v1(a) for a in citations],
                    }
                else:
                    text_block = {"type": "text", "text": block["text"]}
                if "index" in block:
                    text_block["index"] = block["index"]
                yield text_block

            elif block_type == "thinking":
                reasoning_block: types.ReasoningContentBlock = {
                    "type": "reasoning",
                    "reasoning": block.get("thinking", ""),
                }
                if "index" in block:
                    reasoning_block["index"] = block["index"]
                # Unknown fields (e.g. the thinking signature) go to extras.
                known_fields = {"type", "thinking", "index", "extras"}
                for key in block:
                    if key not in known_fields:
                        if "extras" not in reasoning_block:
                            reasoning_block["extras"] = {}
                        reasoning_block["extras"][key] = block[key]
                yield reasoning_block

            elif block_type == "tool_use":
                if (
                    isinstance(message, AIMessageChunk)
                    and len(message.tool_call_chunks) == 1
                    and message.chunk_position != "last"
                ):
                    # Isolated chunk
                    tool_call_chunk: types.ToolCallChunk = (
                        message.tool_call_chunks[0].copy()  # type: ignore[assignment]
                    )
                    if "type" not in tool_call_chunk:
                        tool_call_chunk["type"] = "tool_call_chunk"
                    yield tool_call_chunk
                else:
                    tool_call_block: Optional[types.ToolCall] = None
                    # Non-streaming or gathered chunk
                    if len(message.tool_calls) == 1:
                        # Single call: no ambiguity, take it directly.
                        tool_call_block = {
                            "type": "tool_call",
                            "name": message.tool_calls[0]["name"],
                            "args": message.tool_calls[0]["args"],
                            "id": message.tool_calls[0].get("id"),
                        }
                    elif call_id := block.get("id"):
                        # Multiple calls: match this block to its parsed call
                        # by id.
                        for tc in message.tool_calls:
                            if tc.get("id") == call_id:
                                tool_call_block = {
                                    "type": "tool_call",
                                    "name": tc["name"],
                                    "args": tc["args"],
                                    "id": tc.get("id"),
                                }
                                break
                    else:
                        pass
                    if not tool_call_block:
                        # Fallback: build the call from the raw block fields.
                        tool_call_block = {
                            "type": "tool_call",
                            "name": block.get("name", ""),
                            "args": block.get("input", {}),
                            "id": block.get("id", ""),
                        }
                    if "index" in block:
                        tool_call_block["index"] = block["index"]
                    yield tool_call_block

            elif (
                block_type == "input_json_delta"
                and isinstance(message, AIMessageChunk)
                and len(message.tool_call_chunks) == 1
            ):
                # Streaming partial-JSON delta for an in-progress tool call.
                tool_call_chunk = (
                    message.tool_call_chunks[0].copy()  # type: ignore[assignment]
                )
                if "type" not in tool_call_chunk:
                    tool_call_chunk["type"] = "tool_call_chunk"
                yield tool_call_chunk

            elif block_type == "server_tool_use":
                if block.get("name") == "web_search":
                    web_search_call: types.WebSearchCall = {"type": "web_search_call"}

                    if query := block.get("input", {}).get("query"):
                        web_search_call["query"] = query

                    elif block.get("input") == {} and "partial_json" in block:
                        # Streaming case: the query may only exist in the
                        # accumulated partial JSON.
                        try:
                            input_ = json.loads(block["partial_json"])
                            if isinstance(input_, dict) and "query" in input_:
                                web_search_call["query"] = input_["query"]
                        except json.JSONDecodeError:
                            pass

                    if "id" in block:
                        web_search_call["id"] = block["id"]
                    if "index" in block:
                        web_search_call["index"] = block["index"]
                    known_fields = {"type", "name", "input", "id", "index"}
                    for key, value in block.items():
                        if key not in known_fields:
                            if "extras" not in web_search_call:
                                web_search_call["extras"] = {}
                            web_search_call["extras"][key] = value
                    yield web_search_call

                elif block.get("name") == "code_execution":
                    code_interpreter_call: types.CodeInterpreterCall = {
                        "type": "code_interpreter_call"
                    }

                    if code := block.get("input", {}).get("code"):
                        code_interpreter_call["code"] = code

                    elif block.get("input") == {} and "partial_json" in block:
                        # Streaming case: recover the code from partial JSON.
                        try:
                            input_ = json.loads(block["partial_json"])
                            if isinstance(input_, dict) and "code" in input_:
                                code_interpreter_call["code"] = input_["code"]
                        except json.JSONDecodeError:
                            pass

                    if "id" in block:
                        code_interpreter_call["id"] = block["id"]
                    if "index" in block:
                        code_interpreter_call["index"] = block["index"]
                    known_fields = {"type", "name", "input", "id", "index"}
                    for key, value in block.items():
                        if key not in known_fields:
                            if "extras" not in code_interpreter_call:
                                code_interpreter_call["extras"] = {}
                            code_interpreter_call["extras"][key] = value
                    yield code_interpreter_call

                else:
                    # Unknown server tool: wrap verbatim, hoisting the
                    # streaming index to the wrapper level.
                    new_block: types.NonStandardContentBlock = {
                        "type": "non_standard",
                        "value": block,
                    }
                    if "index" in new_block["value"]:
                        new_block["index"] = new_block["value"].pop("index")
                    yield new_block

            elif block_type == "web_search_tool_result":
                web_search_result: types.WebSearchResult = {"type": "web_search_result"}
                if "tool_use_id" in block:
                    web_search_result["id"] = block["tool_use_id"]
                if "index" in block:
                    web_search_result["index"] = block["index"]

                if web_search_result_content := block.get("content", []):
                    if "extras" not in web_search_result:
                        web_search_result["extras"] = {}
                    # Collect result URLs; full result payloads go to extras.
                    urls = []
                    extra_content = []
                    for result_content in web_search_result_content:
                        if isinstance(result_content, dict):
                            if "url" in result_content:
                                urls.append(result_content["url"])
                            extra_content.append(result_content)
                    web_search_result["extras"]["content"] = extra_content
                    if urls:
                        web_search_result["urls"] = urls
                yield web_search_result

            elif block_type == "code_execution_tool_result":
                code_interpreter_result: types.CodeInterpreterResult = {
                    "type": "code_interpreter_result",
                    "output": [],
                }
                if "tool_use_id" in block:
                    code_interpreter_result["id"] = block["tool_use_id"]
                if "index" in block:
                    code_interpreter_result["index"] = block["index"]

                code_interpreter_output: types.CodeInterpreterOutput = {
                    "type": "code_interpreter_output"
                }

                code_execution_content = block.get("content", {})
                if code_execution_content.get("type") == "code_execution_result":
                    if "return_code" in code_execution_content:
                        code_interpreter_output["return_code"] = code_execution_content[
                            "return_code"
                        ]
                    if "stdout" in code_execution_content:
                        code_interpreter_output["stdout"] = code_execution_content[
                            "stdout"
                        ]
                    if stderr := code_execution_content.get("stderr"):
                        code_interpreter_output["stderr"] = stderr
                    # NOTE(review): "content" is never set on
                    # code_interpreter_output above, so this branch looks
                    # unreachable — possibly meant code_execution_content;
                    # confirm.
                    if (
                        output := code_interpreter_output.get("content")
                    ) and isinstance(output, list):
                        if "extras" not in code_interpreter_result:
                            code_interpreter_result["extras"] = {}
                        code_interpreter_result["extras"]["content"] = output
                        for output_block in output:
                            if "file_id" in output_block:
                                if "file_ids" not in code_interpreter_output:
                                    code_interpreter_output["file_ids"] = []
                                code_interpreter_output["file_ids"].append(
                                    output_block["file_id"]
                                )
                    code_interpreter_result["output"].append(code_interpreter_output)

                elif (
                    code_execution_content.get("type")
                    == "code_execution_tool_result_error"
                ):
                    if "extras" not in code_interpreter_result:
                        code_interpreter_result["extras"] = {}
                    code_interpreter_result["extras"]["error_code"] = (
                        code_execution_content.get("error_code")
                    )

                yield code_interpreter_result

            else:
                # Anything unrecognized is preserved verbatim, hoisting the
                # streaming index to the wrapper level.
                new_block = {"type": "non_standard", "value": block}
                if "index" in new_block["value"]:
                    new_block["index"] = new_block["value"].pop("index")
                yield new_block

    return list(_iter_blocks())
|
|
429
|
+
|
|
430
|
+
|
|
431
|
+
def translate_content(message: AIMessage) -> list[types.ContentBlock]:
    """Derive standard content blocks from a message with Anthropic content."""
    return _convert_to_v1_from_anthropic(message)


def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
    """Derive standard content blocks from a message chunk with Anthropic content."""
    return _convert_to_v1_from_anthropic(message)


def _register_anthropic_translator() -> None:
    """Register the Anthropic translator with the central registry.

    Run automatically when the module is imported.
    """
    # Local import — presumably to avoid a circular import at load time;
    # confirm.
    from langchain_core.messages.block_translators import register_translator

    register_translator("anthropic", translate_content, translate_content_chunk)


_register_anthropic_translator()
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
"""Derivations of standard content blocks from Amazon (Bedrock) content."""
|
|
2
|
+
|
|
3
|
+
import warnings
|
|
4
|
+
|
|
5
|
+
from langchain_core.messages import AIMessage, AIMessageChunk
|
|
6
|
+
from langchain_core.messages import content as types
|
|
7
|
+
|
|
8
|
+
WARNED = False
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def translate_content(message: AIMessage) -> list[types.ContentBlock]: # noqa: ARG001
|
|
12
|
+
"""Derive standard content blocks from a message with Bedrock content."""
|
|
13
|
+
global WARNED # noqa: PLW0603
|
|
14
|
+
if not WARNED:
|
|
15
|
+
warning_message = (
|
|
16
|
+
"Content block standardization is not yet fully supported for Bedrock."
|
|
17
|
+
)
|
|
18
|
+
warnings.warn(warning_message, stacklevel=2)
|
|
19
|
+
WARNED = True
|
|
20
|
+
raise NotImplementedError
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]: # noqa: ARG001
|
|
24
|
+
"""Derive standard content blocks from a chunk with Bedrock content."""
|
|
25
|
+
global WARNED # noqa: PLW0603
|
|
26
|
+
if not WARNED:
|
|
27
|
+
warning_message = (
|
|
28
|
+
"Content block standardization is not yet fully supported for Bedrock."
|
|
29
|
+
)
|
|
30
|
+
warnings.warn(warning_message, stacklevel=2)
|
|
31
|
+
WARNED = True
|
|
32
|
+
raise NotImplementedError
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def _register_bedrock_translator() -> None:
|
|
36
|
+
"""Register the Bedrock translator with the central registry.
|
|
37
|
+
|
|
38
|
+
Run automatically when the module is imported.
|
|
39
|
+
"""
|
|
40
|
+
from langchain_core.messages.block_translators import register_translator
|
|
41
|
+
|
|
42
|
+
register_translator("bedrock", translate_content, translate_content_chunk)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
_register_bedrock_translator()
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
"""Derivations of standard content blocks from Amazon (Bedrock Converse) content."""
|
|
2
|
+
|
|
3
|
+
import warnings
|
|
4
|
+
|
|
5
|
+
from langchain_core.messages import AIMessage, AIMessageChunk
|
|
6
|
+
from langchain_core.messages import content as types
|
|
7
|
+
|
|
8
|
+
WARNED = False
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def translate_content(message: AIMessage) -> list[types.ContentBlock]: # noqa: ARG001
|
|
12
|
+
"""Derive standard content blocks from a message with Bedrock Converse content."""
|
|
13
|
+
global WARNED # noqa: PLW0603
|
|
14
|
+
if not WARNED:
|
|
15
|
+
warning_message = (
|
|
16
|
+
"Content block standardization is not yet fully supported for Bedrock "
|
|
17
|
+
"Converse."
|
|
18
|
+
)
|
|
19
|
+
warnings.warn(warning_message, stacklevel=2)
|
|
20
|
+
WARNED = True
|
|
21
|
+
raise NotImplementedError
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]: # noqa: ARG001
|
|
25
|
+
"""Derive standard content blocks from a chunk with Bedrock Converse content."""
|
|
26
|
+
global WARNED # noqa: PLW0603
|
|
27
|
+
if not WARNED:
|
|
28
|
+
warning_message = (
|
|
29
|
+
"Content block standardization is not yet fully supported for Bedrock "
|
|
30
|
+
"Converse."
|
|
31
|
+
)
|
|
32
|
+
warnings.warn(warning_message, stacklevel=2)
|
|
33
|
+
WARNED = True
|
|
34
|
+
raise NotImplementedError
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def _register_bedrock_converse_translator() -> None:
|
|
38
|
+
"""Register the Bedrock Converse translator with the central registry.
|
|
39
|
+
|
|
40
|
+
Run automatically when the module is imported.
|
|
41
|
+
"""
|
|
42
|
+
from langchain_core.messages.block_translators import register_translator
|
|
43
|
+
|
|
44
|
+
register_translator("bedrock_converse", translate_content, translate_content_chunk)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
_register_bedrock_converse_translator()
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
"""Derivations of standard content blocks from Google (GenAI) content."""
|
|
2
|
+
|
|
3
|
+
import warnings
|
|
4
|
+
|
|
5
|
+
from langchain_core.messages import AIMessage, AIMessageChunk
|
|
6
|
+
from langchain_core.messages import content as types
|
|
7
|
+
|
|
8
|
+
WARNED = False
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def translate_content(message: AIMessage) -> list[types.ContentBlock]: # noqa: ARG001
|
|
12
|
+
"""Derive standard content blocks from a message with Google (GenAI) content."""
|
|
13
|
+
global WARNED # noqa: PLW0603
|
|
14
|
+
if not WARNED:
|
|
15
|
+
warning_message = (
|
|
16
|
+
"Content block standardization is not yet fully supported for Google GenAI."
|
|
17
|
+
)
|
|
18
|
+
warnings.warn(warning_message, stacklevel=2)
|
|
19
|
+
WARNED = True
|
|
20
|
+
raise NotImplementedError
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]: # noqa: ARG001
|
|
24
|
+
"""Derive standard content blocks from a chunk with Google (GenAI) content."""
|
|
25
|
+
global WARNED # noqa: PLW0603
|
|
26
|
+
if not WARNED:
|
|
27
|
+
warning_message = (
|
|
28
|
+
"Content block standardization is not yet fully supported for Google GenAI."
|
|
29
|
+
)
|
|
30
|
+
warnings.warn(warning_message, stacklevel=2)
|
|
31
|
+
WARNED = True
|
|
32
|
+
raise NotImplementedError
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def _register_google_genai_translator() -> None:
|
|
36
|
+
"""Register the Google (GenAI) translator with the central registry.
|
|
37
|
+
|
|
38
|
+
Run automatically when the module is imported.
|
|
39
|
+
"""
|
|
40
|
+
from langchain_core.messages.block_translators import register_translator
|
|
41
|
+
|
|
42
|
+
register_translator("google_genai", translate_content, translate_content_chunk)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
_register_google_genai_translator()
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
"""Derivations of standard content blocks from Google (VertexAI) content."""
|
|
2
|
+
|
|
3
|
+
import warnings
|
|
4
|
+
|
|
5
|
+
from langchain_core.messages import AIMessage, AIMessageChunk
|
|
6
|
+
from langchain_core.messages import content as types
|
|
7
|
+
|
|
8
|
+
WARNED = False
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def translate_content(message: AIMessage) -> list[types.ContentBlock]: # noqa: ARG001
|
|
12
|
+
"""Derive standard content blocks from a message with Google (VertexAI) content."""
|
|
13
|
+
global WARNED # noqa: PLW0603
|
|
14
|
+
if not WARNED:
|
|
15
|
+
warning_message = (
|
|
16
|
+
"Content block standardization is not yet fully supported for Google "
|
|
17
|
+
"VertexAI."
|
|
18
|
+
)
|
|
19
|
+
warnings.warn(warning_message, stacklevel=2)
|
|
20
|
+
WARNED = True
|
|
21
|
+
raise NotImplementedError
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]: # noqa: ARG001
|
|
25
|
+
"""Derive standard content blocks from a chunk with Google (VertexAI) content."""
|
|
26
|
+
global WARNED # noqa: PLW0603
|
|
27
|
+
if not WARNED:
|
|
28
|
+
warning_message = (
|
|
29
|
+
"Content block standardization is not yet fully supported for Google "
|
|
30
|
+
"VertexAI."
|
|
31
|
+
)
|
|
32
|
+
warnings.warn(warning_message, stacklevel=2)
|
|
33
|
+
WARNED = True
|
|
34
|
+
raise NotImplementedError
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def _register_google_vertexai_translator() -> None:
|
|
38
|
+
"""Register the Google (VertexAI) translator with the central registry.
|
|
39
|
+
|
|
40
|
+
Run automatically when the module is imported.
|
|
41
|
+
"""
|
|
42
|
+
from langchain_core.messages.block_translators import register_translator
|
|
43
|
+
|
|
44
|
+
register_translator("google_vertexai", translate_content, translate_content_chunk)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
_register_google_vertexai_translator()
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
"""Derivations of standard content blocks from Groq content."""
|
|
2
|
+
|
|
3
|
+
import warnings
|
|
4
|
+
|
|
5
|
+
from langchain_core.messages import AIMessage, AIMessageChunk
|
|
6
|
+
from langchain_core.messages import content as types
|
|
7
|
+
|
|
8
|
+
WARNED = False
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def translate_content(message: AIMessage) -> list[types.ContentBlock]: # noqa: ARG001
|
|
12
|
+
"""Derive standard content blocks from a message with Groq content."""
|
|
13
|
+
global WARNED # noqa: PLW0603
|
|
14
|
+
if not WARNED:
|
|
15
|
+
warning_message = (
|
|
16
|
+
"Content block standardization is not yet fully supported for Groq."
|
|
17
|
+
)
|
|
18
|
+
warnings.warn(warning_message, stacklevel=2)
|
|
19
|
+
WARNED = True
|
|
20
|
+
raise NotImplementedError
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]: # noqa: ARG001
|
|
24
|
+
"""Derive standard content blocks from a message chunk with Groq content."""
|
|
25
|
+
global WARNED # noqa: PLW0603
|
|
26
|
+
if not WARNED:
|
|
27
|
+
warning_message = (
|
|
28
|
+
"Content block standardization is not yet fully supported for Groq."
|
|
29
|
+
)
|
|
30
|
+
warnings.warn(warning_message, stacklevel=2)
|
|
31
|
+
WARNED = True
|
|
32
|
+
raise NotImplementedError
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def _register_groq_translator() -> None:
|
|
36
|
+
"""Register the Groq translator with the central registry.
|
|
37
|
+
|
|
38
|
+
Run automatically when the module is imported.
|
|
39
|
+
"""
|
|
40
|
+
from langchain_core.messages.block_translators import register_translator
|
|
41
|
+
|
|
42
|
+
register_translator("groq", translate_content, translate_content_chunk)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
_register_groq_translator()
|