langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0a1__py3-none-any.whl

This diff represents the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of langchain-core might be problematic.

Files changed (74)
  1. langchain_core/_api/beta_decorator.py +2 -2
  2. langchain_core/_api/deprecation.py +1 -1
  3. langchain_core/beta/runnables/context.py +1 -1
  4. langchain_core/callbacks/base.py +14 -23
  5. langchain_core/callbacks/file.py +13 -2
  6. langchain_core/callbacks/manager.py +74 -157
  7. langchain_core/callbacks/streaming_stdout.py +3 -4
  8. langchain_core/callbacks/usage.py +2 -12
  9. langchain_core/chat_history.py +6 -6
  10. langchain_core/documents/base.py +1 -1
  11. langchain_core/documents/compressor.py +9 -6
  12. langchain_core/indexing/base.py +2 -2
  13. langchain_core/language_models/_utils.py +230 -101
  14. langchain_core/language_models/base.py +35 -23
  15. langchain_core/language_models/chat_models.py +245 -53
  16. langchain_core/language_models/fake_chat_models.py +28 -81
  17. langchain_core/load/dump.py +3 -4
  18. langchain_core/messages/__init__.py +38 -22
  19. langchain_core/messages/ai.py +188 -30
  20. langchain_core/messages/base.py +164 -25
  21. langchain_core/messages/block_translators/__init__.py +89 -0
  22. langchain_core/messages/block_translators/anthropic.py +451 -0
  23. langchain_core/messages/block_translators/bedrock.py +45 -0
  24. langchain_core/messages/block_translators/bedrock_converse.py +47 -0
  25. langchain_core/messages/block_translators/google_genai.py +45 -0
  26. langchain_core/messages/block_translators/google_vertexai.py +47 -0
  27. langchain_core/messages/block_translators/groq.py +45 -0
  28. langchain_core/messages/block_translators/langchain_v0.py +297 -0
  29. langchain_core/messages/block_translators/ollama.py +45 -0
  30. langchain_core/messages/block_translators/openai.py +586 -0
  31. langchain_core/messages/{content_blocks.py → content.py} +346 -213
  32. langchain_core/messages/human.py +29 -9
  33. langchain_core/messages/system.py +29 -9
  34. langchain_core/messages/tool.py +94 -13
  35. langchain_core/messages/utils.py +32 -234
  36. langchain_core/output_parsers/base.py +14 -50
  37. langchain_core/output_parsers/json.py +2 -5
  38. langchain_core/output_parsers/list.py +2 -7
  39. langchain_core/output_parsers/openai_functions.py +5 -28
  40. langchain_core/output_parsers/openai_tools.py +49 -90
  41. langchain_core/output_parsers/pydantic.py +2 -3
  42. langchain_core/output_parsers/transform.py +12 -53
  43. langchain_core/output_parsers/xml.py +9 -17
  44. langchain_core/prompt_values.py +8 -112
  45. langchain_core/prompts/chat.py +1 -3
  46. langchain_core/runnables/base.py +500 -451
  47. langchain_core/runnables/branch.py +1 -1
  48. langchain_core/runnables/fallbacks.py +4 -4
  49. langchain_core/runnables/history.py +1 -1
  50. langchain_core/runnables/passthrough.py +3 -3
  51. langchain_core/runnables/retry.py +1 -1
  52. langchain_core/runnables/router.py +1 -1
  53. langchain_core/structured_query.py +3 -7
  54. langchain_core/tools/base.py +14 -41
  55. langchain_core/tools/convert.py +2 -22
  56. langchain_core/tools/retriever.py +1 -8
  57. langchain_core/tools/structured.py +2 -10
  58. langchain_core/tracers/_streaming.py +6 -7
  59. langchain_core/tracers/base.py +7 -14
  60. langchain_core/tracers/core.py +4 -27
  61. langchain_core/tracers/event_stream.py +4 -15
  62. langchain_core/tracers/langchain.py +3 -14
  63. langchain_core/tracers/log_stream.py +2 -3
  64. langchain_core/utils/_merge.py +45 -7
  65. langchain_core/utils/function_calling.py +22 -9
  66. langchain_core/utils/utils.py +29 -0
  67. langchain_core/version.py +1 -1
  68. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/METADATA +7 -9
  69. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/RECORD +71 -64
  70. langchain_core/v1/__init__.py +0 -1
  71. langchain_core/v1/chat_models.py +0 -1047
  72. langchain_core/v1/messages.py +0 -755
  73. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/WHEEL +0 -0
  74. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/entry_points.txt +0 -0
--- /dev/null
+++ b/langchain_core/messages/block_translators/openai.py
@@ -0,0 +1,586 @@
+"""Derivations of standard content blocks from OpenAI content."""
+
+from __future__ import annotations
+
+import json
+from collections.abc import Iterable
+from typing import TYPE_CHECKING, Any, Optional, Union, cast
+
+from langchain_core.language_models._utils import (
+    _is_openai_data_block,
+)
+from langchain_core.messages import content as types
+from langchain_core.messages.block_translators.langchain_v0 import (
+    _convert_openai_format_to_data_block,
+)
+
+if TYPE_CHECKING:
+    from langchain_core.messages import AIMessage, AIMessageChunk
+
+
+# v1 / Chat Completions
+def _convert_to_v1_from_chat_completions(
+    message: AIMessage,
+) -> list[types.ContentBlock]:
+    """Mutate a Chat Completions message to v1 format."""
+    content_blocks: list[types.ContentBlock] = []
+    if isinstance(message.content, str):
+        if message.content:
+            content_blocks = [{"type": "text", "text": message.content}]
+        else:
+            content_blocks = []
+
+        for tool_call in message.tool_calls:
+            content_blocks.append(
+                {
+                    "type": "tool_call",
+                    "name": tool_call["name"],
+                    "args": tool_call["args"],
+                    "id": tool_call.get("id"),
+                }
+            )
+
+    return content_blocks
+
+
+def _convert_to_v1_from_chat_completions_input(
+    blocks: list[types.ContentBlock],
+) -> list[types.ContentBlock]:
+    """Convert OpenAI Chat Completions format blocks to v1 format.
+
+    Processes non_standard blocks that might be OpenAI format and converts them
+    to proper ContentBlocks. If conversion fails, leaves them as non_standard.
+
+    Args:
+        blocks: List of content blocks to process.
+
+    Returns:
+        Updated list with OpenAI blocks converted to v1 format.
+    """
+    from langchain_core.messages import content as types
+
+    converted_blocks = []
+    unpacked_blocks: list[dict[str, Any]] = [
+        cast("dict[str, Any]", block)
+        if block.get("type") != "non_standard"
+        else block["value"]  # type: ignore[typeddict-item] # this is only non-standard blocks
+        for block in blocks
+    ]
+    for block in unpacked_blocks:
+        if block.get("type") in {
+            "image_url",
+            "input_audio",
+            "file",
+        } and _is_openai_data_block(block):
+            converted_block = _convert_openai_format_to_data_block(block)
+            # If conversion succeeded, use it; otherwise keep as non_standard
+            if (
+                isinstance(converted_block, dict)
+                and converted_block.get("type") in types.KNOWN_BLOCK_TYPES
+            ):
+                converted_blocks.append(cast("types.ContentBlock", converted_block))
+            else:
+                converted_blocks.append({"type": "non_standard", "value": block})
+        elif block.get("type") in types.KNOWN_BLOCK_TYPES:
+            converted_blocks.append(cast("types.ContentBlock", block))
+        else:
+            converted_blocks.append({"type": "non_standard", "value": block})
+
+    return converted_blocks
+
+
+def _convert_to_v1_from_chat_completions_chunk(
+    chunk: AIMessageChunk,
+) -> list[types.ContentBlock]:
+    """Mutate a Chat Completions chunk to v1 format."""
+    content_blocks: list[types.ContentBlock] = []
+    if isinstance(chunk.content, str):
+        if chunk.content:
+            content_blocks = [{"type": "text", "text": chunk.content}]
+        else:
+            content_blocks = []
+
+        if chunk.chunk_position == "last":
+            for tool_call in chunk.tool_calls:
+                content_blocks.append(
+                    {
+                        "type": "tool_call",
+                        "name": tool_call["name"],
+                        "args": tool_call["args"],
+                        "id": tool_call.get("id"),
+                    }
+                )
+
+        else:
+            for tool_call_chunk in chunk.tool_call_chunks:
+                tc: types.ToolCallChunk = {
+                    "type": "tool_call_chunk",
+                    "id": tool_call_chunk.get("id"),
+                    "name": tool_call_chunk.get("name"),
+                    "args": tool_call_chunk.get("args"),
+                }
+                if (idx := tool_call_chunk.get("index")) is not None:
+                    tc["index"] = idx
+                content_blocks.append(tc)
+
+    return content_blocks
+
+
+def _convert_from_v1_to_chat_completions(message: AIMessage) -> AIMessage:
+    """Convert a v1 message to the Chat Completions format."""
+    if isinstance(message.content, list):
+        new_content: list = []
+        for block in message.content:
+            if isinstance(block, dict):
+                block_type = block.get("type")
+                if block_type == "text":
+                    # Strip annotations
+                    new_content.append({"type": "text", "text": block["text"]})
+                elif block_type in ("reasoning", "tool_call"):
+                    pass
+                else:
+                    new_content.append(block)
+            else:
+                new_content.append(block)
+        return message.model_copy(update={"content": new_content})
+
+    return message
+
+
+# Responses
+_FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"
+
+
+def _convert_from_v03_ai_message(message: AIMessage) -> AIMessage:
+    """Convert v0 AIMessage into ``output_version="responses/v1"`` format."""
+    from langchain_core.messages import AIMessageChunk
+
+    # Only update ChatOpenAI v0.3 AIMessages
+    is_chatopenai_v03 = (
+        isinstance(message.content, list)
+        and all(isinstance(b, dict) for b in message.content)
+    ) and (
+        any(
+            item in message.additional_kwargs
+            for item in [
+                "reasoning",
+                "tool_outputs",
+                "refusal",
+                _FUNCTION_CALL_IDS_MAP_KEY,
+            ]
+        )
+        or (
+            isinstance(message.id, str)
+            and message.id.startswith("msg_")
+            and (response_id := message.response_metadata.get("id"))
+            and isinstance(response_id, str)
+            and response_id.startswith("resp_")
+        )
+    )
+    if not is_chatopenai_v03:
+        return message
+
+    content_order = [
+        "reasoning",
+        "code_interpreter_call",
+        "mcp_call",
+        "image_generation_call",
+        "text",
+        "refusal",
+        "function_call",
+        "computer_call",
+        "mcp_list_tools",
+        "mcp_approval_request",
+        # N. B. "web_search_call" and "file_search_call" were not passed back in
+        # in v0.3
+    ]
+
+    # Build a bucket for every known block type
+    buckets: dict[str, list] = {key: [] for key in content_order}
+    unknown_blocks = []
+
+    # Reasoning
+    if reasoning := message.additional_kwargs.get("reasoning"):
+        if isinstance(message, AIMessageChunk) and message.chunk_position != "last":
+            buckets["reasoning"].append({**reasoning, "type": "reasoning"})
+        else:
+            buckets["reasoning"].append(reasoning)
+
+    # Refusal
+    if refusal := message.additional_kwargs.get("refusal"):
+        buckets["refusal"].append({"type": "refusal", "refusal": refusal})
+
+    # Text
+    for block in message.content:
+        if isinstance(block, dict) and block.get("type") == "text":
+            block_copy = block.copy()
+            if isinstance(message.id, str) and message.id.startswith("msg_"):
+                block_copy["id"] = message.id
+            buckets["text"].append(block_copy)
+        else:
+            unknown_blocks.append(block)
+
+    # Function calls
+    function_call_ids = message.additional_kwargs.get(_FUNCTION_CALL_IDS_MAP_KEY)
+    if (
+        isinstance(message, AIMessageChunk)
+        and len(message.tool_call_chunks) == 1
+        and message.chunk_position != "last"
+    ):
+        # Isolated chunk
+        tool_call_chunk = message.tool_call_chunks[0]
+        function_call = {
+            "type": "function_call",
+            "name": tool_call_chunk.get("name"),
+            "arguments": tool_call_chunk.get("args"),
+            "call_id": tool_call_chunk.get("id"),
+        }
+        if function_call_ids is not None and (
+            _id := function_call_ids.get(tool_call_chunk.get("id"))
+        ):
+            function_call["id"] = _id
+        buckets["function_call"].append(function_call)
+    else:
+        for tool_call in message.tool_calls:
+            function_call = {
+                "type": "function_call",
+                "name": tool_call["name"],
+                "arguments": json.dumps(tool_call["args"], ensure_ascii=False),
+                "call_id": tool_call["id"],
+            }
+            if function_call_ids is not None and (
+                _id := function_call_ids.get(tool_call["id"])
+            ):
+                function_call["id"] = _id
+            buckets["function_call"].append(function_call)
+
+    # Tool outputs
+    tool_outputs = message.additional_kwargs.get("tool_outputs", [])
+    for block in tool_outputs:
+        if isinstance(block, dict) and (key := block.get("type")) and key in buckets:
+            buckets[key].append(block)
+        else:
+            unknown_blocks.append(block)
+
+    # Re-assemble the content list in the canonical order
+    new_content = []
+    for key in content_order:
+        new_content.extend(buckets[key])
+    new_content.extend(unknown_blocks)
+
+    new_additional_kwargs = dict(message.additional_kwargs)
+    new_additional_kwargs.pop("reasoning", None)
+    new_additional_kwargs.pop("refusal", None)
+    new_additional_kwargs.pop("tool_outputs", None)
+
+    if "id" in message.response_metadata:
+        new_id = message.response_metadata["id"]
+    else:
+        new_id = message.id
+
+    return message.model_copy(
+        update={
+            "content": new_content,
+            "additional_kwargs": new_additional_kwargs,
+            "id": new_id,
+        },
+        deep=False,
+    )
+
+
+# v1 / Responses
+def _convert_annotation_to_v1(annotation: dict[str, Any]) -> types.Annotation:
+    annotation_type = annotation.get("type")
+
+    if annotation_type == "url_citation":
+        known_fields = {
+            "type",
+            "url",
+            "title",
+            "cited_text",
+            "start_index",
+            "end_index",
+        }
+        url_citation = cast("types.Citation", {})
+        for field in ("end_index", "start_index", "title"):
+            if field in annotation:
+                url_citation[field] = annotation[field]
+        url_citation["type"] = "citation"
+        url_citation["url"] = annotation["url"]
+        for field, value in annotation.items():
+            if field not in known_fields:
+                if "extras" not in url_citation:
+                    url_citation["extras"] = {}
+                url_citation["extras"][field] = value
+        return url_citation
+
+    if annotation_type == "file_citation":
+        known_fields = {
+            "type",
+            "title",
+            "cited_text",
+            "start_index",
+            "end_index",
+            "filename",
+        }
+        document_citation: types.Citation = {"type": "citation"}
+        if "filename" in annotation:
+            document_citation["title"] = annotation["filename"]
+        for field, value in annotation.items():
+            if field not in known_fields:
+                if "extras" not in document_citation:
+                    document_citation["extras"] = {}
+                document_citation["extras"][field] = value
+
+        return document_citation
+
+    # TODO: standardise container_file_citation?
+    non_standard_annotation: types.NonStandardAnnotation = {
+        "type": "non_standard_annotation",
+        "value": annotation,
+    }
+    return non_standard_annotation
+
+
+def _explode_reasoning(block: dict[str, Any]) -> Iterable[types.ReasoningContentBlock]:
+    if "summary" not in block:
+        yield cast("types.ReasoningContentBlock", block)
+        return
+
+    known_fields = {"type", "reasoning", "id", "index"}
+    unknown_fields = [
+        field for field in block if field != "summary" and field not in known_fields
+    ]
+    if unknown_fields:
+        block["extras"] = {}
+        for field in unknown_fields:
+            block["extras"][field] = block.pop(field)
+
+    if not block["summary"]:
+        # [{'id': 'rs_...', 'summary': [], 'type': 'reasoning', 'index': 0}]
+        block = {k: v for k, v in block.items() if k != "summary"}
+        if "index" in block:
+            meaningful_idx = f"{block['index']}_0"
+            block["index"] = f"lc_rs_{meaningful_idx.encode().hex()}"
+        yield cast("types.ReasoningContentBlock", block)
+        return
+
+    # Common part for every exploded line, except 'summary'
+    common = {k: v for k, v in block.items() if k in known_fields}
+
+    # Optional keys that must appear only in the first exploded item
+    first_only = block.pop("extras", None)
+
+    for idx, part in enumerate(block["summary"]):
+        new_block = dict(common)
+        new_block["reasoning"] = part.get("text", "")
+        if idx == 0 and first_only:
+            new_block.update(first_only)
+        if "index" in new_block:
+            summary_index = part.get("index", 0)
+            meaningful_idx = f"{new_block['index']}_{summary_index}"
+            new_block["index"] = f"lc_rs_{meaningful_idx.encode().hex()}"
+
+        yield cast("types.ReasoningContentBlock", new_block)
+
+
+def _convert_to_v1_from_responses(message: AIMessage) -> list[types.ContentBlock]:
+    """Convert a Responses message to v1 format."""
+
+    def _iter_blocks() -> Iterable[types.ContentBlock]:
+        for raw_block in message.content:
+            if not isinstance(raw_block, dict):
+                continue
+            block = raw_block.copy()
+            block_type = block.get("type")
+
+            if block_type == "text":
+                if "text" not in block:
+                    block["text"] = ""
+                if "annotations" in block:
+                    block["annotations"] = [
+                        _convert_annotation_to_v1(a) for a in block["annotations"]
+                    ]
+                if "index" in block:
+                    block["index"] = f"lc_txt_{block['index']}"
+                yield cast("types.TextContentBlock", block)
+
+            elif block_type == "reasoning":
+                yield from _explode_reasoning(block)
+
+            elif block_type == "image_generation_call" and (
+                result := block.get("result")
+            ):
+                new_block = {"type": "image", "base64": result}
+                if output_format := block.get("output_format"):
+                    new_block["mime_type"] = f"image/{output_format}"
+                if "id" in block:
+                    new_block["id"] = block["id"]
+                if "index" in block:
+                    new_block["index"] = f"lc_img_{block['index']}"
+                for extra_key in (
+                    "status",
+                    "background",
+                    "output_format",
+                    "quality",
+                    "revised_prompt",
+                    "size",
+                ):
+                    if extra_key in block:
+                        if "extras" not in new_block:
+                            new_block["extras"] = {}
+                        new_block["extras"][extra_key] = block[extra_key]
+                yield cast("types.ImageContentBlock", new_block)
+
+            elif block_type == "function_call":
+                tool_call_block: Optional[
+                    Union[types.ToolCall, types.InvalidToolCall, types.ToolCallChunk]
+                ] = None
+                call_id = block.get("call_id", "")
+
+                from langchain_core.messages import AIMessageChunk
+
+                if (
+                    isinstance(message, AIMessageChunk)
+                    and len(message.tool_call_chunks) == 1
+                    and message.chunk_position != "last"
+                ):
+                    tool_call_block = message.tool_call_chunks[0].copy()  # type: ignore[assignment]
+                elif call_id:
+                    for tool_call in message.tool_calls or []:
+                        if tool_call.get("id") == call_id:
+                            tool_call_block = {
+                                "type": "tool_call",
+                                "name": tool_call["name"],
+                                "args": tool_call["args"],
+                                "id": tool_call.get("id"),
+                            }
+                            break
+                    else:
+                        for invalid_tool_call in message.invalid_tool_calls or []:
+                            if invalid_tool_call.get("id") == call_id:
+                                tool_call_block = invalid_tool_call.copy()
+                                break
+                else:
+                    pass
+                if tool_call_block:
+                    if "id" in block:
+                        if "extras" not in tool_call_block:
+                            tool_call_block["extras"] = {}
+                        tool_call_block["extras"]["item_id"] = block["id"]
+                    if "index" in block:
+                        tool_call_block["index"] = f"lc_tc_{block['index']}"
+                    yield tool_call_block
+
+            elif block_type == "web_search_call":
+                web_search_call = {"type": "web_search_call", "id": block["id"]}
+                if "index" in block:
+                    web_search_call["index"] = f"lc_wsc_{block['index']}"
+                if (
+                    "action" in block
+                    and isinstance(block["action"], dict)
+                    and block["action"].get("type") == "search"
+                    and "query" in block["action"]
+                ):
+                    web_search_call["query"] = block["action"]["query"]
+                for key in block:
+                    if key not in ("type", "id", "index"):
+                        web_search_call[key] = block[key]
+
+                yield cast("types.WebSearchCall", web_search_call)
+
+                # If .content already has web_search_result, don't add
+                if not any(
+                    isinstance(other_block, dict)
+                    and other_block.get("type") == "web_search_result"
+                    and other_block.get("id") == block["id"]
+                    for other_block in message.content
+                ):
+                    web_search_result = {"type": "web_search_result", "id": block["id"]}
+                    if "index" in block and isinstance(block["index"], int):
+                        web_search_result["index"] = f"lc_wsr_{block['index'] + 1}"
+                    yield cast("types.WebSearchResult", web_search_result)
+
+            elif block_type == "code_interpreter_call":
+                code_interpreter_call = {
+                    "type": "code_interpreter_call",
+                    "id": block["id"],
+                }
+                if "code" in block:
+                    code_interpreter_call["code"] = block["code"]
+                if "index" in block:
+                    code_interpreter_call["index"] = f"lc_cic_{block['index']}"
+                known_fields = {"type", "id", "language", "code", "extras", "index"}
+                for key in block:
+                    if key not in known_fields:
+                        if "extras" not in code_interpreter_call:
+                            code_interpreter_call["extras"] = {}
+                        code_interpreter_call["extras"][key] = block[key]
+
+                code_interpreter_result = {
+                    "type": "code_interpreter_result",
+                    "id": block["id"],
+                }
+                if "outputs" in block:
+                    code_interpreter_result["outputs"] = block["outputs"]
+                    for output in block["outputs"]:
+                        if (
+                            isinstance(output, dict)
+                            and (output_type := output.get("type"))
+                            and output_type == "logs"
+                        ):
+                            if "output" not in code_interpreter_result:
+                                code_interpreter_result["output"] = []
+                            code_interpreter_result["output"].append(
+                                {
+                                    "type": "code_interpreter_output",
+                                    "stdout": output.get("logs", ""),
+                                }
+                            )
+
+                if "status" in block:
+                    code_interpreter_result["status"] = block["status"]
+                if "index" in block and isinstance(block["index"], int):
+                    code_interpreter_result["index"] = f"lc_cir_{block['index'] + 1}"
+
+                yield cast("types.CodeInterpreterCall", code_interpreter_call)
+                yield cast("types.CodeInterpreterResult", code_interpreter_result)
+
+            elif block_type in types.KNOWN_BLOCK_TYPES:
+                yield cast("types.ContentBlock", block)
+            else:
+                new_block = {"type": "non_standard", "value": block}
+                if "index" in new_block["value"]:
+                    new_block["index"] = f"lc_ns_{new_block['value'].pop('index')}"
+                yield cast("types.NonStandardContentBlock", new_block)
+
+    return list(_iter_blocks())
+
+
+def translate_content(message: AIMessage) -> list[types.ContentBlock]:
+    """Derive standard content blocks from a message with OpenAI content."""
+    if isinstance(message.content, str):
+        return _convert_to_v1_from_chat_completions(message)
+    message = _convert_from_v03_ai_message(message)
+    return _convert_to_v1_from_responses(message)
+
+
+def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
+    """Derive standard content blocks from a message chunk with OpenAI content."""
+    if isinstance(message.content, str):
+        return _convert_to_v1_from_chat_completions_chunk(message)
+    message = _convert_from_v03_ai_message(message)  # type: ignore[assignment]
+    return _convert_to_v1_from_responses(message)
+
+
+def _register_openai_translator() -> None:
+    """Register the OpenAI translator with the central registry.
+
+    Run automatically when the module is imported.
+    """
+    from langchain_core.messages.block_translators import register_translator
+
+    register_translator("openai", translate_content, translate_content_chunk)
+
+
+_register_openai_translator()
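
For orientation, here is a minimal usage sketch (not part of the package diff) of the public entry point added by this file. It assumes langchain-core 1.0.0a1 is installed; the message content and tool-call values below are made-up examples:

    from langchain_core.messages import AIMessage
    from langchain_core.messages.block_translators.openai import translate_content

    # An AIMessage carrying Chat Completions-style string content plus a tool call.
    msg = AIMessage(
        content="It is sunny in Paris.",
        tool_calls=[{"name": "get_weather", "args": {"city": "Paris"}, "id": "call_1"}],
    )

    # translate_content derives typed v1 content blocks: a "text" block for the
    # string content, followed by one "tool_call" block per entry in msg.tool_calls.
    blocks = translate_content(msg)
    # [{'type': 'text', 'text': 'It is sunny in Paris.'},
    #  {'type': 'tool_call', 'name': 'get_weather', 'args': {'city': 'Paris'}, 'id': 'call_1'}]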