langchain-core 0.3.75__py3-none-any.whl → 1.0.0a1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

Files changed (32):
  1. langchain_core/language_models/_utils.py +233 -68
  2. langchain_core/language_models/base.py +2 -1
  3. langchain_core/language_models/chat_models.py +196 -33
  4. langchain_core/language_models/fake_chat_models.py +22 -6
  5. langchain_core/messages/__init__.py +74 -4
  6. langchain_core/messages/ai.py +191 -26
  7. langchain_core/messages/base.py +164 -25
  8. langchain_core/messages/block_translators/__init__.py +89 -0
  9. langchain_core/messages/block_translators/anthropic.py +451 -0
  10. langchain_core/messages/block_translators/bedrock.py +45 -0
  11. langchain_core/messages/block_translators/bedrock_converse.py +47 -0
  12. langchain_core/messages/block_translators/google_genai.py +45 -0
  13. langchain_core/messages/block_translators/google_vertexai.py +47 -0
  14. langchain_core/messages/block_translators/groq.py +45 -0
  15. langchain_core/messages/block_translators/langchain_v0.py +297 -0
  16. langchain_core/messages/block_translators/ollama.py +45 -0
  17. langchain_core/messages/block_translators/openai.py +586 -0
  18. langchain_core/messages/content.py +1568 -0
  19. langchain_core/messages/human.py +29 -9
  20. langchain_core/messages/system.py +29 -9
  21. langchain_core/messages/tool.py +30 -27
  22. langchain_core/messages/utils.py +12 -5
  23. langchain_core/prompt_values.py +1 -1
  24. langchain_core/runnables/base.py +1 -1
  25. langchain_core/utils/_merge.py +44 -6
  26. langchain_core/utils/utils.py +29 -0
  27. langchain_core/version.py +1 -1
  28. {langchain_core-0.3.75.dist-info → langchain_core-1.0.0a1.dist-info}/METADATA +2 -2
  29. {langchain_core-0.3.75.dist-info → langchain_core-1.0.0a1.dist-info}/RECORD +31 -21
  30. langchain_core/messages/content_blocks.py +0 -155
  31. {langchain_core-0.3.75.dist-info → langchain_core-1.0.0a1.dist-info}/WHEEL +0 -0
  32. {langchain_core-0.3.75.dist-info → langchain_core-1.0.0a1.dist-info}/entry_points.txt +0 -0
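
The centerpiece of this release is the standard content-block system (the new `langchain_core/messages/content.py` plus the `block_translators` package): `AIMessage.content_blocks` lazily parses provider-native `content` into a standardized block list. A minimal sketch, assuming the alpha behaves as the docstrings in the hunks below describe:

```python
from langchain_core.messages import AIMessage

msg = AIMessage(content="Hello!")
# Lazily view provider-native content as standardized blocks; .content
# itself is untouched unless the model sets output_version="v1".
print(msg.content_blocks)  # e.g. [{"type": "text", "text": "Hello!"}]
```

The hunks reproduced below cover chat_models.py, fake_chat_models.py, and messages/__init__.py.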
langchain_core/language_models/chat_models.py

@@ -27,7 +27,10 @@ from langchain_core.callbacks import (
     Callbacks,
 )
 from langchain_core.globals import get_llm_cache
-from langchain_core.language_models._utils import _normalize_messages
+from langchain_core.language_models._utils import (
+    _normalize_messages,
+    _update_message_content_to_blocks,
+)
 from langchain_core.language_models.base import (
     BaseLanguageModel,
     LangSmithParams,
@@ -36,16 +39,16 @@ from langchain_core.language_models.base import (
 from langchain_core.load import dumpd, dumps
 from langchain_core.messages import (
     AIMessage,
+    AIMessageChunk,
     AnyMessage,
     BaseMessage,
-    BaseMessageChunk,
     HumanMessage,
     convert_to_messages,
+    convert_to_openai_data_block,
     convert_to_openai_image_block,
     is_data_content_block,
     message_chunk_to_message,
 )
-from langchain_core.messages.ai import _LC_ID_PREFIX
 from langchain_core.outputs import (
     ChatGeneration,
     ChatGenerationChunk,
@@ -65,6 +68,7 @@ from langchain_core.utils.function_calling import (
     convert_to_openai_tool,
 )
 from langchain_core.utils.pydantic import TypeBaseModel, is_basemodel_subclass
+from langchain_core.utils.utils import LC_ID_PREFIX, from_env

 if TYPE_CHECKING:
     import uuid
@@ -125,7 +129,7 @@ def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]:
                 if (
                     block.get("type") == "image"
                     and is_data_content_block(block)
-                    and block.get("source_type") != "id"
+                    and not ("file_id" in block or block.get("source_type") == "id")
                 ):
                     if message_to_trace is message:
                         # Shallow copy
@@ -135,6 +139,19 @@ def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]:
                     message_to_trace.content[idx] = (  # type: ignore[index]  # mypy confused by .model_copy
                         convert_to_openai_image_block(block)
                     )
+                elif (
+                    block.get("type") == "file"
+                    and is_data_content_block(block)
+                    and "base64" in block
+                ):
+                    if message_to_trace is message:
+                        # Shallow copy
+                        message_to_trace = message.model_copy()
+                        message_to_trace.content = list(message_to_trace.content)
+
+                    message_to_trace.content[idx] = convert_to_openai_data_block(  # type: ignore[index]
+                        block
+                    )
                 elif len(block) == 1 and "type" not in block:
                     # Tracing assumes all content blocks have a "type" key. Here
                     # we add this key if it is missing, and there's an obvious
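
The new `elif` arm mirrors the image case above: base64 `file` blocks are converted to OpenAI's data-block shape before messages are handed to tracing. A hedged sketch of the conversion it delegates to (the exact output shape belongs to `convert_to_openai_data_block` and is not pinned down by this hunk):

```python
from langchain_core.messages import convert_to_openai_data_block, is_data_content_block

block = {"type": "file", "base64": "JVBERi0xLjQK", "mime_type": "application/pdf"}
assert is_data_content_block(block)  # base64 file blocks qualify, per this hunk
print(convert_to_openai_data_block(block))  # OpenAI-style "file" content part
```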
@@ -216,7 +233,7 @@ def _format_ls_structured_output(ls_structured_output_format: Optional[dict]) ->
     return ls_structured_output_format_dict


-class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
+class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     """Base class for chat models.

     Key imperative methods:
@@ -325,6 +342,28 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):

     """

+    output_version: Optional[str] = Field(
+        default_factory=from_env("LC_OUTPUT_VERSION", default=None)
+    )
+    """Version of ``AIMessage`` output format to store in message content.
+
+    ``AIMessage.content_blocks`` will lazily parse the contents of ``content`` into a
+    standard format. This flag can be used to additionally store the standard format
+    in message content, e.g., for serialization purposes.
+
+    Supported values:
+
+    - ``"v0"``: provider-specific format in content (can lazily-parse with
+      ``.content_blocks``)
+    - ``"v1"``: standardized format in content (consistent with ``.content_blocks``)
+
+    Partner packages (e.g., ``langchain-openai``) can also use this field to roll out
+    new content formats in a backward-compatible way.
+
+    .. versionadded:: 1.0
+
+    """
+
     @model_validator(mode="before")
     @classmethod
     def raise_deprecation(cls, values: dict) -> Any:
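
`output_version` is the new opt-in knob: with `"v1"`, the standardized block list is written into `.content` itself rather than only computed lazily. A minimal sketch using the in-repo fake model, assuming the flag flows through `_generate_with_cache` as the later hunks show:

```python
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
from langchain_core.messages import AIMessage

model = GenericFakeChatModel(
    messages=iter([AIMessage(content="hello world")]),
    output_version="v1",  # equivalently: export LC_OUTPUT_VERSION=v1
)
# Under "v1", .content holds the standardized blocks, matching what
# .content_blocks would compute lazily under "v0".
print(model.invoke("hi").content)
```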
@@ -386,21 +425,24 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         *,
         stop: Optional[list[str]] = None,
         **kwargs: Any,
-    ) -> BaseMessage:
+    ) -> AIMessage:
         config = ensure_config(config)
         return cast(
-            "ChatGeneration",
-            self.generate_prompt(
-                [self._convert_input(input)],
-                stop=stop,
-                callbacks=config.get("callbacks"),
-                tags=config.get("tags"),
-                metadata=config.get("metadata"),
-                run_name=config.get("run_name"),
-                run_id=config.pop("run_id", None),
-                **kwargs,
-            ).generations[0][0],
-        ).message
+            "AIMessage",
+            cast(
+                "ChatGeneration",
+                self.generate_prompt(
+                    [self._convert_input(input)],
+                    stop=stop,
+                    callbacks=config.get("callbacks"),
+                    tags=config.get("tags"),
+                    metadata=config.get("metadata"),
+                    run_name=config.get("run_name"),
+                    run_id=config.pop("run_id", None),
+                    **kwargs,
+                ).generations[0][0],
+            ).message,
+        )

     @override
     async def ainvoke(
@@ -410,7 +452,7 @@
         *,
         stop: Optional[list[str]] = None,
         **kwargs: Any,
-    ) -> BaseMessage:
+    ) -> AIMessage:
         config = ensure_config(config)
         llm_result = await self.agenerate_prompt(
             [self._convert_input(input)],
@@ -422,7 +464,9 @@
             run_id=config.pop("run_id", None),
             **kwargs,
         )
-        return cast("ChatGeneration", llm_result.generations[0][0]).message
+        return cast(
+            "AIMessage", cast("ChatGeneration", llm_result.generations[0][0]).message
+        )

     def _should_stream(
         self,
@@ -467,11 +511,11 @@
         *,
         stop: Optional[list[str]] = None,
         **kwargs: Any,
-    ) -> Iterator[BaseMessageChunk]:
+    ) -> Iterator[AIMessageChunk]:
         if not self._should_stream(async_api=False, **{**kwargs, "stream": True}):
             # model doesn't implement streaming, so use default implementation
             yield cast(
-                "BaseMessageChunk",
+                "AIMessageChunk",
                 self.invoke(input, config=config, stop=stop, **kwargs),
             )
         else:
@@ -516,16 +560,41 @@

        try:
            input_messages = _normalize_messages(messages)
-            run_id = "-".join((_LC_ID_PREFIX, str(run_manager.run_id)))
+            run_id = "-".join((LC_ID_PREFIX, str(run_manager.run_id)))
+            yielded = False
            for chunk in self._stream(input_messages, stop=stop, **kwargs):
                if chunk.message.id is None:
                    chunk.message.id = run_id
                chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk)
+                if self.output_version == "v1":
+                    # Overwrite .content with .content_blocks
+                    chunk.message = _update_message_content_to_blocks(
+                        chunk.message, "v1"
+                    )
                run_manager.on_llm_new_token(
                    cast("str", chunk.message.content), chunk=chunk
                )
                chunks.append(chunk)
-                yield chunk.message
+                yield cast("AIMessageChunk", chunk.message)
+                yielded = True
+
+            # Yield a final empty chunk with chunk_position="last" if not yet
+            # yielded
+            if (
+                yielded
+                and isinstance(chunk.message, AIMessageChunk)
+                and not chunk.message.chunk_position
+            ):
+                empty_content: Union[str, list] = (
+                    "" if isinstance(chunk.message.content, str) else []
+                )
+                msg_chunk = AIMessageChunk(
+                    content=empty_content, chunk_position="last", id=run_id
+                )
+                run_manager.on_llm_new_token(
+                    "", chunk=ChatGenerationChunk(message=msg_chunk)
+                )
+                yield msg_chunk
        except BaseException as e:
            generations_with_error_metadata = _generate_response_from_error(e)
            chat_generation_chunk = merge_chat_generation_chunks(chunks)
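
Streams now end with an explicit terminal marker: if the last provider chunk was not already flagged, an extra empty `AIMessageChunk` with `chunk_position="last"` is emitted. A consumer-side sketch, assuming `chunk_position` is `None` on ordinary chunks:

```python
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
from langchain_core.messages import AIMessage

model = GenericFakeChatModel(messages=iter([AIMessage(content="hello world")]))
for chunk in model.stream("hi"):
    if chunk.chunk_position == "last":  # terminal marker added by this hunk
        print("stream complete")
```

The async variant below (`_astream`) applies the same logic.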
@@ -558,11 +627,11 @@
         *,
         stop: Optional[list[str]] = None,
         **kwargs: Any,
-    ) -> AsyncIterator[BaseMessageChunk]:
+    ) -> AsyncIterator[AIMessageChunk]:
         if not self._should_stream(async_api=True, **{**kwargs, "stream": True}):
             # No async or sync stream is implemented, so fall back to ainvoke
             yield cast(
-                "BaseMessageChunk",
+                "AIMessageChunk",
                 await self.ainvoke(input, config=config, stop=stop, **kwargs),
             )
             return
@@ -609,7 +678,8 @@

        try:
            input_messages = _normalize_messages(messages)
-            run_id = "-".join((_LC_ID_PREFIX, str(run_manager.run_id)))
+            run_id = "-".join((LC_ID_PREFIX, str(run_manager.run_id)))
+            yielded = False
            async for chunk in self._astream(
                input_messages,
                stop=stop,
@@ -618,11 +688,34 @@
                if chunk.message.id is None:
                    chunk.message.id = run_id
                chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk)
+                if self.output_version == "v1":
+                    # Overwrite .content with .content_blocks
+                    chunk.message = _update_message_content_to_blocks(
+                        chunk.message, "v1"
+                    )
                await run_manager.on_llm_new_token(
                    cast("str", chunk.message.content), chunk=chunk
                )
                chunks.append(chunk)
-                yield chunk.message
+                yield cast("AIMessageChunk", chunk.message)
+                yielded = True
+
+            # Yield a final empty chunk with chunk_position="last" if not yet yielded
+            if (
+                yielded
+                and isinstance(chunk.message, AIMessageChunk)
+                and not chunk.message.chunk_position
+            ):
+                empty_content: Union[str, list] = (
+                    "" if isinstance(chunk.message.content, str) else []
+                )
+                msg_chunk = AIMessageChunk(
+                    content=empty_content, chunk_position="last", id=run_id
+                )
+                await run_manager.on_llm_new_token(
+                    "", chunk=ChatGenerationChunk(message=msg_chunk)
+                )
+                yield msg_chunk
        except BaseException as e:
            generations_with_error_metadata = _generate_response_from_error(e)
            chat_generation_chunk = merge_chat_generation_chunks(chunks)
@@ -1071,15 +1164,43 @@
            **kwargs,
        ):
            chunks: list[ChatGenerationChunk] = []
+            run_id: Optional[str] = (
+                f"{LC_ID_PREFIX}-{run_manager.run_id}" if run_manager else None
+            )
+            yielded = False
            for chunk in self._stream(messages, stop=stop, **kwargs):
                chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk)
+                if self.output_version == "v1":
+                    # Overwrite .content with .content_blocks
+                    chunk.message = _update_message_content_to_blocks(
+                        chunk.message, "v1"
+                    )
                if run_manager:
                    if chunk.message.id is None:
-                        chunk.message.id = f"{_LC_ID_PREFIX}-{run_manager.run_id}"
+                        chunk.message.id = run_id
                    run_manager.on_llm_new_token(
                        cast("str", chunk.message.content), chunk=chunk
                    )
                chunks.append(chunk)
+                yielded = True
+
+            # Yield a final empty chunk with chunk_position="last" if not yet yielded
+            if (
+                yielded
+                and isinstance(chunk.message, AIMessageChunk)
+                and not chunk.message.chunk_position
+            ):
+                empty_content: Union[str, list] = (
+                    "" if isinstance(chunk.message.content, str) else []
+                )
+                chunk = ChatGenerationChunk(
+                    message=AIMessageChunk(
+                        content=empty_content, chunk_position="last", id=run_id
+                    )
+                )
+                if run_manager:
+                    run_manager.on_llm_new_token("", chunk=chunk)
+                chunks.append(chunk)
            result = generate_from_stream(iter(chunks))
        elif inspect.signature(self._generate).parameters.get("run_manager"):
            result = self._generate(
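
When `invoke` is served by the streaming path, the collected chunks (terminal chunk included) are folded back into a single generation by `generate_from_stream`. The folding is essentially `AIMessageChunk` addition; a tiny illustration:

```python
from langchain_core.messages import AIMessageChunk

# Chunk merging as generate_from_stream relies on it (simplified sketch).
merged = AIMessageChunk(content="Hello, ") + AIMessageChunk(content="world")
print(merged.content)  # "Hello, world"
```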
@@ -1088,10 +1209,17 @@
        else:
            result = self._generate(messages, stop=stop, **kwargs)

+        if self.output_version == "v1":
+            # Overwrite .content with .content_blocks
+            for generation in result.generations:
+                generation.message = _update_message_content_to_blocks(
+                    generation.message, "v1"
+                )
+
        # Add response metadata to each generation
        for idx, generation in enumerate(result.generations):
            if run_manager and generation.message.id is None:
-                generation.message.id = f"{_LC_ID_PREFIX}-{run_manager.run_id}-{idx}"
+                generation.message.id = f"{LC_ID_PREFIX}-{run_manager.run_id}-{idx}"
            generation.message.response_metadata = _gen_info_and_msg_metadata(
                generation
            )
@@ -1144,15 +1272,43 @@
            **kwargs,
        ):
            chunks: list[ChatGenerationChunk] = []
+            run_id: Optional[str] = (
+                f"{LC_ID_PREFIX}-{run_manager.run_id}" if run_manager else None
+            )
+            yielded = False
            async for chunk in self._astream(messages, stop=stop, **kwargs):
                chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk)
+                if self.output_version == "v1":
+                    # Overwrite .content with .content_blocks
+                    chunk.message = _update_message_content_to_blocks(
+                        chunk.message, "v1"
+                    )
                if run_manager:
                    if chunk.message.id is None:
-                        chunk.message.id = f"{_LC_ID_PREFIX}-{run_manager.run_id}"
+                        chunk.message.id = run_id
                    await run_manager.on_llm_new_token(
                        cast("str", chunk.message.content), chunk=chunk
                    )
                chunks.append(chunk)
+                yielded = True
+
+            # Yield a final empty chunk with chunk_position="last" if not yet yielded
+            if (
+                yielded
+                and isinstance(chunk.message, AIMessageChunk)
+                and not chunk.message.chunk_position
+            ):
+                empty_content: Union[str, list] = (
+                    "" if isinstance(chunk.message.content, str) else []
+                )
+                chunk = ChatGenerationChunk(
+                    message=AIMessageChunk(
+                        content=empty_content, chunk_position="last", id=run_id
+                    )
+                )
+                if run_manager:
+                    await run_manager.on_llm_new_token("", chunk=chunk)
+                chunks.append(chunk)
            result = generate_from_stream(iter(chunks))
        elif inspect.signature(self._agenerate).parameters.get("run_manager"):
            result = await self._agenerate(
@@ -1161,10 +1317,17 @@
        else:
            result = await self._agenerate(messages, stop=stop, **kwargs)

+        if self.output_version == "v1":
+            # Overwrite .content with .content_blocks
+            for generation in result.generations:
+                generation.message = _update_message_content_to_blocks(
+                    generation.message, "v1"
+                )
+
        # Add response metadata to each generation
        for idx, generation in enumerate(result.generations):
            if run_manager and generation.message.id is None:
-                generation.message.id = f"{_LC_ID_PREFIX}-{run_manager.run_id}-{idx}"
+                generation.message.id = f"{LC_ID_PREFIX}-{run_manager.run_id}-{idx}"
            generation.message.response_metadata = _gen_info_and_msg_metadata(
                generation
            )
@@ -1389,7 +1552,7 @@
         *,
         tool_choice: Optional[Union[str]] = None,
         **kwargs: Any,
-    ) -> Runnable[LanguageModelInput, BaseMessage]:
+    ) -> Runnable[LanguageModelInput, AIMessage]:
         """Bind tools to the model.

         Args:
langchain_core/language_models/fake_chat_models.py

@@ -4,7 +4,7 @@ import asyncio
 import re
 import time
 from collections.abc import AsyncIterator, Iterator
-from typing import Any, Optional, Union, cast
+from typing import Any, Literal, Optional, Union, cast

 from typing_extensions import override

@@ -112,7 +112,12 @@ class FakeListChatModel(SimpleChatModel):
             ):
                 raise FakeListChatModelError

-            yield ChatGenerationChunk(message=AIMessageChunk(content=c))
+            chunk_position: Optional[Literal["last"]] = (
+                "last" if i_c == len(response) - 1 else None
+            )
+            yield ChatGenerationChunk(
+                message=AIMessageChunk(content=c, chunk_position=chunk_position)
+            )

     @override
     async def _astream(
@@ -135,7 +140,12 @@
                 and i_c == self.error_on_chunk_number
             ):
                 raise FakeListChatModelError
-            yield ChatGenerationChunk(message=AIMessageChunk(content=c))
+            chunk_position: Optional[Literal["last"]] = (
+                "last" if i_c == len(response) - 1 else None
+            )
+            yield ChatGenerationChunk(
+                message=AIMessageChunk(content=c, chunk_position=chunk_position)
+            )

     @property
     @override
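
Both fake-model stream paths now tag the final chunk, keeping the test doubles consistent with the real streaming contract. A quick check, assuming `FakeListChatModel` still streams one character per chunk:

```python
from langchain_core.language_models.fake_chat_models import FakeListChatModel

model = FakeListChatModel(responses=["hello"])
chunks = list(model.stream("hi"))
assert len(chunks) == 5  # one chunk per character of "hello"
assert chunks[-1].chunk_position == "last"  # set by the hunks above
```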
@@ -151,7 +161,7 @@
         *,
         return_exceptions: bool = False,
         **kwargs: Any,
-    ) -> list[BaseMessage]:
+    ) -> list[AIMessage]:
         if isinstance(config, list):
             return [self.invoke(m, c, **kwargs) for m, c in zip(inputs, config)]
         return [self.invoke(m, config, **kwargs) for m in inputs]
@@ -164,7 +174,7 @@
         *,
         return_exceptions: bool = False,
         **kwargs: Any,
-    ) -> list[BaseMessage]:
+    ) -> list[AIMessage]:
         if isinstance(config, list):
             # do Not use an async iterator here because need explicit ordering
             return [await self.ainvoke(m, c, **kwargs) for m, c in zip(inputs, config)]
@@ -283,10 +293,16 @@ class GenericFakeChatModel(BaseChatModel):

         content_chunks = cast("list[str]", re.split(r"(\s)", content))

-        for token in content_chunks:
+        for idx, token in enumerate(content_chunks):
             chunk = ChatGenerationChunk(
                 message=AIMessageChunk(content=token, id=message.id)
             )
+            if (
+                idx == len(content_chunks) - 1
+                and isinstance(chunk.message, AIMessageChunk)
+                and not message.additional_kwargs
+            ):
+                chunk.message.chunk_position = "last"
             if run_manager:
                 run_manager.on_llm_new_token(token, chunk=chunk)
             yield chunk
langchain_core/messages/__init__.py

@@ -18,6 +18,7 @@
 from typing import TYPE_CHECKING

 from langchain_core._import_utils import import_attr
+from langchain_core.utils.utils import LC_AUTO_PREFIX, LC_ID_PREFIX, ensure_id

 if TYPE_CHECKING:
     from langchain_core.messages.ai import (
@@ -32,10 +33,32 @@ if TYPE_CHECKING:
         messages_to_dict,
     )
     from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
-    from langchain_core.messages.content_blocks import (
+    from langchain_core.messages.content import (
+        Annotation,
+        AudioContentBlock,
+        Citation,
+        CodeInterpreterCall,
+        CodeInterpreterOutput,
+        CodeInterpreterResult,
+        ContentBlock,
+        DataContentBlock,
+        FileContentBlock,
+        ImageContentBlock,
+        NonStandardAnnotation,
+        NonStandardContentBlock,
+        PlainTextContentBlock,
+        ReasoningContentBlock,
+        TextContentBlock,
+        VideoContentBlock,
+        WebSearchCall,
+        WebSearchResult,
         convert_to_openai_data_block,
         convert_to_openai_image_block,
         is_data_content_block,
+        is_reasoning_block,
+        is_text_block,
+        is_tool_call_block,
+        is_tool_call_chunk,
     )
     from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk
     from langchain_core.messages.human import HumanMessage, HumanMessageChunk
@@ -63,34 +86,59 @@
     )

 __all__ = (
+    "LC_AUTO_PREFIX",
+    "LC_ID_PREFIX",
     "AIMessage",
     "AIMessageChunk",
+    "Annotation",
     "AnyMessage",
+    "AudioContentBlock",
     "BaseMessage",
     "BaseMessageChunk",
     "ChatMessage",
     "ChatMessageChunk",
+    "Citation",
+    "CodeInterpreterCall",
+    "CodeInterpreterOutput",
+    "CodeInterpreterResult",
+    "ContentBlock",
+    "DataContentBlock",
+    "FileContentBlock",
     "FunctionMessage",
     "FunctionMessageChunk",
     "HumanMessage",
     "HumanMessageChunk",
+    "ImageContentBlock",
     "InvalidToolCall",
     "MessageLikeRepresentation",
+    "NonStandardAnnotation",
+    "NonStandardContentBlock",
+    "PlainTextContentBlock",
+    "ReasoningContentBlock",
     "RemoveMessage",
     "SystemMessage",
     "SystemMessageChunk",
+    "TextContentBlock",
     "ToolCall",
     "ToolCallChunk",
     "ToolMessage",
     "ToolMessageChunk",
+    "VideoContentBlock",
+    "WebSearchCall",
+    "WebSearchResult",
     "_message_from_dict",
     "convert_to_messages",
     "convert_to_openai_data_block",
     "convert_to_openai_image_block",
     "convert_to_openai_messages",
+    "ensure_id",
     "filter_messages",
     "get_buffer_string",
     "is_data_content_block",
+    "is_reasoning_block",
+    "is_text_block",
+    "is_tool_call_block",
+    "is_tool_call_chunk",
     "merge_content",
     "merge_message_runs",
     "message_chunk_to_message",
@@ -103,35 +151,57 @@ __all__ = (
 _dynamic_imports = {
     "AIMessage": "ai",
     "AIMessageChunk": "ai",
+    "Annotation": "content",
+    "AudioContentBlock": "content",
     "BaseMessage": "base",
     "BaseMessageChunk": "base",
     "merge_content": "base",
     "message_to_dict": "base",
     "messages_to_dict": "base",
+    "Citation": "content",
+    "ContentBlock": "content",
     "ChatMessage": "chat",
     "ChatMessageChunk": "chat",
+    "CodeInterpreterCall": "content",
+    "CodeInterpreterOutput": "content",
+    "CodeInterpreterResult": "content",
+    "DataContentBlock": "content",
+    "FileContentBlock": "content",
     "FunctionMessage": "function",
     "FunctionMessageChunk": "function",
     "HumanMessage": "human",
     "HumanMessageChunk": "human",
+    "NonStandardAnnotation": "content",
+    "NonStandardContentBlock": "content",
+    "PlainTextContentBlock": "content",
+    "ReasoningContentBlock": "content",
     "RemoveMessage": "modifier",
     "SystemMessage": "system",
     "SystemMessageChunk": "system",
+    "WebSearchCall": "content",
+    "WebSearchResult": "content",
+    "ImageContentBlock": "content",
     "InvalidToolCall": "tool",
+    "TextContentBlock": "content",
     "ToolCall": "tool",
     "ToolCallChunk": "tool",
     "ToolMessage": "tool",
     "ToolMessageChunk": "tool",
+    "VideoContentBlock": "content",
     "AnyMessage": "utils",
     "MessageLikeRepresentation": "utils",
     "_message_from_dict": "utils",
     "convert_to_messages": "utils",
-    "convert_to_openai_data_block": "content_blocks",
-    "convert_to_openai_image_block": "content_blocks",
+    "convert_to_openai_data_block": "content",
+    "convert_to_openai_image_block": "content",
     "convert_to_openai_messages": "utils",
     "filter_messages": "utils",
     "get_buffer_string": "utils",
-    "is_data_content_block": "content_blocks",
+    "is_data_content_block": "content",
+    "is_reasoning_block": "content",
+    "is_text_block": "content",
+    "is_tool_call_block": "content",
+    "is_tool_call_chunk": "content",
     "merge_message_runs": "utils",
     "message_chunk_to_message": "utils",
     "messages_from_dict": "utils",