langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic. Click here for more details.

Files changed (74):
  1. langchain_core/_api/beta_decorator.py +2 -2
  2. langchain_core/_api/deprecation.py +1 -1
  3. langchain_core/beta/runnables/context.py +1 -1
  4. langchain_core/callbacks/base.py +14 -23
  5. langchain_core/callbacks/file.py +13 -2
  6. langchain_core/callbacks/manager.py +74 -157
  7. langchain_core/callbacks/streaming_stdout.py +3 -4
  8. langchain_core/callbacks/usage.py +2 -12
  9. langchain_core/chat_history.py +6 -6
  10. langchain_core/documents/base.py +1 -1
  11. langchain_core/documents/compressor.py +9 -6
  12. langchain_core/indexing/base.py +2 -2
  13. langchain_core/language_models/_utils.py +230 -101
  14. langchain_core/language_models/base.py +35 -23
  15. langchain_core/language_models/chat_models.py +245 -53
  16. langchain_core/language_models/fake_chat_models.py +28 -81
  17. langchain_core/load/dump.py +3 -4
  18. langchain_core/messages/__init__.py +38 -22
  19. langchain_core/messages/ai.py +188 -30
  20. langchain_core/messages/base.py +164 -25
  21. langchain_core/messages/block_translators/__init__.py +89 -0
  22. langchain_core/messages/block_translators/anthropic.py +451 -0
  23. langchain_core/messages/block_translators/bedrock.py +45 -0
  24. langchain_core/messages/block_translators/bedrock_converse.py +47 -0
  25. langchain_core/messages/block_translators/google_genai.py +45 -0
  26. langchain_core/messages/block_translators/google_vertexai.py +47 -0
  27. langchain_core/messages/block_translators/groq.py +45 -0
  28. langchain_core/messages/block_translators/langchain_v0.py +297 -0
  29. langchain_core/messages/block_translators/ollama.py +45 -0
  30. langchain_core/messages/block_translators/openai.py +586 -0
  31. langchain_core/messages/{content_blocks.py → content.py} +346 -213
  32. langchain_core/messages/human.py +29 -9
  33. langchain_core/messages/system.py +29 -9
  34. langchain_core/messages/tool.py +94 -13
  35. langchain_core/messages/utils.py +32 -234
  36. langchain_core/output_parsers/base.py +14 -50
  37. langchain_core/output_parsers/json.py +2 -5
  38. langchain_core/output_parsers/list.py +2 -7
  39. langchain_core/output_parsers/openai_functions.py +5 -28
  40. langchain_core/output_parsers/openai_tools.py +49 -90
  41. langchain_core/output_parsers/pydantic.py +2 -3
  42. langchain_core/output_parsers/transform.py +12 -53
  43. langchain_core/output_parsers/xml.py +9 -17
  44. langchain_core/prompt_values.py +8 -112
  45. langchain_core/prompts/chat.py +1 -3
  46. langchain_core/runnables/base.py +500 -451
  47. langchain_core/runnables/branch.py +1 -1
  48. langchain_core/runnables/fallbacks.py +4 -4
  49. langchain_core/runnables/history.py +1 -1
  50. langchain_core/runnables/passthrough.py +3 -3
  51. langchain_core/runnables/retry.py +1 -1
  52. langchain_core/runnables/router.py +1 -1
  53. langchain_core/structured_query.py +3 -7
  54. langchain_core/tools/base.py +14 -41
  55. langchain_core/tools/convert.py +2 -22
  56. langchain_core/tools/retriever.py +1 -8
  57. langchain_core/tools/structured.py +2 -10
  58. langchain_core/tracers/_streaming.py +6 -7
  59. langchain_core/tracers/base.py +7 -14
  60. langchain_core/tracers/core.py +4 -27
  61. langchain_core/tracers/event_stream.py +4 -15
  62. langchain_core/tracers/langchain.py +3 -14
  63. langchain_core/tracers/log_stream.py +2 -3
  64. langchain_core/utils/_merge.py +45 -7
  65. langchain_core/utils/function_calling.py +22 -9
  66. langchain_core/utils/utils.py +29 -0
  67. langchain_core/version.py +1 -1
  68. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/METADATA +7 -9
  69. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/RECORD +71 -64
  70. langchain_core/v1/__init__.py +0 -1
  71. langchain_core/v1/chat_models.py +0 -1047
  72. langchain_core/v1/messages.py +0 -755
  73. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/WHEEL +0 -0
  74. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/entry_points.txt +0 -0
@@ -1,7 +1,8 @@
1
1
  """Human message."""
2
2
 
3
- from typing import Any, Literal, Union
3
+ from typing import Any, Literal, Optional, Union, cast, overload
4
4
 
5
+ from langchain_core.messages import content as types
5
6
  from langchain_core.messages.base import BaseMessage, BaseMessageChunk
6
7
 
7
8
 
@@ -41,16 +42,35 @@ class HumanMessage(BaseMessage):
41
42
  type: Literal["human"] = "human"
42
43
  """The type of the message (used for serialization). Defaults to "human"."""
43
44
 
45
+ @overload
44
46
  def __init__(
45
- self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
46
- ) -> None:
47
- """Pass in content as positional arg.
47
+ self,
48
+ content: Union[str, list[Union[str, dict]]],
49
+ **kwargs: Any,
50
+ ) -> None: ...
51
+
52
+ @overload
53
+ def __init__(
54
+ self,
55
+ content: Optional[Union[str, list[Union[str, dict]]]] = None,
56
+ content_blocks: Optional[list[types.ContentBlock]] = None,
57
+ **kwargs: Any,
58
+ ) -> None: ...
48
59
 
49
- Args:
50
- content: The string contents of the message.
51
- kwargs: Additional fields to pass to the message.
52
- """
53
- super().__init__(content=content, **kwargs)
60
+ def __init__(
61
+ self,
62
+ content: Optional[Union[str, list[Union[str, dict]]]] = None,
63
+ content_blocks: Optional[list[types.ContentBlock]] = None,
64
+ **kwargs: Any,
65
+ ) -> None:
66
+ """Specify ``content`` as positional arg or ``content_blocks`` for typing."""
67
+ if content_blocks is not None:
68
+ super().__init__(
69
+ content=cast("Union[str, list[Union[str, dict]]]", content_blocks),
70
+ **kwargs,
71
+ )
72
+ else:
73
+ super().__init__(content=content, **kwargs)
54
74
 
55
75
 
56
76
  class HumanMessageChunk(HumanMessage, BaseMessageChunk):
@@ -1,7 +1,8 @@
1
1
  """System message."""
2
2
 
3
- from typing import Any, Literal, Union
3
+ from typing import Any, Literal, Optional, Union, cast, overload
4
4
 
5
+ from langchain_core.messages import content as types
5
6
  from langchain_core.messages.base import BaseMessage, BaseMessageChunk
6
7
 
7
8
 
@@ -34,16 +35,35 @@ class SystemMessage(BaseMessage):
34
35
  type: Literal["system"] = "system"
35
36
  """The type of the message (used for serialization). Defaults to "system"."""
36
37
 
38
+ @overload
37
39
  def __init__(
38
- self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
39
- ) -> None:
40
- """Pass in content as positional arg.
40
+ self,
41
+ content: Union[str, list[Union[str, dict]]],
42
+ **kwargs: Any,
43
+ ) -> None: ...
44
+
45
+ @overload
46
+ def __init__(
47
+ self,
48
+ content: Optional[Union[str, list[Union[str, dict]]]] = None,
49
+ content_blocks: Optional[list[types.ContentBlock]] = None,
50
+ **kwargs: Any,
51
+ ) -> None: ...
41
52
 
42
- Args:
43
- content: The string contents of the message.
44
- kwargs: Additional fields to pass to the message.
45
- """
46
- super().__init__(content=content, **kwargs)
53
+ def __init__(
54
+ self,
55
+ content: Optional[Union[str, list[Union[str, dict]]]] = None,
56
+ content_blocks: Optional[list[types.ContentBlock]] = None,
57
+ **kwargs: Any,
58
+ ) -> None:
59
+ """Specify ``content`` as positional arg or ``content_blocks`` for typing."""
60
+ if content_blocks is not None:
61
+ super().__init__(
62
+ content=cast("Union[str, list[Union[str, dict]]]", content_blocks),
63
+ **kwargs,
64
+ )
65
+ else:
66
+ super().__init__(content=content, **kwargs)
47
67
 
48
68
 
49
69
  class SystemMessageChunk(SystemMessage, BaseMessageChunk):
@@ -1,16 +1,15 @@
1
1
  """Messages for tools."""
2
2
 
3
3
  import json
4
- from typing import Any, Literal, Optional, Union
4
+ from typing import Any, Literal, Optional, Union, cast, overload
5
5
  from uuid import UUID
6
6
 
7
7
  from pydantic import Field, model_validator
8
- from typing_extensions import override
8
+ from typing_extensions import NotRequired, TypedDict, override
9
9
 
10
+ from langchain_core.messages import content as types
10
11
  from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content
11
- from langchain_core.messages.content_blocks import InvalidToolCall as InvalidToolCall
12
- from langchain_core.messages.content_blocks import ToolCall as ToolCall
13
- from langchain_core.messages.content_blocks import ToolCallChunk as ToolCallChunk
12
+ from langchain_core.messages.content import InvalidToolCall as InvalidToolCall
14
13
  from langchain_core.utils._merge import merge_dicts, merge_obj
15
14
 
16
15
 
@@ -136,16 +135,35 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
136
135
  values["tool_call_id"] = str(tool_call_id)
137
136
  return values
138
137
 
138
+ @overload
139
139
  def __init__(
140
- self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
141
- ) -> None:
142
- """Create a ToolMessage.
140
+ self,
141
+ content: Union[str, list[Union[str, dict]]],
142
+ **kwargs: Any,
143
+ ) -> None: ...
143
144
 
144
- Args:
145
- content: The string contents of the message.
146
- **kwargs: Additional fields.
147
- """
148
- super().__init__(content=content, **kwargs)
145
+ @overload
146
+ def __init__(
147
+ self,
148
+ content: Optional[Union[str, list[Union[str, dict]]]] = None,
149
+ content_blocks: Optional[list[types.ContentBlock]] = None,
150
+ **kwargs: Any,
151
+ ) -> None: ...
152
+
153
+ def __init__(
154
+ self,
155
+ content: Optional[Union[str, list[Union[str, dict]]]] = None,
156
+ content_blocks: Optional[list[types.ContentBlock]] = None,
157
+ **kwargs: Any,
158
+ ) -> None:
159
+ """Specify ``content`` as positional arg or ``content_blocks`` for typing."""
160
+ if content_blocks is not None:
161
+ super().__init__(
162
+ content=cast("Union[str, list[Union[str, dict]]]", content_blocks),
163
+ **kwargs,
164
+ )
165
+ else:
166
+ super().__init__(content=content, **kwargs)
149
167
 
150
168
 
151
169
  class ToolMessageChunk(ToolMessage, BaseMessageChunk):
@@ -180,6 +198,37 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):
180
198
  return super().__add__(other)
181
199
 
182
200
 
201
+ class ToolCall(TypedDict):
202
+ """Represents a request to call a tool.
203
+
204
+ Example:
205
+
206
+ .. code-block:: python
207
+
208
+ {
209
+ "name": "foo",
210
+ "args": {"a": 1},
211
+ "id": "123"
212
+ }
213
+
214
+ This represents a request to call the tool named "foo" with arguments {"a": 1}
215
+ and an identifier of "123".
216
+
217
+ """
218
+
219
+ name: str
220
+ """The name of the tool to be called."""
221
+ args: dict[str, Any]
222
+ """The arguments to the tool call."""
223
+ id: Optional[str]
224
+ """An identifier associated with the tool call.
225
+
226
+ An identifier is needed to associate a tool call request with a tool
227
+ call result in events when multiple concurrent tool calls are made.
228
+ """
229
+ type: NotRequired[Literal["tool_call"]]
230
+
231
+
183
232
  def tool_call(
184
233
  *,
185
234
  name: str,
@@ -196,6 +245,38 @@ def tool_call(
196
245
  return ToolCall(name=name, args=args, id=id, type="tool_call")
197
246
 
198
247
 
248
+ class ToolCallChunk(TypedDict):
249
+ """A chunk of a tool call (e.g., as part of a stream).
250
+
251
+ When merging ToolCallChunks (e.g., via AIMessageChunk.__add__),
252
+ all string attributes are concatenated. Chunks are only merged if their
253
+ values of `index` are equal and not None.
254
+
255
+ Example:
256
+
257
+ .. code-block:: python
258
+
259
+ left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
260
+ right_chunks = [ToolCallChunk(name=None, args='1}', index=0)]
261
+
262
+ (
263
+ AIMessageChunk(content="", tool_call_chunks=left_chunks)
264
+ + AIMessageChunk(content="", tool_call_chunks=right_chunks)
265
+ ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)]
266
+
267
+ """
268
+
269
+ name: Optional[str]
270
+ """The name of the tool to be called."""
271
+ args: Optional[str]
272
+ """The arguments to the tool call."""
273
+ id: Optional[str]
274
+ """An identifier associated with the tool call."""
275
+ index: Optional[int]
276
+ """The index of the tool call in a sequence."""
277
+ type: NotRequired[Literal["tool_call_chunk"]]
278
+
279
+
199
280
  def tool_call_chunk(
200
281
  *,
201
282
  name: Optional[str] = None,
@@ -31,21 +31,18 @@ from typing import (
31
31
  from pydantic import Discriminator, Field, Tag
32
32
 
33
33
  from langchain_core.exceptions import ErrorCode, create_message
34
- from langchain_core.messages import convert_to_openai_data_block, is_data_content_block
35
34
  from langchain_core.messages.ai import AIMessage, AIMessageChunk
36
35
  from langchain_core.messages.base import BaseMessage, BaseMessageChunk
37
36
  from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
37
+ from langchain_core.messages.content import (
38
+ convert_to_openai_data_block,
39
+ is_data_content_block,
40
+ )
38
41
  from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk
39
42
  from langchain_core.messages.human import HumanMessage, HumanMessageChunk
40
43
  from langchain_core.messages.modifier import RemoveMessage
41
44
  from langchain_core.messages.system import SystemMessage, SystemMessageChunk
42
45
  from langchain_core.messages.tool import ToolCall, ToolMessage, ToolMessageChunk
43
- from langchain_core.v1.messages import AIMessage as AIMessageV1
44
- from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
45
- from langchain_core.v1.messages import HumanMessage as HumanMessageV1
46
- from langchain_core.v1.messages import MessageV1, MessageV1Types
47
- from langchain_core.v1.messages import SystemMessage as SystemMessageV1
48
- from langchain_core.v1.messages import ToolMessage as ToolMessageV1
49
46
 
50
47
  if TYPE_CHECKING:
51
48
  from langchain_text_splitters import TextSplitter
@@ -136,7 +133,7 @@ def get_buffer_string(
136
133
  else:
137
134
  msg = f"Got unsupported message type: {m}"
138
135
  raise ValueError(msg) # noqa: TRY004
139
- message = f"{role}: {m.text()}"
136
+ message = f"{role}: {m.text}"
140
137
  if isinstance(m, AIMessage) and "function_call" in m.additional_kwargs:
141
138
  message += f"{m.additional_kwargs['function_call']}"
142
139
  string_messages.append(message)
@@ -202,14 +199,14 @@ def message_chunk_to_message(chunk: BaseMessageChunk) -> BaseMessage:
202
199
  # chunk classes always have the equivalent non-chunk class as their first parent
203
200
  ignore_keys = ["type"]
204
201
  if isinstance(chunk, AIMessageChunk):
205
- ignore_keys.append("tool_call_chunks")
202
+ ignore_keys.extend(["tool_call_chunks", "chunk_position"])
206
203
  return chunk.__class__.__mro__[1](
207
204
  **{k: v for k, v in chunk.__dict__.items() if k not in ignore_keys}
208
205
  )
209
206
 
210
207
 
211
208
  MessageLikeRepresentation = Union[
212
- BaseMessage, list[str], tuple[str, str], str, dict[str, Any], MessageV1
209
+ BaseMessage, list[str], tuple[str, str], str, dict[str, Any]
213
210
  ]
214
211
 
215
212
 
@@ -300,130 +297,6 @@ def _create_message_from_message_type(
300
297
  return message
301
298
 
302
299
 
303
- def _create_message_from_message_type_v1(
304
- message_type: str,
305
- content: str,
306
- name: Optional[str] = None,
307
- tool_call_id: Optional[str] = None,
308
- tool_calls: Optional[list[dict[str, Any]]] = None,
309
- id: Optional[str] = None,
310
- **kwargs: Any,
311
- ) -> MessageV1:
312
- """Create a message from a message type and content string.
313
-
314
- Args:
315
- message_type: (str) the type of the message (e.g., "human", "ai", etc.).
316
- content: (str) the content string.
317
- name: (str) the name of the message. Default is None.
318
- tool_call_id: (str) the tool call id. Default is None.
319
- tool_calls: (list[dict[str, Any]]) the tool calls. Default is None.
320
- id: (str) the id of the message. Default is None.
321
- kwargs: (dict[str, Any]) additional keyword arguments.
322
-
323
- Returns:
324
- a message of the appropriate type.
325
-
326
- Raises:
327
- ValueError: if the message type is not one of "human", "user", "ai",
328
- "assistant", "tool", "system", or "developer".
329
- """
330
- if name is not None:
331
- kwargs["name"] = name
332
- if tool_call_id is not None:
333
- kwargs["tool_call_id"] = tool_call_id
334
- if kwargs and (response_metadata := kwargs.pop("response_metadata", None)):
335
- kwargs["response_metadata"] = response_metadata
336
- if id is not None:
337
- kwargs["id"] = id
338
- if tool_calls is not None:
339
- kwargs["tool_calls"] = []
340
- for tool_call in tool_calls:
341
- # Convert OpenAI-format tool call to LangChain format.
342
- if "function" in tool_call:
343
- args = tool_call["function"]["arguments"]
344
- if isinstance(args, str):
345
- args = json.loads(args, strict=False)
346
- kwargs["tool_calls"].append(
347
- {
348
- "name": tool_call["function"]["name"],
349
- "args": args,
350
- "id": tool_call["id"],
351
- "type": "tool_call",
352
- }
353
- )
354
- else:
355
- kwargs["tool_calls"].append(tool_call)
356
- if message_type in {"human", "user"}:
357
- message: MessageV1 = HumanMessageV1(content=content, **kwargs)
358
- elif message_type in {"ai", "assistant"}:
359
- message = AIMessageV1(content=content, **kwargs)
360
- elif message_type in {"system", "developer"}:
361
- if message_type == "developer":
362
- kwargs["custom_role"] = "developer"
363
- message = SystemMessageV1(content=content, **kwargs)
364
- elif message_type == "tool":
365
- artifact = kwargs.pop("artifact", None)
366
- message = ToolMessageV1(content=content, artifact=artifact, **kwargs)
367
- else:
368
- msg = (
369
- f"Unexpected message type: '{message_type}'. Use one of 'human',"
370
- f" 'user', 'ai', 'assistant', 'function', 'tool', 'system', or 'developer'."
371
- )
372
- msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE)
373
- raise ValueError(msg)
374
- return message
375
-
376
-
377
- def convert_from_v1_message(message: MessageV1) -> BaseMessage:
378
- """Compatibility layer to convert v1 messages to current messages.
379
-
380
- Args:
381
- message: MessageV1 instance to convert.
382
-
383
- Returns:
384
- BaseMessage: Converted message instance.
385
- """
386
- content = cast("Union[str, list[str | dict]]", message.content)
387
- if isinstance(message, AIMessageV1):
388
- return AIMessage(
389
- content=content,
390
- id=message.id,
391
- name=message.name,
392
- tool_calls=message.tool_calls,
393
- response_metadata=cast("dict", message.response_metadata),
394
- )
395
- if isinstance(message, AIMessageChunkV1):
396
- return AIMessageChunk(
397
- content=content,
398
- id=message.id,
399
- name=message.name,
400
- tool_call_chunks=message.tool_call_chunks,
401
- response_metadata=cast("dict", message.response_metadata),
402
- )
403
- if isinstance(message, HumanMessageV1):
404
- return HumanMessage(
405
- content=content,
406
- id=message.id,
407
- name=message.name,
408
- )
409
- if isinstance(message, SystemMessageV1):
410
- return SystemMessage(
411
- content=content,
412
- id=message.id,
413
- )
414
- if isinstance(message, ToolMessageV1):
415
- return ToolMessage(
416
- content=content,
417
- id=message.id,
418
- tool_call_id=message.tool_call_id,
419
- artifact=message.artifact,
420
- name=message.name,
421
- status=message.status,
422
- )
423
- message = f"Unsupported message type: {type(message)}"
424
- raise NotImplementedError(message)
425
-
426
-
427
300
  def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
428
301
  """Instantiate a message from a variety of message formats.
429
302
 
@@ -471,66 +344,6 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
471
344
  message_ = _create_message_from_message_type(
472
345
  msg_type, msg_content, **msg_kwargs
473
346
  )
474
- elif isinstance(message, MessageV1Types):
475
- message_ = convert_from_v1_message(message)
476
- else:
477
- msg = f"Unsupported message type: {type(message)}"
478
- msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE)
479
- raise NotImplementedError(msg)
480
-
481
- return message_
482
-
483
-
484
- def _convert_to_message_v1(message: MessageLikeRepresentation) -> MessageV1:
485
- """Instantiate a message from a variety of message formats.
486
-
487
- The message format can be one of the following:
488
-
489
- - BaseMessagePromptTemplate
490
- - BaseMessage
491
- - 2-tuple of (role string, template); e.g., ("human", "{user_input}")
492
- - dict: a message dict with role and content keys
493
- - string: shorthand for ("human", template); e.g., "{user_input}"
494
-
495
- Args:
496
- message: a representation of a message in one of the supported formats.
497
-
498
- Returns:
499
- an instance of a message or a message template.
500
-
501
- Raises:
502
- NotImplementedError: if the message type is not supported.
503
- ValueError: if the message dict does not contain the required keys.
504
- """
505
- if isinstance(message, MessageV1Types):
506
- if isinstance(message, AIMessageChunkV1):
507
- message_: MessageV1 = message.to_message()
508
- else:
509
- message_ = message
510
- elif isinstance(message, str):
511
- message_ = _create_message_from_message_type_v1("human", message)
512
- elif isinstance(message, Sequence) and len(message) == 2:
513
- # mypy doesn't realise this can't be a string given the previous branch
514
- message_type_str, template = message # type: ignore[misc]
515
- message_ = _create_message_from_message_type_v1(message_type_str, template)
516
- elif isinstance(message, dict):
517
- msg_kwargs = message.copy()
518
- try:
519
- try:
520
- msg_type = msg_kwargs.pop("role")
521
- except KeyError:
522
- msg_type = msg_kwargs.pop("type")
523
- # None msg content is not allowed
524
- msg_content = msg_kwargs.pop("content") or ""
525
- except KeyError as e:
526
- msg = f"Message dict must contain 'role' and 'content' keys, got {message}"
527
- msg = create_message(
528
- message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE
529
- )
530
- raise ValueError(msg) from e
531
- message_ = _create_message_from_message_type_v1(
532
- msg_type, msg_content, **msg_kwargs
533
- )
534
347
  else:
535
348
  msg = f"Unsupported message type: {type(message)}"
536
349
  msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE)
@@ -558,25 +371,6 @@ def convert_to_messages(
558
371
  return [_convert_to_message(m) for m in messages]
559
372
 
560
373
 
561
- def convert_to_messages_v1(
562
- messages: Union[Iterable[MessageLikeRepresentation], PromptValue],
563
- ) -> list[MessageV1]:
564
- """Convert a sequence of messages to a list of messages.
565
-
566
- Args:
567
- messages: Sequence of messages to convert.
568
-
569
- Returns:
570
- list of messages (BaseMessages).
571
- """
572
- # Import here to avoid circular imports
573
- from langchain_core.prompt_values import PromptValue
574
-
575
- if isinstance(messages, PromptValue):
576
- return messages.to_messages(message_version="v1")
577
- return [_convert_to_message_v1(m) for m in messages]
578
-
579
-
580
374
  def _runnable_support(func: Callable) -> Callable:
581
375
  @overload
582
376
  def wrapped(
@@ -865,22 +659,23 @@ def trim_messages(
865
659
  properties:
866
660
 
867
661
  1. The resulting chat history should be valid. Most chat models expect that chat
868
- history starts with either (1) a `HumanMessage` or (2) a `SystemMessage` followed
869
- by a `HumanMessage`. To achieve this, set `start_on="human"`.
870
- In addition, generally a `ToolMessage` can only appear after an `AIMessage`
662
+ history starts with either (1) a ``HumanMessage`` or (2) a ``SystemMessage`` followed
663
+ by a ``HumanMessage``. To achieve this, set ``start_on="human"``.
664
+ In addition, generally a ``ToolMessage`` can only appear after an ``AIMessage``
871
665
  that involved a tool call.
872
666
  Please see the following link for more information about messages:
873
667
  https://python.langchain.com/docs/concepts/#messages
874
668
  2. It includes recent messages and drops old messages in the chat history.
875
- To achieve this set the `strategy="last"`.
876
- 3. Usually, the new chat history should include the `SystemMessage` if it
877
- was present in the original chat history since the `SystemMessage` includes
878
- special instructions to the chat model. The `SystemMessage` is almost always
669
+ To achieve this set the ``strategy="last"``.
670
+ 3. Usually, the new chat history should include the ``SystemMessage`` if it
671
+ was present in the original chat history since the ``SystemMessage`` includes
672
+ special instructions to the chat model. The ``SystemMessage`` is almost always
879
673
  the first message in the history if present. To achieve this set the
880
- `include_system=True`.
674
+ ``include_system=True``.
881
675
 
882
- **Note** The examples below show how to configure `trim_messages` to achieve
883
- a behavior consistent with the above properties.
676
+ .. note::
677
+ The examples below show how to configure ``trim_messages`` to achieve a behavior
678
+ consistent with the above properties.
884
679
 
885
680
  Args:
886
681
  messages: Sequence of Message-like objects to trim.
@@ -1216,11 +1011,10 @@ def convert_to_openai_messages(
1216
1011
 
1217
1012
  oai_messages: list = []
1218
1013
 
1219
- if is_single := isinstance(messages, (BaseMessage, dict, str, MessageV1Types)):
1014
+ if is_single := isinstance(messages, (BaseMessage, dict, str)):
1220
1015
  messages = [messages]
1221
1016
 
1222
- # TODO: resolve type ignore here
1223
- messages = convert_to_messages(messages) # type: ignore[arg-type]
1017
+ messages = convert_to_messages(messages)
1224
1018
 
1225
1019
  for i, message in enumerate(messages):
1226
1020
  oai_msg: dict = {"role": _get_message_openai_role(message)}
@@ -1707,11 +1501,15 @@ def _msg_to_chunk(message: BaseMessage) -> BaseMessageChunk:
1707
1501
  def _chunk_to_msg(chunk: BaseMessageChunk) -> BaseMessage:
1708
1502
  if chunk.__class__ in _CHUNK_MSG_MAP:
1709
1503
  return _CHUNK_MSG_MAP[chunk.__class__](
1710
- **chunk.model_dump(exclude={"type", "tool_call_chunks"})
1504
+ **chunk.model_dump(exclude={"type", "tool_call_chunks", "chunk_position"})
1711
1505
  )
1712
1506
  for chunk_cls, msg_cls in _CHUNK_MSG_MAP.items():
1713
1507
  if isinstance(chunk, chunk_cls):
1714
- return msg_cls(**chunk.model_dump(exclude={"type", "tool_call_chunks"}))
1508
+ return msg_cls(
1509
+ **chunk.model_dump(
1510
+ exclude={"type", "tool_call_chunks", "chunk_position"}
1511
+ )
1512
+ )
1715
1513
 
1716
1514
  msg = (
1717
1515
  f"Unrecognized message chunk class {chunk.__class__}. Supported classes are "
@@ -1790,26 +1588,26 @@ def count_tokens_approximately(
1790
1588
  chars_per_token: Number of characters per token to use for the approximation.
1791
1589
  Default is 4 (one token corresponds to ~4 chars for common English text).
1792
1590
  You can also specify float values for more fine-grained control.
1793
- See more here: https://platform.openai.com/tokenizer
1591
+ `See more here. <https://platform.openai.com/tokenizer>`__
1794
1592
  extra_tokens_per_message: Number of extra tokens to add per message.
1795
1593
  Default is 3 (special tokens, including beginning/end of message).
1796
1594
  You can also specify float values for more fine-grained control.
1797
- See more here:
1798
- https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
1595
+ `See more here. <https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb>`__
1799
1596
  count_name: Whether to include message names in the count.
1800
1597
  Enabled by default.
1801
1598
 
1802
1599
  Returns:
1803
1600
  Approximate number of tokens in the messages.
1804
1601
 
1805
- Note:
1806
- This is a simple approximation that may not match the exact token count
1807
- used by specific models. For accurate counts, use model-specific tokenizers.
1602
+ .. note::
1603
+ This is a simple approximation that may not match the exact token count used by
1604
+ specific models. For accurate counts, use model-specific tokenizers.
1808
1605
 
1809
1606
  Warning:
1810
1607
  This function does not currently support counting image tokens.
1811
1608
 
1812
1609
  .. versionadded:: 0.3.46
1610
+
1813
1611
  """
1814
1612
  token_count = 0.0
1815
1613
  for message in convert_to_messages(messages):