langchain-core 0.3.71__py3-none-any.whl → 0.4.0.dev0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
Files changed (84)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/beta_decorator.py +1 -0
  3. langchain_core/_api/deprecation.py +2 -0
  4. langchain_core/beta/runnables/context.py +1 -0
  5. langchain_core/callbacks/base.py +23 -14
  6. langchain_core/callbacks/file.py +1 -0
  7. langchain_core/callbacks/manager.py +145 -19
  8. langchain_core/callbacks/streaming_stdout.py +4 -3
  9. langchain_core/callbacks/usage.py +15 -3
  10. langchain_core/chat_history.py +1 -0
  11. langchain_core/document_loaders/langsmith.py +2 -1
  12. langchain_core/documents/base.py +2 -0
  13. langchain_core/embeddings/fake.py +2 -0
  14. langchain_core/indexing/api.py +10 -0
  15. langchain_core/language_models/_utils.py +37 -0
  16. langchain_core/language_models/base.py +4 -1
  17. langchain_core/language_models/chat_models.py +48 -27
  18. langchain_core/language_models/fake_chat_models.py +71 -1
  19. langchain_core/language_models/llms.py +1 -0
  20. langchain_core/memory.py +1 -0
  21. langchain_core/messages/__init__.py +54 -0
  22. langchain_core/messages/ai.py +31 -18
  23. langchain_core/messages/content_blocks.py +1349 -69
  24. langchain_core/messages/human.py +1 -0
  25. langchain_core/messages/modifier.py +1 -1
  26. langchain_core/messages/tool.py +8 -83
  27. langchain_core/messages/utils.py +221 -6
  28. langchain_core/output_parsers/base.py +51 -14
  29. langchain_core/output_parsers/json.py +5 -2
  30. langchain_core/output_parsers/list.py +7 -2
  31. langchain_core/output_parsers/openai_functions.py +29 -5
  32. langchain_core/output_parsers/openai_tools.py +90 -47
  33. langchain_core/output_parsers/pydantic.py +3 -2
  34. langchain_core/output_parsers/transform.py +53 -12
  35. langchain_core/output_parsers/xml.py +14 -5
  36. langchain_core/outputs/llm_result.py +4 -1
  37. langchain_core/prompt_values.py +111 -7
  38. langchain_core/prompts/base.py +4 -0
  39. langchain_core/prompts/chat.py +3 -0
  40. langchain_core/prompts/few_shot.py +1 -0
  41. langchain_core/prompts/few_shot_with_templates.py +1 -0
  42. langchain_core/prompts/image.py +1 -0
  43. langchain_core/prompts/pipeline.py +1 -0
  44. langchain_core/prompts/prompt.py +1 -0
  45. langchain_core/prompts/structured.py +1 -0
  46. langchain_core/rate_limiters.py +1 -0
  47. langchain_core/retrievers.py +3 -0
  48. langchain_core/runnables/base.py +75 -57
  49. langchain_core/runnables/branch.py +1 -0
  50. langchain_core/runnables/config.py +2 -2
  51. langchain_core/runnables/configurable.py +2 -1
  52. langchain_core/runnables/fallbacks.py +3 -7
  53. langchain_core/runnables/graph.py +5 -3
  54. langchain_core/runnables/graph_ascii.py +1 -0
  55. langchain_core/runnables/graph_mermaid.py +1 -0
  56. langchain_core/runnables/history.py +1 -0
  57. langchain_core/runnables/passthrough.py +3 -0
  58. langchain_core/runnables/retry.py +1 -0
  59. langchain_core/runnables/router.py +1 -0
  60. langchain_core/runnables/schema.py +1 -0
  61. langchain_core/stores.py +3 -0
  62. langchain_core/tools/base.py +43 -11
  63. langchain_core/tools/convert.py +25 -3
  64. langchain_core/tools/retriever.py +8 -1
  65. langchain_core/tools/structured.py +10 -1
  66. langchain_core/tracers/base.py +14 -7
  67. langchain_core/tracers/context.py +1 -1
  68. langchain_core/tracers/core.py +27 -4
  69. langchain_core/tracers/event_stream.py +14 -3
  70. langchain_core/tracers/langchain.py +14 -3
  71. langchain_core/tracers/log_stream.py +4 -1
  72. langchain_core/utils/aiter.py +5 -0
  73. langchain_core/utils/function_calling.py +2 -1
  74. langchain_core/utils/iter.py +1 -0
  75. langchain_core/utils/json_schema.py +1 -1
  76. langchain_core/v1/__init__.py +1 -0
  77. langchain_core/v1/chat_models.py +1047 -0
  78. langchain_core/v1/messages.py +755 -0
  79. langchain_core/vectorstores/base.py +1 -0
  80. langchain_core/version.py +1 -1
  81. {langchain_core-0.3.71.dist-info → langchain_core-0.4.0.dev0.dist-info}/METADATA +1 -1
  82. {langchain_core-0.3.71.dist-info → langchain_core-0.4.0.dev0.dist-info}/RECORD +84 -81
  83. {langchain_core-0.3.71.dist-info → langchain_core-0.4.0.dev0.dist-info}/WHEEL +0 -0
  84. {langchain_core-0.3.71.dist-info → langchain_core-0.4.0.dev0.dist-info}/entry_points.txt +0 -0
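The bulk of the churn below is mechanical: langchain_core/messages/tool.py (+8 -83) drops its local ToolCall, ToolCallChunk, and InvalidToolCall TypedDicts in favor of the definitions in langchain_core/messages/content_blocks.py, re-exported under the old names. A minimal sketch of a call site that keeps working, assuming the re-exports behave as the tool.py hunks below show:

.. code-block:: python

    # Old import path still resolves via the explicit re-exports in tool.py.
    from langchain_core.messages.tool import ToolCall, tool_call

    call: ToolCall = tool_call(name="foo", args={"a": 1}, id="123")
    # -> {'name': 'foo', 'args': {'a': 1}, 'id': '123', 'type': 'tool_call'}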
--- a/langchain_core/messages/human.py
+++ b/langchain_core/messages/human.py
@@ -28,6 +28,7 @@ class HumanMessage(BaseMessage):
             # Instantiate a chat model and invoke it with the messages
             model = ...
             print(model.invoke(messages))
+
     """
 
     example: bool = False
--- a/langchain_core/messages/modifier.py
+++ b/langchain_core/messages/modifier.py
@@ -13,7 +13,7 @@ class RemoveMessage(BaseMessage):
 
     def __init__(
         self,
-        id: str,  # noqa: A002
+        id: str,
        **kwargs: Any,
    ) -> None:
        """Create a RemoveMessage.
--- a/langchain_core/messages/tool.py
+++ b/langchain_core/messages/tool.py
@@ -5,9 +5,12 @@ from typing import Any, Literal, Optional, Union
 from uuid import UUID
 
 from pydantic import Field, model_validator
-from typing_extensions import NotRequired, TypedDict, override
+from typing_extensions import override
 
 from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content
+from langchain_core.messages.content_blocks import InvalidToolCall as InvalidToolCall
+from langchain_core.messages.content_blocks import ToolCall as ToolCall
+from langchain_core.messages.content_blocks import ToolCallChunk as ToolCallChunk
 from langchain_core.utils._merge import merge_dicts, merge_obj
 
 
@@ -59,6 +62,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
     The tool_call_id field is used to associate the tool call request with the
     tool call response. This is useful in situations where a chat model is able
     to request multiple tool calls in parallel.
+
     """  # noqa: E501
 
     tool_call_id: str
@@ -176,41 +180,11 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):
         return super().__add__(other)
 
 
-class ToolCall(TypedDict):
-    """Represents a request to call a tool.
-
-    Example:
-
-        .. code-block:: python
-
-            {
-                "name": "foo",
-                "args": {"a": 1},
-                "id": "123"
-            }
-
-    This represents a request to call the tool named "foo" with arguments {"a": 1}
-    and an identifier of "123".
-    """
-
-    name: str
-    """The name of the tool to be called."""
-    args: dict[str, Any]
-    """The arguments to the tool call."""
-    id: Optional[str]
-    """An identifier associated with the tool call.
-
-    An identifier is needed to associate a tool call request with a tool
-    call result in events when multiple concurrent tool calls are made.
-    """
-    type: NotRequired[Literal["tool_call"]]
-
-
 def tool_call(
     *,
     name: str,
     args: dict[str, Any],
-    id: Optional[str],  # noqa: A002
+    id: Optional[str],
 ) -> ToolCall:
     """Create a tool call.
 
@@ -222,42 +196,11 @@ def tool_call
     return ToolCall(name=name, args=args, id=id, type="tool_call")
 
 
-class ToolCallChunk(TypedDict):
-    """A chunk of a tool call (e.g., as part of a stream).
-
-    When merging ToolCallChunks (e.g., via AIMessageChunk.__add__),
-    all string attributes are concatenated. Chunks are only merged if their
-    values of `index` are equal and not None.
-
-    Example:
-
-        .. code-block:: python
-
-            left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
-            right_chunks = [ToolCallChunk(name=None, args='1}', index=0)]
-
-            (
-                AIMessageChunk(content="", tool_call_chunks=left_chunks)
-                + AIMessageChunk(content="", tool_call_chunks=right_chunks)
-            ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)]
-    """
-
-    name: Optional[str]
-    """The name of the tool to be called."""
-    args: Optional[str]
-    """The arguments to the tool call."""
-    id: Optional[str]
-    """An identifier associated with the tool call."""
-    index: Optional[int]
-    """The index of the tool call in a sequence."""
-    type: NotRequired[Literal["tool_call_chunk"]]
-
-
 def tool_call_chunk(
     *,
     name: Optional[str] = None,
     args: Optional[str] = None,
-    id: Optional[str] = None,  # noqa: A002
+    id: Optional[str] = None,
     index: Optional[int] = None,
 ) -> ToolCallChunk:
     """Create a tool call chunk.
@@ -273,29 +216,11 @@ def tool_call_chunk
     )
 
 
-class InvalidToolCall(TypedDict):
-    """Allowance for errors made by LLM.
-
-    Here we add an `error` key to surface errors made during generation
-    (e.g., invalid JSON arguments.)
-    """
-
-    name: Optional[str]
-    """The name of the tool to be called."""
-    args: Optional[str]
-    """The arguments to the tool call."""
-    id: Optional[str]
-    """An identifier associated with the tool call."""
-    error: Optional[str]
-    """An error message associated with the tool call."""
-    type: NotRequired[Literal["invalid_tool_call"]]
-
-
 def invalid_tool_call(
     *,
     name: Optional[str] = None,
     args: Optional[str] = None,
-    id: Optional[str] = None,  # noqa: A002
+    id: Optional[str] = None,
     error: Optional[str] = None,
 ) -> InvalidToolCall:
     """Create an invalid tool call.
--- a/langchain_core/messages/utils.py
+++ b/langchain_core/messages/utils.py
@@ -40,6 +40,12 @@ from langchain_core.messages.human import HumanMessage, HumanMessageChunk
 from langchain_core.messages.modifier import RemoveMessage
 from langchain_core.messages.system import SystemMessage, SystemMessageChunk
 from langchain_core.messages.tool import ToolCall, ToolMessage, ToolMessageChunk
+from langchain_core.v1.messages import AIMessage as AIMessageV1
+from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
+from langchain_core.v1.messages import HumanMessage as HumanMessageV1
+from langchain_core.v1.messages import MessageV1, MessageV1Types
+from langchain_core.v1.messages import SystemMessage as SystemMessageV1
+from langchain_core.v1.messages import ToolMessage as ToolMessageV1
 
 if TYPE_CHECKING:
     from langchain_text_splitters import TextSplitter
@@ -111,6 +117,7 @@ def get_buffer_string(
         ]
         get_buffer_string(messages)
         # -> "Human: Hi, how are you?\nAI: Good, how are you?"
+
     """
     string_messages = []
     for m in messages:
@@ -202,7 +209,7 @@ def message_chunk_to_message(chunk: BaseMessageChunk) -> BaseMessage:
 
 
 MessageLikeRepresentation = Union[
-    BaseMessage, list[str], tuple[str, str], str, dict[str, Any]
+    BaseMessage, list[str], tuple[str, str], str, dict[str, Any], MessageV1
 ]
 
 
@@ -212,7 +219,7 @@ def _create_message_from_message_type(
     name: Optional[str] = None,
     tool_call_id: Optional[str] = None,
     tool_calls: Optional[list[dict[str, Any]]] = None,
-    id: Optional[str] = None,  # noqa: A002
+    id: Optional[str] = None,
     **additional_kwargs: Any,
 ) -> BaseMessage:
     """Create a message from a message type and content string.
@@ -293,6 +300,130 @@ def _create_message_from_message_type(
     return message
 
 
+def _create_message_from_message_type_v1(
+    message_type: str,
+    content: str,
+    name: Optional[str] = None,
+    tool_call_id: Optional[str] = None,
+    tool_calls: Optional[list[dict[str, Any]]] = None,
+    id: Optional[str] = None,
+    **kwargs: Any,
+) -> MessageV1:
+    """Create a message from a message type and content string.
+
+    Args:
+        message_type: (str) the type of the message (e.g., "human", "ai", etc.).
+        content: (str) the content string.
+        name: (str) the name of the message. Default is None.
+        tool_call_id: (str) the tool call id. Default is None.
+        tool_calls: (list[dict[str, Any]]) the tool calls. Default is None.
+        id: (str) the id of the message. Default is None.
+        kwargs: (dict[str, Any]) additional keyword arguments.
+
+    Returns:
+        a message of the appropriate type.
+
+    Raises:
+        ValueError: if the message type is not one of "human", "user", "ai",
+            "assistant", "tool", "system", or "developer".
+    """
+    if name is not None:
+        kwargs["name"] = name
+    if tool_call_id is not None:
+        kwargs["tool_call_id"] = tool_call_id
+    if kwargs and (response_metadata := kwargs.pop("response_metadata", None)):
+        kwargs["response_metadata"] = response_metadata
+    if id is not None:
+        kwargs["id"] = id
+    if tool_calls is not None:
+        kwargs["tool_calls"] = []
+        for tool_call in tool_calls:
+            # Convert OpenAI-format tool call to LangChain format.
+            if "function" in tool_call:
+                args = tool_call["function"]["arguments"]
+                if isinstance(args, str):
+                    args = json.loads(args, strict=False)
+                kwargs["tool_calls"].append(
+                    {
+                        "name": tool_call["function"]["name"],
+                        "args": args,
+                        "id": tool_call["id"],
+                        "type": "tool_call",
+                    }
+                )
+            else:
+                kwargs["tool_calls"].append(tool_call)
+    if message_type in {"human", "user"}:
+        message: MessageV1 = HumanMessageV1(content=content, **kwargs)
+    elif message_type in {"ai", "assistant"}:
+        message = AIMessageV1(content=content, **kwargs)
+    elif message_type in {"system", "developer"}:
+        if message_type == "developer":
+            kwargs["custom_role"] = "developer"
+        message = SystemMessageV1(content=content, **kwargs)
+    elif message_type == "tool":
+        artifact = kwargs.pop("artifact", None)
+        message = ToolMessageV1(content=content, artifact=artifact, **kwargs)
+    else:
+        msg = (
+            f"Unexpected message type: '{message_type}'. Use one of 'human',"
+            f" 'user', 'ai', 'assistant', 'function', 'tool', 'system', or 'developer'."
+        )
+        msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE)
+        raise ValueError(msg)
+    return message
+
+
+def convert_from_v1_message(message: MessageV1) -> BaseMessage:
+    """Compatibility layer to convert v1 messages to current messages.
+
+    Args:
+        message: MessageV1 instance to convert.
+
+    Returns:
+        BaseMessage: Converted message instance.
+    """
+    content = cast("Union[str, list[str | dict]]", message.content)
+    if isinstance(message, AIMessageV1):
+        return AIMessage(
+            content=content,
+            id=message.id,
+            name=message.name,
+            tool_calls=message.tool_calls,
+            response_metadata=cast("dict", message.response_metadata),
+        )
+    if isinstance(message, AIMessageChunkV1):
+        return AIMessageChunk(
+            content=content,
+            id=message.id,
+            name=message.name,
+            tool_call_chunks=message.tool_call_chunks,
+            response_metadata=cast("dict", message.response_metadata),
+        )
+    if isinstance(message, HumanMessageV1):
+        return HumanMessage(
+            content=content,
+            id=message.id,
+            name=message.name,
+        )
+    if isinstance(message, SystemMessageV1):
+        return SystemMessage(
+            content=content,
+            id=message.id,
+        )
+    if isinstance(message, ToolMessageV1):
+        return ToolMessage(
+            content=content,
+            id=message.id,
+            tool_call_id=message.tool_call_id,
+            artifact=message.artifact,
+            name=message.name,
+            status=message.status,
+        )
+    message = f"Unsupported message type: {type(message)}"
+    raise NotImplementedError(message)
+
+
 def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
     """Instantiate a message from a variety of message formats.
 
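A minimal usage sketch for the compatibility helper above (hypothetical values; it assumes the v1 constructors accept a plain string for content, as _create_message_from_message_type_v1 does):

.. code-block:: python

    from langchain_core.messages import AIMessage  # current-generation message
    from langchain_core.messages.utils import convert_from_v1_message
    from langchain_core.v1.messages import AIMessage as AIMessageV1

    v1_msg = AIMessageV1(content="Hello!", id="msg-1")  # assumed constructor
    legacy = convert_from_v1_message(v1_msg)
    assert isinstance(legacy, AIMessage)
    assert legacy.id == "msg-1"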
@@ -340,6 +471,66 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
         message_ = _create_message_from_message_type(
             msg_type, msg_content, **msg_kwargs
         )
+    elif isinstance(message, MessageV1Types):
+        message_ = convert_from_v1_message(message)
+    else:
+        msg = f"Unsupported message type: {type(message)}"
+        msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE)
+        raise NotImplementedError(msg)
+
+    return message_
+
+
+def _convert_to_message_v1(message: MessageLikeRepresentation) -> MessageV1:
+    """Instantiate a message from a variety of message formats.
+
+    The message format can be one of the following:
+
+    - BaseMessagePromptTemplate
+    - BaseMessage
+    - 2-tuple of (role string, template); e.g., ("human", "{user_input}")
+    - dict: a message dict with role and content keys
+    - string: shorthand for ("human", template); e.g., "{user_input}"
+
+    Args:
+        message: a representation of a message in one of the supported formats.
+
+    Returns:
+        an instance of a message or a message template.
+
+    Raises:
+        NotImplementedError: if the message type is not supported.
+        ValueError: if the message dict does not contain the required keys.
+    """
+    if isinstance(message, MessageV1Types):
+        if isinstance(message, AIMessageChunkV1):
+            message_: MessageV1 = message.to_message()
+        else:
+            message_ = message
+    elif isinstance(message, str):
+        message_ = _create_message_from_message_type_v1("human", message)
+    elif isinstance(message, Sequence) and len(message) == 2:
+        # mypy doesn't realise this can't be a string given the previous branch
+        message_type_str, template = message  # type: ignore[misc]
+        message_ = _create_message_from_message_type_v1(message_type_str, template)
+    elif isinstance(message, dict):
+        msg_kwargs = message.copy()
+        try:
+            try:
+                msg_type = msg_kwargs.pop("role")
+            except KeyError:
+                msg_type = msg_kwargs.pop("type")
+            # None msg content is not allowed
+            msg_content = msg_kwargs.pop("content") or ""
+        except KeyError as e:
+            msg = f"Message dict must contain 'role' and 'content' keys, got {message}"
+            msg = create_message(
+                message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE
+            )
+            raise ValueError(msg) from e
+        message_ = _create_message_from_message_type_v1(
+            msg_type, msg_content, **msg_kwargs
+        )
     else:
         msg = f"Unsupported message type: {type(message)}"
         msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE)
@@ -367,6 +558,25 @@ def convert_to_messages(
     return [_convert_to_message(m) for m in messages]
 
 
+def convert_to_messages_v1(
+    messages: Union[Iterable[MessageLikeRepresentation], PromptValue],
+) -> list[MessageV1]:
+    """Convert a sequence of messages to a list of v1 messages.
+
+    Args:
+        messages: Sequence of messages to convert.
+
+    Returns:
+        List of v1 messages.
+    """
+    # Import here to avoid circular imports
+    from langchain_core.prompt_values import PromptValue
+
+    if isinstance(messages, PromptValue):
+        return messages.to_messages(message_version="v1")
+    return [_convert_to_message_v1(m) for m in messages]
+
+
 def _runnable_support(func: Callable) -> Callable:
     @overload
     def wrapped(
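Usage sketch for the new convert_to_messages_v1 entry point, mirroring how convert_to_messages is used today; the dict/tuple/string coercions follow the branches of _convert_to_message_v1 above:

.. code-block:: python

    from langchain_core.messages.utils import convert_to_messages_v1

    msgs = convert_to_messages_v1(
        [
            {"role": "system", "content": "You are terse."},
            ("human", "What is 2 + 2?"),
            "And 3 + 3?",  # bare strings coerce to human messages
        ]
    )
    # -> [SystemMessage, HumanMessage, HumanMessage] from langchain_core.v1.messages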
@@ -463,6 +673,7 @@ def filter_messages(
             SystemMessage("you're a good assistant."),
             HumanMessage("what's your name", id="foo", name="example_user"),
         ]
+
     """  # noqa: E501
     messages = convert_to_messages(messages)
     filtered: list[BaseMessage] = []
@@ -869,6 +1080,7 @@ def trim_messages(
         HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"),
         AIMessage( [{"type": "text", "text": "This is the FIRST 4 token block."}], id="second"),
     ]
+
     """  # noqa: E501
     # Validate arguments
     if start_on and strategy == "first":
@@ -1004,10 +1216,11 @@ def convert_to_openai_messages(
 
     oai_messages: list = []
 
-    if is_single := isinstance(messages, (BaseMessage, dict, str)):
+    if is_single := isinstance(messages, (BaseMessage, dict, str, MessageV1Types)):
        messages = [messages]
 
-    messages = convert_to_messages(messages)
+    # TODO: resolve type ignore here
+    messages = convert_to_messages(messages)  # type: ignore[arg-type]
 
     for i, message in enumerate(messages):
         oai_msg: dict = {"role": _get_message_openai_role(message)}
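With MessageV1Types added to the is_single check, a bare v1 message can now be passed straight to the OpenAI serializer. A sketch, assuming HumanMessage in langchain_core.v1.messages accepts string content as elsewhere in this diff:

.. code-block:: python

    from langchain_core.messages.utils import convert_to_openai_messages
    from langchain_core.v1.messages import HumanMessage as HumanMessageV1

    convert_to_openai_messages(HumanMessageV1(content="hi"))
    # single input in, single dict out: {'role': 'user', 'content': 'hi'}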
@@ -1176,7 +1389,9 @@
                         "id": block["id"],
                         "function": {
                             "name": block["name"],
-                            "arguments": json.dumps(block["input"]),
+                            "arguments": json.dumps(
+                                block["input"], ensure_ascii=False
+                            ),
                         },
                     }
                 )
@@ -1550,7 +1765,7 @@ def _convert_to_openai_tool_calls(tool_calls: list[ToolCall]) -> list[dict]:
             "id": tool_call["id"],
             "function": {
                 "name": tool_call["name"],
-                "arguments": json.dumps(tool_call["args"]),
+                "arguments": json.dumps(tool_call["args"], ensure_ascii=False),
             },
         }
         for tool_call in tool_calls
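The two ensure_ascii=False changes above stop json.dumps from escaping non-ASCII characters in tool-call arguments when serializing to the OpenAI format; standard-library behavior for comparison:

.. code-block:: python

    import json

    args = {"city": "Zürich"}
    json.dumps(args)                      # '{"city": "Z\\u00fcrich"}' (escaped)
    json.dumps(args, ensure_ascii=False)  # '{"city": "Zürich"}'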
--- a/langchain_core/output_parsers/base.py
+++ b/langchain_core/output_parsers/base.py
@@ -11,6 +11,7 @@ from typing import (
     Optional,
     TypeVar,
     Union,
+    cast,
 )
 
 from typing_extensions import override
@@ -20,19 +21,22 @@ from langchain_core.messages import AnyMessage, BaseMessage
 from langchain_core.outputs import ChatGeneration, Generation
 from langchain_core.runnables import Runnable, RunnableConfig, RunnableSerializable
 from langchain_core.runnables.config import run_in_executor
+from langchain_core.v1.messages import AIMessage, MessageV1, MessageV1Types
 
 if TYPE_CHECKING:
     from langchain_core.prompt_values import PromptValue
 
 T = TypeVar("T")
-OutputParserLike = Runnable[LanguageModelOutput, T]
+OutputParserLike = Runnable[Union[LanguageModelOutput, AIMessage], T]
 
 
 class BaseLLMOutputParser(ABC, Generic[T]):
     """Abstract base class for parsing the outputs of a model."""
 
     @abstractmethod
-    def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
+    def parse_result(
+        self, result: Union[list[Generation], AIMessage], *, partial: bool = False
+    ) -> T:
         """Parse a list of candidate model Generations into a specific format.
 
         Args:
@@ -46,7 +50,7 @@ class BaseLLMOutputParser(ABC, Generic[T]):
         """
 
     async def aparse_result(
-        self, result: list[Generation], *, partial: bool = False
+        self, result: Union[list[Generation], AIMessage], *, partial: bool = False
     ) -> T:
         """Async parse a list of candidate model Generations into a specific format.
 
@@ -71,7 +75,7 @@ class BaseGenerationOutputParser(
     @override
     def InputType(self) -> Any:
         """Return the input type for the parser."""
-        return Union[str, AnyMessage]
+        return Union[str, AnyMessage, MessageV1]
 
     @property
     @override
@@ -84,7 +88,7 @@
     @override
     def invoke(
         self,
-        input: Union[str, BaseMessage],
+        input: Union[str, BaseMessage, MessageV1],
         config: Optional[RunnableConfig] = None,
         **kwargs: Any,
     ) -> T:
@@ -97,9 +101,16 @@
                 config,
                 run_type="parser",
             )
+        if isinstance(input, MessageV1Types):
+            return self._call_with_config(
+                lambda inner_input: self.parse_result(inner_input),
+                input,
+                config,
+                run_type="parser",
+            )
         return self._call_with_config(
             lambda inner_input: self.parse_result([Generation(text=inner_input)]),
-            input,
+            cast("str", input),
             config,
             run_type="parser",
         )
@@ -120,6 +131,13 @@
                 config,
                 run_type="parser",
             )
+        if isinstance(input, MessageV1Types):
+            return await self._acall_with_config(
+                lambda inner_input: self.aparse_result(inner_input),
+                input,
+                config,
+                run_type="parser",
+            )
         return await self._acall_with_config(
             lambda inner_input: self.aparse_result([Generation(text=inner_input)]),
             input,
@@ -129,7 +147,7 @@
 
 
 class BaseOutputParser(
-    BaseLLMOutputParser, RunnableSerializable[LanguageModelOutput, T]
+    BaseLLMOutputParser, RunnableSerializable[Union[LanguageModelOutput, AIMessage], T]
 ):
     """Base class to parse the output of an LLM call.
 
@@ -155,13 +173,14 @@
         @property
         def _type(self) -> str:
             return "boolean_output_parser"
+
     """  # noqa: E501
 
     @property
     @override
     def InputType(self) -> Any:
         """Return the input type for the parser."""
-        return Union[str, AnyMessage]
+        return Union[str, AnyMessage, MessageV1]
 
     @property
     @override
@@ -188,7 +207,7 @@
     @override
     def invoke(
         self,
-        input: Union[str, BaseMessage],
+        input: Union[str, BaseMessage, MessageV1],
         config: Optional[RunnableConfig] = None,
         **kwargs: Any,
     ) -> T:
@@ -201,9 +220,16 @@
                 config,
                 run_type="parser",
             )
+        if isinstance(input, MessageV1Types):
+            return self._call_with_config(
+                lambda inner_input: self.parse_result(inner_input),
+                input,
+                config,
+                run_type="parser",
+            )
         return self._call_with_config(
             lambda inner_input: self.parse_result([Generation(text=inner_input)]),
-            input,
+            cast("str", input),
             config,
             run_type="parser",
         )
@@ -211,7 +237,7 @@
     @override
     async def ainvoke(
         self,
-        input: Union[str, BaseMessage],
+        input: Union[str, BaseMessage, MessageV1],
         config: Optional[RunnableConfig] = None,
         **kwargs: Optional[Any],
     ) -> T:
@@ -224,15 +250,24 @@
                 config,
                 run_type="parser",
             )
+        if isinstance(input, MessageV1Types):
+            return await self._acall_with_config(
+                lambda inner_input: self.aparse_result(inner_input),
+                input,
+                config,
+                run_type="parser",
+            )
         return await self._acall_with_config(
             lambda inner_input: self.aparse_result([Generation(text=inner_input)]),
-            input,
+            cast("str", input),
             config,
             run_type="parser",
         )
 
     @override
-    def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
+    def parse_result(
+        self, result: Union[list[Generation], AIMessage], *, partial: bool = False
+    ) -> T:
         """Parse a list of candidate model Generations into a specific format.
 
         The return value is parsed from only the first Generation in the result, which
@@ -247,6 +282,8 @@
         Returns:
             Structured output.
         """
+        if isinstance(result, AIMessage):
+            return self.parse(result.text)
         return self.parse(result[0].text)
 
     @abstractmethod
@@ -261,7 +298,7 @@
         """
 
     async def aparse_result(
-        self, result: list[Generation], *, partial: bool = False
+        self, result: Union[list[Generation], AIMessage], *, partial: bool = False
     ) -> T:
         """Async parse a list of candidate model Generations into a specific format.
--- a/langchain_core/output_parsers/json.py
+++ b/langchain_core/output_parsers/json.py
@@ -21,6 +21,7 @@ from langchain_core.utils.json import (
     parse_json_markdown,
     parse_partial_json,
 )
+from langchain_core.v1.messages import AIMessage
 
 # Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
 PydanticBaseModel = Union[BaseModel, pydantic.BaseModel]
@@ -53,7 +54,9 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
             return pydantic_object.schema()
         return None
 
-    def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
+    def parse_result(
+        self, result: Union[list[Generation], AIMessage], *, partial: bool = False
+    ) -> Any:
         """Parse the result of an LLM call to a JSON object.
 
         Args:
@@ -70,7 +73,7 @@
         Raises:
             OutputParserException: If the output is not valid JSON.
         """
-        text = result[0].text
+        text = result.text if isinstance(result, AIMessage) else result[0].text
         text = text.strip()
         if partial:
             try:
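The corresponding JsonOutputParser behavior, under the same .text assumption:

.. code-block:: python

    from langchain_core.output_parsers import JsonOutputParser
    from langchain_core.v1.messages import AIMessage

    parser = JsonOutputParser()
    parser.parse_result(AIMessage(content='{"answer": 4}'))  # -> {'answer': 4}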