langchain-core 0.3.76__py3-none-any.whl → 0.3.78__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this version of langchain-core has been flagged as a potentially problematic release.
- langchain_core/_api/beta_decorator.py +6 -5
- langchain_core/_api/deprecation.py +11 -11
- langchain_core/callbacks/base.py +17 -11
- langchain_core/callbacks/manager.py +2 -2
- langchain_core/callbacks/usage.py +2 -2
- langchain_core/chat_history.py +26 -16
- langchain_core/document_loaders/langsmith.py +1 -1
- langchain_core/indexing/api.py +31 -31
- langchain_core/language_models/chat_models.py +4 -2
- langchain_core/language_models/fake_chat_models.py +5 -2
- langchain_core/language_models/llms.py +3 -1
- langchain_core/load/serializable.py +1 -1
- langchain_core/messages/ai.py +22 -10
- langchain_core/messages/base.py +30 -16
- langchain_core/messages/chat.py +4 -1
- langchain_core/messages/function.py +9 -5
- langchain_core/messages/human.py +11 -4
- langchain_core/messages/modifier.py +1 -0
- langchain_core/messages/system.py +9 -2
- langchain_core/messages/tool.py +27 -16
- langchain_core/messages/utils.py +97 -83
- langchain_core/outputs/chat_generation.py +10 -6
- langchain_core/prompt_values.py +6 -2
- langchain_core/prompts/chat.py +6 -3
- langchain_core/prompts/few_shot.py +4 -1
- langchain_core/runnables/base.py +14 -13
- langchain_core/runnables/graph.py +4 -1
- langchain_core/runnables/graph_ascii.py +1 -1
- langchain_core/runnables/graph_mermaid.py +27 -10
- langchain_core/runnables/retry.py +35 -18
- langchain_core/stores.py +6 -6
- langchain_core/tools/base.py +7 -5
- langchain_core/tools/convert.py +2 -2
- langchain_core/tools/simple.py +1 -5
- langchain_core/tools/structured.py +0 -10
- langchain_core/tracers/event_stream.py +13 -15
- langchain_core/utils/aiter.py +1 -1
- langchain_core/utils/function_calling.py +13 -8
- langchain_core/utils/iter.py +1 -1
- langchain_core/utils/json.py +7 -1
- langchain_core/utils/json_schema.py +145 -39
- langchain_core/utils/pydantic.py +6 -5
- langchain_core/utils/utils.py +1 -1
- langchain_core/vectorstores/in_memory.py +5 -5
- langchain_core/version.py +1 -1
- {langchain_core-0.3.76.dist-info → langchain_core-0.3.78.dist-info}/METADATA +8 -18
- {langchain_core-0.3.76.dist-info → langchain_core-0.3.78.dist-info}/RECORD +49 -49
- {langchain_core-0.3.76.dist-info → langchain_core-0.3.78.dist-info}/WHEEL +0 -0
- {langchain_core-0.3.76.dist-info → langchain_core-0.3.78.dist-info}/entry_points.txt +0 -0
langchain_core/messages/utils.py
CHANGED
@@ -5,6 +5,7 @@ Some examples of what you can do with these functions include:
 * Convert messages to strings (serialization)
 * Convert messages from dicts to Message objects (deserialization)
 * Filter messages from a list of messages based on name, type or id etc.
+
 """

 from __future__ import annotations

@@ -91,13 +92,14 @@ AnyMessage = Annotated[
 def get_buffer_string(
     messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI"
 ) -> str:
-    r"""Convert a sequence of Messages to strings and concatenate them into one string.
+    r"""Convert a sequence of messages to strings and concatenate them into one string.

     Args:
         messages: Messages to be converted to strings.
-        human_prefix: The prefix to prepend to contents of HumanMessages.
-            Default is "Human".
-        ai_prefix: The prefix to prepend to contents of AIMessages. Default is "AI".
+        human_prefix: The prefix to prepend to contents of ``HumanMessage``s.
+            Default is ``'Human'``.
+        ai_prefix: The prefix to prepend to contents of ``AIMessage``. Default is
+            ``'AI'``.

     Returns:
         A single string concatenation of all input messages.

@@ -176,19 +178,20 @@ def _message_from_dict(message: dict) -> BaseMessage:


 def messages_from_dict(messages: Sequence[dict]) -> list[BaseMessage]:
-    """Convert a sequence of messages from dicts to Message objects.
+    """Convert a sequence of messages from dicts to ``Message`` objects.

     Args:
         messages: Sequence of messages (as dicts) to convert.

     Returns:
         list of messages (BaseMessages).
+
     """
     return [_message_from_dict(m) for m in messages]


 def message_chunk_to_message(chunk: BaseMessage) -> BaseMessage:
-    """Convert a message chunk to a Message.
+    """Convert a message chunk to a ``Message``.

     Args:
         chunk: Message chunk to convert.

@@ -221,10 +224,10 @@ def _create_message_from_message_type(
     id: Optional[str] = None,
     **additional_kwargs: Any,
 ) -> BaseMessage:
-    """Create a message from a message type and content string.
+    """Create a message from a ``Message`` type and content string.

     Args:
-        message_type: (str) the type of the message (e.g., "human", "ai", etc.).
+        message_type: (str) the type of the message (e.g., ``'human'``, ``'ai'``, etc.).
         content: (str) the content string.
         name: (str) the name of the message. Default is None.
         tool_call_id: (str) the tool call id. Default is None.

@@ -236,8 +239,9 @@ def _create_message_from_message_type(
         a message of the appropriate type.

     Raises:
-        ValueError: if the message type is not one of "human", "user", "ai",
-            "assistant", "function", "tool", "system", or "developer".
+        ValueError: if the message type is not one of ``'human'``, ``'user'``, ``'ai'``,
+            ``'assistant'``, ``'function'``, ``'tool'``, ``'system'``, or
+            ``'developer'``.
     """
     kwargs: dict[str, Any] = {}
     if name is not None:

@@ -286,6 +290,9 @@ def _create_message_from_message_type(
         message = FunctionMessage(content=content, **kwargs)
     elif message_type == "tool":
         artifact = kwargs.get("additional_kwargs", {}).pop("artifact", None)
+        status = kwargs.get("additional_kwargs", {}).pop("status", None)
+        if status is not None:
+            kwargs["status"] = status
         message = ToolMessage(content=content, artifact=artifact, **kwargs)
     elif message_type == "remove":
         message = RemoveMessage(**kwargs)

@@ -300,15 +307,15 @@ def _create_message_from_message_type(


 def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
-    """Instantiate a Message from a variety of message formats.
+    """Instantiate a ``Message`` from a variety of message formats.

     The message format can be one of the following:

-    - BaseMessagePromptTemplate
-    - BaseMessage
-    - 2-tuple of (role string, template); e.g., ("human", "{user_input}")
+    - ``BaseMessagePromptTemplate``
+    - ``BaseMessage``
+    - 2-tuple of (role string, template); e.g., (``'human'``, ``'{user_input}'``)
     - dict: a message dict with role and content keys
-    - string: shorthand for ("human", template); e.g., "{user_input}"
+    - string: shorthand for (``'human'``, template); e.g., ``'{user_input}'``

     Args:
         message: a representation of a message in one of the supported formats.

@@ -319,6 +326,7 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
     Raises:
         NotImplementedError: if the message type is not supported.
         ValueError: if the message dict does not contain the required keys.
+
     """
     if isinstance(message, BaseMessage):
         message_ = message

@@ -364,6 +372,7 @@ def convert_to_messages(

     Returns:
         list of messages (BaseMessages).
+
     """
     # Import here to avoid circular imports
     from langchain_core.prompt_values import PromptValue  # noqa: PLC0415

@@ -414,36 +423,36 @@ def filter_messages(
     exclude_ids: Optional[Sequence[str]] = None,
     exclude_tool_calls: Optional[Sequence[str] | bool] = None,
 ) -> list[BaseMessage]:
-    """Filter messages based on name, type or id.
+    """Filter messages based on ``name``, ``type`` or ``id``.

     Args:
         messages: Sequence Message-like objects to filter.
         include_names: Message names to include. Default is None.
         exclude_names: Messages names to exclude. Default is None.
-        include_types: Message types to include. Can be specified as string names
-            (e.g. "system", "human", "ai", ...) or as BaseMessage classes (e.g.
-            SystemMessage, HumanMessage, AIMessage, ...). Default is None.
-        exclude_types: Message types to exclude. Can be specified as string names
-            (e.g. "system", "human", "ai", ...) or as BaseMessage classes (e.g.
-            SystemMessage, HumanMessage, AIMessage, ...). Default is None.
+        include_types: Message types to include. Can be specified as string names
+            (e.g. ``'system'``, ``'human'``, ``'ai'``, ...) or as ``BaseMessage``
+            classes (e.g. ``SystemMessage``, ``HumanMessage``, ``AIMessage``, ...).
+            Default is None.
+        exclude_types: Message types to exclude. Can be specified as string names
+            (e.g. ``'system'``, ``'human'``, ``'ai'``, ...) or as ``BaseMessage``
+            classes (e.g. ``SystemMessage``, ``HumanMessage``, ``AIMessage``, ...).
+            Default is None.
         include_ids: Message IDs to include. Default is None.
         exclude_ids: Message IDs to exclude. Default is None.
         exclude_tool_calls: Tool call IDs to exclude. Default is None.
             Can be one of the following:
-                - True: all AIMessages with tool calls and all ToolMessages
-                    will be excluded.
+                - ``True``: all ``AIMessage``s with tool calls and all
+                  ``ToolMessage``s will be excluded.
             - a sequence of tool call IDs to exclude:
-                - ToolMessages with the corresponding tool call ID will be excluded.
-                - The ``tool_calls`` in the AIMessage will be updated to exclude
-                    matching tool calls. If all tool calls are filtered from an
-                    AIMessage, the whole message is excluded.
+                - ``ToolMessage``s with the corresponding tool call ID will be
+                  excluded.
+                - The ``tool_calls`` in the AIMessage will be updated to exclude
+                  matching tool calls. If all ``tool_calls`` are filtered from an
+                  AIMessage, the whole message is excluded.

     Returns:
-        A list of Messages that meets at least one of the incl_* conditions and none
-        of the excl_* conditions. If not incl_* conditions are specified then
+        A list of Messages that meets at least one of the ``incl_*`` conditions and none
+        of the ``excl_*`` conditions. If not ``incl_*`` conditions are specified then
         anything that is not explicitly excluded will be included.

     Raises:
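The reworked ``filter_messages`` docstring reads best against a concrete call; a minimal sketch (the message names and ids are illustrative, not from this release):

    from langchain_core.messages import (
        AIMessage,
        HumanMessage,
        SystemMessage,
        filter_messages,
    )

    messages = [
        SystemMessage("you are a helpful assistant", id="sys-1"),
        HumanMessage("what's your name", id="msg-1", name="example_user"),
        AIMessage("steve-o", id="msg-2", name="example_assistant"),
    ]

    # Keep only human and system messages, excluding the AI reply by type.
    filtered = filter_messages(messages, include_types=[HumanMessage, SystemMessage])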
@@ -555,13 +564,14 @@ def merge_message_runs(
 ) -> list[BaseMessage]:
     r"""Merge consecutive Messages of the same type.

-    **NOTE**: ToolMessages are not merged, as each has a distinct tool call id that
-    can't be merged.
+    .. note::
+        ToolMessages are not merged, as each has a distinct tool call id that can't be
+        merged.

     Args:
         messages: Sequence Message-like objects to merge.
         chunk_separator: Specify the string to be inserted between message chunks.
-            Default is "\n".
+            Default is ``'\n'``.

     Returns:
         list of BaseMessages with consecutive runs of message types merged into single

@@ -702,8 +712,8 @@ def trim_messages(
 ) -> list[BaseMessage]:
     r"""Trim messages to be below a token count.

-    trim_messages can be used to reduce the size of a chat history to a specified token
-    count or specified message count.
+    ``trim_messages`` can be used to reduce the size of a chat history to a specified
+    token count or specified message count.

     In either case, if passing the trimmed chat history back into a chat model
     directly, the resulting chat history should usually satisfy the following

@@ -711,13 +721,13 @@ def trim_messages(

     1. The resulting chat history should be valid. Most chat models expect that chat
        history starts with either (1) a ``HumanMessage`` or (2) a ``SystemMessage``
-       followed by a ``HumanMessage``. To achieve this, set ``start_on="human"``.
+       followed by a ``HumanMessage``. To achieve this, set ``start_on='human'``.
        In addition, generally a ``ToolMessage`` can only appear after an ``AIMessage``
        that involved a tool call.
        Please see the following link for more information about messages:
        https://python.langchain.com/docs/concepts/#messages
     2. It includes recent messages and drops old messages in the chat history.
-       To achieve this set the ``strategy="last"``.
+       To achieve this set the ``strategy='last'``.
     3. Usually, the new chat history should include the ``SystemMessage`` if it
        was present in the original chat history since the ``SystemMessage`` includes
        special instructions to the chat model. The ``SystemMessage`` is almost always

@@ -731,67 +741,67 @@ def trim_messages(
     Args:
         messages: Sequence of Message-like objects to trim.
         max_tokens: Max token count of trimmed messages.
-        token_counter: Function or llm for counting tokens in a BaseMessage or a
-            list of BaseMessage. If a BaseLanguageModel is passed in then
-            BaseLanguageModel.get_num_tokens_from_messages() will be used.
-            Set to len to count the number of **messages** in the chat history.
+        token_counter: Function or llm for counting tokens in a ``BaseMessage`` or a
+            list of ``BaseMessage``. If a ``BaseLanguageModel`` is passed in then
+            ``BaseLanguageModel.get_num_tokens_from_messages()`` will be used.
+            Set to ``len`` to count the number of **messages** in the chat history.

             .. note::
-                Use count_tokens_approximately to get fast, approximate token counts.
-                This is recommended for using trim_messages on the hot path, where
+                Use ``count_tokens_approximately`` to get fast, approximate token
+                counts.
+                This is recommended for using ``trim_messages`` on the hot path, where
                 exact token counting is not necessary.

         strategy: Strategy for trimming.
-
-            - "first": Keep the first <= n_count tokens of the messages.
-            - "last": Keep the last <= n_count tokens of the messages.
-
+            - ``'first'``: Keep the first ``<= n_count`` tokens of the messages.
+            - ``'last'``: Keep the last ``<= n_count`` tokens of the messages.
             Default is ``'last'``.
         allow_partial: Whether to split a message if only part of the message can be
-            included. If ``strategy="last"`` then the last partial contents of a message
-            are included. If ``strategy="first"`` then the first partial contents of a
+            included. If ``strategy='last'`` then the last partial contents of a message
+            are included. If ``strategy='first'`` then the first partial contents of a
             message are included.
             Default is False.
         end_on: The message type to end on. If specified then every message after the
-            last occurrence of this type is ignored. If ``strategy="last"`` then this
+            last occurrence of this type is ignored. If ``strategy='last'`` then this
             is done before we attempt to get the last ``max_tokens``. If
-            ``strategy="first"`` then this is done after we get the first
-            ``max_tokens``. Can be specified as string names (e.g. "system", "human",
-            "ai", ...) or as BaseMessage classes (e.g. SystemMessage, HumanMessage,
-            AIMessage, ...). Can be a single type or a list of types.
+            ``strategy='first'`` then this is done after we get the first
+            ``max_tokens``. Can be specified as string names (e.g. ``'system'``,
+            ``'human'``, ``'ai'``, ...) or as ``BaseMessage`` classes (e.g.
+            ``SystemMessage``, ``HumanMessage``, ``AIMessage``, ...). Can be a single
+            type or a list of types.
             Default is None.
         start_on: The message type to start on. Should only be specified if
-            ``strategy="last"``. If specified then every message before
+            ``strategy='last'``. If specified then every message before
             the first occurrence of this type is ignored. This is done after we trim
             the initial messages to the last ``max_tokens``. Does not
-            apply to a SystemMessage at index 0 if ``include_system=True``. Can be
-            specified as string names (e.g. "system", "human", "ai", ...) or as
-            BaseMessage classes (e.g. SystemMessage, HumanMessage, AIMessage, ...).
-            Can be a single type or a list of types.
+            apply to a ``SystemMessage`` at index 0 if ``include_system=True``. Can be
+            specified as string names (e.g. ``'system'``, ``'human'``, ``'ai'``, ...) or
+            as ``BaseMessage`` classes (e.g. ``SystemMessage``, ``HumanMessage``,
+            ``AIMessage``, ...). Can be a single type or a list of types.
             Default is None.
         include_system: Whether to keep the SystemMessage if there is one at index 0.
             Should only be specified if ``strategy="last"``.
             Default is False.
         text_splitter: Function or ``langchain_text_splitters.TextSplitter`` for
             splitting the string contents of a message. Only used if
-            ``allow_partial=True``. If ``strategy="last"`` then the last split tokens
-            from a partial message will be included. if ``strategy="first"`` then the
+            ``allow_partial=True``. If ``strategy='last'`` then the last split tokens
+            from a partial message will be included. if ``strategy='first'`` then the
             first split tokens from a partial message will be included. Token splitter
             assumes that separators are kept, so that split contents can be directly
             concatenated to recreate the original text. Defaults to splitting on
             newlines.

     Returns:
-        list of trimmed BaseMessages.
+        list of trimmed ``BaseMessage``.

     Raises:
         ValueError: if two incompatible arguments are specified or an unrecognized
             ``strategy`` is specified.

     Example:
-        Trim chat history based on token count, keeping the SystemMessage if
-        present, and ensuring that the chat history starts with a HumanMessage (
-        or a SystemMessage followed by a HumanMessage).
+        Trim chat history based on token count, keeping the ``SystemMessage`` if
+        present, and ensuring that the chat history starts with a ``HumanMessage`` (
+        or a ``SystemMessage`` followed by a ``HumanMessage``).

         .. code-block:: python

@@ -846,9 +856,9 @@ def trim_messages(
             HumanMessage(content="what do you call a speechless parrot"),
         ]

-        Trim chat history based on the message count, keeping the SystemMessage if
-        present, and ensuring that the chat history starts with a HumanMessage (
-        or a SystemMessage followed by a HumanMessage).
+        Trim chat history based on the message count, keeping the ``SystemMessage`` if
+        present, and ensuring that the chat history starts with a ``HumanMessage`` (
+        or a ``SystemMessage`` followed by a ``HumanMessage``).

         trim_messages(
             messages,
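Putting the ``token_counter`` guidance above into a runnable sketch: ``len`` counts messages, while ``count_tokens_approximately`` (from ``langchain_core.messages.utils``) gives the fast approximate counts the note recommends. The messages here are illustrative:

    from langchain_core.messages import (
        AIMessage,
        HumanMessage,
        SystemMessage,
        trim_messages,
    )
    from langchain_core.messages.utils import count_tokens_approximately

    messages = [
        SystemMessage("you're a good assistant"),
        HumanMessage("what do you call a speechless parrot"),
        AIMessage("a polygon"),
        HumanMessage("and a speechless crow?"),
    ]

    # Message-count trimming: keep the system message plus the latest human turn.
    trimmed = trim_messages(
        messages,
        max_tokens=2,
        token_counter=len,
        strategy="last",
        start_on="human",
        include_system=True,
    )

    # Approximate token-based trimming, recommended for the hot path.
    trimmed_by_tokens = trim_messages(
        messages,
        max_tokens=50,
        token_counter=count_tokens_approximately,
        strategy="last",
    )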
@@ -1030,6 +1040,7 @@ def convert_to_openai_messages(
     messages: Union[MessageLikeRepresentation, Sequence[MessageLikeRepresentation]],
     *,
     text_format: Literal["string", "block"] = "string",
+    include_id: bool = False,
 ) -> Union[dict, list[dict]]:
     """Convert LangChain messages into OpenAI message dicts.

@@ -1037,17 +1048,18 @@ def convert_to_openai_messages(
         messages: Message-like object or iterable of objects whose contents are
             in OpenAI, Anthropic, Bedrock Converse, or VertexAI formats.
         text_format: How to format string or text block contents:
-            - "string":
-                If a message has a string content, this is left as a string. If
-                a message has content blocks that are all of type "text", these
-                are joined with a newline to make a single string. If a message
-                has content blocks and at least one isn't of type "text", then
-                all blocks are left as dicts.
-            - "block":
-                If a message has a string content, this is turned into a list
-                with a single content block of type "text". If a message
-                has content blocks these are left as is.
+            - ``'string'``:
+                If a message has a string content, this is left as a string. If
+                a message has content blocks that are all of type ``'text'``, these
+                are joined with a newline to make a single string. If a message has
+                content blocks and at least one isn't of type ``'text'``, then
+                all blocks are left as dicts.
+            - ``'block'``:
+                If a message has a string content, this is turned into a list
+                with a single content block of type ``'text'``. If a message has
+                content blocks these are left as is.
+        include_id: Whether to include message ids in the openai messages, if they
+            are present in the source messages.

     Raises:
         ValueError: if an unrecognized ``text_format`` is specified, or if a message

@@ -1136,6 +1148,8 @@ def convert_to_openai_messages(
             oai_msg["refusal"] = message.additional_kwargs["refusal"]
         if isinstance(message, ToolMessage):
             oai_msg["tool_call_id"] = message.tool_call_id
+        if include_id and message.id:
+            oai_msg["id"] = message.id

         if not message.content:
             content = "" if text_format == "string" else []

@@ -1376,7 +1390,7 @@ def convert_to_openai_messages(
                     },
                 }
             )
-        elif block.get("type") == "thinking":
+        elif block.get("type") in ["thinking", "reasoning"]:
             content.append(block)
         else:
             err = (
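Beyond the docstring edits, this file carries two behavioral changes: dict-form tool messages now keep their ``status``, and ``convert_to_openai_messages`` gains ``include_id``. A sketch of both (contents illustrative):

    from langchain_core.messages import (
        AIMessage,
        convert_to_messages,
        convert_to_openai_messages,
    )

    # New: the "status" key of a dict-form tool message survives conversion.
    tool_msg = convert_to_messages(
        [{"role": "tool", "content": "boom", "tool_call_id": "call-1", "status": "error"}]
    )[0]
    assert tool_msg.status == "error"

    # New: include_id=True carries source message ids into the OpenAI dicts.
    oai = convert_to_openai_messages([AIMessage("a polygon", id="msg-2")], include_id=True)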
langchain_core/outputs/chat_generation.py
CHANGED

@@ -15,14 +15,14 @@ from langchain_core.utils._merge import merge_dicts
 class ChatGeneration(Generation):
     """A single chat generation output.

-    A subclass of Generation that represents the response from a chat model
+    A subclass of ``Generation`` that represents the response from a chat model
     that generates chat messages.

-    The message attribute is a structured representation of the chat message.
-    Most of the time, the message will be of type AIMessage.
+    The ``message`` attribute is a structured representation of the chat message.
+    Most of the time, the message will be of type ``AIMessage``.

     Users working with chat models will usually access information via either
-    AIMessage (returned from runnable interfaces) or LLMResult (available
+    ``AIMessage`` (returned from runnable interfaces) or ``LLMResult`` (available
     via callbacks).
     """

@@ -31,6 +31,7 @@ class ChatGeneration(Generation):

     .. warning::
         SHOULD NOT BE SET DIRECTLY!
+
     """
     message: BaseMessage
     """The message output by the chat model."""

@@ -47,6 +48,9 @@ class ChatGeneration(Generation):

         Returns:
             The values of the object with the text attribute set.
+
+        Raises:
+            ValueError: If the message is not a string or a list.
         """
         text = ""
         if isinstance(self.message.content, str):

@@ -66,9 +70,9 @@ class ChatGeneration(Generation):


 class ChatGenerationChunk(ChatGeneration):
-    """ChatGeneration chunk.
+    """``ChatGeneration`` chunk.

-    ChatGeneration chunks can be concatenated with other ChatGeneration chunks.
+    ``ChatGeneration`` chunks can be concatenated with other ``ChatGeneration`` chunks.
     """

     message: BaseMessageChunk
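A quick sketch of the concatenation behavior the ``ChatGenerationChunk`` docstring describes (contents illustrative):

    from langchain_core.messages import AIMessageChunk
    from langchain_core.outputs import ChatGenerationChunk

    # Chunks merge with +, concatenating the underlying message chunks.
    merged = ChatGenerationChunk(
        message=AIMessageChunk(content="Hello")
    ) + ChatGenerationChunk(message=AIMessageChunk(content=", world"))
    assert merged.text == "Hello, world"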
langchain_core/prompt_values.py
CHANGED
@@ -113,8 +113,12 @@ class ImageURL(TypedDict, total=False):
     """Image URL."""

     detail: Literal["auto", "low", "high"]
-    """Specifies the detail level of the image. Defaults to "auto".
-    Can be "auto", "low", or "high"."""
+    """Specifies the detail level of the image. Defaults to ``'auto'``.
+    Can be ``'auto'``, ``'low'``, or ``'high'``.
+
+    This follows OpenAI's Chat Completion API's image URL format.
+
+    """

     url: str
     """Either a URL of the image or the base64 encoded image data."""
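A sketch of the ``ImageURL`` shape the expanded docstring describes (the URL is illustrative):

    from langchain_core.prompt_values import ImageURL

    image: ImageURL = {
        "url": "https://example.com/parrot.png",  # or base64-encoded image data
        "detail": "low",  # one of "auto", "low", "high"
    }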
langchain_core/prompts/chat.py
CHANGED
@@ -262,7 +262,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
     def from_template_file(
         cls,
         template_file: Union[str, Path],
-        input_variables: list[str],
+        input_variables: list[str],  # noqa: ARG003 # Deprecated
         **kwargs: Any,
     ) -> Self:
         """Create a class from a template file.

@@ -275,7 +275,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
         Returns:
             A new instance of this class.
         """
-        prompt = PromptTemplate.from_file(template_file, input_variables)
+        prompt = PromptTemplate.from_file(template_file)
         return cls(prompt=prompt, **kwargs)

     @abstractmethod

@@ -813,7 +813,10 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
             )

             prompt_value = template.invoke(
-                {"name": "Bob", "user_input": "What is your name?"}
+                {
+                    "name": "Bob",
+                    "user_input": "What is your name?",
+                }
             )
             # Output:
             # ChatPromptValue(
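The reindented ``invoke`` example from the ``ChatPromptTemplate`` docstring, assembled into a runnable sketch:

    from langchain_core.prompts import ChatPromptTemplate

    template = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful AI bot. Your name is {name}."),
            ("human", "{user_input}"),
        ]
    )
    prompt_value = template.invoke(
        {
            "name": "Bob",
            "user_input": "What is your name?",
        }
    )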
langchain_core/prompts/few_shot.py
CHANGED

@@ -281,7 +281,10 @@ class FewShotChatMessagePromptTemplate(
             ]

             example_prompt = ChatPromptTemplate.from_messages(
-                [("human", "What is {input}?"), ("ai", "{output}")]
+                [
+                    ("human", "What is {input}?"),
+                    ("ai", "{output}"),
+                ]
             )

             few_shot_prompt = FewShotChatMessagePromptTemplate(
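Likewise, the reindented ``FewShotChatMessagePromptTemplate`` example as a runnable sketch (example data illustrative):

    from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate

    examples = [
        {"input": "2+2", "output": "4"},
        {"input": "2+3", "output": "5"},
    ]
    example_prompt = ChatPromptTemplate.from_messages(
        [
            ("human", "What is {input}?"),
            ("ai", "{output}"),
        ]
    )
    few_shot_prompt = FewShotChatMessagePromptTemplate(
        example_prompt=example_prompt,
        examples=examples,
    )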
langchain_core/runnables/base.py
CHANGED
@@ -884,18 +884,18 @@ class Runnable(ABC, Generic[Input, Output]):
         e.g., if the underlying ``Runnable`` uses an API which supports a batch mode.

         Args:
-            inputs: A list of inputs to the Runnable.
-            config: A config to use when invoking the Runnable. The config supports
-                standard keys like 'tags', 'metadata' for
-                tracing purposes, 'max_concurrency' for controlling how much work
-                to do in parallel, and other keys. Please refer to the
-                RunnableConfig for more details. Defaults to None.
-            return_exceptions: Whether to return exceptions instead of raising them.
-                Defaults to False.
-            **kwargs: Additional keyword arguments to pass to the Runnable.
+            inputs: A list of inputs to the ``Runnable``.
+            config: A config to use when invoking the ``Runnable``. The config supports
+                standard keys like ``'tags'``, ``'metadata'`` for
+                tracing purposes, ``'max_concurrency'`` for controlling how much work
+                to do in parallel, and other keys. Please refer to the
+                ``RunnableConfig`` for more details. Defaults to None.
+            return_exceptions: Whether to return exceptions instead of raising them.
+                Defaults to False.
+            **kwargs: Additional keyword arguments to pass to the ``Runnable``.

         Returns:
-            A list of outputs from the Runnable.
+            A list of outputs from the ``Runnable``.

         """
         if not inputs:

@@ -1397,7 +1397,10 @@ class Runnable(ABC, Generic[Input, Output]):
         .. code-block:: python

             template = ChatPromptTemplate.from_messages(
-                [("system", "You are Cat Agent 007"), ("human", "{question}")]
+                [
+                    ("system", "You are Cat Agent 007"),
+                    ("human", "{question}"),
+                ]
             ).with_config({"run_name": "my_template", "tags": ["my_template"]})


@@ -2531,8 +2534,6 @@ class Runnable(ABC, Generic[Input, Output]):
             name: The name of the tool. Defaults to None.
             description: The description of the tool. Defaults to None.
             arg_types: A dictionary of argument names to types. Defaults to None.
-            message_version: Version of ``ToolMessage`` to return given
-                :class:`~langchain_core.messages.content_blocks.ToolCall` input.

         Returns:
             A ``BaseTool`` instance.
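A sketch of ``batch`` with one of the standard config keys the rewritten docstring lists (the runnable is illustrative):

    from langchain_core.runnables import RunnableLambda

    runnable = RunnableLambda(lambda x: x + 1)

    # 'max_concurrency' caps parallel work, per the docstring above.
    outputs = runnable.batch([1, 2, 3], config={"max_concurrency": 2})
    assert outputs == [2, 3, 4]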
langchain_core/runnables/graph.py
CHANGED

@@ -614,7 +614,6 @@ class Graph:

         Returns:
             The Mermaid syntax string.
-
         """
         # Import locally to prevent circular import
         from langchain_core.runnables.graph_mermaid import draw_mermaid  # noqa: PLC0415

@@ -648,6 +647,7 @@ class Graph:
         max_retries: int = 1,
         retry_delay: float = 1.0,
         frontmatter_config: Optional[dict[str, Any]] = None,
+        base_url: Optional[str] = None,
     ) -> bytes:
         """Draw the graph as a PNG image using Mermaid.

@@ -683,6 +683,8 @@ class Graph:
                     "themeVariables": { "primaryColor": "#e2e2e2"},
                 }
             }
+            base_url: The base URL of the Mermaid server for rendering via API.
+                Defaults to None.

         Returns:
             The PNG image as bytes.

@@ -707,6 +709,7 @@ class Graph:
             padding=padding,
             max_retries=max_retries,
             retry_delay=retry_delay,
+            base_url=base_url,
         )
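A sketch of the new ``base_url`` parameter threaded through ``draw_mermaid_png``; this assumes the public mermaid.ink renderer and performs a network call:

    from langchain_core.runnables import RunnableLambda

    chain = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)

    # base_url (new in this release) selects the Mermaid server used for rendering.
    png_bytes = chain.get_graph().draw_mermaid_png(base_url="https://mermaid.ink")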