langchain-core 1.0.0a4__py3-none-any.whl → 1.0.0a5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/_api/beta_decorator.py +6 -5
- langchain_core/_api/deprecation.py +11 -11
- langchain_core/callbacks/manager.py +2 -2
- langchain_core/callbacks/usage.py +2 -2
- langchain_core/document_loaders/langsmith.py +1 -1
- langchain_core/indexing/api.py +30 -30
- langchain_core/language_models/chat_models.py +1 -1
- langchain_core/language_models/fake_chat_models.py +5 -2
- langchain_core/load/serializable.py +1 -1
- langchain_core/messages/__init__.py +9 -15
- langchain_core/messages/ai.py +75 -9
- langchain_core/messages/base.py +75 -37
- langchain_core/messages/block_translators/__init__.py +11 -1
- langchain_core/messages/block_translators/anthropic.py +143 -128
- langchain_core/messages/block_translators/bedrock_converse.py +15 -1
- langchain_core/messages/block_translators/langchain_v0.py +180 -43
- langchain_core/messages/block_translators/openai.py +224 -42
- langchain_core/messages/chat.py +4 -1
- langchain_core/messages/content.py +56 -112
- langchain_core/messages/function.py +9 -5
- langchain_core/messages/human.py +6 -2
- langchain_core/messages/modifier.py +1 -0
- langchain_core/messages/system.py +9 -2
- langchain_core/messages/tool.py +31 -14
- langchain_core/messages/utils.py +89 -83
- langchain_core/outputs/chat_generation.py +10 -6
- langchain_core/prompt_values.py +6 -2
- langchain_core/prompts/chat.py +6 -3
- langchain_core/prompts/few_shot.py +4 -1
- langchain_core/runnables/base.py +4 -1
- langchain_core/runnables/graph_ascii.py +1 -1
- langchain_core/tools/base.py +1 -2
- langchain_core/tools/convert.py +1 -1
- langchain_core/utils/aiter.py +1 -1
- langchain_core/utils/function_calling.py +5 -6
- langchain_core/utils/iter.py +1 -1
- langchain_core/vectorstores/in_memory.py +5 -5
- langchain_core/version.py +1 -1
- {langchain_core-1.0.0a4.dist-info → langchain_core-1.0.0a5.dist-info}/METADATA +8 -8
- {langchain_core-1.0.0a4.dist-info → langchain_core-1.0.0a5.dist-info}/RECORD +42 -42
- {langchain_core-1.0.0a4.dist-info → langchain_core-1.0.0a5.dist-info}/WHEEL +0 -0
- {langchain_core-1.0.0a4.dist-info → langchain_core-1.0.0a5.dist-info}/entry_points.txt +0 -0
langchain_core/messages/utils.py
CHANGED
@@ -5,6 +5,7 @@ Some examples of what you can do with these functions include:
 * Convert messages to strings (serialization)
 * Convert messages from dicts to Message objects (deserialization)
 * Filter messages from a list of messages based on name, type or id etc.
+
 """

 from __future__ import annotations
@@ -96,13 +97,14 @@ AnyMessage = Annotated[
 def get_buffer_string(
     messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI"
 ) -> str:
-    r"""Convert a sequence of
+    r"""Convert a sequence of messages to strings and concatenate them into one string.

     Args:
         messages: Messages to be converted to strings.
-        human_prefix: The prefix to prepend to contents of
-            Default is
-        ai_prefix: The prefix to prepend to contents of
+        human_prefix: The prefix to prepend to contents of ``HumanMessage``s.
+            Default is ``'Human'``.
+        ai_prefix: The prefix to prepend to contents of ``AIMessage``. Default is
+            ``'AI'``.

     Returns:
         A single string concatenation of all input messages.
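For reference, a minimal sketch of the behavior the rewritten docstring describes, using the public ``get_buffer_string`` export (illustrative, not part of the diff):

    from langchain_core.messages import AIMessage, HumanMessage, get_buffer_string

    history = [
        HumanMessage(content="Hi, how are you?"),
        AIMessage(content="Doing well, thanks!"),
    ]

    # Each message renders as "<prefix>: <content>"; messages are joined by newlines.
    print(get_buffer_string(history, human_prefix="User", ai_prefix="Bot"))
    # User: Hi, how are you?
    # Bot: Doing well, thanks!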
@@ -181,19 +183,20 @@ def _message_from_dict(message: dict) -> BaseMessage:


 def messages_from_dict(messages: Sequence[dict]) -> list[BaseMessage]:
-    """Convert a sequence of messages from dicts to Message objects.
+    """Convert a sequence of messages from dicts to ``Message`` objects.

     Args:
         messages: Sequence of messages (as dicts) to convert.

     Returns:
         list of messages (BaseMessages).
+
     """
     return [_message_from_dict(m) for m in messages]


 def message_chunk_to_message(chunk: BaseMessage) -> BaseMessage:
-    """Convert a message chunk to a
+    """Convert a message chunk to a ``Message``.

     Args:
         chunk: Message chunk to convert.
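A short round-trip sketch of the (de)serialization helpers documented above, assuming the public ``messages_to_dict`` counterpart (illustrative only):

    from langchain_core.messages import (
        HumanMessage,
        messages_from_dict,
        messages_to_dict,
    )

    original = [HumanMessage(content="hello")]
    as_dicts = messages_to_dict(original)    # e.g. [{"type": "human", "data": {...}}]
    restored = messages_from_dict(as_dicts)  # back to BaseMessage objects

    assert restored[0].content == "hello"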
@@ -226,10 +229,10 @@ def _create_message_from_message_type(
     id: Optional[str] = None,
     **additional_kwargs: Any,
 ) -> BaseMessage:
-    """Create a message from a
+    """Create a message from a ``Message`` type and content string.

     Args:
-        message_type: (str) the type of the message (e.g.,
+        message_type: (str) the type of the message (e.g., ``'human'``, ``'ai'``, etc.).
         content: (str) the content string.
         name: (str) the name of the message. Default is None.
         tool_call_id: (str) the tool call id. Default is None.

@@ -241,8 +244,9 @@ def _create_message_from_message_type(
     a message of the appropriate type.

     Raises:
-        ValueError: if the message type is not one of
-
+        ValueError: if the message type is not one of ``'human'``, ``'user'``, ``'ai'``,
+            ``'assistant'``, ``'function'``, ``'tool'``, ``'system'``, or
+            ``'developer'``.
     """
     kwargs: dict[str, Any] = {}
     if name is not None:
@@ -308,15 +312,15 @@ def _create_message_from_message_type(


 def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
-    """Instantiate a
+    """Instantiate a ``Message`` from a variety of message formats.

     The message format can be one of the following:

-    - BaseMessagePromptTemplate
-    - BaseMessage
-    - 2-tuple of (role string, template); e.g., (
+    - ``BaseMessagePromptTemplate``
+    - ``BaseMessage``
+    - 2-tuple of (role string, template); e.g., (``'human'``, ``'{user_input}'``)
     - dict: a message dict with role and content keys
-    - string: shorthand for (
+    - string: shorthand for (``'human'``, template); e.g., ``'{user_input}'``

     Args:
         message: a representation of a message in one of the supported formats.
@@ -327,6 +331,7 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
     Raises:
         NotImplementedError: if the message type is not supported.
         ValueError: if the message dict does not contain the required keys.
+
     """
     if isinstance(message, BaseMessage):
         message_ = message

@@ -372,6 +377,7 @@ def convert_to_messages(

     Returns:
         list of messages (BaseMessages).
+
     """
     # Import here to avoid circular imports
     from langchain_core.prompt_values import PromptValue  # noqa: PLC0415
@@ -422,36 +428,36 @@ def filter_messages(
     exclude_ids: Optional[Sequence[str]] = None,
     exclude_tool_calls: Optional[Sequence[str] | bool] = None,
 ) -> list[BaseMessage]:
-    """Filter messages based on name
+    """Filter messages based on ``name``, ``type`` or ``id``.

     Args:
         messages: Sequence Message-like objects to filter.
         include_names: Message names to include. Default is None.
         exclude_names: Message names to exclude. Default is None.
-        include_types: Message types to include. Can be specified as string names
-            SystemMessage
+        include_types: Message types to include. Can be specified as string names
+            (e.g. ``'system'``, ``'human'``, ``'ai'``, ...) or as ``BaseMessage``
+            classes (e.g. ``SystemMessage``, ``HumanMessage``, ``AIMessage``, ...).
+            Default is None.
+        exclude_types: Message types to exclude. Can be specified as string names
+            (e.g. ``'system'``, ``'human'``, ``'ai'``, ...) or as ``BaseMessage``
+            classes (e.g. ``SystemMessage``, ``HumanMessage``, ``AIMessage``, ...).
+            Default is None.
         include_ids: Message IDs to include. Default is None.
         exclude_ids: Message IDs to exclude. Default is None.
         exclude_tool_calls: Tool call IDs to exclude. Default is None.
             Can be one of the following:
-                will be excluded.
+            - ``True``: all ``AIMessage``s with tool calls and all
+              ``ToolMessage``s will be excluded.
             - a sequence of tool call IDs to exclude:
-              - The ``tool_calls`` in the AIMessage will be updated to exclude
-                tool calls.
-                the whole message is excluded.
+              - ``ToolMessage``s with the corresponding tool call ID will be
+                excluded.
+              - The ``tool_calls`` in the AIMessage will be updated to exclude
+                matching tool calls. If all ``tool_calls`` are filtered from an
+                AIMessage, the whole message is excluded.

     Returns:
-        A list of Messages that meets at least one of the incl_
-        of the excl_
+        A list of Messages that meets at least one of the ``incl_*`` conditions and none
+        of the ``excl_*`` conditions. If no ``incl_*`` conditions are specified then
         anything that is not explicitly excluded will be included.

     Raises:
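A hedged sketch of the ``exclude_tool_calls=True`` behavior documented above (illustrative, not part of the diff):

    from langchain_core.messages import (
        AIMessage,
        HumanMessage,
        ToolMessage,
        filter_messages,
    )

    messages = [
        HumanMessage("what is 2 + 2?", id="1"),
        AIMessage(
            "",
            tool_calls=[{"name": "add", "args": {"a": 2, "b": 2}, "id": "call_1"}],
            id="2",
        ),
        ToolMessage("4", tool_call_id="call_1", id="3"),
        AIMessage("2 + 2 is 4.", id="4"),
    ]

    # True drops every AIMessage with tool calls and every ToolMessage.
    kept = filter_messages(messages, exclude_tool_calls=True)
    assert [m.id for m in kept] == ["1", "4"]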
@@ -563,13 +569,14 @@ def merge_message_runs(
 ) -> list[BaseMessage]:
     r"""Merge consecutive Messages of the same type.

-
-
+    .. note::
+        ToolMessages are not merged, as each has a distinct tool call id that can't be
+        merged.

     Args:
         messages: Sequence Message-like objects to merge.
         chunk_separator: Specify the string to be inserted between message chunks.
-            Default is
+            Default is ``'\n'``.

     Returns:
         list of BaseMessages with consecutive runs of message types merged into single
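A minimal sketch of the merging behavior and the default ``chunk_separator`` (illustrative, not part of the diff):

    from langchain_core.messages import HumanMessage, merge_message_runs

    messages = [
        HumanMessage("first line"),
        HumanMessage("second line"),
    ]

    merged = merge_message_runs(messages)  # string contents joined with '\n'
    assert len(merged) == 1
    assert merged[0].content == "first line\nsecond line"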
@@ -710,8 +717,8 @@ def trim_messages(
 ) -> list[BaseMessage]:
     r"""Trim messages to be below a token count.

-    trim_messages can be used to reduce the size of a chat history to a specified
-    count or specified message count.
+    ``trim_messages`` can be used to reduce the size of a chat history to a specified
+    token count or specified message count.

     In either case, if passing the trimmed chat history back into a chat model
     directly, the resulting chat history should usually satisfy the following
@@ -719,13 +726,13 @@ def trim_messages(

     1. The resulting chat history should be valid. Most chat models expect that chat
        history starts with either (1) a ``HumanMessage`` or (2) a ``SystemMessage``
-       followed by a ``HumanMessage``. To achieve this, set ``start_on=
+       followed by a ``HumanMessage``. To achieve this, set ``start_on='human'``.
        In addition, generally a ``ToolMessage`` can only appear after an ``AIMessage``
        that involved a tool call.
        Please see the following link for more information about messages:
        https://python.langchain.com/docs/concepts/#messages
     2. It includes recent messages and drops old messages in the chat history.
-       To achieve this set the ``strategy=
+       To achieve this set the ``strategy='last'``.
     3. Usually, the new chat history should include the ``SystemMessage`` if it
        was present in the original chat history since the ``SystemMessage`` includes
        special instructions to the chat model. The ``SystemMessage`` is almost always
@@ -739,67 +746,67 @@ def trim_messages(
     Args:
         messages: Sequence of Message-like objects to trim.
         max_tokens: Max token count of trimmed messages.
-        token_counter: Function or llm for counting tokens in a BaseMessage or a
-            BaseMessage
-            BaseLanguageModel.get_num_tokens_from_messages() will be used.
-            Set to
+        token_counter: Function or llm for counting tokens in a ``BaseMessage`` or a
+            list of ``BaseMessage``. If a ``BaseLanguageModel`` is passed in then
+            ``BaseLanguageModel.get_num_tokens_from_messages()`` will be used.
+            Set to ``len`` to count the number of **messages** in the chat history.

             .. note::
-                Use
+                Use ``count_tokens_approximately`` to get fast, approximate token
+                counts.
+                This is recommended for using ``trim_messages`` on the hot path, where
                 exact token counting is not necessary.

         strategy: Strategy for trimming.
-            - "last": Keep the last <= n_count tokens of the messages.
+            - ``'first'``: Keep the first ``<= n_count`` tokens of the messages.
+            - ``'last'``: Keep the last ``<= n_count`` tokens of the messages.
             Default is ``'last'``.
         allow_partial: Whether to split a message if only part of the message can be
-            included. If ``strategy=
-            are included. If ``strategy=
+            included. If ``strategy='last'`` then the last partial contents of a message
+            are included. If ``strategy='first'`` then the first partial contents of a
             message are included.
             Default is False.
         end_on: The message type to end on. If specified then every message after the
-            last occurrence of this type is ignored. If ``strategy
+            last occurrence of this type is ignored. If ``strategy='last'`` then this
             is done before we attempt to get the last ``max_tokens``. If
-            ``strategy
-            ``max_tokens``. Can be specified as string names (e.g.
-            AIMessage
+            ``strategy='first'`` then this is done after we get the first
+            ``max_tokens``. Can be specified as string names (e.g. ``'system'``,
+            ``'human'``, ``'ai'``, ...) or as ``BaseMessage`` classes (e.g.
+            ``SystemMessage``, ``HumanMessage``, ``AIMessage``, ...). Can be a single
+            type or a list of types.
             Default is None.
         start_on: The message type to start on. Should only be specified if
-            ``strategy=
+            ``strategy='last'``. If specified then every message before
             the first occurrence of this type is ignored. This is done after we trim
             the initial messages to the last ``max_tokens``. Does not
-            apply to a SystemMessage at index 0 if ``include_system=True``. Can be
-            specified as string names (e.g.
-            BaseMessage classes (e.g. SystemMessage
-            be a single type or a list of types.
+            apply to a ``SystemMessage`` at index 0 if ``include_system=True``. Can be
+            specified as string names (e.g. ``'system'``, ``'human'``, ``'ai'``, ...) or
+            as ``BaseMessage`` classes (e.g. ``SystemMessage``, ``HumanMessage``,
+            ``AIMessage``, ...). Can be a single type or a list of types.
             Default is None.
         include_system: Whether to keep the SystemMessage if there is one at index 0.
             Should only be specified if ``strategy="last"``.
             Default is False.
         text_splitter: Function or ``langchain_text_splitters.TextSplitter`` for
             splitting the string contents of a message. Only used if
-            ``allow_partial=True``. If ``strategy=
-            from a partial message will be included. if ``strategy
+            ``allow_partial=True``. If ``strategy='last'`` then the last split tokens
+            from a partial message will be included. If ``strategy='first'`` then the
             first split tokens from a partial message will be included. Token splitter
             assumes that separators are kept, so that split contents can be directly
             concatenated to recreate the original text. Defaults to splitting on
             newlines.

     Returns:
-        list of trimmed
+        list of trimmed ``BaseMessage``.

     Raises:
         ValueError: if two incompatible arguments are specified or an unrecognized
             ``strategy`` is specified.

     Example:
-        Trim chat history based on token count, keeping the SystemMessage if
-        present, and ensuring that the chat history starts with a HumanMessage (
-        or a SystemMessage followed by a HumanMessage).
+        Trim chat history based on token count, keeping the ``SystemMessage`` if
+        present, and ensuring that the chat history starts with a ``HumanMessage`` (
+        or a ``SystemMessage`` followed by a ``HumanMessage``).

         .. code-block:: python
@@ -854,9 +861,9 @@ def trim_messages(
             HumanMessage(content="what do you call a speechless parrot"),
         ]

-        Trim chat history based on the message count, keeping the SystemMessage if
-        present, and ensuring that the chat history starts with a HumanMessage (
-        or a SystemMessage followed by a HumanMessage).
+        Trim chat history based on the message count, keeping the ``SystemMessage`` if
+        present, and ensuring that the chat history starts with a ``HumanMessage`` (
+        or a ``SystemMessage`` followed by a ``HumanMessage``).

         trim_messages(
             messages,
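A hedged sketch of the hot-path setup the note above recommends, assuming ``count_tokens_approximately`` is importable from ``langchain_core.messages.utils`` (illustrative; the token budget is arbitrary):

    from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
    from langchain_core.messages.utils import count_tokens_approximately, trim_messages

    messages = [
        SystemMessage("you're a good assistant"),
        HumanMessage("what's 2 + 2"),
        AIMessage("4"),
        HumanMessage("what do you call a speechless parrot"),
    ]

    trimmed = trim_messages(
        messages,
        max_tokens=40,                             # illustrative budget
        token_counter=count_tokens_approximately,  # fast, approximate counting
        strategy="last",
        start_on="human",     # history after the system message starts on a human turn
        include_system=True,  # keep the SystemMessage at index 0
    )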
@@ -1045,17 +1052,16 @@ def convert_to_openai_messages(
         messages: Message-like object or iterable of objects whose contents are
             in OpenAI, Anthropic, Bedrock Converse, or VertexAI formats.
         text_format: How to format string or text block contents:
-
-                blocks these are left as is.
+            - ``'string'``:
+              If a message has a string content, this is left as a string. If
+              a message has content blocks that are all of type ``'text'``, these
+              are joined with a newline to make a single string. If a message has
+              content blocks and at least one isn't of type ``'text'``, then
+              all blocks are left as dicts.
+            - ``'block'``:
+              If a message has a string content, this is turned into a list
+              with a single content block of type ``'text'``. If a message has
+              content blocks these are left as is.

     Raises:
         ValueError: if an unrecognized ``text_format`` is specified, or if a message
@@ -1384,7 +1390,7 @@ def convert_to_openai_messages(
                     },
                 }
             )
-        elif block.get("type")
+        elif block.get("type") in ["thinking", "reasoning"]:
             content.append(block)
         else:
             err = (
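A brief sketch contrasting the two ``text_format`` modes documented above (expected outputs are approximate and illustrative):

    from langchain_core.messages import HumanMessage, convert_to_openai_messages

    msg = HumanMessage(content=[{"type": "text", "text": "hello"}])

    # 'string': a list of all-text blocks collapses to one string.
    convert_to_openai_messages([msg], text_format="string")
    # -> [{'role': 'user', 'content': 'hello'}]

    # 'block': content blocks are kept as a list of dicts.
    convert_to_openai_messages([msg], text_format="block")
    # -> [{'role': 'user', 'content': [{'type': 'text', 'text': 'hello'}]}]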
langchain_core/outputs/chat_generation.py
CHANGED

@@ -15,14 +15,14 @@ from langchain_core.utils._merge import merge_dicts
 class ChatGeneration(Generation):
     """A single chat generation output.

-    A subclass of Generation that represents the response from a chat model
+    A subclass of ``Generation`` that represents the response from a chat model
     that generates chat messages.

-    The
-    Most of the time, the message will be of type
+    The ``message`` attribute is a structured representation of the chat message.
+    Most of the time, the message will be of type ``AIMessage``.

     Users working with chat models will usually access information via either
-
+    ``AIMessage`` (returned from runnable interfaces) or ``LLMResult`` (available
     via callbacks).
     """
@@ -31,6 +31,7 @@ class ChatGeneration(Generation):

     .. warning::
         SHOULD NOT BE SET DIRECTLY!
+
     """
     message: BaseMessage
     """The message output by the chat model."""
@@ -47,6 +48,9 @@ class ChatGeneration(Generation):

     Returns:
         The values of the object with the text attribute set.
+
+    Raises:
+        ValueError: If the message is not a string or a list.
     """
     text = ""
     if isinstance(self.message.content, str):
@@ -66,9 +70,9 @@ class ChatGeneration(Generation):


 class ChatGenerationChunk(ChatGeneration):
-    """ChatGeneration chunk.
+    """``ChatGeneration`` chunk.

-    ChatGeneration chunks can be concatenated with other ChatGeneration chunks.
+    ``ChatGeneration`` chunks can be concatenated with other ``ChatGeneration`` chunks.
     """

     message: BaseMessageChunk
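A minimal sketch of the chunk concatenation the docstring now calls out (illustrative, not part of the diff):

    from langchain_core.messages import AIMessageChunk
    from langchain_core.outputs import ChatGenerationChunk

    left = ChatGenerationChunk(message=AIMessageChunk(content="Hello, "))
    right = ChatGenerationChunk(message=AIMessageChunk(content="world!"))

    combined = left + right  # chunks support "+" concatenation
    assert combined.text == "Hello, world!"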
langchain_core/prompt_values.py
CHANGED
@@ -113,8 +113,12 @@ class ImageURL(TypedDict, total=False):
     """Image URL."""

     detail: Literal["auto", "low", "high"]
-    """Specifies the detail level of the image. Defaults to
-    Can be
+    """Specifies the detail level of the image. Defaults to ``'auto'``.
+    Can be ``'auto'``, ``'low'``, or ``'high'``.
+
+    This follows OpenAI's Chat Completion API's image URL format.
+
+    """

     url: str
     """Either a URL of the image or the base64 encoded image data."""
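A small sketch of an ``ImageURL`` payload following the OpenAI-style format the docstring now references (the URL is a placeholder):

    from langchain_core.prompt_values import ImageURL

    image: ImageURL = {
        "url": "https://example.com/cat.png",  # placeholder URL or base64 data
        "detail": "low",                       # one of 'auto', 'low', 'high'
    }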
langchain_core/prompts/chat.py
CHANGED
@@ -262,7 +262,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
     def from_template_file(
         cls,
         template_file: Union[str, Path],
-        input_variables: list[str],
+        input_variables: list[str],  # noqa: ARG003 # Deprecated
         **kwargs: Any,
     ) -> Self:
         """Create a class from a template file.

@@ -275,7 +275,7 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
         Returns:
             A new instance of this class.
         """
-        prompt = PromptTemplate.from_file(template_file
+        prompt = PromptTemplate.from_file(template_file)
         return cls(prompt=prompt, **kwargs)

     @abstractmethod
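A hedged sketch of the deprecation above: ``input_variables`` is now ignored and variables are inferred from the template file itself (the file path and contents are placeholders):

    from langchain_core.prompts import HumanMessagePromptTemplate

    # "greeting.txt" is assumed to contain an f-string template, e.g. "Hello, {name}!"
    template = HumanMessagePromptTemplate.from_template_file(
        "greeting.txt",
        input_variables=[],  # deprecated and unused; inferred from the file
    )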
@@ -812,7 +812,10 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
             )

             prompt_value = template.invoke(
-                {
+                {
+                    "name": "Bob",
+                    "user_input": "What is your name?",
+                }
             )
             # Output:
             # ChatPromptValue(
langchain_core/prompts/few_shot.py
CHANGED

@@ -281,7 +281,10 @@ class FewShotChatMessagePromptTemplate(
             ]

             example_prompt = ChatPromptTemplate.from_messages(
-                [
+                [
+                    ("human", "What is {input}?"),
+                    ("ai", "{output}"),
+                ]
             )

             few_shot_prompt = FewShotChatMessagePromptTemplate(
langchain_core/runnables/base.py
CHANGED
@@ -1399,7 +1399,10 @@ class Runnable(ABC, Generic[Input, Output]):
         .. code-block:: python

             template = ChatPromptTemplate.from_messages(
-                [
+                [
+                    ("system", "You are Cat Agent 007"),
+                    ("human", "{question}"),
+                ]
             ).with_config({"run_name": "my_template", "tags": ["my_template"]})
langchain_core/tools/base.py
CHANGED
@@ -1324,7 +1324,6 @@ def get_all_basemodel_annotations(
     """
     # cls has no subscript: cls = FooBar
     if isinstance(cls, type):
-        # Gather pydantic field objects (v2: model_fields / v1: __fields__)
         fields = get_fields(cls)
         alias_map = {field.alias: name for name, field in fields.items() if field.alias}

@@ -1402,7 +1401,7 @@ def _replace_type_vars(
         if type_ in generic_map:
             return generic_map[type_]
         if default_to_bound:
-            return type_.__bound__
+            return type_.__bound__ if type_.__bound__ is not None else Any
         return type_
     if (origin := get_origin(type_)) and (args := get_args(type_)):
         new_args = tuple(
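A self-contained sketch mirroring the fixed branch of ``_replace_type_vars``: an unbound ``TypeVar`` now resolves to ``Any`` instead of its missing (``None``) bound:

    from typing import Any, TypeVar

    T_bound = TypeVar("T_bound", bound=int)
    T_unbound = TypeVar("T_unbound")

    def resolve(tv: TypeVar) -> Any:
        # Use the TypeVar's bound when it has one, otherwise fall back to Any.
        return tv.__bound__ if tv.__bound__ is not None else Any

    assert resolve(T_bound) is int
    assert resolve(T_unbound) is Any  # previously this branch produced None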
langchain_core/tools/convert.py
CHANGED
langchain_core/utils/aiter.py
CHANGED
@@ -165,7 +165,7 @@ class Tee(Generic[T]):
     A ``tee`` works lazily and can handle an infinite ``iterable``, provided
     that all iterators advance.

-    .. code-block::
+    .. code-block:: python

         async def derivative(sensor_data):
             previous, current = a.tee(sensor_data, n=2)
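A runnable sketch of the ``Tee`` usage the docstring example assumes (the ``numbers`` generator is illustrative):

    import asyncio

    from langchain_core.utils.aiter import Tee

    async def numbers():
        for i in range(3):
            yield i

    async def main() -> None:
        first, second = Tee(numbers(), n=2)  # two independent async iterators
        assert [x async for x in first] == [0, 1, 2]
        assert [x async for x in second] == [0, 1, 2]

    asyncio.run(main())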
langchain_core/utils/function_calling.py
CHANGED

@@ -670,14 +670,13 @@ def tool_example_to_messages(
     The ``ToolMessage`` is required because some chat models are hyper-optimized for
     agents rather than for an extraction use case.

-
-    input:
-    tool_calls:
-
-    tool_outputs: Optional[list[str]], a list of tool call outputs.
+    Args:
+        input: The user input
+        tool_calls: Tool calls represented as Pydantic BaseModels
+        tool_outputs: Tool call outputs.
             Does not need to be provided. If not provided, a placeholder value
             will be inserted. Defaults to None.
-    ai_response:
+        ai_response: If provided, content for a final ``AIMessage``.

     Returns:
         A list of messages
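A hedged sketch of ``tool_example_to_messages`` with a pydantic "tool call" model; the ``Person`` schema is illustrative only:

    from pydantic import BaseModel

    from langchain_core.utils.function_calling import tool_example_to_messages

    class Person(BaseModel):  # illustrative extraction schema
        name: str
        age: int

    # Roughly: [HumanMessage, AIMessage(tool_calls=[...]), ToolMessage].
    # With no tool_outputs, a placeholder ToolMessage content is inserted.
    messages = tool_example_to_messages(
        "Alice is 30 years old.",
        [Person(name="Alice", age=30)],
    )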
langchain_core/utils/iter.py
CHANGED
@@ -101,7 +101,7 @@ class Tee(Generic[T]):
     A ``tee`` works lazily and can handle an infinite ``iterable``, provided
     that all iterators advance.

-    .. code-block::
+    .. code-block:: python

         async def derivative(sensor_data):
             previous, current = a.tee(sensor_data, n=2)
langchain_core/vectorstores/in_memory.py
CHANGED

@@ -94,7 +94,7 @@ class InMemoryVectorStore(VectorStore):
     for doc in results:
         print(f"* {doc.page_content} [{doc.metadata}]")

-    .. code-block::
+    .. code-block::

         * thud [{'bar': 'baz'}]

@@ -111,7 +111,7 @@ class InMemoryVectorStore(VectorStore):
     for doc in results:
         print(f"* {doc.page_content} [{doc.metadata}]")

-    .. code-block::
+    .. code-block::

         * thud [{'bar': 'baz'}]

@@ -123,7 +123,7 @@ class InMemoryVectorStore(VectorStore):
     for doc, score in results:
         print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")

-    .. code-block::
+    .. code-block::

         * [SIM=0.832268] foo [{'baz': 'bar'}]

@@ -144,7 +144,7 @@ class InMemoryVectorStore(VectorStore):
     for doc, score in results:
         print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")

-    .. code-block::
+    .. code-block::

         * [SIM=0.832268] foo [{'baz': 'bar'}]

@@ -157,7 +157,7 @@ class InMemoryVectorStore(VectorStore):
     )
     retriever.invoke("thud")

-    .. code-block::
+    .. code-block::

         [Document(id='2', metadata={'bar': 'baz'}, page_content='thud')]
langchain_core/version.py
CHANGED
{langchain_core-1.0.0a4.dist-info → langchain_core-1.0.0a5.dist-info}/METADATA
CHANGED

@@ -1,19 +1,19 @@
 Metadata-Version: 2.1
 Name: langchain-core
-Version: 1.0.0a4
+Version: 1.0.0a5
 Summary: Building applications with LLMs through composability
 License: MIT
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/core
 Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-core%3D%3D0%22&expanded=true
 Project-URL: repository, https://github.com/langchain-ai/langchain
-Requires-Python:
-Requires-Dist: langsmith
+Requires-Python: <4.0.0,>=3.10.0
+Requires-Dist: langsmith<1.0.0,>=0.3.45
 Requires-Dist: tenacity!=8.4.0,<10.0.0,>=8.1.0
-Requires-Dist: jsonpatch<2.0,>=1.33
-Requires-Dist: PyYAML
-Requires-Dist: typing-extensions
-Requires-Dist: packaging
-Requires-Dist: pydantic
+Requires-Dist: jsonpatch<2.0.0,>=1.33.0
+Requires-Dist: PyYAML<7.0.0,>=5.3.0
+Requires-Dist: typing-extensions<5.0.0,>=4.7.0
+Requires-Dist: packaging<26.0.0,>=23.2.0
+Requires-Dist: pydantic<3.0.0,>=2.7.4
 Description-Content-Type: text/markdown

 # 🦜🍎️ LangChain Core