langchain-core 1.0.3__py3-none-any.whl → 1.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/agents.py +36 -27
- langchain_core/callbacks/manager.py +18 -1
- langchain_core/callbacks/usage.py +2 -2
- langchain_core/documents/base.py +6 -6
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/indexing/api.py +6 -6
- langchain_core/language_models/_utils.py +1 -1
- langchain_core/language_models/base.py +37 -18
- langchain_core/language_models/chat_models.py +44 -28
- langchain_core/language_models/llms.py +66 -36
- langchain_core/messages/ai.py +3 -3
- langchain_core/messages/base.py +1 -1
- langchain_core/messages/content.py +2 -2
- langchain_core/messages/utils.py +12 -8
- langchain_core/output_parsers/openai_tools.py +14 -2
- langchain_core/outputs/generation.py +6 -5
- langchain_core/prompt_values.py +2 -2
- langchain_core/prompts/base.py +47 -44
- langchain_core/prompts/chat.py +35 -28
- langchain_core/prompts/dict.py +1 -1
- langchain_core/prompts/message.py +4 -4
- langchain_core/runnables/base.py +97 -52
- langchain_core/runnables/branch.py +22 -20
- langchain_core/runnables/configurable.py +30 -29
- langchain_core/runnables/fallbacks.py +22 -20
- langchain_core/runnables/graph_mermaid.py +4 -1
- langchain_core/runnables/graph_png.py +28 -0
- langchain_core/runnables/history.py +43 -32
- langchain_core/runnables/passthrough.py +35 -25
- langchain_core/runnables/router.py +5 -5
- langchain_core/runnables/schema.py +1 -1
- langchain_core/sys_info.py +4 -2
- langchain_core/tools/base.py +22 -16
- langchain_core/utils/function_calling.py +9 -6
- langchain_core/utils/input.py +3 -0
- langchain_core/utils/pydantic.py +2 -2
- langchain_core/version.py +1 -1
- {langchain_core-1.0.3.dist-info → langchain_core-1.0.4.dist-info}/METADATA +1 -1
- {langchain_core-1.0.3.dist-info → langchain_core-1.0.4.dist-info}/RECORD +40 -40
- {langchain_core-1.0.3.dist-info → langchain_core-1.0.4.dist-info}/WHEEL +0 -0
@@ -651,9 +651,12 @@ class BaseLLM(BaseLanguageModel[str], ABC):

         Args:
             prompts: The prompts to generate from.
-            stop: Stop words to use when generating.
-
-
+            stop: Stop words to use when generating.
+
+                Model output is cut off at the first occurrence of any of these
+                substrings.
+
+                If stop tokens are not supported consider raising `NotImplementedError`.
             run_manager: Callback manager for the run.

         Returns:
@@ -671,9 +674,12 @@ class BaseLLM(BaseLanguageModel[str], ABC):

         Args:
             prompts: The prompts to generate from.
-            stop: Stop words to use when generating.
-
-
+            stop: Stop words to use when generating.
+
+                Model output is cut off at the first occurrence of any of these
+                substrings.
+
+                If stop tokens are not supported consider raising `NotImplementedError`.
             run_manager: Callback manager for the run.

         Returns:
@@ -705,11 +711,14 @@ class BaseLLM(BaseLanguageModel[str], ABC):

         Args:
             prompt: The prompt to generate from.
-            stop: Stop words to use when generating.
-
+            stop: Stop words to use when generating.
+
+                Model output is cut off at the first occurrence of any of these
+                substrings.
             run_manager: Callback manager for the run.
-            **kwargs: Arbitrary additional keyword arguments.
-
+            **kwargs: Arbitrary additional keyword arguments.
+
+                These are usually passed to the model provider API call.

         Yields:
             Generation chunks.
@@ -731,11 +740,14 @@ class BaseLLM(BaseLanguageModel[str], ABC):

         Args:
             prompt: The prompt to generate from.
-            stop: Stop words to use when generating.
-
+            stop: Stop words to use when generating.
+
+                Model output is cut off at the first occurrence of any of these
+                substrings.
             run_manager: Callback manager for the run.
-            **kwargs: Arbitrary additional keyword arguments.
-
+            **kwargs: Arbitrary additional keyword arguments.
+
+                These are usually passed to the model provider API call.

         Yields:
             Generation chunks.
@@ -846,10 +858,14 @@ class BaseLLM(BaseLanguageModel[str], ABC):

         Args:
             prompts: List of string prompts.
-            stop: Stop words to use when generating.
-
-
-
+            stop: Stop words to use when generating.
+
+                Model output is cut off at the first occurrence of any of these
+                substrings.
+            callbacks: `Callbacks` to pass through.
+
+                Used for executing additional functionality, such as logging or
+                streaming, throughout generation.
             tags: List of tags to associate with each prompt. If provided, the length
                 of the list must match the length of the prompts list.
             metadata: List of metadata dictionaries to associate with each prompt. If
@@ -859,8 +875,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                 length of the list must match the length of the prompts list.
             run_id: List of run IDs to associate with each prompt. If provided, the
                 length of the list must match the length of the prompts list.
-            **kwargs: Arbitrary additional keyword arguments.
-
+            **kwargs: Arbitrary additional keyword arguments.
+
+                These are usually passed to the model provider API call.

         Raises:
             ValueError: If prompts is not a list.
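
The expanded `generate` docstring spells out what `callbacks` are for. A minimal sketch of that pass-through behavior, using the package's `FakeListLLM` test model; the `LogHandler` name and canned response are invented for the example:

```python
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.language_models import FakeListLLM


class LogHandler(BaseCallbackHandler):
    """Example of 'additional functionality, such as logging' via callbacks."""

    def on_llm_start(self, serialized: dict, prompts: list[str], **kwargs) -> None:
        print(f"LLM starting on {len(prompts)} prompt(s)")


llm = FakeListLLM(responses=["hello"])
result = llm.generate(["hi"], callbacks=[LogHandler()])
print(result.generations[0][0].text)  # hello
```
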
@@ -1116,10 +1133,14 @@ class BaseLLM(BaseLanguageModel[str], ABC):

         Args:
             prompts: List of string prompts.
-            stop: Stop words to use when generating.
-
-
-
+            stop: Stop words to use when generating.
+
+                Model output is cut off at the first occurrence of any of these
+                substrings.
+            callbacks: `Callbacks` to pass through.
+
+                Used for executing additional functionality, such as logging or
+                streaming, throughout generation.
             tags: List of tags to associate with each prompt. If provided, the length
                 of the list must match the length of the prompts list.
             metadata: List of metadata dictionaries to associate with each prompt. If
@@ -1129,8 +1150,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                 length of the list must match the length of the prompts list.
             run_id: List of run IDs to associate with each prompt. If provided, the
                 length of the list must match the length of the prompts list.
-            **kwargs: Arbitrary additional keyword arguments.
-
+            **kwargs: Arbitrary additional keyword arguments.
+
+                These are usually passed to the model provider API call.

         Raises:
             ValueError: If the length of `callbacks`, `tags`, `metadata`, or
@@ -1410,12 +1432,16 @@ class LLM(BaseLLM):

         Args:
             prompt: The prompt to generate from.
-            stop: Stop words to use when generating.
-
-
+            stop: Stop words to use when generating.
+
+                Model output is cut off at the first occurrence of any of these
+                substrings.
+
+                If stop tokens are not supported consider raising `NotImplementedError`.
             run_manager: Callback manager for the run.
-            **kwargs: Arbitrary additional keyword arguments.
-
+            **kwargs: Arbitrary additional keyword arguments.
+
+                These are usually passed to the model provider API call.

         Returns:
             The model output as a string. SHOULD NOT include the prompt.
@@ -1436,12 +1462,16 @@ class LLM(BaseLLM):

         Args:
             prompt: The prompt to generate from.
-            stop: Stop words to use when generating.
-
-
+            stop: Stop words to use when generating.
+
+                Model output is cut off at the first occurrence of any of these
+                substrings.
+
+                If stop tokens are not supported consider raising `NotImplementedError`.
             run_manager: Callback manager for the run.
-            **kwargs: Arbitrary additional keyword arguments.
-
+            **kwargs: Arbitrary additional keyword arguments.
+
+                These are usually passed to the model provider API call.

         Returns:
             The model output as a string. SHOULD NOT include the prompt.
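
Taken together, these reflowed docstrings pin down the `stop` contract for `LLM._call` implementers. A minimal sketch of a conforming implementation; the `EchoLLM` class and its canned output are illustrative, not part of the package:

```python
from typing import Any

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM


class EchoLLM(LLM):
    """Toy LLM with a canned response, honoring `stop` as documented."""

    @property
    def _llm_type(self) -> str:
        return "echo"

    def _call(
        self,
        prompt: str,
        stop: list[str] | None = None,
        run_manager: CallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> str:
        output = "first line\nsecond line"
        if stop:
            # Cut output at the first occurrence of any stop substring.
            positions = [output.find(s) for s in stop if s in output]
            if positions:
                output = output[: min(positions)]
        return output


print(EchoLLM().invoke("hi", stop=["\n"]))  # first line
```
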

langchain_core/messages/ai.py CHANGED

@@ -50,7 +50,7 @@ class InputTokenDetails(TypedDict, total=False):

    May also hold extra provider-specific keys.

-    !!! version-added "Added in 0.3.9"
+    !!! version-added "Added in `langchain-core` 0.3.9"

    """

@@ -85,7 +85,7 @@ class OutputTokenDetails(TypedDict, total=False):

    May also hold extra provider-specific keys.

-    !!! version-added "Added in 0.3.9"
+    !!! version-added "Added in `langchain-core` 0.3.9"

    """

@@ -123,7 +123,7 @@ class UsageMetadata(TypedDict):
    }
    ```

-    !!! warning "Behavior changed in 0.3.9"
+    !!! warning "Behavior changed in `langchain-core` 0.3.9"
        Added `input_token_details` and `output_token_details`.

    !!! note "LangSmith SDK"

langchain_core/messages/base.py CHANGED

@@ -199,7 +199,7 @@ class BaseMessage(Serializable):
    def content_blocks(self) -> list[types.ContentBlock]:
        r"""Load content blocks from the message content.

-        !!! version-added "Added in 1.0.0"
+        !!! version-added "Added in `langchain-core` 1.0.0"

        """
        # Needed here to avoid circular import, as these classes import BaseMessages
@@ -867,7 +867,7 @@ def _get_data_content_block_types() -> tuple[str, ...]:
    Example: ("image", "video", "audio", "text-plain", "file")

    Note that old style multimodal blocks type literals with new style blocks.
-
+    Specifically, "image", "audio", and "file".

    See the docstring of `_normalize_messages` in `language_models._utils` for details.
    """
@@ -906,7 +906,7 @@ def is_data_content_block(block: dict) -> bool:

    # 'text' is checked to support v0 PlainTextContentBlock types
    # We must guard against new style TextContentBlock which also has 'text' `type`
-    # by ensuring the
+    # by ensuring the presence of `source_type`
    if block["type"] == "text" and "source_type" not in block:  # noqa: SIM103  # This is more readable
        return False


langchain_core/messages/utils.py CHANGED

@@ -328,12 +328,16 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
    """
    if isinstance(message, BaseMessage):
        message_ = message
-    elif isinstance(message,
-
-
-
-
-
+    elif isinstance(message, Sequence):
+        if isinstance(message, str):
+            message_ = _create_message_from_message_type("human", message)
+        else:
+            try:
+                message_type_str, template = message
+            except ValueError as e:
+                msg = "Message as a sequence must be (role string, template)"
+                raise NotImplementedError(msg) from e
+            message_ = _create_message_from_message_type(message_type_str, template)
    elif isinstance(message, dict):
        msg_kwargs = message.copy()
        try:
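
The rewritten branch accepts any two-item sequence as a `(role string, template)` pair while keeping the bare-string-to-human shortcut. A quick illustration through the public `convert_to_messages` helper, which routes each item through this function:

```python
from langchain_core.messages import convert_to_messages

messages = convert_to_messages(
    [
        ("system", "You are terse."),     # two-item sequence: (role, template)
        "Summarize the 1.0.4 release.",   # bare string becomes a human message
    ]
)
print([type(m).__name__ for m in messages])  # ['SystemMessage', 'HumanMessage']
```
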
@@ -1097,7 +1101,7 @@ def convert_to_openai_messages(
    # ]
    ```

-    !!! version-added "Added in 0.3.11"
+    !!! version-added "Added in `langchain-core` 0.3.11"

    """  # noqa: E501
    if text_format not in {"string", "block"}:
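
For context, `convert_to_openai_messages` converts LangChain message objects to OpenAI-style dicts; a minimal usage sketch:

```python
from langchain_core.messages import AIMessage, HumanMessage, convert_to_openai_messages

print(convert_to_openai_messages([HumanMessage("hi"), AIMessage("hello!")]))
# [{'role': 'user', 'content': 'hi'}, {'role': 'assistant', 'content': 'hello!'}]
```
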
@@ -1697,7 +1701,7 @@ def count_tokens_approximately(
    Warning:
        This function does not currently support counting image tokens.

-    !!! version-added "Added in 0.3.46"
+    !!! version-added "Added in `langchain-core` 0.3.46"

    """
    token_count = 0.0
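
`count_tokens_approximately` is a character-based heuristic rather than a real tokenizer; a minimal usage sketch (the exact count depends on the function's defaults):

```python
from langchain_core.messages import HumanMessage
from langchain_core.messages.utils import count_tokens_approximately

print(count_tokens_approximately([HumanMessage("Hello, world!")]))
```
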

langchain_core/output_parsers/openai_tools.py CHANGED

@@ -15,7 +15,11 @@ from langchain_core.messages.tool import tool_call as create_tool_call
from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
from langchain_core.outputs import ChatGeneration, Generation
from langchain_core.utils.json import parse_partial_json
-from langchain_core.utils.pydantic import TypeBaseModel
+from langchain_core.utils.pydantic import (
+    TypeBaseModel,
+    is_pydantic_v1_subclass,
+    is_pydantic_v2_subclass,
+)

logger = logging.getLogger(__name__)

@@ -323,7 +327,15 @@ class PydanticToolsParser(JsonOutputToolsParser):
            return None if self.first_tool_only else []

        json_results = [json_results] if self.first_tool_only else json_results
-
+        name_dict_v2: dict[str, TypeBaseModel] = {
+            tool.model_config.get("title") or tool.__name__: tool
+            for tool in self.tools
+            if is_pydantic_v2_subclass(tool)
+        }
+        name_dict_v1: dict[str, TypeBaseModel] = {
+            tool.__name__: tool for tool in self.tools if is_pydantic_v1_subclass(tool)
+        }
+        name_dict: dict[str, TypeBaseModel] = {**name_dict_v2, **name_dict_v1}
        pydantic_objects = []
        for res in json_results:
            if not isinstance(res["args"], dict):
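
Under the new lookup, a pydantic v2 tool is matched by its `model_config` title first, falling back to the class name (v1 classes still match by name only and, since `name_dict_v1` is merged last, win on collisions). A sketch of the title-based resolution; the model and tool-call names are invented for the example:

```python
from pydantic import BaseModel, ConfigDict

from langchain_core.messages import AIMessage
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.outputs import ChatGeneration


class Add(BaseModel):
    """Add two integers."""

    model_config = ConfigDict(title="add_numbers")  # resolved via this title

    a: int
    b: int


parser = PydanticToolsParser(tools=[Add], first_tool_only=True)
message = AIMessage(
    content="",
    tool_calls=[
        {
            "name": "add_numbers",  # matches the model_config title, not __name__
            "args": {"a": 1, "b": 2},
            "id": "call_1",
            "type": "tool_call",
        }
    ],
)
print(parser.parse_result([ChatGeneration(message=message)]))  # a=1 b=2
```
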

langchain_core/outputs/generation.py CHANGED

@@ -20,8 +20,7 @@ class Generation(Serializable):

    LangChain users working with chat models will usually access information via
    `AIMessage` (returned from runnable interfaces) or `LLMResult` (available
-    via callbacks). Please refer
-    for more information.
+    via callbacks). Please refer to `AIMessage` and `LLMResult` for more information.
    """

    text: str
@@ -34,11 +33,13 @@ class Generation(Serializable):
    """
    type: Literal["Generation"] = "Generation"
    """Type is used exclusively for serialization purposes.
-
+
+    Set to "Generation" for this class.
+    """

    @classmethod
    def is_lc_serializable(cls) -> bool:
-        """Return True as this class is serializable."""
+        """Return `True` as this class is serializable."""
        return True

    @classmethod
@@ -52,7 +53,7 @@ class Generation(Serializable):


class GenerationChunk(Generation):
-    """
+    """`GenerationChunk`, which can be concatenated with other Generation chunks."""

    def __add__(self, other: GenerationChunk) -> GenerationChunk:
        """Concatenate two `GenerationChunk`s.

langchain_core/prompt_values.py CHANGED

@@ -30,7 +30,7 @@ class PromptValue(Serializable, ABC):

    @classmethod
    def is_lc_serializable(cls) -> bool:
-        """Return True as this class is serializable."""
+        """Return `True` as this class is serializable."""
        return True

    @classmethod
@@ -48,7 +48,7 @@ class PromptValue(Serializable, ABC):

    @abstractmethod
    def to_messages(self) -> list[BaseMessage]:
-        """Return prompt as a list of
+        """Return prompt as a list of messages."""


class StringPromptValue(PromptValue):

langchain_core/prompts/base.py CHANGED

@@ -46,23 +46,27 @@ class BasePromptTemplate(

    input_variables: list[str]
    """A list of the names of the variables whose values are required as inputs to the
-    prompt."""
+    prompt.
+    """
    optional_variables: list[str] = Field(default=[])
    """A list of the names of the variables for placeholder or `MessagePlaceholder` that
    are optional.

-    These variables are auto inferred from the prompt and user need not provide them.
+    These variables are auto inferred from the prompt and user need not provide them.
+    """
    input_types: typing.Dict[str, Any] = Field(default_factory=dict, exclude=True)  # noqa: UP006
    """A dictionary of the types of the variables the prompt template expects.

-    If not provided, all variables are assumed to be strings.
+    If not provided, all variables are assumed to be strings.
+    """
    output_parser: BaseOutputParser | None = None
    """How to parse the output of calling an LLM on this formatted prompt."""
    partial_variables: Mapping[str, Any] = Field(default_factory=dict)
    """A dictionary of the partial variables the prompt template carries.

-    Partial variables populate the template so that you don't need to
-
+    Partial variables populate the template so that you don't need to pass them in every
+    time you call the prompt.
+    """
    metadata: typing.Dict[str, Any] | None = None  # noqa: UP006
    """Metadata to be used for tracing."""
    tags: list[str] | None = None
@@ -107,7 +111,7 @@ class BasePromptTemplate(

    @classmethod
    def is_lc_serializable(cls) -> bool:
-        """Return True as this class is serializable."""
+        """Return `True` as this class is serializable."""
        return True

    model_config = ConfigDict(
@@ -129,7 +133,7 @@ class BasePromptTemplate(
        """Get the input schema for the prompt.

        Args:
-            config:
+            config: Configuration for the prompt.

        Returns:
            The input schema for the prompt.
@@ -197,8 +201,8 @@ class BasePromptTemplate(
        """Invoke the prompt.

        Args:
-            input:
-            config:
+            input: Input to the prompt.
+            config: Configuration for the prompt.

        Returns:
            The output of the prompt.
@@ -223,8 +227,8 @@ class BasePromptTemplate(
        """Async invoke the prompt.

        Args:
-            input:
-            config:
+            input: Input to the prompt.
+            config: Configuration for the prompt.

        Returns:
            The output of the prompt.
@@ -244,7 +248,7 @@ class BasePromptTemplate(

    @abstractmethod
    def format_prompt(self, **kwargs: Any) -> PromptValue:
-        """Create
+        """Create `PromptValue`.

        Args:
            **kwargs: Any arguments to be passed to the prompt template.
@@ -254,7 +258,7 @@ class BasePromptTemplate(
        """

    async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
-        """Async create
+        """Async create `PromptValue`.

        Args:
            **kwargs: Any arguments to be passed to the prompt template.
@@ -268,7 +272,7 @@ class BasePromptTemplate(
        """Return a partial of the prompt template.

        Args:
-            **kwargs:
+            **kwargs: Partial variables to set.

        Returns:
            A partial of the prompt template.
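
`partial` pre-fills some variables and returns a new template expecting only the rest, e.g.:

```python
from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template("{greeting}, {name}!")
partial_prompt = prompt.partial(greeting="Hello")
print(partial_prompt.format(name="world"))  # Hello, world!
```
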
@@ -298,9 +302,9 @@ class BasePromptTemplate(
            A formatted string.

        Example:
-
-
-
+            ```python
+            prompt.format(variable1="foo")
+            ```
        """

    async def aformat(self, **kwargs: Any) -> FormatOutputType:
@@ -313,9 +317,9 @@ class BasePromptTemplate(
            A formatted string.

        Example:
-
-
-
+            ```python
+            await prompt.aformat(variable1="foo")
+            ```
        """
        return self.format(**kwargs)

@@ -350,9 +354,9 @@ class BasePromptTemplate(
            NotImplementedError: If the prompt type is not implemented.

        Example:
-
-
-
+            ```python
+            prompt.save(file_path="path/prompt.yaml")
+            ```
        """
        if self.partial_variables:
            msg = "Cannot save prompt with partial variables."
@@ -404,23 +408,23 @@ def format_document(doc: Document, prompt: BasePromptTemplate[str]) -> str:

    First, this pulls information from the document from two sources:

-    1. page_content
-        This takes the information from the `document.page_content`
-
-    2. metadata
-        This takes information from `document.metadata` and assigns
-
+    1. `page_content`:
+        This takes the information from the `document.page_content` and assigns it to a
+        variable named `page_content`.
+    2. `metadata`:
+        This takes information from `document.metadata` and assigns it to variables of
+        the same name.

    Those variables are then passed into the `prompt` to produce a formatted string.

    Args:
-        doc: Document
+        doc: `Document`, the `page_content` and `metadata` will be used to create
            the final string.
-        prompt: BasePromptTemplate
-            and metadata into the final string.
+        prompt: `BasePromptTemplate`, will be used to format the `page_content`
+            and `metadata` into the final string.

    Returns:
-
+        String of the document formatted.

    Example:
        ```python
@@ -431,7 +435,6 @@ def format_document(doc: Document, prompt: BasePromptTemplate[str]) -> str:
        prompt = PromptTemplate.from_template("Page {page}: {page_content}")
        format_document(doc, prompt)
        >>> "Page 1: This is a joke"
-
        ```
    """
    return prompt.format(**_get_document_info(doc, prompt))
@@ -442,22 +445,22 @@ async def aformat_document(doc: Document, prompt: BasePromptTemplate[str]) -> str:

    First, this pulls information from the document from two sources:

-    1. page_content
-        This takes the information from the `document.page_content`
-
-    2. metadata
-        This takes information from `document.metadata` and assigns
-
+    1. `page_content`:
+        This takes the information from the `document.page_content` and assigns it to a
+        variable named `page_content`.
+    2. `metadata`:
+        This takes information from `document.metadata` and assigns it to variables of
+        the same name.

    Those variables are then passed into the `prompt` to produce a formatted string.

    Args:
-        doc: Document
+        doc: `Document`, the `page_content` and `metadata` will be used to create
            the final string.
-        prompt: BasePromptTemplate
-            and metadata into the final string.
+        prompt: `BasePromptTemplate`, will be used to format the `page_content`
+            and `metadata` into the final string.

    Returns:
-
+        String of the document formatted.
    """
    return await prompt.aformat(**_get_document_info(doc, prompt))
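
The async variant behaves identically; a runnable version of the example from the docstring above:

```python
import asyncio

from langchain_core.documents import Document
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.base import aformat_document

doc = Document(page_content="This is a joke", metadata={"page": "1"})
prompt = PromptTemplate.from_template("Page {page}: {page_content}")
print(asyncio.run(aformat_document(doc, prompt)))  # Page 1: This is a joke
```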