langchain-core 1.0.0a8__py3-none-any.whl → 1.0.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of langchain-core might be problematic. Click here for more details.
- langchain_core/__init__.py +1 -1
- langchain_core/_api/__init__.py +0 -1
- langchain_core/_api/beta_decorator.py +17 -20
- langchain_core/_api/deprecation.py +30 -35
- langchain_core/_import_utils.py +1 -1
- langchain_core/agents.py +7 -6
- langchain_core/caches.py +4 -10
- langchain_core/callbacks/__init__.py +1 -8
- langchain_core/callbacks/base.py +232 -243
- langchain_core/callbacks/file.py +33 -33
- langchain_core/callbacks/manager.py +353 -416
- langchain_core/callbacks/stdout.py +21 -22
- langchain_core/callbacks/streaming_stdout.py +32 -32
- langchain_core/callbacks/usage.py +54 -51
- langchain_core/chat_history.py +42 -57
- langchain_core/document_loaders/langsmith.py +21 -21
- langchain_core/documents/__init__.py +0 -1
- langchain_core/documents/base.py +37 -40
- langchain_core/documents/transformers.py +28 -29
- langchain_core/embeddings/fake.py +46 -52
- langchain_core/exceptions.py +5 -5
- langchain_core/indexing/api.py +11 -11
- langchain_core/indexing/base.py +24 -24
- langchain_core/language_models/__init__.py +0 -2
- langchain_core/language_models/_utils.py +51 -53
- langchain_core/language_models/base.py +23 -24
- langchain_core/language_models/chat_models.py +121 -144
- langchain_core/language_models/fake_chat_models.py +5 -5
- langchain_core/language_models/llms.py +10 -12
- langchain_core/load/dump.py +1 -1
- langchain_core/load/load.py +16 -16
- langchain_core/load/serializable.py +35 -34
- langchain_core/messages/__init__.py +1 -16
- langchain_core/messages/ai.py +105 -104
- langchain_core/messages/base.py +26 -26
- langchain_core/messages/block_translators/__init__.py +17 -17
- langchain_core/messages/block_translators/anthropic.py +2 -2
- langchain_core/messages/block_translators/bedrock_converse.py +2 -2
- langchain_core/messages/block_translators/google_genai.py +2 -2
- langchain_core/messages/block_translators/groq.py +117 -21
- langchain_core/messages/block_translators/langchain_v0.py +2 -2
- langchain_core/messages/block_translators/openai.py +4 -4
- langchain_core/messages/chat.py +1 -1
- langchain_core/messages/content.py +189 -193
- langchain_core/messages/function.py +5 -5
- langchain_core/messages/human.py +15 -17
- langchain_core/messages/modifier.py +1 -1
- langchain_core/messages/system.py +12 -14
- langchain_core/messages/tool.py +45 -49
- langchain_core/messages/utils.py +384 -396
- langchain_core/output_parsers/__init__.py +1 -14
- langchain_core/output_parsers/base.py +22 -23
- langchain_core/output_parsers/json.py +3 -3
- langchain_core/output_parsers/list.py +1 -1
- langchain_core/output_parsers/openai_functions.py +46 -44
- langchain_core/output_parsers/openai_tools.py +7 -7
- langchain_core/output_parsers/pydantic.py +10 -11
- langchain_core/output_parsers/string.py +1 -1
- langchain_core/output_parsers/transform.py +2 -2
- langchain_core/output_parsers/xml.py +1 -1
- langchain_core/outputs/__init__.py +1 -1
- langchain_core/outputs/chat_generation.py +14 -14
- langchain_core/outputs/generation.py +5 -5
- langchain_core/outputs/llm_result.py +5 -5
- langchain_core/prompt_values.py +5 -5
- langchain_core/prompts/__init__.py +3 -23
- langchain_core/prompts/base.py +32 -37
- langchain_core/prompts/chat.py +216 -222
- langchain_core/prompts/dict.py +2 -2
- langchain_core/prompts/few_shot.py +76 -83
- langchain_core/prompts/few_shot_with_templates.py +6 -8
- langchain_core/prompts/image.py +11 -13
- langchain_core/prompts/loading.py +1 -1
- langchain_core/prompts/message.py +2 -2
- langchain_core/prompts/prompt.py +14 -16
- langchain_core/prompts/string.py +19 -7
- langchain_core/prompts/structured.py +24 -25
- langchain_core/rate_limiters.py +36 -38
- langchain_core/retrievers.py +41 -182
- langchain_core/runnables/base.py +565 -590
- langchain_core/runnables/branch.py +7 -7
- langchain_core/runnables/config.py +37 -44
- langchain_core/runnables/configurable.py +8 -9
- langchain_core/runnables/fallbacks.py +8 -8
- langchain_core/runnables/graph.py +28 -27
- langchain_core/runnables/graph_ascii.py +19 -18
- langchain_core/runnables/graph_mermaid.py +20 -31
- langchain_core/runnables/graph_png.py +7 -7
- langchain_core/runnables/history.py +20 -20
- langchain_core/runnables/passthrough.py +8 -8
- langchain_core/runnables/retry.py +3 -3
- langchain_core/runnables/router.py +1 -1
- langchain_core/runnables/schema.py +33 -33
- langchain_core/runnables/utils.py +30 -34
- langchain_core/stores.py +72 -102
- langchain_core/sys_info.py +27 -29
- langchain_core/tools/__init__.py +1 -14
- langchain_core/tools/base.py +63 -63
- langchain_core/tools/convert.py +92 -92
- langchain_core/tools/render.py +9 -9
- langchain_core/tools/retriever.py +1 -1
- langchain_core/tools/simple.py +6 -7
- langchain_core/tools/structured.py +17 -18
- langchain_core/tracers/__init__.py +1 -9
- langchain_core/tracers/base.py +35 -35
- langchain_core/tracers/context.py +12 -17
- langchain_core/tracers/event_stream.py +3 -3
- langchain_core/tracers/langchain.py +8 -8
- langchain_core/tracers/log_stream.py +17 -18
- langchain_core/tracers/memory_stream.py +2 -2
- langchain_core/tracers/schemas.py +0 -129
- langchain_core/utils/aiter.py +31 -31
- langchain_core/utils/env.py +5 -5
- langchain_core/utils/function_calling.py +48 -120
- langchain_core/utils/html.py +4 -4
- langchain_core/utils/input.py +2 -2
- langchain_core/utils/interactive_env.py +1 -1
- langchain_core/utils/iter.py +19 -19
- langchain_core/utils/json.py +1 -1
- langchain_core/utils/json_schema.py +2 -2
- langchain_core/utils/mustache.py +5 -5
- langchain_core/utils/pydantic.py +17 -17
- langchain_core/utils/strings.py +4 -4
- langchain_core/utils/utils.py +25 -28
- langchain_core/vectorstores/base.py +43 -64
- langchain_core/vectorstores/in_memory.py +83 -85
- langchain_core/version.py +1 -1
- {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc1.dist-info}/METADATA +23 -11
- langchain_core-1.0.0rc1.dist-info/RECORD +172 -0
- langchain_core/memory.py +0 -120
- langchain_core/pydantic_v1/__init__.py +0 -30
- langchain_core/pydantic_v1/dataclasses.py +0 -23
- langchain_core/pydantic_v1/main.py +0 -23
- langchain_core-1.0.0a8.dist-info/RECORD +0 -176
- {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc1.dist-info}/WHEEL +0 -0
|
@@ -1,17 +1,4 @@
|
|
|
1
|
-
"""**OutputParser** classes parse the output of an LLM call.
|
|
2
|
-
|
|
3
|
-
**Class hierarchy:**
|
|
4
|
-
|
|
5
|
-
.. code-block::
|
|
6
|
-
|
|
7
|
-
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
|
|
8
|
-
|
|
9
|
-
**Main helpers:**
|
|
10
|
-
|
|
11
|
-
.. code-block::
|
|
12
|
-
|
|
13
|
-
Serializable, Generation, PromptValue
|
|
14
|
-
""" # noqa: E501
|
|
1
|
+
"""**OutputParser** classes parse the output of an LLM call."""
|
|
15
2
|
|
|
16
3
|
from typing import TYPE_CHECKING
|
|
17
4
|
|
|
@@ -134,29 +134,28 @@ class BaseOutputParser(
|
|
|
134
134
|
Output parsers help structure language model responses.
|
|
135
135
|
|
|
136
136
|
Example:
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
137
|
+
```python
|
|
138
|
+
class BooleanOutputParser(BaseOutputParser[bool]):
|
|
139
|
+
true_val: str = "YES"
|
|
140
|
+
false_val: str = "NO"
|
|
141
|
+
|
|
142
|
+
def parse(self, text: str) -> bool:
|
|
143
|
+
cleaned_text = text.strip().upper()
|
|
144
|
+
if cleaned_text not in (
|
|
145
|
+
self.true_val.upper(),
|
|
146
|
+
self.false_val.upper(),
|
|
147
|
+
):
|
|
148
|
+
raise OutputParserException(
|
|
149
|
+
f"BooleanOutputParser expected output value to either be "
|
|
150
|
+
f"{self.true_val} or {self.false_val} (case-insensitive). "
|
|
151
|
+
f"Received {cleaned_text}."
|
|
152
|
+
)
|
|
153
|
+
return cleaned_text == self.true_val.upper()
|
|
154
|
+
|
|
155
|
+
@property
|
|
156
|
+
def _type(self) -> str:
|
|
157
|
+
return "boolean_output_parser"
|
|
158
|
+
```
|
|
160
159
|
"""
|
|
161
160
|
|
|
162
161
|
@property
|
|
@@ -40,7 +40,7 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
|
|
|
40
40
|
|
|
41
41
|
pydantic_object: Annotated[type[TBaseModel] | None, SkipValidation()] = None # type: ignore[valid-type]
|
|
42
42
|
"""The Pydantic object to use for validation.
|
|
43
|
-
If None
|
|
43
|
+
If `None`, no validation is performed."""
|
|
44
44
|
|
|
45
45
|
@override
|
|
46
46
|
def _diff(self, prev: Any | None, next: Any) -> Any:
|
|
@@ -59,9 +59,9 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
|
|
|
59
59
|
Args:
|
|
60
60
|
result: The result of the LLM call.
|
|
61
61
|
partial: Whether to parse partial JSON objects.
|
|
62
|
-
If True
|
|
62
|
+
If `True`, the output will be a JSON object containing
|
|
63
63
|
all the keys that have been returned so far.
|
|
64
|
-
If False
|
|
64
|
+
If `False`, the output will be the full JSON object.
|
|
65
65
|
Default is False.
|
|
66
66
|
|
|
67
67
|
Returns:
|
|
@@ -149,7 +149,7 @@ class CommaSeparatedListOutputParser(ListOutputParser):
|
|
|
149
149
|
"""Get the namespace of the langchain object.
|
|
150
150
|
|
|
151
151
|
Returns:
|
|
152
|
-
|
|
152
|
+
`["langchain", "output_parsers", "list"]`
|
|
153
153
|
"""
|
|
154
154
|
return ["langchain", "output_parsers", "list"]
|
|
155
155
|
|
|
@@ -31,13 +31,13 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):
|
|
|
31
31
|
|
|
32
32
|
Args:
|
|
33
33
|
result: The result of the LLM call.
|
|
34
|
-
partial: Whether to parse partial JSON objects.
|
|
34
|
+
partial: Whether to parse partial JSON objects.
|
|
35
35
|
|
|
36
36
|
Returns:
|
|
37
37
|
The parsed JSON object.
|
|
38
38
|
|
|
39
39
|
Raises:
|
|
40
|
-
OutputParserException
|
|
40
|
+
`OutputParserException`: If the output is not valid JSON.
|
|
41
41
|
"""
|
|
42
42
|
generation = result[0]
|
|
43
43
|
if not isinstance(generation, ChatGeneration):
|
|
@@ -56,7 +56,7 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]):
|
|
|
56
56
|
|
|
57
57
|
|
|
58
58
|
class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
|
|
59
|
-
"""Parse an output as the
|
|
59
|
+
"""Parse an output as the JSON object."""
|
|
60
60
|
|
|
61
61
|
strict: bool = False
|
|
62
62
|
"""Whether to allow non-JSON-compliant strings.
|
|
@@ -82,13 +82,13 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
|
|
|
82
82
|
|
|
83
83
|
Args:
|
|
84
84
|
result: The result of the LLM call.
|
|
85
|
-
partial: Whether to parse partial JSON objects.
|
|
85
|
+
partial: Whether to parse partial JSON objects.
|
|
86
86
|
|
|
87
87
|
Returns:
|
|
88
88
|
The parsed JSON object.
|
|
89
89
|
|
|
90
90
|
Raises:
|
|
91
|
-
|
|
91
|
+
`OutputParserException`: If the output is not valid JSON.
|
|
92
92
|
"""
|
|
93
93
|
if len(result) != 1:
|
|
94
94
|
msg = f"Expected exactly one result, but got {len(result)}"
|
|
@@ -155,7 +155,7 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]):
|
|
|
155
155
|
|
|
156
156
|
|
|
157
157
|
class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):
|
|
158
|
-
"""Parse an output as the element of the
|
|
158
|
+
"""Parse an output as the element of the JSON object."""
|
|
159
159
|
|
|
160
160
|
key_name: str
|
|
161
161
|
"""The name of the key to return."""
|
|
@@ -165,7 +165,7 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):
|
|
|
165
165
|
|
|
166
166
|
Args:
|
|
167
167
|
result: The result of the LLM call.
|
|
168
|
-
partial: Whether to parse partial JSON objects.
|
|
168
|
+
partial: Whether to parse partial JSON objects.
|
|
169
169
|
|
|
170
170
|
Returns:
|
|
171
171
|
The parsed JSON object.
|
|
@@ -177,48 +177,50 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser):
|
|
|
177
177
|
|
|
178
178
|
|
|
179
179
|
class PydanticOutputFunctionsParser(OutputFunctionsParser):
|
|
180
|
-
"""Parse an output as a
|
|
180
|
+
"""Parse an output as a Pydantic object.
|
|
181
181
|
|
|
182
|
-
This parser is used to parse the output of a
|
|
183
|
-
|
|
182
|
+
This parser is used to parse the output of a chat model that uses OpenAI function
|
|
183
|
+
format to invoke functions.
|
|
184
184
|
|
|
185
|
-
The parser extracts the function call invocation and matches
|
|
186
|
-
|
|
185
|
+
The parser extracts the function call invocation and matches them to the Pydantic
|
|
186
|
+
schema provided.
|
|
187
187
|
|
|
188
|
-
An exception will be raised if the function call does not match
|
|
189
|
-
the provided schema.
|
|
188
|
+
An exception will be raised if the function call does not match the provided schema.
|
|
190
189
|
|
|
191
190
|
Example:
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
"
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
chat_generation = ChatGeneration(message=message)
|
|
191
|
+
```python
|
|
192
|
+
message = AIMessage(
|
|
193
|
+
content="This is a test message",
|
|
194
|
+
additional_kwargs={
|
|
195
|
+
"function_call": {
|
|
196
|
+
"name": "cookie",
|
|
197
|
+
"arguments": json.dumps({"name": "value", "age": 10}),
|
|
198
|
+
}
|
|
199
|
+
},
|
|
200
|
+
)
|
|
201
|
+
chat_generation = ChatGeneration(message=message)
|
|
204
202
|
|
|
205
|
-
class Cookie(BaseModel):
|
|
206
|
-
name: str
|
|
207
|
-
age: int
|
|
208
203
|
|
|
209
|
-
|
|
210
|
-
|
|
204
|
+
class Cookie(BaseModel):
|
|
205
|
+
name: str
|
|
206
|
+
age: int
|
|
211
207
|
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
208
|
+
|
|
209
|
+
class Dog(BaseModel):
|
|
210
|
+
species: str
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
# Full output
|
|
214
|
+
parser = PydanticOutputFunctionsParser(
|
|
215
|
+
pydantic_schema={"cookie": Cookie, "dog": Dog}
|
|
216
|
+
)
|
|
217
|
+
result = parser.parse_result([chat_generation])
|
|
218
|
+
```
|
|
217
219
|
|
|
218
220
|
"""
|
|
219
221
|
|
|
220
222
|
pydantic_schema: type[BaseModel] | dict[str, type[BaseModel]]
|
|
221
|
-
"""The
|
|
223
|
+
"""The Pydantic schema to parse the output with.
|
|
222
224
|
|
|
223
225
|
If multiple schemas are provided, then the function name will be used to
|
|
224
226
|
determine which schema to use.
|
|
@@ -227,7 +229,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
|
|
|
227
229
|
@model_validator(mode="before")
|
|
228
230
|
@classmethod
|
|
229
231
|
def validate_schema(cls, values: dict) -> Any:
|
|
230
|
-
"""Validate the
|
|
232
|
+
"""Validate the Pydantic schema.
|
|
231
233
|
|
|
232
234
|
Args:
|
|
233
235
|
values: The values to validate.
|
|
@@ -236,7 +238,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
|
|
|
236
238
|
The validated values.
|
|
237
239
|
|
|
238
240
|
Raises:
|
|
239
|
-
ValueError
|
|
241
|
+
`ValueError`: If the schema is not a Pydantic schema.
|
|
240
242
|
"""
|
|
241
243
|
schema = values["pydantic_schema"]
|
|
242
244
|
if "args_only" not in values:
|
|
@@ -259,10 +261,10 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
|
|
|
259
261
|
|
|
260
262
|
Args:
|
|
261
263
|
result: The result of the LLM call.
|
|
262
|
-
partial: Whether to parse partial JSON objects.
|
|
264
|
+
partial: Whether to parse partial JSON objects.
|
|
263
265
|
|
|
264
266
|
Raises:
|
|
265
|
-
ValueError
|
|
267
|
+
`ValueError`: If the Pydantic schema is not valid.
|
|
266
268
|
|
|
267
269
|
Returns:
|
|
268
270
|
The parsed JSON object.
|
|
@@ -285,13 +287,13 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
|
|
|
285
287
|
elif issubclass(pydantic_schema, BaseModelV1):
|
|
286
288
|
pydantic_args = pydantic_schema.parse_raw(args)
|
|
287
289
|
else:
|
|
288
|
-
msg = f"Unsupported
|
|
290
|
+
msg = f"Unsupported Pydantic schema: {pydantic_schema}"
|
|
289
291
|
raise ValueError(msg)
|
|
290
292
|
return pydantic_args
|
|
291
293
|
|
|
292
294
|
|
|
293
295
|
class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):
|
|
294
|
-
"""Parse an output as an attribute of a
|
|
296
|
+
"""Parse an output as an attribute of a Pydantic object."""
|
|
295
297
|
|
|
296
298
|
attr_name: str
|
|
297
299
|
"""The name of the attribute to return."""
|
|
@@ -302,7 +304,7 @@ class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser):
|
|
|
302
304
|
|
|
303
305
|
Args:
|
|
304
306
|
result: The result of the LLM call.
|
|
305
|
-
partial: Whether to parse partial JSON objects.
|
|
307
|
+
partial: Whether to parse partial JSON objects.
|
|
306
308
|
|
|
307
309
|
Returns:
|
|
308
310
|
The parsed JSON object.
|
|
@@ -148,7 +148,7 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
|
|
|
148
148
|
first_tool_only: bool = False
|
|
149
149
|
"""Whether to return only the first tool call.
|
|
150
150
|
|
|
151
|
-
If False
|
|
151
|
+
If `False`, the result will be a list of tool calls, or an empty list
|
|
152
152
|
if no tool calls are found.
|
|
153
153
|
|
|
154
154
|
If true, and multiple tool calls are found, only the first one will be returned,
|
|
@@ -162,9 +162,9 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]):
|
|
|
162
162
|
Args:
|
|
163
163
|
result: The result of the LLM call.
|
|
164
164
|
partial: Whether to parse partial JSON.
|
|
165
|
-
If True
|
|
165
|
+
If `True`, the output will be a JSON object containing
|
|
166
166
|
all the keys that have been returned so far.
|
|
167
|
-
If False
|
|
167
|
+
If `False`, the output will be the full JSON object.
|
|
168
168
|
Default is False.
|
|
169
169
|
|
|
170
170
|
Returns:
|
|
@@ -226,9 +226,9 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
|
|
|
226
226
|
Args:
|
|
227
227
|
result: The result of the LLM call.
|
|
228
228
|
partial: Whether to parse partial JSON.
|
|
229
|
-
If True
|
|
229
|
+
If `True`, the output will be a JSON object containing
|
|
230
230
|
all the keys that have been returned so far.
|
|
231
|
-
If False
|
|
231
|
+
If `False`, the output will be the full JSON object.
|
|
232
232
|
Default is False.
|
|
233
233
|
|
|
234
234
|
Raises:
|
|
@@ -310,9 +310,9 @@ class PydanticToolsParser(JsonOutputToolsParser):
|
|
|
310
310
|
Args:
|
|
311
311
|
result: The result of the LLM call.
|
|
312
312
|
partial: Whether to parse partial JSON.
|
|
313
|
-
If True
|
|
313
|
+
If `True`, the output will be a JSON object containing
|
|
314
314
|
all the keys that have been returned so far.
|
|
315
|
-
If False
|
|
315
|
+
If `False`, the output will be the full JSON object.
|
|
316
316
|
Default is False.
|
|
317
317
|
|
|
318
318
|
Returns:
|
|
@@ -17,10 +17,10 @@ from langchain_core.utils.pydantic import (
|
|
|
17
17
|
|
|
18
18
|
|
|
19
19
|
class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
|
|
20
|
-
"""Parse an output using a
|
|
20
|
+
"""Parse an output using a Pydantic model."""
|
|
21
21
|
|
|
22
22
|
pydantic_object: Annotated[type[TBaseModel], SkipValidation()]
|
|
23
|
-
"""The
|
|
23
|
+
"""The Pydantic model to parse."""
|
|
24
24
|
|
|
25
25
|
def _parse_obj(self, obj: dict) -> TBaseModel:
|
|
26
26
|
try:
|
|
@@ -45,21 +45,20 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
|
|
|
45
45
|
def parse_result(
|
|
46
46
|
self, result: list[Generation], *, partial: bool = False
|
|
47
47
|
) -> TBaseModel | None:
|
|
48
|
-
"""Parse the result of an LLM call to a
|
|
48
|
+
"""Parse the result of an LLM call to a Pydantic object.
|
|
49
49
|
|
|
50
50
|
Args:
|
|
51
51
|
result: The result of the LLM call.
|
|
52
52
|
partial: Whether to parse partial JSON objects.
|
|
53
|
-
If True
|
|
53
|
+
If `True`, the output will be a JSON object containing
|
|
54
54
|
all the keys that have been returned so far.
|
|
55
|
-
Defaults to False.
|
|
56
55
|
|
|
57
56
|
Raises:
|
|
58
|
-
OutputParserException
|
|
59
|
-
or does not conform to the
|
|
57
|
+
`OutputParserException`: If the result is not valid JSON
|
|
58
|
+
or does not conform to the Pydantic model.
|
|
60
59
|
|
|
61
60
|
Returns:
|
|
62
|
-
The parsed
|
|
61
|
+
The parsed Pydantic object.
|
|
63
62
|
"""
|
|
64
63
|
try:
|
|
65
64
|
json_object = super().parse_result(result)
|
|
@@ -70,13 +69,13 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
|
|
|
70
69
|
raise
|
|
71
70
|
|
|
72
71
|
def parse(self, text: str) -> TBaseModel:
|
|
73
|
-
"""Parse the output of an LLM call to a
|
|
72
|
+
"""Parse the output of an LLM call to a Pydantic object.
|
|
74
73
|
|
|
75
74
|
Args:
|
|
76
75
|
text: The output of the LLM call.
|
|
77
76
|
|
|
78
77
|
Returns:
|
|
79
|
-
The parsed
|
|
78
|
+
The parsed Pydantic object.
|
|
80
79
|
"""
|
|
81
80
|
return super().parse(text)
|
|
82
81
|
|
|
@@ -107,7 +106,7 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
|
|
|
107
106
|
@property
|
|
108
107
|
@override
|
|
109
108
|
def OutputType(self) -> type[TBaseModel]:
|
|
110
|
-
"""Return the
|
|
109
|
+
"""Return the Pydantic model."""
|
|
111
110
|
return self.pydantic_object
|
|
112
111
|
|
|
113
112
|
|
|
@@ -22,7 +22,7 @@ class StrOutputParser(BaseTransformOutputParser[str]):
|
|
|
22
22
|
"""Get the namespace of the langchain object.
|
|
23
23
|
|
|
24
24
|
Returns:
|
|
25
|
-
|
|
25
|
+
`["langchain", "schema", "output_parser"]`
|
|
26
26
|
"""
|
|
27
27
|
return ["langchain", "schema", "output_parser"]
|
|
28
28
|
|
|
@@ -64,7 +64,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
|
|
|
64
64
|
Args:
|
|
65
65
|
input: The input to transform.
|
|
66
66
|
config: The configuration to use for the transformation.
|
|
67
|
-
kwargs: Additional keyword arguments.
|
|
67
|
+
**kwargs: Additional keyword arguments.
|
|
68
68
|
|
|
69
69
|
Yields:
|
|
70
70
|
The transformed output.
|
|
@@ -85,7 +85,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
|
|
|
85
85
|
Args:
|
|
86
86
|
input: The input to transform.
|
|
87
87
|
config: The configuration to use for the transformation.
|
|
88
|
-
kwargs: Additional keyword arguments.
|
|
88
|
+
**kwargs: Additional keyword arguments.
|
|
89
89
|
|
|
90
90
|
Yields:
|
|
91
91
|
The transformed output.
|
|
@@ -82,7 +82,7 @@ class _StreamingParser:
|
|
|
82
82
|
chunk: A chunk of text to parse. This can be a string or a BaseMessage.
|
|
83
83
|
|
|
84
84
|
Yields:
|
|
85
|
-
|
|
85
|
+
A dictionary representing the parsed XML element.
|
|
86
86
|
|
|
87
87
|
Raises:
|
|
88
88
|
xml.etree.ElementTree.ParseError: If the XML is not well-formed.
|
|
@@ -12,7 +12,7 @@ When invoking models via the standard runnable methods (e.g. invoke, batch, etc.
|
|
|
12
12
|
- LLMs will return regular text strings.
|
|
13
13
|
|
|
14
14
|
In addition, users can access the raw output of either LLMs or chat models via
|
|
15
|
-
callbacks. The
|
|
15
|
+
callbacks. The `on_chat_model_end` and `on_llm_end` callbacks will return an
|
|
16
16
|
LLMResult object containing the generated outputs and any additional information
|
|
17
17
|
returned by the model provider.
|
|
18
18
|
|
|
@@ -15,14 +15,14 @@ from langchain_core.utils._merge import merge_dicts
|
|
|
15
15
|
class ChatGeneration(Generation):
|
|
16
16
|
"""A single chat generation output.
|
|
17
17
|
|
|
18
|
-
A subclass of
|
|
18
|
+
A subclass of `Generation` that represents the response from a chat model
|
|
19
19
|
that generates chat messages.
|
|
20
20
|
|
|
21
|
-
The
|
|
22
|
-
Most of the time, the message will be of type
|
|
21
|
+
The `message` attribute is a structured representation of the chat message.
|
|
22
|
+
Most of the time, the message will be of type `AIMessage`.
|
|
23
23
|
|
|
24
24
|
Users working with chat models will usually access information via either
|
|
25
|
-
|
|
25
|
+
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
|
|
26
26
|
via callbacks).
|
|
27
27
|
"""
|
|
28
28
|
|
|
@@ -70,9 +70,9 @@ class ChatGeneration(Generation):
|
|
|
70
70
|
|
|
71
71
|
|
|
72
72
|
class ChatGenerationChunk(ChatGeneration):
|
|
73
|
-
"""
|
|
73
|
+
"""`ChatGeneration` chunk.
|
|
74
74
|
|
|
75
|
-
|
|
75
|
+
`ChatGeneration` chunks can be concatenated with other `ChatGeneration` chunks.
|
|
76
76
|
"""
|
|
77
77
|
|
|
78
78
|
message: BaseMessageChunk
|
|
@@ -84,18 +84,18 @@ class ChatGenerationChunk(ChatGeneration):
|
|
|
84
84
|
def __add__(
|
|
85
85
|
self, other: ChatGenerationChunk | list[ChatGenerationChunk]
|
|
86
86
|
) -> ChatGenerationChunk:
|
|
87
|
-
"""Concatenate two
|
|
87
|
+
"""Concatenate two `ChatGenerationChunk`s.
|
|
88
88
|
|
|
89
89
|
Args:
|
|
90
|
-
other: The other
|
|
90
|
+
other: The other `ChatGenerationChunk` or list of `ChatGenerationChunk`
|
|
91
91
|
to concatenate.
|
|
92
92
|
|
|
93
93
|
Raises:
|
|
94
|
-
TypeError: If other is not a
|
|
95
|
-
|
|
94
|
+
TypeError: If other is not a `ChatGenerationChunk` or list of
|
|
95
|
+
`ChatGenerationChunk`.
|
|
96
96
|
|
|
97
97
|
Returns:
|
|
98
|
-
A new
|
|
98
|
+
A new `ChatGenerationChunk` concatenated from self and other.
|
|
99
99
|
"""
|
|
100
100
|
if isinstance(other, ChatGenerationChunk):
|
|
101
101
|
generation_info = merge_dicts(
|
|
@@ -124,13 +124,13 @@ class ChatGenerationChunk(ChatGeneration):
|
|
|
124
124
|
def merge_chat_generation_chunks(
|
|
125
125
|
chunks: list[ChatGenerationChunk],
|
|
126
126
|
) -> ChatGenerationChunk | None:
|
|
127
|
-
"""Merge a list of
|
|
127
|
+
"""Merge a list of `ChatGenerationChunk`s into a single `ChatGenerationChunk`.
|
|
128
128
|
|
|
129
129
|
Args:
|
|
130
|
-
chunks: A list of
|
|
130
|
+
chunks: A list of `ChatGenerationChunk` to merge.
|
|
131
131
|
|
|
132
132
|
Returns:
|
|
133
|
-
A merged
|
|
133
|
+
A merged `ChatGenerationChunk`, or None if the input list is empty.
|
|
134
134
|
"""
|
|
135
135
|
if not chunks:
|
|
136
136
|
return None
|
|
@@ -47,7 +47,7 @@ class Generation(Serializable):
|
|
|
47
47
|
"""Get the namespace of the langchain object.
|
|
48
48
|
|
|
49
49
|
Returns:
|
|
50
|
-
|
|
50
|
+
`["langchain", "schema", "output"]`
|
|
51
51
|
"""
|
|
52
52
|
return ["langchain", "schema", "output"]
|
|
53
53
|
|
|
@@ -56,16 +56,16 @@ class GenerationChunk(Generation):
|
|
|
56
56
|
"""Generation chunk, which can be concatenated with other Generation chunks."""
|
|
57
57
|
|
|
58
58
|
def __add__(self, other: GenerationChunk) -> GenerationChunk:
|
|
59
|
-
"""Concatenate two
|
|
59
|
+
"""Concatenate two `GenerationChunk`s.
|
|
60
60
|
|
|
61
61
|
Args:
|
|
62
|
-
other: Another
|
|
62
|
+
other: Another `GenerationChunk` to concatenate with.
|
|
63
63
|
|
|
64
64
|
Raises:
|
|
65
|
-
TypeError: If other is not a
|
|
65
|
+
TypeError: If other is not a `GenerationChunk`.
|
|
66
66
|
|
|
67
67
|
Returns:
|
|
68
|
-
A new
|
|
68
|
+
A new `GenerationChunk` concatenated from self and other.
|
|
69
69
|
"""
|
|
70
70
|
if isinstance(other, GenerationChunk):
|
|
71
71
|
generation_info = merge_dicts(
|
|
@@ -30,8 +30,8 @@ class LLMResult(BaseModel):
|
|
|
30
30
|
The second dimension of the list represents different candidate generations for a
|
|
31
31
|
given prompt.
|
|
32
32
|
|
|
33
|
-
- When returned from **an LLM**, the type is
|
|
34
|
-
- When returned from a **chat model**, the type is
|
|
33
|
+
- When returned from **an LLM**, the type is `list[list[Generation]]`.
|
|
34
|
+
- When returned from a **chat model**, the type is `list[list[ChatGeneration]]`.
|
|
35
35
|
|
|
36
36
|
ChatGeneration is a subclass of Generation that has a field for a structured chat
|
|
37
37
|
message.
|
|
@@ -91,13 +91,13 @@ class LLMResult(BaseModel):
|
|
|
91
91
|
return llm_results
|
|
92
92
|
|
|
93
93
|
def __eq__(self, other: object) -> bool:
|
|
94
|
-
"""Check for
|
|
94
|
+
"""Check for `LLMResult` equality by ignoring any metadata related to runs.
|
|
95
95
|
|
|
96
96
|
Args:
|
|
97
|
-
other: Another
|
|
97
|
+
other: Another `LLMResult` object to compare against.
|
|
98
98
|
|
|
99
99
|
Returns:
|
|
100
|
-
True if the generations and
|
|
100
|
+
`True` if the generations and `llm_output` are equal, `False` otherwise.
|
|
101
101
|
"""
|
|
102
102
|
if not isinstance(other, LLMResult):
|
|
103
103
|
return NotImplemented
|
langchain_core/prompt_values.py
CHANGED
|
@@ -40,7 +40,7 @@ class PromptValue(Serializable, ABC):
|
|
|
40
40
|
This is used to determine the namespace of the object when serializing.
|
|
41
41
|
|
|
42
42
|
Returns:
|
|
43
|
-
|
|
43
|
+
`["langchain", "schema", "prompt"]`
|
|
44
44
|
"""
|
|
45
45
|
return ["langchain", "schema", "prompt"]
|
|
46
46
|
|
|
@@ -67,7 +67,7 @@ class StringPromptValue(PromptValue):
|
|
|
67
67
|
This is used to determine the namespace of the object when serializing.
|
|
68
68
|
|
|
69
69
|
Returns:
|
|
70
|
-
|
|
70
|
+
`["langchain", "prompts", "base"]`
|
|
71
71
|
"""
|
|
72
72
|
return ["langchain", "prompts", "base"]
|
|
73
73
|
|
|
@@ -104,7 +104,7 @@ class ChatPromptValue(PromptValue):
|
|
|
104
104
|
This is used to determine the namespace of the object when serializing.
|
|
105
105
|
|
|
106
106
|
Returns:
|
|
107
|
-
|
|
107
|
+
`["langchain", "prompts", "chat"]`
|
|
108
108
|
"""
|
|
109
109
|
return ["langchain", "prompts", "chat"]
|
|
110
110
|
|
|
@@ -113,8 +113,8 @@ class ImageURL(TypedDict, total=False):
|
|
|
113
113
|
"""Image URL."""
|
|
114
114
|
|
|
115
115
|
detail: Literal["auto", "low", "high"]
|
|
116
|
-
"""Specifies the detail level of the image. Defaults to
|
|
117
|
-
Can be
|
|
116
|
+
"""Specifies the detail level of the image. Defaults to `'auto'`.
|
|
117
|
+
Can be `'auto'`, `'low'`, or `'high'`.
|
|
118
118
|
|
|
119
119
|
This follows OpenAI's Chat Completion API's image URL format.
|
|
120
120
|
|
|
@@ -1,28 +1,8 @@
|
|
|
1
1
|
"""**Prompt** is the input to the model.
|
|
2
2
|
|
|
3
|
-
Prompt is often constructed
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
**Class hierarchy:**
|
|
8
|
-
|
|
9
|
-
.. code-block::
|
|
10
|
-
|
|
11
|
-
BasePromptTemplate --> StringPromptTemplate --> PromptTemplate
|
|
12
|
-
FewShotPromptTemplate
|
|
13
|
-
FewShotPromptWithTemplates
|
|
14
|
-
BaseChatPromptTemplate --> AutoGPTPrompt
|
|
15
|
-
ChatPromptTemplate --> AgentScratchPadChatPromptTemplate
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
BaseMessagePromptTemplate --> MessagesPlaceholder
|
|
20
|
-
BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate
|
|
21
|
-
HumanMessagePromptTemplate
|
|
22
|
-
AIMessagePromptTemplate
|
|
23
|
-
SystemMessagePromptTemplate
|
|
24
|
-
|
|
25
|
-
""" # noqa: E501
|
|
3
|
+
Prompt is often constructed from multiple components and prompt values. Prompt classes
|
|
4
|
+
and functions make constructing and working with prompts easy.
|
|
5
|
+
"""
|
|
26
6
|
|
|
27
7
|
from typing import TYPE_CHECKING
|
|
28
8
|
|