langchain-core 0.3.68__py3-none-any.whl → 0.3.70__py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of langchain-core might be problematic.
- langchain_core/_api/deprecation.py +3 -3
- langchain_core/_import_utils.py +2 -2
- langchain_core/caches.py +1 -1
- langchain_core/callbacks/manager.py +2 -2
- langchain_core/chat_history.py +20 -16
- langchain_core/document_loaders/base.py +3 -3
- langchain_core/documents/base.py +3 -3
- langchain_core/indexing/api.py +6 -6
- langchain_core/language_models/_utils.py +1 -1
- langchain_core/language_models/base.py +1 -1
- langchain_core/language_models/chat_models.py +8 -8
- langchain_core/language_models/fake_chat_models.py +6 -2
- langchain_core/language_models/llms.py +23 -26
- langchain_core/load/load.py +23 -2
- langchain_core/load/serializable.py +4 -4
- langchain_core/messages/tool.py +1 -3
- langchain_core/messages/utils.py +29 -32
- langchain_core/output_parsers/base.py +1 -1
- langchain_core/output_parsers/openai_functions.py +7 -7
- langchain_core/output_parsers/openai_tools.py +38 -8
- langchain_core/output_parsers/xml.py +7 -7
- langchain_core/outputs/__init__.py +8 -9
- langchain_core/outputs/chat_generation.py +5 -3
- langchain_core/outputs/generation.py +2 -1
- langchain_core/outputs/llm_result.py +14 -14
- langchain_core/prompts/base.py +5 -5
- langchain_core/prompts/chat.py +22 -21
- langchain_core/prompts/dict.py +0 -2
- langchain_core/prompts/pipeline.py +13 -15
- langchain_core/prompts/prompt.py +4 -4
- langchain_core/prompts/string.py +4 -4
- langchain_core/rate_limiters.py +2 -3
- langchain_core/retrievers.py +6 -6
- langchain_core/runnables/base.py +21 -18
- langchain_core/runnables/branch.py +3 -3
- langchain_core/runnables/graph.py +1 -1
- langchain_core/runnables/history.py +3 -3
- langchain_core/runnables/router.py +1 -2
- langchain_core/runnables/utils.py +1 -1
- langchain_core/stores.py +1 -1
- langchain_core/sys_info.py +2 -2
- langchain_core/tools/base.py +7 -7
- langchain_core/tools/structured.py +8 -1
- langchain_core/tracers/core.py +4 -4
- langchain_core/tracers/event_stream.py +5 -5
- langchain_core/tracers/log_stream.py +5 -1
- langchain_core/utils/_merge.py +2 -0
- langchain_core/utils/env.py +2 -2
- langchain_core/utils/function_calling.py +4 -6
- langchain_core/utils/image.py +1 -1
- langchain_core/utils/json_schema.py +64 -59
- langchain_core/utils/mustache.py +9 -4
- langchain_core/vectorstores/base.py +10 -10
- langchain_core/vectorstores/in_memory.py +5 -5
- langchain_core/vectorstores/utils.py +21 -0
- langchain_core/version.py +1 -1
- {langchain_core-0.3.68.dist-info → langchain_core-0.3.70.dist-info}/METADATA +2 -2
- {langchain_core-0.3.68.dist-info → langchain_core-0.3.70.dist-info}/RECORD +60 -60
- {langchain_core-0.3.68.dist-info → langchain_core-0.3.70.dist-info}/WHEEL +0 -0
- {langchain_core-0.3.68.dist-info → langchain_core-0.3.70.dist-info}/entry_points.txt +0 -0
langchain_core/messages/utils.py
CHANGED
@@ -138,34 +138,34 @@ def get_buffer_string(
 
 
 def _message_from_dict(message: dict) -> BaseMessage:
-    _type = message["type"]
-    if _type == "human":
+    type_ = message["type"]
+    if type_ == "human":
         return HumanMessage(**message["data"])
-    if _type == "ai":
+    if type_ == "ai":
         return AIMessage(**message["data"])
-    if _type == "system":
+    if type_ == "system":
         return SystemMessage(**message["data"])
-    if _type == "chat":
+    if type_ == "chat":
         return ChatMessage(**message["data"])
-    if _type == "function":
+    if type_ == "function":
         return FunctionMessage(**message["data"])
-    if _type == "tool":
+    if type_ == "tool":
         return ToolMessage(**message["data"])
-    if _type == "remove":
+    if type_ == "remove":
         return RemoveMessage(**message["data"])
-    if _type == "AIMessageChunk":
+    if type_ == "AIMessageChunk":
         return AIMessageChunk(**message["data"])
-    if _type == "HumanMessageChunk":
+    if type_ == "HumanMessageChunk":
         return HumanMessageChunk(**message["data"])
-    if _type == "FunctionMessageChunk":
+    if type_ == "FunctionMessageChunk":
         return FunctionMessageChunk(**message["data"])
-    if _type == "ToolMessageChunk":
+    if type_ == "ToolMessageChunk":
         return ToolMessageChunk(**message["data"])
-    if _type == "SystemMessageChunk":
+    if type_ == "SystemMessageChunk":
         return SystemMessageChunk(**message["data"])
-    if _type == "ChatMessageChunk":
+    if type_ == "ChatMessageChunk":
         return ChatMessageChunk(**message["data"])
-    msg = f"Got unexpected message type: {_type}"
+    msg = f"Got unexpected message type: {type_}"
     raise ValueError(msg)
 
 
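The hunk above is a rename of the local type variable inside `_message_from_dict`, so dict-to-message conversion should behave the same. As a quick reference for the `{"type": ..., "data": ...}` shape it dispatches on, here is a minimal round trip through the public helpers (illustrative only):

```python
from langchain_core.messages import HumanMessage, message_to_dict, messages_from_dict

# Serialize a message into the {"type": ..., "data": ...} dict that
# _message_from_dict dispatches on, then rebuild the message from it.
original = HumanMessage(content="hello")
as_dict = message_to_dict(original)          # {"type": "human", "data": {...}}
restored = messages_from_dict([as_dict])[0]  # routes on the "type" key

assert isinstance(restored, HumanMessage)
assert restored.content == "hello"
```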
@@ -263,15 +263,15 @@ def _create_message_from_message_type(
             )
         else:
             kwargs["tool_calls"].append(tool_call)
-    if message_type in ("human", "user"):
+    if message_type in {"human", "user"}:
         if example := kwargs.get("additional_kwargs", {}).pop("example", False):
             kwargs["example"] = example
         message: BaseMessage = HumanMessage(content=content, **kwargs)
-    elif message_type in ("ai", "assistant"):
+    elif message_type in {"ai", "assistant"}:
         if example := kwargs.get("additional_kwargs", {}).pop("example", False):
             kwargs["example"] = example
         message = AIMessage(content=content, **kwargs)
-    elif message_type in ("system", "developer"):
+    elif message_type in {"system", "developer"}:
         if message_type == "developer":
             kwargs["additional_kwargs"] = kwargs.get("additional_kwargs") or {}
             kwargs["additional_kwargs"]["__openai_role__"] = "developer"
@@ -315,13 +315,13 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
         ValueError: if the message dict does not contain the required keys.
     """
     if isinstance(message, BaseMessage):
-        _message = message
+        message_ = message
     elif isinstance(message, str):
-        _message = _create_message_from_message_type("human", message)
+        message_ = _create_message_from_message_type("human", message)
     elif isinstance(message, Sequence) and len(message) == 2:
         # mypy doesn't realise this can't be a string given the previous branch
         message_type_str, template = message  # type: ignore[misc]
-        _message = _create_message_from_message_type(message_type_str, template)
+        message_ = _create_message_from_message_type(message_type_str, template)
     elif isinstance(message, dict):
         msg_kwargs = message.copy()
         try:
@@ -337,7 +337,7 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
                 message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE
            )
            raise ValueError(msg) from e
-        _message = _create_message_from_message_type(
+        message_ = _create_message_from_message_type(
            msg_type, msg_content, **msg_kwargs
        )
    else:
@@ -345,7 +345,7 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage:
        msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE)
        raise NotImplementedError(msg)
 
-    return _message
+    return message_
 
 
 def convert_to_messages(
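The renamed `message_` local above feeds `convert_to_messages`, the public entry point that coerces strings, 2-tuples, and dicts into message objects. A small usage sketch (the inputs are made up for illustration):

```python
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, convert_to_messages

messages = convert_to_messages(
    [
        "plain strings become human messages",          # str -> HumanMessage
        ("system", "You are a helpful assistant."),     # (type, content) tuple
        {"role": "assistant", "content": "Hi there."},  # OpenAI-style dict
    ]
)
assert isinstance(messages[0], HumanMessage)
assert isinstance(messages[1], SystemMessage)
assert isinstance(messages[2], AIMessage)
```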
@@ -370,7 +370,7 @@ def convert_to_messages(
 def _runnable_support(func: Callable) -> Callable:
     @overload
     def wrapped(
-        messages: Literal[None] = None, **kwargs: Any
+        messages: None = None, **kwargs: Any
     ) -> Runnable[Sequence[MessageLikeRepresentation], list[BaseMessage]]: ...
 
     @overload
@@ -519,8 +519,6 @@ def filter_messages(
             or (include_ids and msg.id in include_ids)
         ):
             filtered.append(msg)
-        else:
-            pass
 
     return filtered
 
@@ -1000,7 +998,7 @@ def convert_to_openai_messages(
     .. versionadded:: 0.3.11
 
     """  # noqa: E501
-    if text_format not in ("string", "block"):
+    if text_format not in {"string", "block"}:
         err = f"Unrecognized {text_format=}, expected one of 'string' or 'block'."
         raise ValueError(err)
 
@@ -1125,8 +1123,7 @@ def convert_to_openai_messages(
                         "type": "image_url",
                         "image_url": {
                             "url": (
-                                f"data:image/{image['format']};"
-                                f"base64,{b64_image}"
+                                f"data:image/{image['format']};base64,{b64_image}"
                             )
                         },
                     }
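Both hunks above are mechanical (a set literal for the `text_format` check and a joined f-string for the data URL), so `convert_to_openai_messages` should behave as before. For reference, a minimal call looks like this:

```python
from langchain_core.messages import AIMessage, HumanMessage, convert_to_openai_messages

oai_messages = convert_to_openai_messages(
    [HumanMessage(content="hi"), AIMessage(content="hello")],
    text_format="string",  # "block" keeps content as a list of content blocks
)
# Expected shape:
# [{"role": "user", "content": "hi"}, {"role": "assistant", "content": "hello"}]
print(oai_messages)
```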
@@ -1341,8 +1338,8 @@ def _first_max_tokens(
            excluded.content = list(reversed(excluded.content))
        for _ in range(1, num_block):
            excluded.content = excluded.content[:-1]
-            if token_counter(messages[:idx] + [excluded]) <= max_tokens:
-                messages = messages[:idx] + [excluded]
+            if token_counter([*messages[:idx], excluded]) <= max_tokens:
+                messages = [*messages[:idx], excluded]
                idx += 1
                included_partial = True
                break
@@ -1393,7 +1390,7 @@ def _first_max_tokens(
        if partial_strategy == "last":
            content_splits = list(reversed(content_splits))
        excluded.content = "".join(content_splits)
-        messages = messages[:idx] + [excluded]
+        messages = [*messages[:idx], excluded]
        idx += 1
 
    if end_on:
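`_first_max_tokens` is the internal helper behind `trim_messages(..., strategy="first")`, and the lines touched above sit on the path that decides whether a partially truncated message is kept. A hedged sketch of how that code path is driven; the `token_counter=len` trick (one "token" per message) is only for illustration:

```python
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, trim_messages

history = [
    SystemMessage(content="You are terse."),
    HumanMessage(content="first question"),
    AIMessage(content="first answer"),
    HumanMessage(content="second question"),
]

# Keep messages from the start of the history until the budget is exhausted.
# A real application would pass a model or a proper token-counting function.
kept = trim_messages(
    history,
    max_tokens=2,
    strategy="first",
    token_counter=len,  # toy counter: one "token" per message
)
# kept should be the first two messages under this toy counter.
print(kept)
```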
langchain_core/output_parsers/openai_functions.py
CHANGED
@@ -263,23 +263,23 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser):
         Returns:
             The parsed JSON object.
         """
-        _result = super().parse_result(result)
+        result_ = super().parse_result(result)
         if self.args_only:
             if hasattr(self.pydantic_schema, "model_validate_json"):
-                pydantic_args = self.pydantic_schema.model_validate_json(_result)
+                pydantic_args = self.pydantic_schema.model_validate_json(result_)
             else:
-                pydantic_args = self.pydantic_schema.parse_raw(_result)
+                pydantic_args = self.pydantic_schema.parse_raw(result_)  # type: ignore[attr-defined]
         else:
-            fn_name = _result["name"]
-            _args = _result["arguments"]
+            fn_name = result_["name"]
+            args = result_["arguments"]
             if isinstance(self.pydantic_schema, dict):
                 pydantic_schema = self.pydantic_schema[fn_name]
             else:
                 pydantic_schema = self.pydantic_schema
             if issubclass(pydantic_schema, BaseModel):
-                pydantic_args = pydantic_schema.model_validate_json(_args)
+                pydantic_args = pydantic_schema.model_validate_json(args)
             elif issubclass(pydantic_schema, BaseModelV1):
-                pydantic_args = pydantic_schema.parse_raw(_args)
+                pydantic_args = pydantic_schema.parse_raw(args)
             else:
                 msg = f"Unsupported pydantic schema: {pydantic_schema}"
                 raise ValueError(msg)
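For context, `PydanticOutputFunctionsParser` validates the `function_call` arguments of a legacy OpenAI function-calling response against a Pydantic model; the hunk above only renames its locals. A minimal sketch with a hand-built message (the `Joke` schema and payload are invented for the example):

```python
from pydantic import BaseModel

from langchain_core.messages import AIMessage
from langchain_core.output_parsers.openai_functions import PydanticOutputFunctionsParser
from langchain_core.outputs import ChatGeneration


class Joke(BaseModel):
    setup: str
    punchline: str


parser = PydanticOutputFunctionsParser(pydantic_schema=Joke)  # args_only=True by default

message = AIMessage(
    content="",
    additional_kwargs={
        "function_call": {
            "name": "Joke",
            "arguments": '{"setup": "Why?", "punchline": "Because."}',
        }
    },
)
joke = parser.parse_result([ChatGeneration(message=message)])
assert isinstance(joke, Joke)
```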
langchain_core/output_parsers/openai_tools.py
CHANGED
@@ -234,12 +234,39 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
         Returns:
             The parsed tool calls.
         """
-        parsed_result = super().parse_result(result, partial=partial)
-
+        generation = result[0]
+        if not isinstance(generation, ChatGeneration):
+            msg = "This output parser can only be used with a chat generation."
+            raise OutputParserException(msg)
+        message = generation.message
+        if isinstance(message, AIMessage) and message.tool_calls:
+            parsed_tool_calls = [dict(tc) for tc in message.tool_calls]
+            for tool_call in parsed_tool_calls:
+                if not self.return_id:
+                    _ = tool_call.pop("id")
+        else:
+            try:
+                raw_tool_calls = copy.deepcopy(message.additional_kwargs["tool_calls"])
+            except KeyError:
+                if self.first_tool_only:
+                    return None
+                return []
+            parsed_tool_calls = parse_tool_calls(
+                raw_tool_calls,
+                partial=partial,
+                strict=self.strict,
+                return_id=self.return_id,
+            )
+        # For backwards compatibility
+        for tc in parsed_tool_calls:
+            tc["type"] = tc.pop("name")
         if self.first_tool_only:
+            parsed_result = list(
+                filter(lambda x: x["type"] == self.key_name, parsed_tool_calls)
+            )
             single_result = (
-                parsed_result
-                if parsed_result and parsed_result["type"] == self.key_name
+                parsed_result[0]
+                if parsed_result and parsed_result[0]["type"] == self.key_name
                 else None
             )
             if self.return_id:
@@ -247,10 +274,13 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
             if single_result:
                 return single_result["args"]
             return None
-        parsed_result = [res for res in parsed_result if res["type"] == self.key_name]
-        if not self.return_id:
-            parsed_result = [res["args"] for res in parsed_result]
-        return parsed_result
+        return (
+            [res for res in parsed_tool_calls if res["type"] == self.key_name]
+            if self.return_id
+            else [
+                res["args"] for res in parsed_tool_calls if res["type"] == self.key_name
+            ]
+        )
 
 
 # Common cause of ValidationError is truncated output due to max_tokens.
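The rewritten `parse_result` reads tool calls from `AIMessage.tool_calls` when they are populated and only falls back to the raw `additional_kwargs["tool_calls"]` payload otherwise. A small illustrative usage (tool name and arguments are made up):

```python
from langchain_core.messages import AIMessage
from langchain_core.output_parsers.openai_tools import JsonOutputKeyToolsParser
from langchain_core.outputs import ChatGeneration

parser = JsonOutputKeyToolsParser(key_name="add", first_tool_only=True)

message = AIMessage(
    content="",
    tool_calls=[
        {"name": "add", "args": {"a": 1, "b": 2}, "id": "call_1", "type": "tool_call"}
    ],
)
result = parser.parse_result([ChatGeneration(message=message)])
# With first_tool_only=True and the default return_id=False,
# only the matching tool call's args are returned.
assert result == {"a": 1, "b": 2}
```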
langchain_core/output_parsers/xml.py
CHANGED
@@ -61,10 +61,10 @@ class _StreamingParser:
                    "You can install it with `pip install defusedxml` "
                )
                raise ImportError(msg) from e
-            _parser = XMLParser(target=TreeBuilder())
+            parser_ = XMLParser(target=TreeBuilder())
        else:
-            _parser = None
-        self.pull_parser = ET.XMLPullParser(["start", "end"], _parser=_parser)
+            parser_ = None
+        self.pull_parser = ET.XMLPullParser(["start", "end"], _parser=parser_)
        self.xml_start_re = re.compile(r"<[a-zA-Z:_]")
        self.current_path: list[str] = []
        self.current_path_has_children = False
@@ -218,9 +218,9 @@ class XMLOutputParser(BaseTransformOutputParser):
                    "See https://github.com/tiran/defusedxml for more details"
                )
                raise ImportError(msg) from e
-            _et = ElementTree  # Use the defusedxml parser
+            et = ElementTree  # Use the defusedxml parser
        else:
-            _et = ET  # Use the standard library parser
+            et = ET  # Use the standard library parser
 
        match = re.search(r"```(xml)?(.*)```", text, re.DOTALL)
        if match is not None:
@@ -232,9 +232,9 @@ class XMLOutputParser(BaseTransformOutputParser):
 
        text = text.strip()
        try:
-            root = _et.fromstring(text)
+            root = et.fromstring(text)
            return self._root_to_dict(root)
-        except _et.ParseError as e:
+        except et.ParseError as e:
            msg = f"Failed to parse XML format from completion {text}. Got: {e}"
            raise OutputParserException(msg, llm_output=text) from e
 
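The three XML hunks only rename the local aliases for the selected XML backend (stdlib `xml.etree` or `defusedxml`), so parsing behaviour should be unchanged. For reference, a minimal parse (depending on the `parser` setting, the `defusedxml` package may need to be installed):

```python
from langchain_core.output_parsers import XMLOutputParser

parser = XMLOutputParser()  # the `parser` field selects the defusedxml or stdlib backend
data = parser.parse("<movies><movie>Alien</movie><movie>Blade Runner</movie></movies>")
# Nested tags become lists of single-key dicts, roughly:
# {"movies": [{"movie": "Alien"}, {"movie": "Blade Runner"}]}
print(data)
```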
langchain_core/outputs/__init__.py
CHANGED
@@ -1,24 +1,23 @@
 """Output classes.
 
-Used to represent the output of a language model call
-and the output of a chat.
+Used to represent the output of a language model call and the output of a chat.
 
-The top container for information is the `LLMResult` object. `LLMResult` is used by
-both chat models and LLMs. This object contains the output of the language model
-and any additional information that the model provider wants to return.
+The top container for information is the `LLMResult` object. `LLMResult` is used by both
+chat models and LLMs. This object contains the output of the language model and any
+additional information that the model provider wants to return.
 
 When invoking models via the standard runnable methods (e.g. invoke, batch, etc.):
+
 - Chat models will return `AIMessage` objects.
 - LLMs will return regular text strings.
 
 In addition, users can access the raw output of either LLMs or chat models via
-callbacks. The on_chat_model_end and on_llm_end callbacks will return an
+callbacks. The ``on_chat_model_end`` and ``on_llm_end`` callbacks will return an
 LLMResult object containing the generated outputs and any additional information
 returned by the model provider.
 
-In general, if information is already available
-in the AIMessage object, it is recommended to access it
-from the `LLMResult` object.
+In general, if information is already available in the AIMessage object, it is
+recommended to access it from there rather than from the `LLMResult` object.
 """
 
 from typing import TYPE_CHECKING
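Since the reflowed module docstring above describes reading raw outputs from the `on_llm_end` callback, here is a hedged sketch of that pattern. The handler class is invented for the example, and `GenericFakeChatModel` is used only so the snippet runs without a real provider:

```python
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
from langchain_core.messages import AIMessage
from langchain_core.outputs import LLMResult


class CaptureLLMResult(BaseCallbackHandler):
    """Stores the LLMResult handed to on_llm_end."""

    def __init__(self) -> None:
        self.result = None

    def on_llm_end(self, response: LLMResult, **kwargs) -> None:
        self.result = response


handler = CaptureLLMResult()
model = GenericFakeChatModel(messages=iter([AIMessage(content="hello")]))
model.invoke("hi", config={"callbacks": [handler]})

assert handler.result is not None
print(handler.result.generations[0][0].text)  # "hello"
```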
langchain_core/outputs/chat_generation.py
CHANGED
@@ -27,7 +27,11 @@ class ChatGeneration(Generation):
     """
 
     text: str = ""
-    """*SHOULD NOT BE SET DIRECTLY* The text contents of the output message."""
+    """The text contents of the output message.
+
+    .. warning::
+        SHOULD NOT BE SET DIRECTLY!
+    """
     message: BaseMessage
     """The message output by the chat model."""
     # Override type to be ChatGeneration, ignore mypy error as this is intentional
@@ -60,8 +64,6 @@ class ChatGeneration(Generation):
                    if isinstance(block, dict) and "text" in block:
                        text = block["text"]
                        break
-                else:
-                    pass
            self.text = text
        return self
 
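The docstring change above reinforces that `ChatGeneration.text` is derived from the message content by a validator rather than set by callers:

```python
from langchain_core.messages import AIMessage
from langchain_core.outputs import ChatGeneration

generation = ChatGeneration(message=AIMessage(content="Hello!"))
# `text` is populated from the message content during validation.
assert generation.text == "Hello!"
```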
langchain_core/outputs/generation.py
CHANGED
@@ -11,7 +11,8 @@ from langchain_core.utils._merge import merge_dicts
 class Generation(Serializable):
     """A single text generation output.
 
-    Generation represents the response from an "old-fashioned" LLM that
+    Generation represents the response from an
+    `"old-fashioned" LLM <https://python.langchain.com/docs/concepts/text_llms/>`__ that
     generates regular text (not chat messages).
 
     This model is used internally by chat model and will eventually
langchain_core/outputs/llm_result.py
CHANGED
@@ -15,9 +15,9 @@ from langchain_core.outputs.run_info import RunInfo
 class LLMResult(BaseModel):
     """A container for results of an LLM call.
 
-    Both chat models and LLMs generate an LLMResult object. This object contains
-    the generated outputs and any additional information that the model provider
-    wants to return.
+    Both chat models and LLMs generate an LLMResult object. This object contains the
+    generated outputs and any additional information that the model provider wants to
+    return.
     """
 
     generations: list[
@@ -25,17 +25,16 @@ class LLMResult(BaseModel):
     ]
     """Generated outputs.
 
-    The first dimension of the list represents completions for different input
-    prompts.
+    The first dimension of the list represents completions for different input prompts.
 
-    The second dimension of the list represents different candidate generations
-    for a given prompt.
+    The second dimension of the list represents different candidate generations for a
+    given prompt.
 
-    When returned from an LLM the type is list[list[Generation]]
-    When returned from a chat model the type is list[list[ChatGeneration]]
+    - When returned from **an LLM**, the type is ``list[list[Generation]]``.
+    - When returned from a **chat model**, the type is ``list[list[ChatGeneration]]``.
 
-    ChatGeneration is a subclass of Generation that has a field for a structured
-    message.
+    ChatGeneration is a subclass of Generation that has a field for a structured chat
+    message.
     """
     llm_output: Optional[dict] = None
     """For arbitrary LLM provider specific output.
@@ -43,9 +42,8 @@ class LLMResult(BaseModel):
     This dictionary is a free-form dictionary that can contain any information that the
     provider wants to return. It is not standardized and is provider-specific.
 
-    Users should generally avoid relying on this field and instead rely on
-    accessing relevant information from standardized fields present in
-    AIMessage.
+    Users should generally avoid relying on this field and instead rely on accessing
+    relevant information from standardized fields present in AIMessage.
     """
     run: Optional[list[RunInfo]] = None
     """List of metadata info for model call for each input."""
@@ -97,3 +95,5 @@ class LLMResult(BaseModel):
             self.generations == other.generations
             and self.llm_output == other.llm_output
         )
+
+    __hash__ = None  # type: ignore[assignment]
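The final hunk adds `__hash__ = None` next to the custom `__eq__`, making `LLMResult` explicitly unhashable (defining `__eq__` without `__hash__` already disables hashing in Python, so this mostly spells out the intent). For example:

```python
from langchain_core.outputs import Generation, LLMResult

a = LLMResult(generations=[[Generation(text="hi")]])
b = LLMResult(generations=[[Generation(text="hi")]])

assert a == b  # structural equality via the custom __eq__
# hash(a) would raise TypeError: unhashable type: 'LLMResult'
```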
langchain_core/prompts/base.py
CHANGED
@@ -43,7 +43,7 @@ FormatOutputType = TypeVar("FormatOutputType")
 
 
 class BasePromptTemplate(
-    RunnableSerializable[dict, PromptValue], Generic[FormatOutputType]
+    RunnableSerializable[dict, PromptValue], ABC, Generic[FormatOutputType]
 ):
     """Base class for all prompt templates, returning a prompt."""
 
@@ -186,14 +186,14 @@ class BasePromptTemplate(
         return inner_input
 
     def _format_prompt_with_error_handling(self, inner_input: dict) -> PromptValue:
-        _inner_input = self._validate_input(inner_input)
-        return self.format_prompt(**_inner_input)
+        inner_input_ = self._validate_input(inner_input)
+        return self.format_prompt(**inner_input_)
 
     async def _aformat_prompt_with_error_handling(
         self, inner_input: dict
     ) -> PromptValue:
-        _inner_input = self._validate_input(inner_input)
-        return await self.aformat_prompt(**_inner_input)
+        inner_input_ = self._validate_input(inner_input)
+        return await self.aformat_prompt(**inner_input_)
 
     @override
     def invoke(
langchain_core/prompts/chat.py
CHANGED
@@ -35,7 +35,7 @@ from langchain_core.messages import (
     convert_to_messages,
 )
 from langchain_core.messages.base import get_msg_title_repr
-from langchain_core.prompt_values import ChatPromptValue, ImageURL, PromptValue
+from langchain_core.prompt_values import ChatPromptValue, ImageURL
 from langchain_core.prompts.base import BasePromptTemplate
 from langchain_core.prompts.dict import DictPromptTemplate
 from langchain_core.prompts.image import ImagePromptTemplate
@@ -545,6 +545,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate):
             A new instance of this class.
         """
         template = Path(template_file).read_text()
+        # TODO: .read_text(encoding="utf-8") for v0.4
         return cls.from_template(template, input_variables=input_variables, **kwargs)
 
     def format_messages(self, **kwargs: Any) -> list[BaseMessage]:
@@ -714,20 +715,20 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC):
         """
         return (await self.aformat_prompt(**kwargs)).to_string()
 
-    def format_prompt(self, **kwargs: Any) -> PromptValue:
-        """Format prompt. Should return a PromptValue.
+    def format_prompt(self, **kwargs: Any) -> ChatPromptValue:
+        """Format prompt. Should return a ChatPromptValue.
 
         Args:
             **kwargs: Keyword arguments to use for formatting.
 
         Returns:
-            PromptValue.
+            ChatPromptValue.
         """
         messages = self.format_messages(**kwargs)
         return ChatPromptValue(messages=messages)
 
-    async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
-        """Async format prompt. Should return a PromptValue.
+    async def aformat_prompt(self, **kwargs: Any) -> ChatPromptValue:
+        """Async format prompt. Should return a ChatPromptValue.
 
         Args:
             **kwargs: Keyword arguments to use for formatting.
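Narrowing the return annotations to `ChatPromptValue` does not change runtime behaviour; it just lets type checkers see the chat-specific value directly. Typical use:

```python
from langchain_core.prompt_values import ChatPromptValue
from langchain_core.prompts import ChatPromptTemplate

template = ChatPromptTemplate.from_messages(
    [("system", "You answer in {language}."), ("human", "{question}")]
)
value = template.format_prompt(language="French", question="What is LangChain?")
assert isinstance(value, ChatPromptValue)
messages = value.to_messages()  # list[BaseMessage], ready to send to a chat model
```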
@@ -946,7 +947,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
         ])
 
         """
-        _messages = [
+        messages_ = [
             _convert_to_message_template(message, template_format)
             for message in messages
         ]
@@ -955,7 +956,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
         input_vars: set[str] = set()
         optional_variables: set[str] = set()
         partial_vars: dict[str, Any] = {}
-        for _message in _messages:
+        for _message in messages_:
             if isinstance(_message, MessagesPlaceholder) and _message.optional:
                 partial_vars[_message.variable_name] = []
                 optional_variables.add(_message.variable_name)
@@ -970,7 +971,7 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
             "partial_variables": partial_vars,
             **kwargs,
         }
-        cast("type[ChatPromptTemplate]", super()).__init__(messages=_messages, **kwargs)
+        cast("type[ChatPromptTemplate]", super()).__init__(messages=messages_, **kwargs)
 
     @classmethod
     def get_lc_namespace(cls) -> list[str]:
@@ -1005,8 +1006,8 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
                 **partials
             )
         if isinstance(other, (list, tuple)):
-            _other = ChatPromptTemplate.from_messages(other)
-            return ChatPromptTemplate(messages=self.messages + _other.messages).partial(
+            other_ = ChatPromptTemplate.from_messages(other)
+            return ChatPromptTemplate(messages=self.messages + other_.messages).partial(
                 **partials
             )
         if isinstance(other, str):
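The `__add__` hunk above coerces a list or tuple on the right-hand side through `ChatPromptTemplate.from_messages` before concatenating, so prompts can be extended with plain message tuples:

```python
from langchain_core.prompts import ChatPromptTemplate

base = ChatPromptTemplate.from_messages([("system", "You are concise.")])
combined = base + [("human", "{question}")]  # the list is coerced to a template

prompt_value = combined.invoke({"question": "What changed in 0.3.70?"})
print(prompt_value.to_messages())
```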
@@ -1053,9 +1054,9 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
                 if message.variable_name not in input_types:
                     input_types[message.variable_name] = list[AnyMessage]
         if "partial_variables" in values:
-            input_vars = input_vars - set(values["partial_variables"])
+            input_vars -= set(values["partial_variables"])
         if optional_variables:
-            input_vars = input_vars - optional_variables
+            input_vars -= optional_variables
         if "input_variables" in values and values.get("validate_template"):
             if input_vars != set(values["input_variables"]):
                 msg = (
@@ -1338,11 +1339,11 @@ def _create_template_from_message_type(
     Raises:
         ValueError: If unexpected message type.
     """
-    if message_type in ("human", "user"):
+    if message_type in {"human", "user"}:
         message: BaseMessagePromptTemplate = HumanMessagePromptTemplate.from_template(
             template, template_format=template_format
         )
-    elif message_type in ("ai", "assistant"):
+    elif message_type in {"ai", "assistant"}:
         message = AIMessagePromptTemplate.from_template(
             cast("str", template), template_format=template_format
         )
@@ -1417,13 +1418,13 @@ def _convert_to_message_template(
         ValueError: If 2-tuple does not have 2 elements.
     """
     if isinstance(message, (BaseMessagePromptTemplate, BaseChatPromptTemplate)):
-        _message: Union[
+        message_: Union[
             BaseMessage, BaseMessagePromptTemplate, BaseChatPromptTemplate
         ] = message
     elif isinstance(message, BaseMessage):
-        _message = message
+        message_ = message
     elif isinstance(message, str):
-        _message = _create_template_from_message_type(
+        message_ = _create_template_from_message_type(
             "human", message, template_format=template_format
         )
     elif isinstance(message, (tuple, dict)):
@@ -1440,11 +1441,11 @@ def _convert_to_message_template(
             raise ValueError(msg)
         message_type_str, template = message
         if isinstance(message_type_str, str):
-            _message = _create_template_from_message_type(
+            message_ = _create_template_from_message_type(
                 message_type_str, template, template_format=template_format
             )
         else:
-            _message = message_type_str(
+            message_ = message_type_str(
                 prompt=PromptTemplate.from_template(
                     cast("str", template), template_format=template_format
                 )
@@ -1453,7 +1454,7 @@ def _convert_to_message_template(
         msg = f"Unsupported message type: {type(message)}"
         raise NotImplementedError(msg)
 
-    return _message
+    return message_
 
 
 # For backwards compat:
langchain_core/prompts/dict.py
CHANGED