langchain-core 1.0.3__py3-none-any.whl → 1.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/agents.py +36 -27
- langchain_core/callbacks/manager.py +18 -1
- langchain_core/callbacks/usage.py +2 -2
- langchain_core/documents/base.py +6 -6
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/indexing/api.py +6 -6
- langchain_core/language_models/_utils.py +1 -1
- langchain_core/language_models/base.py +37 -18
- langchain_core/language_models/chat_models.py +44 -28
- langchain_core/language_models/llms.py +66 -36
- langchain_core/messages/ai.py +3 -3
- langchain_core/messages/base.py +1 -1
- langchain_core/messages/content.py +2 -2
- langchain_core/messages/utils.py +12 -8
- langchain_core/output_parsers/openai_tools.py +14 -2
- langchain_core/outputs/generation.py +6 -5
- langchain_core/prompt_values.py +2 -2
- langchain_core/prompts/base.py +47 -44
- langchain_core/prompts/chat.py +35 -28
- langchain_core/prompts/dict.py +1 -1
- langchain_core/prompts/message.py +4 -4
- langchain_core/runnables/base.py +97 -52
- langchain_core/runnables/branch.py +22 -20
- langchain_core/runnables/configurable.py +30 -29
- langchain_core/runnables/fallbacks.py +22 -20
- langchain_core/runnables/graph_mermaid.py +4 -1
- langchain_core/runnables/graph_png.py +28 -0
- langchain_core/runnables/history.py +43 -32
- langchain_core/runnables/passthrough.py +35 -25
- langchain_core/runnables/router.py +5 -5
- langchain_core/runnables/schema.py +1 -1
- langchain_core/sys_info.py +4 -2
- langchain_core/tools/base.py +22 -16
- langchain_core/utils/function_calling.py +9 -6
- langchain_core/utils/input.py +3 -0
- langchain_core/utils/pydantic.py +2 -2
- langchain_core/version.py +1 -1
- {langchain_core-1.0.3.dist-info → langchain_core-1.0.4.dist-info}/METADATA +1 -1
- {langchain_core-1.0.3.dist-info → langchain_core-1.0.4.dist-info}/RECORD +40 -40
- {langchain_core-1.0.3.dist-info → langchain_core-1.0.4.dist-info}/WHEEL +0 -0
langchain_core/agents.py
CHANGED
@@ -52,31 +52,33 @@ class AgentAction(Serializable):
     """The input to pass in to the Tool."""
     log: str
     """Additional information to log about the action.
-
-
-
-
-
-
+
+    This log can be used in a few ways. First, it can be used to audit what exactly the
+    LLM predicted to lead to this `(tool, tool_input)`.
+
+    Second, it can be used in future iterations to show the LLMs prior thoughts. This is
+    useful when `(tool, tool_input)` does not contain full information about the LLM
+    prediction (for example, any `thought` before the tool/tool_input).
+    """
     type: Literal["AgentAction"] = "AgentAction"

     # Override init to support instantiation by position for backward compat.
     def __init__(self, tool: str, tool_input: str | dict, log: str, **kwargs: Any):
-        """Create an AgentAction
+        """Create an `AgentAction`.

         Args:
             tool: The name of the tool to execute.
-            tool_input: The input to pass in to the Tool
+            tool_input: The input to pass in to the `Tool`.
             log: Additional information to log about the action.
         """
         super().__init__(tool=tool, tool_input=tool_input, log=log, **kwargs)

     @classmethod
     def is_lc_serializable(cls) -> bool:
-        """AgentAction is serializable.
+        """`AgentAction` is serializable.

         Returns:
-            True
+            `True`
         """
         return True
@@ -98,19 +100,23 @@ class AgentAction(Serializable):
 class AgentActionMessageLog(AgentAction):
     """Representation of an action to be executed by an agent.

-    This is similar to AgentAction
-    chat messages.
-
+    This is similar to `AgentAction`, but includes a message log consisting of
+    chat messages.
+
+    This is useful when working with `ChatModels`, and is used to reconstruct
+    conversation history from the agent's perspective.
     """

     message_log: Sequence[BaseMessage]
-    """Similar to log, this can be used to pass along extra
-
-
-    if (tool, tool_input) cannot be used to fully recreate the
-    prediction, and you need that LLM prediction (for future agent iteration).
+    """Similar to log, this can be used to pass along extra information about what exact
+    messages were predicted by the LLM before parsing out the `(tool, tool_input)`.
+
+    This is again useful if `(tool, tool_input)` cannot be used to fully recreate the
+    LLM prediction, and you need that LLM prediction (for future agent iteration).
+
     Compared to `log`, this is useful when the underlying LLM is a
-    chat model (and therefore returns messages rather than a string).
+    chat model (and therefore returns messages rather than a string).
+    """
     # Ignoring type because we're overriding the type from AgentAction.
     # And this is the correct thing to do in this case.
     # The type literal is used for serialization purposes.
@@ -132,19 +138,22 @@ class AgentStep(Serializable):


 class AgentFinish(Serializable):
-    """Final return value of an ActionAgent
+    """Final return value of an `ActionAgent`.

-    Agents return an AgentFinish when they have reached a stopping condition.
+    Agents return an `AgentFinish` when they have reached a stopping condition.
     """

     return_values: dict
     """Dictionary of return values."""
     log: str
     """Additional information to log about the return value.
+
     This is used to pass along the full LLM prediction, not just the parsed out
-    return value.
-
-
+    return value.
+
+    For example, if the full LLM prediction was `Final Answer: 2` you may want to just
+    return `2` as a return value, but pass along the full string as a `log` (for
+    debugging or observability purposes).
     """
     type: Literal["AgentFinish"] = "AgentFinish"
@@ -154,7 +163,7 @@ class AgentFinish(Serializable):

     @classmethod
     def is_lc_serializable(cls) -> bool:
-        """Return True as this class is serializable."""
+        """Return `True` as this class is serializable."""
         return True

     @classmethod
@@ -202,7 +211,7 @@ def _convert_agent_observation_to_messages(
         observation: Observation to convert to a message.

     Returns:
-        AIMessage that corresponds to the original tool invocation.
+        `AIMessage` that corresponds to the original tool invocation.
     """
     if isinstance(agent_action, AgentActionMessageLog):
         return [_create_function_message(agent_action, observation)]
@@ -225,7 +234,7 @@ def _create_function_message(
         observation: the result of the tool invocation.

     Returns:
-        FunctionMessage that corresponds to the original tool invocation.
+        `FunctionMessage` that corresponds to the original tool invocation.
     """
     if not isinstance(observation, str):
         try:
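The docstrings above describe the `log` and positional-init behavior; a minimal usage sketch (not part of the diff, assuming only the public `langchain_core.agents` API):

```python
from langchain_core.agents import AgentAction, AgentFinish

# Positional instantiation is kept for backward compatibility,
# per the __init__ override documented above.
action = AgentAction("search", {"query": "weather"}, "Thought: I should search.\n")

# `log` carries the full LLM prediction; `return_values` holds only the
# parsed result (e.g. "2" parsed out of "Final Answer: 2").
finish = AgentFinish(return_values={"output": "2"}, log="Final Answer: 2")
```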
langchain_core/callbacks/manager.py
CHANGED

@@ -229,7 +229,24 @@ def shielded(func: Func) -> Func:

     @functools.wraps(func)
     async def wrapped(*args: Any, **kwargs: Any) -> Any:
-
+        # Capture the current context to preserve context variables
+        ctx = copy_context()
+
+        # Create the coroutine
+        coro = func(*args, **kwargs)
+
+        # For Python 3.11+, create task with explicit context
+        # For older versions, fallback to original behavior
+        try:
+            # Create a task with the captured context to preserve context variables
+            task = asyncio.create_task(coro, context=ctx)  # type: ignore[call-arg, unused-ignore]
+            # `call-arg` used to not fail 3.9 or 3.10 tests
+            return await asyncio.shield(task)
+        except TypeError:
+            # Python < 3.11 fallback - create task normally then shield
+            # This won't preserve context perfectly but is better than nothing
+            task = asyncio.create_task(coro)
+            return await asyncio.shield(task)

     return cast("Func", wrapped)
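The new `shielded` body combines three standard-library pieces: `contextvars.copy_context()`, `asyncio.create_task(..., context=...)` (Python 3.11+), and `asyncio.shield`. A standalone sketch of the same pattern, independent of langchain-core:

```python
import asyncio
from contextvars import ContextVar, copy_context

request_id: ContextVar[str] = ContextVar("request_id", default="unset")

async def handler() -> str:
    # Runs with the captured context, so the ContextVar value set by the
    # caller is visible inside the shielded task.
    return request_id.get()

async def main() -> None:
    request_id.set("abc-123")
    ctx = copy_context()  # snapshot the current context variables
    coro = handler()
    try:
        # Python 3.11+: bind the task to the captured context explicitly
        task = asyncio.create_task(coro, context=ctx)
    except TypeError:
        # Older Pythons: create_task() has no `context` kwarg; tasks copy
        # the current context implicitly at creation time instead
        task = asyncio.create_task(coro)
    # shield() keeps the task running even if the awaiting caller is cancelled
    print(await asyncio.shield(task))  # prints "abc-123"

asyncio.run(main())
```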
langchain_core/callbacks/usage.py
CHANGED

@@ -43,7 +43,7 @@ class UsageMetadataCallbackHandler(BaseCallbackHandler):
        'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
     ```

-    !!! version-added "Added in
+    !!! version-added "Added in `langchain-core` 0.3.49"

     """

@@ -134,7 +134,7 @@ def get_usage_metadata_callback(
     }
     ```

-    !!! version-added "Added in
+    !!! version-added "Added in `langchain-core` 0.3.49"

     """
     usage_metadata_callback_var: ContextVar[UsageMetadataCallbackHandler | None] = (
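Both fixed admonitions document the same feature; a hedged usage sketch (the model object is a placeholder for any chat model that populates usage metadata):

```python
from langchain_core.callbacks import get_usage_metadata_callback

# `model` is assumed to be an existing chat model instance, e.g. one
# created with init_chat_model() from the `langchain` package.
with get_usage_metadata_callback() as cb:
    model.invoke("Hello")
    model.invoke("Goodbye")

# Token counts aggregated per model name, shaped like the docstring example:
# {'<model-name>': {'input_tokens': ..., 'output_tokens': ..., 'total_tokens': ...,
#                   'input_token_details': {...}}}
print(cb.usage_metadata)
```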
langchain_core/documents/base.py
CHANGED
@@ -114,11 +114,11 @@ class Blob(BaseMedia):
     data: bytes | str | None = None
     """Raw data associated with the `Blob`."""
     mimetype: str | None = None
-    """
+    """MIME type, not to be confused with a file extension."""
     encoding: str = "utf-8"
     """Encoding to use if decoding the bytes into a string.

-
+    Uses `utf-8` as default encoding if decoding to string.
     """
     path: PathLike | None = None
     """Location where the original content was found."""
@@ -134,7 +134,7 @@ class Blob(BaseMedia):

        If a path is associated with the `Blob`, it will default to the path location.

-        Unless explicitly set via a metadata field called `
+        Unless explicitly set via a metadata field called `'source'`, in which
         case that value will be used instead.
         """
         if self.metadata and "source" in self.metadata:
@@ -309,7 +309,7 @@ class Document(BaseMedia):

     @classmethod
     def is_lc_serializable(cls) -> bool:
-        """Return True as this class is serializable."""
+        """Return `True` as this class is serializable."""
         return True

     @classmethod
@@ -322,10 +322,10 @@ class Document(BaseMedia):
         return ["langchain", "schema", "document"]

     def __str__(self) -> str:
-        """Override __str__ to restrict it to page_content and metadata.
+        """Override `__str__` to restrict it to page_content and metadata.

         Returns:
-            A string representation of the Document
+            A string representation of the `Document`.
         """
         # The format matches pydantic format for __str__.
         #
langchain_core/example_selectors/length_based.py
CHANGED

@@ -29,7 +29,7 @@ class LengthBasedExampleSelector(BaseExampleSelector, BaseModel):
     max_length: int = 2048
     """Max length for the prompt, beyond which examples are cut."""

-    example_text_lengths: list[int] = Field(default_factory=list)
+    example_text_lengths: list[int] = Field(default_factory=list)
     """Length of each example."""

     def add_example(self, example: dict[str, str]) -> None:
langchain_core/indexing/api.py
CHANGED
@@ -298,7 +298,7 @@ def index(
     For the time being, documents are indexed using their hashes, and users
     are not able to specify the uid of the document.

-    !!! warning "Behavior changed in 0.3.25"
+    !!! warning "Behavior changed in `langchain-core` 0.3.25"
         Added `scoped_full` cleanup mode.

     !!! warning

@@ -349,7 +349,7 @@ def index(
         key_encoder: Hashing algorithm to use for hashing the document content and
             metadata. Options include "blake2b", "sha256", and "sha512".

-            !!! version-added "Added in
+            !!! version-added "Added in `langchain-core` 0.3.66"

         key_encoder: Hashing algorithm to use for hashing the document.
             If not provided, a default encoder using SHA-1 will be used.

@@ -366,7 +366,7 @@ def index(
             method of the `VectorStore` or the upsert method of the DocumentIndex.
             For example, you can use this to specify a custom vector_field:
             upsert_kwargs={"vector_field": "embedding"}
-            !!! version-added "Added in
+            !!! version-added "Added in `langchain-core` 0.3.10"

     Returns:
         Indexing result which contains information about how many documents

@@ -636,7 +636,7 @@ async def aindex(
     For the time being, documents are indexed using their hashes, and users
     are not able to specify the uid of the document.

-    !!! warning "Behavior changed in 0.3.25"
+    !!! warning "Behavior changed in `langchain-core` 0.3.25"
         Added `scoped_full` cleanup mode.

     !!! warning

@@ -687,7 +687,7 @@ async def aindex(
         key_encoder: Hashing algorithm to use for hashing the document content and
             metadata. Options include "blake2b", "sha256", and "sha512".

-            !!! version-added "Added in
+            !!! version-added "Added in `langchain-core` 0.3.66"

         key_encoder: Hashing algorithm to use for hashing the document.
             If not provided, a default encoder using SHA-1 will be used.

@@ -704,7 +704,7 @@ async def aindex(
             method of the `VectorStore` or the upsert method of the DocumentIndex.
             For example, you can use this to specify a custom vector_field:
             upsert_kwargs={"vector_field": "embedding"}
-            !!! version-added "Added in
+            !!! version-added "Added in `langchain-core` 0.3.10"

     Returns:
         Indexing result which contains information about how many documents
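The parameters touched by these hunks read most clearly in a call; a sketch assuming an existing record manager and vector store (both placeholders here):

```python
from langchain_core.documents import Document
from langchain_core.indexing import index

docs = [Document(page_content="hello", metadata={"source": "greetings.txt"})]

result = index(
    docs,
    record_manager,         # placeholder: any RecordManager implementation
    vector_store,           # placeholder: any VectorStore supporting add_documents
    cleanup="scoped_full",  # cleanup mode added in langchain-core 0.3.25
    source_id_key="source",
    key_encoder="sha256",   # hashing algorithm; parameter added in 0.3.66
    upsert_kwargs={"vector_field": "embedding"},  # forwarded to add_documents
)
# e.g. {'num_added': 1, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0}
print(result)
```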
langchain_core/language_models/_utils.py
CHANGED

@@ -139,7 +139,7 @@ def _normalize_messages(
       directly; this may change in the future
     - LangChain v0 standard content blocks for backward compatibility

-    !!! warning "Behavior changed in 1.0.0"
+    !!! warning "Behavior changed in `langchain-core` 1.0.0"
         In previous versions, this function returned messages in LangChain v0 format.
         Now, it returns messages in LangChain v1 format, which upgraded chat models now
         expect to receive when passing back in message history. For backward
langchain_core/language_models/base.py
CHANGED

@@ -131,14 +131,19 @@ class BaseLanguageModel(

     Caching is not currently supported for streaming methods of models.
     """
+
     verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False)
     """Whether to print out response text."""
+
     callbacks: Callbacks = Field(default=None, exclude=True)
     """Callbacks to add to the run trace."""
+
     tags: list[str] | None = Field(default=None, exclude=True)
     """Tags to add to the run trace."""
+
     metadata: dict[str, Any] | None = Field(default=None, exclude=True)
     """Metadata to add to the run trace."""
+
     custom_get_token_ids: Callable[[str], list[int]] | None = Field(
         default=None, exclude=True
     )
@@ -195,15 +200,22 @@ class BaseLanguageModel(
         type (e.g., pure text completion models vs chat models).

         Args:
-            prompts: List of `PromptValue` objects.
-
-
-
-
-
-
-
-
+            prompts: List of `PromptValue` objects.
+
+                A `PromptValue` is an object that can be converted to match the format
+                of any language model (string for pure text generation models and
+                `BaseMessage` objects for chat models).
+            stop: Stop words to use when generating.
+
+                Model output is cut off at the first occurrence of any of these
+                substrings.
+            callbacks: `Callbacks` to pass through.
+
+                Used for executing additional functionality, such as logging or
+                streaming, throughout generation.
+            **kwargs: Arbitrary additional keyword arguments.
+
+                These are usually passed to the model provider API call.

         Returns:
             An `LLMResult`, which contains a list of candidate `Generation` objects for
@@ -232,15 +244,22 @@ class BaseLanguageModel(
         type (e.g., pure text completion models vs chat models).

         Args:
-            prompts: List of `PromptValue` objects.
-
-
-
-
-
-
-
-
+            prompts: List of `PromptValue` objects.
+
+                A `PromptValue` is an object that can be converted to match the format
+                of any language model (string for pure text generation models and
+                `BaseMessage` objects for chat models).
+            stop: Stop words to use when generating.
+
+                Model output is cut off at the first occurrence of any of these
+                substrings.
+            callbacks: `Callbacks` to pass through.
+
+                Used for executing additional functionality, such as logging or
+                streaming, throughout generation.
+            **kwargs: Arbitrary additional keyword arguments.
+
+                These are usually passed to the model provider API call.

         Returns:
             An `LLMResult`, which contains a list of candidate `Generation` objects for
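The rewritten `stop` description is easy to demonstrate; a sketch assuming any existing `BaseLanguageModel` instance named `llm` (a placeholder here):

```python
from langchain_core.prompt_values import StringPromptValue

# Output is cut off at the first occurrence of any stop substring, so a
# newline stop sequence yields at most one line of generated text.
result = llm.generate_prompt(
    [StringPromptValue(text="List the planets, one per line:")],
    stop=["\n"],
)
print(result.generations[0][0].text)
```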
langchain_core/language_models/chat_models.py
CHANGED

@@ -332,7 +332,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     [`langchain-openai`](https://pypi.org/project/langchain-openai)) can also use this
     field to roll out new content formats in a backward-compatible way.

-    !!! version-added "Added in
+    !!! version-added "Added in `langchain-core` 1.0"

     """

@@ -845,16 +845,21 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

         Args:
             messages: List of list of messages.
-            stop: Stop words to use when generating.
-
-
-
+            stop: Stop words to use when generating.
+
+                Model output is cut off at the first occurrence of any of these
+                substrings.
+            callbacks: `Callbacks` to pass through.
+
+                Used for executing additional functionality, such as logging or
+                streaming, throughout generation.
             tags: The tags to apply.
             metadata: The metadata to apply.
             run_name: The name of the run.
             run_id: The ID of the run.
-            **kwargs: Arbitrary additional keyword arguments.
-
+            **kwargs: Arbitrary additional keyword arguments.
+
+                These are usually passed to the model provider API call.

         Returns:
             An `LLMResult`, which contains a list of candidate `Generations` for each
@@ -963,16 +968,21 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

         Args:
             messages: List of list of messages.
-            stop: Stop words to use when generating.
-
-
-
+            stop: Stop words to use when generating.
+
+                Model output is cut off at the first occurrence of any of these
+                substrings.
+            callbacks: `Callbacks` to pass through.
+
+                Used for executing additional functionality, such as logging or
+                streaming, throughout generation.
             tags: The tags to apply.
             metadata: The metadata to apply.
             run_name: The name of the run.
             run_id: The ID of the run.
-            **kwargs: Arbitrary additional keyword arguments.
-
+            **kwargs: Arbitrary additional keyword arguments.
+
+                These are usually passed to the model provider API call.

         Returns:
             An `LLMResult`, which contains a list of candidate `Generations` for each
@@ -1505,10 +1515,10 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
         Args:
             schema: The output schema. Can be passed in as:

-                -
-                -
-                -
-                -
+                - An OpenAI function/tool schema,
+                - A JSON Schema,
+                - A `TypedDict` class,
+                - Or a Pydantic class.

                 If `schema` is a Pydantic class then the model output will be a
                 Pydantic instance of that class, and the model-generated fields will be
@@ -1520,11 +1530,15 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
                 when specifying a Pydantic or `TypedDict` class.

             include_raw:
-                If `False` then only the parsed structured output is returned.
-
-
-
+                If `False` then only the parsed structured output is returned.
+
+                If an error occurs during model output parsing it will be raised.
+
+                If `True` then both the raw model response (a `BaseMessage`) and the
+                parsed model response will be returned.
+
+                If an error occurs during output parsing it will be caught and returned
+                as well.

                 The final output is always a `dict` with keys `'raw'`, `'parsed'`, and
                 `'parsing_error'`.
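A hedged sketch of the `include_raw` contract described above, using a Pydantic schema (`chat_model` is a placeholder for any tool-calling chat model):

```python
from pydantic import BaseModel, Field

class Joke(BaseModel):
    """Joke to tell user."""

    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline of the joke")

structured = chat_model.with_structured_output(Joke, include_raw=True)
out = structured.invoke("Tell me a joke about cats")

# With include_raw=True the result is always a dict with these three keys;
# 'parsing_error' is None unless parsing failed (errors are caught, not raised).
raw_message, parsed, error = out["raw"], out["parsed"], out["parsing_error"]
```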
@@ -1629,8 +1643,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
            # }
            ```

-        !!! warning "Behavior changed in 0.2.26"
-            Added support for TypedDict class.
+        !!! warning "Behavior changed in `langchain-core` 0.2.26"
+            Added support for `TypedDict` class.

         """  # noqa: E501
         _ = kwargs.pop("method", None)
@@ -1676,9 +1690,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     def profile(self) -> ModelProfile:
         """Return profiling information for the model.

-        This property
-
-        features.
+        This property relies on the `langchain-model-profiles` package to retrieve chat
+        model capabilities, such as context window sizes and supported features.

         Raises:
             ImportError: If `langchain-model-profiles` is not installed.
@@ -1764,9 +1777,12 @@ def _gen_info_and_msg_metadata(
     }


+_MAX_CLEANUP_DEPTH = 100
+
+
 def _cleanup_llm_representation(serialized: Any, depth: int) -> None:
     """Remove non-serializable objects from a serialized object."""
-    if depth >
+    if depth > _MAX_CLEANUP_DEPTH:  # Don't cooperate for pathological cases
         return

     if not isinstance(serialized, dict):