langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
This version of langchain-core has been flagged as potentially problematic.
- langchain_core/__init__.py +1 -1
- langchain_core/_api/__init__.py +3 -4
- langchain_core/_api/beta_decorator.py +45 -70
- langchain_core/_api/deprecation.py +80 -80
- langchain_core/_api/path.py +22 -8
- langchain_core/_import_utils.py +10 -4
- langchain_core/agents.py +25 -21
- langchain_core/caches.py +53 -63
- langchain_core/callbacks/__init__.py +1 -8
- langchain_core/callbacks/base.py +341 -348
- langchain_core/callbacks/file.py +55 -44
- langchain_core/callbacks/manager.py +546 -683
- langchain_core/callbacks/stdout.py +29 -30
- langchain_core/callbacks/streaming_stdout.py +35 -36
- langchain_core/callbacks/usage.py +65 -70
- langchain_core/chat_history.py +48 -55
- langchain_core/document_loaders/base.py +46 -21
- langchain_core/document_loaders/langsmith.py +39 -36
- langchain_core/documents/__init__.py +0 -1
- langchain_core/documents/base.py +96 -74
- langchain_core/documents/compressor.py +12 -9
- langchain_core/documents/transformers.py +29 -28
- langchain_core/embeddings/fake.py +56 -57
- langchain_core/env.py +2 -3
- langchain_core/example_selectors/base.py +12 -0
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/example_selectors/semantic_similarity.py +21 -25
- langchain_core/exceptions.py +15 -9
- langchain_core/globals.py +4 -163
- langchain_core/indexing/api.py +132 -125
- langchain_core/indexing/base.py +64 -67
- langchain_core/indexing/in_memory.py +26 -6
- langchain_core/language_models/__init__.py +15 -27
- langchain_core/language_models/_utils.py +267 -117
- langchain_core/language_models/base.py +92 -177
- langchain_core/language_models/chat_models.py +547 -407
- langchain_core/language_models/fake.py +11 -11
- langchain_core/language_models/fake_chat_models.py +72 -118
- langchain_core/language_models/llms.py +168 -242
- langchain_core/load/dump.py +8 -11
- langchain_core/load/load.py +32 -28
- langchain_core/load/mapping.py +2 -4
- langchain_core/load/serializable.py +50 -56
- langchain_core/messages/__init__.py +36 -51
- langchain_core/messages/ai.py +377 -150
- langchain_core/messages/base.py +239 -47
- langchain_core/messages/block_translators/__init__.py +111 -0
- langchain_core/messages/block_translators/anthropic.py +470 -0
- langchain_core/messages/block_translators/bedrock.py +94 -0
- langchain_core/messages/block_translators/bedrock_converse.py +297 -0
- langchain_core/messages/block_translators/google_genai.py +530 -0
- langchain_core/messages/block_translators/google_vertexai.py +21 -0
- langchain_core/messages/block_translators/groq.py +143 -0
- langchain_core/messages/block_translators/langchain_v0.py +301 -0
- langchain_core/messages/block_translators/openai.py +1010 -0
- langchain_core/messages/chat.py +2 -3
- langchain_core/messages/content.py +1423 -0
- langchain_core/messages/function.py +7 -7
- langchain_core/messages/human.py +44 -38
- langchain_core/messages/modifier.py +3 -2
- langchain_core/messages/system.py +40 -27
- langchain_core/messages/tool.py +160 -58
- langchain_core/messages/utils.py +527 -638
- langchain_core/output_parsers/__init__.py +1 -14
- langchain_core/output_parsers/base.py +68 -104
- langchain_core/output_parsers/json.py +13 -17
- langchain_core/output_parsers/list.py +11 -33
- langchain_core/output_parsers/openai_functions.py +56 -74
- langchain_core/output_parsers/openai_tools.py +68 -109
- langchain_core/output_parsers/pydantic.py +15 -13
- langchain_core/output_parsers/string.py +6 -2
- langchain_core/output_parsers/transform.py +17 -60
- langchain_core/output_parsers/xml.py +34 -44
- langchain_core/outputs/__init__.py +1 -1
- langchain_core/outputs/chat_generation.py +26 -11
- langchain_core/outputs/chat_result.py +1 -3
- langchain_core/outputs/generation.py +17 -6
- langchain_core/outputs/llm_result.py +15 -8
- langchain_core/prompt_values.py +29 -123
- langchain_core/prompts/__init__.py +3 -27
- langchain_core/prompts/base.py +48 -63
- langchain_core/prompts/chat.py +259 -288
- langchain_core/prompts/dict.py +19 -11
- langchain_core/prompts/few_shot.py +84 -90
- langchain_core/prompts/few_shot_with_templates.py +14 -12
- langchain_core/prompts/image.py +19 -14
- langchain_core/prompts/loading.py +6 -8
- langchain_core/prompts/message.py +7 -8
- langchain_core/prompts/prompt.py +42 -43
- langchain_core/prompts/string.py +37 -16
- langchain_core/prompts/structured.py +43 -46
- langchain_core/rate_limiters.py +51 -60
- langchain_core/retrievers.py +52 -192
- langchain_core/runnables/base.py +1727 -1683
- langchain_core/runnables/branch.py +52 -73
- langchain_core/runnables/config.py +89 -103
- langchain_core/runnables/configurable.py +128 -130
- langchain_core/runnables/fallbacks.py +93 -82
- langchain_core/runnables/graph.py +127 -127
- langchain_core/runnables/graph_ascii.py +63 -41
- langchain_core/runnables/graph_mermaid.py +87 -70
- langchain_core/runnables/graph_png.py +31 -36
- langchain_core/runnables/history.py +145 -161
- langchain_core/runnables/passthrough.py +141 -144
- langchain_core/runnables/retry.py +84 -68
- langchain_core/runnables/router.py +33 -37
- langchain_core/runnables/schema.py +79 -72
- langchain_core/runnables/utils.py +95 -139
- langchain_core/stores.py +85 -131
- langchain_core/structured_query.py +11 -15
- langchain_core/sys_info.py +31 -32
- langchain_core/tools/__init__.py +1 -14
- langchain_core/tools/base.py +221 -247
- langchain_core/tools/convert.py +144 -161
- langchain_core/tools/render.py +10 -10
- langchain_core/tools/retriever.py +12 -19
- langchain_core/tools/simple.py +52 -29
- langchain_core/tools/structured.py +56 -60
- langchain_core/tracers/__init__.py +1 -9
- langchain_core/tracers/_streaming.py +6 -7
- langchain_core/tracers/base.py +103 -112
- langchain_core/tracers/context.py +29 -48
- langchain_core/tracers/core.py +142 -105
- langchain_core/tracers/evaluation.py +30 -34
- langchain_core/tracers/event_stream.py +162 -117
- langchain_core/tracers/langchain.py +34 -36
- langchain_core/tracers/log_stream.py +87 -49
- langchain_core/tracers/memory_stream.py +3 -3
- langchain_core/tracers/root_listeners.py +18 -34
- langchain_core/tracers/run_collector.py +8 -20
- langchain_core/tracers/schemas.py +0 -125
- langchain_core/tracers/stdout.py +3 -3
- langchain_core/utils/__init__.py +1 -4
- langchain_core/utils/_merge.py +47 -9
- langchain_core/utils/aiter.py +70 -66
- langchain_core/utils/env.py +12 -9
- langchain_core/utils/function_calling.py +139 -206
- langchain_core/utils/html.py +7 -8
- langchain_core/utils/input.py +6 -6
- langchain_core/utils/interactive_env.py +6 -2
- langchain_core/utils/iter.py +48 -45
- langchain_core/utils/json.py +14 -4
- langchain_core/utils/json_schema.py +159 -43
- langchain_core/utils/mustache.py +32 -25
- langchain_core/utils/pydantic.py +67 -40
- langchain_core/utils/strings.py +5 -5
- langchain_core/utils/usage.py +1 -1
- langchain_core/utils/utils.py +104 -62
- langchain_core/vectorstores/base.py +131 -179
- langchain_core/vectorstores/in_memory.py +113 -182
- langchain_core/vectorstores/utils.py +23 -17
- langchain_core/version.py +1 -1
- langchain_core-1.0.0.dist-info/METADATA +68 -0
- langchain_core-1.0.0.dist-info/RECORD +172 -0
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
- langchain_core/beta/__init__.py +0 -1
- langchain_core/beta/runnables/__init__.py +0 -1
- langchain_core/beta/runnables/context.py +0 -448
- langchain_core/memory.py +0 -116
- langchain_core/messages/content_blocks.py +0 -1435
- langchain_core/prompts/pipeline.py +0 -133
- langchain_core/pydantic_v1/__init__.py +0 -30
- langchain_core/pydantic_v1/dataclasses.py +0 -23
- langchain_core/pydantic_v1/main.py +0 -23
- langchain_core/tracers/langchain_v1.py +0 -23
- langchain_core/utils/loading.py +0 -31
- langchain_core/v1/__init__.py +0 -1
- langchain_core/v1/chat_models.py +0 -1047
- langchain_core/v1/messages.py +0 -755
- langchain_core-0.4.0.dev0.dist-info/METADATA +0 -108
- langchain_core-0.4.0.dev0.dist-info/RECORD +0 -177
- langchain_core-0.4.0.dev0.dist-info/entry_points.txt +0 -4
langchain_core/language_models/base.py
@@ -4,38 +4,46 @@ from __future__ import annotations
 
 import warnings
 from abc import ABC, abstractmethod
-from collections.abc import Mapping, Sequence
+from collections.abc import Callable, Mapping, Sequence
 from functools import cache
 from typing import (
     TYPE_CHECKING,
     Any,
-    Callable,
     Literal,
-    Optional,
+    TypeAlias,
     TypeVar,
-    Union,
 )
 
 from pydantic import BaseModel, ConfigDict, Field, field_validator
-from typing_extensions import TypeAlias, TypedDict, override
+from typing_extensions import TypedDict, override
 
-from langchain_core._api import deprecated
 from langchain_core.caches import BaseCache
 from langchain_core.callbacks import Callbacks
+from langchain_core.globals import get_verbose
 from langchain_core.messages import (
+    AIMessage,
     AnyMessage,
     BaseMessage,
     MessageLikeRepresentation,
     get_buffer_string,
 )
-from langchain_core.prompt_values import PromptValue
+from langchain_core.prompt_values import (
+    ChatPromptValueConcrete,
+    PromptValue,
+    StringPromptValue,
+)
 from langchain_core.runnables import Runnable, RunnableSerializable
-from langchain_core.utils import get_pydantic_field_names
-from langchain_core.v1.messages import AIMessage as AIMessageV1
 
 if TYPE_CHECKING:
     from langchain_core.outputs import LLMResult
 
+try:
+    from transformers import GPT2TokenizerFast  # type: ignore[import-not-found]
+
+    _HAS_TRANSFORMERS = True
+except ImportError:
+    _HAS_TRANSFORMERS = False
+
 
 class LangSmithParams(TypedDict, total=False):
     """LangSmith parameters for tracing."""
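
Note: the transformers import moves from inside get_tokenizer to a module-level guard. A minimal sketch of the same optional-dependency pattern, with a hypothetical somepkg standing in for any optional import:

    # hypothetical package name, for illustration only
    try:
        import somepkg

        _HAS_SOMEPKG = True
    except ImportError:
        _HAS_SOMEPKG = False

    def require_somepkg() -> None:
        if not _HAS_SOMEPKG:
            msg = "Please install somepkg."
            raise ImportError(msg)
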
@@ -46,11 +54,11 @@ class LangSmithParams(TypedDict, total=False):
     """Name of the model."""
     ls_model_type: Literal["chat", "llm"]
     """Type of the model. Should be 'chat' or 'llm'."""
-    ls_temperature: Optional[float]
+    ls_temperature: float | None
     """Temperature for generation."""
-    ls_max_tokens: Optional[int]
+    ls_max_tokens: int | None
     """Max tokens for generation."""
-    ls_stop: Optional[list[str]]
+    ls_stop: list[str] | None
     """Stop words for generation."""
 
 
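
Note: LangSmithParams is a TypedDict declared with total=False, so callers may supply any subset of these keys. A minimal sketch:

    params: LangSmithParams = {
        "ls_model_type": "chat",
        "ls_temperature": 0.2,
        "ls_max_tokens": 256,
        "ls_stop": ["\n\n"],
    }
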
@@ -58,18 +66,22 @@ class LangSmithParams(TypedDict, total=False):
 def get_tokenizer() -> Any:
     """Get a GPT-2 tokenizer instance.
 
-    This function is cached to avoid re-loading the tokenizer
-    every time it is called.
+    This function is cached to avoid re-loading the tokenizer every time it is called.
+
+    Raises:
+        ImportError: If the transformers package is not installed.
+
+    Returns:
+        The GPT-2 tokenizer instance.
+
     """
-    try:
-        from transformers import GPT2TokenizerFast  # type: ignore[import-not-found]
-    except ImportError as e:
+    if not _HAS_TRANSFORMERS:
         msg = (
             "Could not import transformers python package. "
             "This is needed in order to calculate get_token_ids. "
             "Please install it with `pip install transformers`."
         )
-        raise ImportError(msg) from e
+        raise ImportError(msg)
     # create a GPT-2 tokenizer instance
     return GPT2TokenizerFast.from_pretrained("gpt2")
 
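
Note: callers of get_tokenizer are unaffected by the restructuring. Assuming the optional transformers package is installed, usage stays:

    tokenizer = get_tokenizer()  # cached after the first call via @cache
    token_ids = tokenizer.encode("hello world")  # list[int], the same path
                                                 # _get_token_ids_default_method uses
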
@@ -83,17 +95,20 @@ def _get_token_ids_default_method(text: str) -> list[int]:
     return tokenizer.encode(text)
 
 
-LanguageModelInput = Union[PromptValue, str, Sequence[MessageLikeRepresentation]]
-LanguageModelOutput = Union[BaseMessage, str]
+LanguageModelInput = PromptValue | str | Sequence[MessageLikeRepresentation]
+"""Input to a language model."""
+
+LanguageModelOutput = BaseMessage | str
+"""Output from a language model."""
+
 LanguageModelLike = Runnable[LanguageModelInput, LanguageModelOutput]
-LanguageModelOutputVar = TypeVar(
-    "LanguageModelOutputVar", BaseMessage, str, AIMessageV1
-)
+"""Input/output interface for a language model."""
 
+LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", AIMessage, str)
+"""Type variable for the output of a language model."""
 
-def _get_verbosity() -> bool:
-    from langchain_core.globals import get_verbose
 
+def _get_verbosity() -> bool:
     return get_verbose()
 
 
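
Note: the PEP 604 rewrite of LanguageModelInput covers the same three input shapes as the old Union form. A sketch, where model is any hypothetical BaseLanguageModel implementation:

    from langchain_core.prompt_values import StringPromptValue

    model.invoke("plain string prompt")         # str
    model.invoke([("human", "hi")])             # Sequence[MessageLikeRepresentation]
    model.invoke(StringPromptValue(text="hi"))  # PromptValue
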
@@ -102,16 +117,17 @@ class BaseLanguageModel(
 ):
     """Abstract base class for interfacing with language models.
 
-    All language model wrappers inherited from BaseLanguageModel.
+    All language model wrappers inherited from `BaseLanguageModel`.
+
     """
 
-    cache: Union[BaseCache, bool, None] = Field(default=None, exclude=True)
+    cache: BaseCache | bool | None = Field(default=None, exclude=True)
     """Whether to cache the response.
 
-    * If true, will use the global cache.
-    * If false, will not use a cache
-    * If None, will use the global cache if it's set, otherwise no cache.
-    * If instance of BaseCache, will use the provided cache.
+    * If `True`, will use the global cache.
+    * If `False`, will not use a cache
+    * If `None`, will use the global cache if it's set, otherwise no cache.
+    * If instance of `BaseCache`, will use the provided cache.
 
     Caching is not currently supported for streaming methods of models.
     """
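
Note: per the docstring above, the cache field accepts a cache instance directly. A sketch using a hypothetical subclass MyChatModel:

    from langchain_core.caches import InMemoryCache

    cached = MyChatModel(cache=InMemoryCache())  # per-instance cache
    uncached = MyChatModel(cache=False)          # opt out even if a global cache is set
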
@@ -119,11 +135,11 @@ class BaseLanguageModel(
     """Whether to print out response text."""
     callbacks: Callbacks = Field(default=None, exclude=True)
     """Callbacks to add to the run trace."""
-    tags: Optional[list[str]] = Field(default=None, exclude=True)
+    tags: list[str] | None = Field(default=None, exclude=True)
     """Tags to add to the run trace."""
-    metadata: Optional[dict[str, Any]] = Field(default=None, exclude=True)
+    metadata: dict[str, Any] | None = Field(default=None, exclude=True)
     """Metadata to add to the run trace."""
-    custom_get_token_ids: Optional[Callable[[str], list[int]]] = Field(
+    custom_get_token_ids: Callable[[str], list[int]] | None = Field(
         default=None, exclude=True
     )
     """Optional encoder to use for counting tokens."""
@@ -133,16 +149,17 @@ class BaseLanguageModel(
     )
 
     @field_validator("verbose", mode="before")
-    def set_verbose(cls, verbose: Optional[bool]) -> bool:
-        """If verbose is None, set it.
+    def set_verbose(cls, verbose: bool | None) -> bool:  # noqa: FBT001
+        """If verbose is `None`, set it.
 
-        This allows users to pass in None as verbose to access the global setting.
+        This allows users to pass in `None` as verbose to access the global setting.
 
         Args:
             verbose: The verbosity setting to use.
 
         Returns:
             The verbosity setting to use.
+
         """
         if verbose is None:
             return _get_verbosity()
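
Note: this validator is what lets verbose=None defer to the global flag. A sketch, again with a hypothetical MyChatModel:

    from langchain_core.globals import set_verbose

    set_verbose(True)
    loud = MyChatModel()                # verbose=None resolves to the global True
    quiet = MyChatModel(verbose=False)  # an explicit value is kept as-is
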
@@ -151,26 +168,17 @@ class BaseLanguageModel(
     @property
     @override
     def InputType(self) -> TypeAlias:
-        """Get the input type for this runnable."""
-        from langchain_core.prompt_values import (
-            ChatPromptValueConcrete,
-            StringPromptValue,
-        )
-
+        """Get the input type for this `Runnable`."""
         # This is a version of LanguageModelInput which replaces the abstract
         # base class BaseMessage with a union of its subclasses, which makes
         # for a much better schema.
-        return Union[
-            str,
-            Union[StringPromptValue, ChatPromptValueConcrete],
-            list[AnyMessage],
-        ]
+        return str | StringPromptValue | ChatPromptValueConcrete | list[AnyMessage]
 
     @abstractmethod
     def generate_prompt(
         self,
         prompts: list[PromptValue],
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         callbacks: Callbacks = None,
         **kwargs: Any,
     ) -> LLMResult:
@@ -180,32 +188,34 @@ class BaseLanguageModel(
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
-            prompts: List of PromptValues. A PromptValue is an object that can be
-                converted to match the format of any language model (string for
-                pure text generation models and BaseMessages for chat models).
+            prompts: List of `PromptValue` objects. A `PromptValue` is an object that
+                can be converted to match the format of any language model (string for
+                pure text generation models and `BaseMessage` objects for chat models).
             stop: Stop words to use when generating. Model output is cut off at the
                 first occurrence of any of these substrings.
-            callbacks: Callbacks to pass through. Used for executing additional
+            callbacks: `Callbacks` to pass through. Used for executing additional
                 functionality, such as logging or streaming, throughout generation.
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.
 
         Returns:
-            An LLMResult, which contains a list of candidate Generations for each input
-                prompt and additional model provider-specific output.
+            An `LLMResult`, which contains a list of candidate `Generation` objects for
+                each input prompt and additional model provider-specific output.
+
         """
 
     @abstractmethod
     async def agenerate_prompt(
         self,
         prompts: list[PromptValue],
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         callbacks: Callbacks = None,
         **kwargs: Any,
     ) -> LLMResult:
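
Note: a sketch of the batched call described in the docstring, with a hypothetical model implementation; ChatPromptTemplate.format_prompt produces the required PromptValue objects:

    from langchain_core.prompts import ChatPromptTemplate

    template = ChatPromptTemplate.from_messages([("human", "Summarize {topic}.")])
    prompt_values = [template.format_prompt(topic=t) for t in ("tea", "coffee")]
    result = model.generate_prompt(prompt_values)  # one batched LLMResult
    for candidates in result.generations:          # one candidate list per prompt
        print(candidates[0].text)
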
@@ -215,127 +225,37 @@ class BaseLanguageModel(
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
-            prompts: List of PromptValues. A PromptValue is an object that can be
-                converted to match the format of any language model (string for
-                pure text generation models and BaseMessages for chat models).
+            prompts: List of `PromptValue` objects. A `PromptValue` is an object that
+                can be converted to match the format of any language model (string for
+                pure text generation models and `BaseMessage` objects for chat models).
             stop: Stop words to use when generating. Model output is cut off at the
                 first occurrence of any of these substrings.
-            callbacks: Callbacks to pass through. Used for executing additional
+            callbacks: `Callbacks` to pass through. Used for executing additional
                 functionality, such as logging or streaming, throughout generation.
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.
 
         Returns:
-            An LLMResult, which contains a list of candidate Generations for each input
-                prompt and additional model provider-specific output.
+            An `LLMResult`, which contains a list of candidate `Generation` objects for
+                each input prompt and additional model provider-specific output.
+
         """
 
     def with_structured_output(
-        self, schema: Union[dict, type], **kwargs: Any
-    ) -> Runnable[LanguageModelInput, Union[dict, BaseModel]]:
+        self, schema: dict | type, **kwargs: Any
+    ) -> Runnable[LanguageModelInput, dict | BaseModel]:
         """Not implemented on this class."""
         # Implement this on child class if there is a way of steering the model to
         # generate responses that match a given schema.
         raise NotImplementedError
 
-    @deprecated("0.1.7", alternative="invoke", removal="1.0")
-    @abstractmethod
-    def predict(
-        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
-    ) -> str:
-        """Pass a single string input to the model and return a string.
-
-        Use this method when passing in raw text. If you want to pass in specific
-        types of chat messages, use predict_messages.
-
-        Args:
-            text: String input to pass to the model.
-            stop: Stop words to use when generating. Model output is cut off at the
-                first occurrence of any of these substrings.
-            **kwargs: Arbitrary additional keyword arguments. These are usually passed
-                to the model provider API call.
-
-        Returns:
-            Top model prediction as a string.
-        """
-
-    @deprecated("0.1.7", alternative="invoke", removal="1.0")
-    @abstractmethod
-    def predict_messages(
-        self,
-        messages: list[BaseMessage],
-        *,
-        stop: Optional[Sequence[str]] = None,
-        **kwargs: Any,
-    ) -> BaseMessage:
-        """Pass a message sequence to the model and return a message.
-
-        Use this method when passing in chat messages. If you want to pass in raw text,
-        use predict.
-
-        Args:
-            messages: A sequence of chat messages corresponding to a single model input.
-            stop: Stop words to use when generating. Model output is cut off at the
-                first occurrence of any of these substrings.
-            **kwargs: Arbitrary additional keyword arguments. These are usually passed
-                to the model provider API call.
-
-        Returns:
-            Top model prediction as a message.
-        """
-
-    @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
-    @abstractmethod
-    async def apredict(
-        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
-    ) -> str:
-        """Asynchronously pass a string to the model and return a string.
-
-        Use this method when calling pure text generation models and only the top
-        candidate generation is needed.
-
-        Args:
-            text: String input to pass to the model.
-            stop: Stop words to use when generating. Model output is cut off at the
-                first occurrence of any of these substrings.
-            **kwargs: Arbitrary additional keyword arguments. These are usually passed
-                to the model provider API call.
-
-        Returns:
-            Top model prediction as a string.
-        """
-
-    @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
-    @abstractmethod
-    async def apredict_messages(
-        self,
-        messages: list[BaseMessage],
-        *,
-        stop: Optional[Sequence[str]] = None,
-        **kwargs: Any,
-    ) -> BaseMessage:
-        """Asynchronously pass messages to the model and return a message.
-
-        Use this method when calling chat models and only the top
-        candidate generation is needed.
-
-        Args:
-            messages: A sequence of chat messages corresponding to a single model input.
-            stop: Stop words to use when generating. Model output is cut off at the
-                first occurrence of any of these substrings.
-            **kwargs: Arbitrary additional keyword arguments. These are usually passed
-                to the model provider API call.
-
-        Returns:
-            Top model prediction as a message.
-        """
-
     @property
     def _identifying_params(self) -> Mapping[str, Any]:
         """Get the identifying parameters."""
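
Note: the removed predict/predict_messages pair (and their async variants) had carried @deprecated(..., alternative="invoke"/"ainvoke", removal="1.0") since 0.1.7, so 1.0 completes that cycle. A migration sketch with a hypothetical model:

    from langchain_core.messages import HumanMessage

    result = model.invoke("Tell me a joke", stop=["\n"])  # was: model.predict(...)
    reply = model.invoke([HumanMessage("Hi")])            # was: model.predict_messages(...)
    # apredict / apredict_messages map to `await model.ainvoke(...)` the same way
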
@@ -365,28 +285,31 @@ class BaseLanguageModel(
 
         Returns:
             The integer number of tokens in the text.
+
         """
         return len(self.get_token_ids(text))
 
     def get_num_tokens_from_messages(
         self,
         messages: list[BaseMessage],
-        tools: Optional[Sequence] = None,
+        tools: Sequence | None = None,
     ) -> int:
         """Get the number of tokens in the messages.
 
         Useful for checking if an input fits in a model's context window.
 
-        **Note**: the base implementation of get_num_tokens_from_messages ignores
-            tool schemas.
+        !!! note
+            The base implementation of `get_num_tokens_from_messages` ignores tool
+            schemas.
 
         Args:
             messages: The message inputs to tokenize.
-            tools: If provided, sequence of dict, BaseModel, function, or
-                BaseTools to be converted to tool schemas.
+            tools: If provided, sequence of dict, `BaseModel`, function, or
+                `BaseTool` objects to be converted to tool schemas.
 
         Returns:
             The sum of the number of tokens across the messages.
+
         """
         if tools is not None:
             warnings.warn(
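
Note: a usage sketch for the updated signature, with a hypothetical model; as the docstring warns, the base implementation counts message text only and ignores any tools schemas:

    from langchain_core.messages import AIMessage, HumanMessage

    n_tokens = model.get_num_tokens_from_messages(
        [HumanMessage("What is 2 + 2?"), AIMessage("4")]
    )
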
@@ -394,11 +317,3 @@ class BaseLanguageModel(
                 stacklevel=2,
             )
         return sum(self.get_num_tokens(get_buffer_string([m])) for m in messages)
-
-    @classmethod
-    def _all_required_field_names(cls) -> set:
-        """DEPRECATED: Kept for backwards compatibility.
-
-        Use get_pydantic_field_names.
-        """
-        return get_pydantic_field_names(cls)