langchain-core 1.0.0a5__py3-none-any.whl → 1.0.0a7__py3-none-any.whl
This diff compares the published contents of two package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of langchain-core has been flagged for review.
- langchain_core/_api/__init__.py +3 -3
- langchain_core/_api/beta_decorator.py +6 -6
- langchain_core/_api/deprecation.py +21 -29
- langchain_core/_api/path.py +3 -6
- langchain_core/_import_utils.py +2 -3
- langchain_core/agents.py +10 -11
- langchain_core/caches.py +7 -7
- langchain_core/callbacks/base.py +91 -91
- langchain_core/callbacks/file.py +11 -11
- langchain_core/callbacks/manager.py +86 -89
- langchain_core/callbacks/stdout.py +8 -8
- langchain_core/callbacks/usage.py +4 -4
- langchain_core/chat_history.py +1 -37
- langchain_core/document_loaders/base.py +2 -2
- langchain_core/document_loaders/langsmith.py +15 -15
- langchain_core/documents/base.py +16 -16
- langchain_core/documents/compressor.py +4 -4
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/example_selectors/semantic_similarity.py +17 -19
- langchain_core/exceptions.py +3 -3
- langchain_core/globals.py +3 -151
- langchain_core/indexing/api.py +44 -43
- langchain_core/indexing/base.py +30 -30
- langchain_core/indexing/in_memory.py +3 -3
- langchain_core/language_models/_utils.py +5 -7
- langchain_core/language_models/base.py +18 -132
- langchain_core/language_models/chat_models.py +118 -227
- langchain_core/language_models/fake.py +11 -11
- langchain_core/language_models/fake_chat_models.py +35 -29
- langchain_core/language_models/llms.py +91 -201
- langchain_core/load/dump.py +1 -1
- langchain_core/load/load.py +11 -12
- langchain_core/load/mapping.py +2 -4
- langchain_core/load/serializable.py +2 -4
- langchain_core/messages/ai.py +17 -20
- langchain_core/messages/base.py +28 -26
- langchain_core/messages/block_translators/__init__.py +17 -7
- langchain_core/messages/block_translators/anthropic.py +3 -3
- langchain_core/messages/block_translators/bedrock_converse.py +2 -2
- langchain_core/messages/block_translators/google_genai.py +502 -20
- langchain_core/messages/block_translators/langchain_v0.py +2 -2
- langchain_core/messages/block_translators/openai.py +6 -6
- langchain_core/messages/content.py +120 -124
- langchain_core/messages/human.py +7 -7
- langchain_core/messages/system.py +7 -7
- langchain_core/messages/tool.py +24 -24
- langchain_core/messages/utils.py +67 -79
- langchain_core/output_parsers/base.py +12 -14
- langchain_core/output_parsers/json.py +4 -4
- langchain_core/output_parsers/list.py +3 -5
- langchain_core/output_parsers/openai_functions.py +3 -3
- langchain_core/output_parsers/openai_tools.py +3 -3
- langchain_core/output_parsers/pydantic.py +2 -2
- langchain_core/output_parsers/transform.py +13 -15
- langchain_core/output_parsers/xml.py +7 -9
- langchain_core/outputs/chat_generation.py +4 -4
- langchain_core/outputs/chat_result.py +1 -3
- langchain_core/outputs/generation.py +2 -2
- langchain_core/outputs/llm_result.py +5 -5
- langchain_core/prompts/__init__.py +1 -5
- langchain_core/prompts/base.py +10 -15
- langchain_core/prompts/chat.py +31 -82
- langchain_core/prompts/dict.py +2 -2
- langchain_core/prompts/few_shot.py +5 -5
- langchain_core/prompts/few_shot_with_templates.py +4 -4
- langchain_core/prompts/loading.py +3 -5
- langchain_core/prompts/prompt.py +4 -16
- langchain_core/prompts/string.py +2 -1
- langchain_core/prompts/structured.py +16 -23
- langchain_core/rate_limiters.py +3 -4
- langchain_core/retrievers.py +14 -14
- langchain_core/runnables/base.py +938 -1054
- langchain_core/runnables/branch.py +36 -40
- langchain_core/runnables/config.py +27 -35
- langchain_core/runnables/configurable.py +108 -124
- langchain_core/runnables/fallbacks.py +76 -72
- langchain_core/runnables/graph.py +39 -45
- langchain_core/runnables/graph_ascii.py +9 -11
- langchain_core/runnables/graph_mermaid.py +18 -19
- langchain_core/runnables/graph_png.py +8 -9
- langchain_core/runnables/history.py +114 -127
- langchain_core/runnables/passthrough.py +113 -139
- langchain_core/runnables/retry.py +43 -48
- langchain_core/runnables/router.py +23 -28
- langchain_core/runnables/schema.py +42 -44
- langchain_core/runnables/utils.py +28 -31
- langchain_core/stores.py +9 -13
- langchain_core/structured_query.py +8 -8
- langchain_core/tools/base.py +63 -115
- langchain_core/tools/convert.py +31 -35
- langchain_core/tools/render.py +1 -1
- langchain_core/tools/retriever.py +4 -4
- langchain_core/tools/simple.py +13 -17
- langchain_core/tools/structured.py +12 -15
- langchain_core/tracers/base.py +62 -64
- langchain_core/tracers/context.py +17 -35
- langchain_core/tracers/core.py +49 -53
- langchain_core/tracers/evaluation.py +11 -11
- langchain_core/tracers/event_stream.py +58 -60
- langchain_core/tracers/langchain.py +13 -13
- langchain_core/tracers/log_stream.py +22 -24
- langchain_core/tracers/root_listeners.py +14 -14
- langchain_core/tracers/run_collector.py +2 -4
- langchain_core/tracers/schemas.py +8 -8
- langchain_core/tracers/stdout.py +2 -1
- langchain_core/utils/__init__.py +0 -3
- langchain_core/utils/_merge.py +2 -2
- langchain_core/utils/aiter.py +24 -28
- langchain_core/utils/env.py +4 -4
- langchain_core/utils/function_calling.py +31 -41
- langchain_core/utils/html.py +3 -4
- langchain_core/utils/input.py +3 -3
- langchain_core/utils/iter.py +15 -19
- langchain_core/utils/json.py +3 -2
- langchain_core/utils/json_schema.py +6 -6
- langchain_core/utils/mustache.py +3 -5
- langchain_core/utils/pydantic.py +16 -18
- langchain_core/utils/usage.py +1 -1
- langchain_core/utils/utils.py +29 -29
- langchain_core/vectorstores/base.py +18 -21
- langchain_core/vectorstores/in_memory.py +14 -87
- langchain_core/vectorstores/utils.py +2 -2
- langchain_core/version.py +1 -1
- {langchain_core-1.0.0a5.dist-info → langchain_core-1.0.0a7.dist-info}/METADATA +10 -31
- langchain_core-1.0.0a7.dist-info/RECORD +176 -0
- {langchain_core-1.0.0a5.dist-info → langchain_core-1.0.0a7.dist-info}/WHEEL +1 -1
- langchain_core/messages/block_translators/ollama.py +0 -47
- langchain_core/prompts/pipeline.py +0 -138
- langchain_core/tracers/langchain_v1.py +0 -31
- langchain_core/utils/loading.py +0 -35
- langchain_core-1.0.0a5.dist-info/RECORD +0 -181
- langchain_core-1.0.0a5.dist-info/entry_points.txt +0 -4
langchain_core/language_models/base.py

```diff
@@ -4,23 +4,19 @@ from __future__ import annotations
 
 import warnings
 from abc import ABC, abstractmethod
-from collections.abc import Mapping, Sequence
+from collections.abc import Callable, Mapping, Sequence
 from functools import cache
 from typing import (
     TYPE_CHECKING,
     Any,
-    Callable,
     Literal,
-    Optional,
     TypeAlias,
     TypeVar,
-    Union,
 )
 
 from pydantic import BaseModel, ConfigDict, Field, field_validator
 from typing_extensions import TypedDict, override
 
-from langchain_core._api import deprecated
 from langchain_core.caches import BaseCache
 from langchain_core.callbacks import Callbacks
 from langchain_core.globals import get_verbose
```
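The import hunk above is the mechanical core of this release: `Callable` now comes from `collections.abc`, and `Optional`/`Union` are dropped in favor of PEP 604 unions. A minimal standalone sketch of the two styles (not taken from the package; assumes Python 3.10+):

```python
from collections.abc import Callable

# Old spelling, removed in a7:
#     from typing import Callable, Optional, Union
#     def run(hook: Optional[Callable[[str], int]] = None) -> Union[int, str]: ...

# New spelling, as adopted above (PEP 604, Python 3.10+):
def run(hook: Callable[[str], int] | None = None) -> int | str:
    """X | None replaces Optional[X]; A | B replaces Union[A, B]."""
    return hook("ok") if hook is not None else "no hook"
```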
```diff
@@ -37,7 +33,6 @@ from langchain_core.prompt_values import (
     StringPromptValue,
 )
 from langchain_core.runnables import Runnable, RunnableSerializable
-from langchain_core.utils import get_pydantic_field_names
 
 if TYPE_CHECKING:
     from langchain_core.outputs import LLMResult
```
```diff
@@ -59,11 +54,11 @@ class LangSmithParams(TypedDict, total=False):
     """Name of the model."""
     ls_model_type: Literal["chat", "llm"]
     """Type of the model. Should be 'chat' or 'llm'."""
-    ls_temperature: Optional[float]
+    ls_temperature: float | None
     """Temperature for generation."""
-    ls_max_tokens: Optional[int]
+    ls_max_tokens: int | None
     """Max tokens for generation."""
-    ls_stop: Optional[list[str]]
+    ls_stop: list[str] | None
     """Stop words for generation."""
 
 
```
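Because `LangSmithParams` is declared with `total=False`, all keys remain optional at runtime; the hunk above only respells the annotations. A sketch of how such a TypedDict is populated (the `ls_model_name` field is an assumption inferred from the `"""Name of the model."""` docstring context):

```python
from typing import Literal, TypedDict


class LangSmithParamsSketch(TypedDict, total=False):
    """Standalone mirror of the fields shown above, for illustration only."""

    ls_model_name: str  # assumed field name for "Name of the model."
    ls_model_type: Literal["chat", "llm"]
    ls_temperature: float | None
    ls_max_tokens: int | None
    ls_stop: list[str] | None


# total=False: any subset of keys is a valid value.
params: LangSmithParamsSketch = {"ls_model_type": "chat", "ls_temperature": 0.2}
```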
```diff
@@ -100,8 +95,8 @@ def _get_token_ids_default_method(text: str) -> list[int]:
     return tokenizer.encode(text)
 
 
-LanguageModelInput = Union[PromptValue, str, Sequence[MessageLikeRepresentation]]
-LanguageModelOutput = Union[BaseMessage, str]
+LanguageModelInput = PromptValue | str | Sequence[MessageLikeRepresentation]
+LanguageModelOutput = BaseMessage | str
 LanguageModelLike = Runnable[LanguageModelInput, LanguageModelOutput]
 LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", AIMessage, str)
 
```
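The respelled `LanguageModelInput` alias still admits the same three input shapes. A sketch of each, using the in-core fake chat model as a stand-in for a real one:

```python
from langchain_core.language_models import FakeListChatModel
from langchain_core.messages import HumanMessage
from langchain_core.prompts import ChatPromptTemplate

model = FakeListChatModel(responses=["hello"])

# 1. PromptValue, 2. str, 3. Sequence[MessageLikeRepresentation]
prompt_value = ChatPromptTemplate.from_messages([("human", "{q}")]).invoke({"q": "hi"})
for model_input in (prompt_value, "hi", [HumanMessage("hi")]):
    print(model.invoke(model_input))
```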
```diff
@@ -119,7 +114,7 @@ class BaseLanguageModel(
 
     """
 
-    cache: Union[BaseCache, bool, None] = Field(default=None, exclude=True)
+    cache: BaseCache | bool | None = Field(default=None, exclude=True)
     """Whether to cache the response.
 
     * If true, will use the global cache.
```
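The `cache` field's three-way semantics are unchanged by the respelling; the docstring bullets above still govern. A short sketch of the options:

```python
from langchain_core.caches import InMemoryCache
from langchain_core.language_models import FakeListChatModel

# True  -> use the global cache         False -> never cache
# None  -> global cache if configured   BaseCache instance -> use that cache
cached = FakeListChatModel(responses=["ok"], cache=InMemoryCache())
uncached = FakeListChatModel(responses=["ok"], cache=False)
```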
```diff
@@ -134,11 +129,11 @@ class BaseLanguageModel(
     """Whether to print out response text."""
     callbacks: Callbacks = Field(default=None, exclude=True)
     """Callbacks to add to the run trace."""
-    tags: Optional[list[str]] = Field(default=None, exclude=True)
+    tags: list[str] | None = Field(default=None, exclude=True)
     """Tags to add to the run trace."""
-    metadata: Optional[dict[str, Any]] = Field(default=None, exclude=True)
+    metadata: dict[str, Any] | None = Field(default=None, exclude=True)
     """Metadata to add to the run trace."""
-    custom_get_token_ids: Optional[Callable[[str], list[int]]] = Field(
+    custom_get_token_ids: Callable[[str], list[int]] | None = Field(
         default=None, exclude=True
     )
     """Optional encoder to use for counting tokens."""
```
```diff
@@ -148,7 +143,7 @@ class BaseLanguageModel(
     )
 
     @field_validator("verbose", mode="before")
-    def set_verbose(cls, verbose: Optional[bool]) -> bool:  # noqa: FBT001
+    def set_verbose(cls, verbose: bool | None) -> bool:  # noqa: FBT001
         """If verbose is None, set it.
 
         This allows users to pass in None as verbose to access the global setting.
```
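The `set_verbose` validator keeps its behavior: `verbose=None` defers to the global verbosity flag when the model is constructed. Roughly:

```python
from langchain_core.language_models import FakeListChatModel

# The mode="before" validator intercepts None and substitutes the global
# setting, so the stored field is always a concrete bool.
model = FakeListChatModel(responses=["ok"], verbose=None)
assert isinstance(model.verbose, bool)
```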
```diff
@@ -171,17 +166,13 @@ class BaseLanguageModel(
         # This is a version of LanguageModelInput which replaces the abstract
         # base class BaseMessage with a union of its subclasses, which makes
         # for a much better schema.
-        return Union[
-            str,
-            Union[StringPromptValue, ChatPromptValueConcrete],
-            list[AnyMessage],
-        ]
+        return str | StringPromptValue | ChatPromptValueConcrete | list[AnyMessage]
 
     @abstractmethod
     def generate_prompt(
         self,
         prompts: list[PromptValue],
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         callbacks: Callbacks = None,
         **kwargs: Any,
     ) -> LLMResult:
```
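Only the `stop` annotation changed in `generate_prompt`; the calling convention is the same. A sketch with the in-core fake LLM (which ignores stop strings; real providers cut output at the first occurrence):

```python
from langchain_core.language_models import FakeListLLM
from langchain_core.prompts import PromptTemplate

llm = FakeListLLM(responses=["first line\nsecond line"])
prompt_value = PromptTemplate.from_template("Say {x}").invoke({"x": "hi"})

result = llm.generate_prompt([prompt_value], stop=["\n"])
print(result.generations[0][0].text)
```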
```diff
@@ -218,7 +209,7 @@ class BaseLanguageModel(
     async def agenerate_prompt(
         self,
         prompts: list[PromptValue],
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         callbacks: Callbacks = None,
         **kwargs: Any,
     ) -> LLMResult:
```
```diff
@@ -252,109 +243,13 @@ class BaseLanguageModel(
         """
 
     def with_structured_output(
-        self, schema: Union[dict, type], **kwargs: Any
-    ) -> Runnable[LanguageModelInput, Union[dict, BaseModel]]:
+        self, schema: dict | type, **kwargs: Any
+    ) -> Runnable[LanguageModelInput, dict | BaseModel]:
         """Not implemented on this class."""
         # Implement this on child class if there is a way of steering the model to
         # generate responses that match a given schema.
         raise NotImplementedError
 
-    @deprecated("0.1.7", alternative="invoke", removal="1.0")
-    @abstractmethod
-    def predict(
-        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
-    ) -> str:
-        """Pass a single string input to the model and return a string.
-
-        Use this method when passing in raw text. If you want to pass in specific types
-        of chat messages, use predict_messages.
-
-        Args:
-            text: String input to pass to the model.
-            stop: Stop words to use when generating. Model output is cut off at the
-                first occurrence of any of these substrings.
-            **kwargs: Arbitrary additional keyword arguments. These are usually passed
-                to the model provider API call.
-
-        Returns:
-            Top model prediction as a string.
-
-        """
-
-    @deprecated("0.1.7", alternative="invoke", removal="1.0")
-    @abstractmethod
-    def predict_messages(
-        self,
-        messages: list[BaseMessage],
-        *,
-        stop: Optional[Sequence[str]] = None,
-        **kwargs: Any,
-    ) -> BaseMessage:
-        """Pass a message sequence to the model and return a message.
-
-        Use this method when passing in chat messages. If you want to pass in raw text,
-        use predict.
-
-        Args:
-            messages: A sequence of chat messages corresponding to a single model input.
-            stop: Stop words to use when generating. Model output is cut off at the
-                first occurrence of any of these substrings.
-            **kwargs: Arbitrary additional keyword arguments. These are usually passed
-                to the model provider API call.
-
-        Returns:
-            Top model prediction as a message.
-
-        """
-
-    @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
-    @abstractmethod
-    async def apredict(
-        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
-    ) -> str:
-        """Asynchronously pass a string to the model and return a string.
-
-        Use this method when calling pure text generation models and only the top
-        candidate generation is needed.
-
-        Args:
-            text: String input to pass to the model.
-            stop: Stop words to use when generating. Model output is cut off at the
-                first occurrence of any of these substrings.
-            **kwargs: Arbitrary additional keyword arguments. These are usually passed
-                to the model provider API call.
-
-        Returns:
-            Top model prediction as a string.
-
-        """
-
-    @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
-    @abstractmethod
-    async def apredict_messages(
-        self,
-        messages: list[BaseMessage],
-        *,
-        stop: Optional[Sequence[str]] = None,
-        **kwargs: Any,
-    ) -> BaseMessage:
-        """Asynchronously pass messages to the model and return a message.
-
-        Use this method when calling chat models and only the top candidate generation
-        is needed.
-
-        Args:
-            messages: A sequence of chat messages corresponding to a single model input.
-            stop: Stop words to use when generating. Model output is cut off at the
-                first occurrence of any of these substrings.
-            **kwargs: Arbitrary additional keyword arguments. These are usually passed
-                to the model provider API call.
-
-        Returns:
-            Top model prediction as a message.
-
-        """
-
     @property
     def _identifying_params(self) -> Mapping[str, Any]:
         """Get the identifying parameters."""
```
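With the four deprecated `predict*` methods deleted, `invoke`/`ainvoke` are the remaining entry points for both calling conventions. A migration sketch using the in-core fake chat model:

```python
from langchain_core.language_models import FakeListChatModel
from langchain_core.messages import HumanMessage

model = FakeListChatModel(responses=["hi there"])

# Removed: model.predict("Hello")                       -> str
# Removed: model.predict_messages([HumanMessage(...)])  -> BaseMessage
msg = model.invoke("Hello")                  # AIMessage
msg = model.invoke([HumanMessage("Hello")])  # AIMessage
print(msg.content)

# The async pair apredict / apredict_messages maps onto `await model.ainvoke(...)`.
```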
```diff
@@ -392,13 +287,13 @@ class BaseLanguageModel(
     def get_num_tokens_from_messages(
         self,
         messages: list[BaseMessage],
-        tools: Optional[Sequence] = None,
+        tools: Sequence | None = None,
     ) -> int:
         """Get the number of tokens in the messages.
 
         Useful for checking if an input fits in a model's context window.
 
-
+        !!! note
             The base implementation of ``get_num_tokens_from_messages`` ignores tool
             schemas.
 
```
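`get_num_tokens_from_messages` still renders each message to a buffer string and counts tokens; the base implementation needs the `transformers` package for its default GPT-2 tokenizer unless `custom_get_token_ids` is supplied. Sketch:

```python
from langchain_core.language_models import FakeListChatModel
from langchain_core.messages import HumanMessage

# A toy whitespace "tokenizer" stands in for a real one and avoids the
# transformers dependency of the default token counter.
model = FakeListChatModel(
    responses=["ok"],
    custom_get_token_ids=lambda text: list(range(len(text.split()))),
)
print(model.get_num_tokens_from_messages([HumanMessage("hello there world")]))
```

As the note in the hunk says, tool schemas passed via `tools` are ignored by this base implementation.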
```diff
@@ -417,12 +312,3 @@ class BaseLanguageModel(
             stacklevel=2,
         )
         return sum(self.get_num_tokens(get_buffer_string([m])) for m in messages)
-
-    @classmethod
-    def _all_required_field_names(cls) -> set:
-        """DEPRECATED: Kept for backwards compatibility.
-
-        Use ``get_pydantic_field_names``.
-
-        """
-        return get_pydantic_field_names(cls)
```