langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0a2__py3-none-any.whl
- langchain_core/_api/beta_decorator.py +2 -2
- langchain_core/_api/deprecation.py +1 -1
- langchain_core/beta/runnables/context.py +1 -1
- langchain_core/callbacks/base.py +14 -23
- langchain_core/callbacks/file.py +13 -2
- langchain_core/callbacks/manager.py +74 -157
- langchain_core/callbacks/streaming_stdout.py +3 -4
- langchain_core/callbacks/usage.py +2 -12
- langchain_core/chat_history.py +6 -6
- langchain_core/documents/base.py +1 -1
- langchain_core/documents/compressor.py +9 -6
- langchain_core/indexing/base.py +2 -2
- langchain_core/language_models/_utils.py +232 -101
- langchain_core/language_models/base.py +35 -23
- langchain_core/language_models/chat_models.py +248 -54
- langchain_core/language_models/fake_chat_models.py +28 -81
- langchain_core/load/dump.py +3 -4
- langchain_core/messages/__init__.py +30 -24
- langchain_core/messages/ai.py +188 -30
- langchain_core/messages/base.py +164 -25
- langchain_core/messages/block_translators/__init__.py +89 -0
- langchain_core/messages/block_translators/anthropic.py +451 -0
- langchain_core/messages/block_translators/bedrock.py +45 -0
- langchain_core/messages/block_translators/bedrock_converse.py +47 -0
- langchain_core/messages/block_translators/google_genai.py +45 -0
- langchain_core/messages/block_translators/google_vertexai.py +47 -0
- langchain_core/messages/block_translators/groq.py +45 -0
- langchain_core/messages/block_translators/langchain_v0.py +164 -0
- langchain_core/messages/block_translators/ollama.py +45 -0
- langchain_core/messages/block_translators/openai.py +798 -0
- langchain_core/messages/{content_blocks.py → content.py} +303 -278
- langchain_core/messages/human.py +29 -9
- langchain_core/messages/system.py +29 -9
- langchain_core/messages/tool.py +94 -13
- langchain_core/messages/utils.py +34 -234
- langchain_core/output_parsers/base.py +14 -50
- langchain_core/output_parsers/json.py +2 -5
- langchain_core/output_parsers/list.py +2 -7
- langchain_core/output_parsers/openai_functions.py +5 -28
- langchain_core/output_parsers/openai_tools.py +49 -90
- langchain_core/output_parsers/pydantic.py +2 -3
- langchain_core/output_parsers/transform.py +12 -53
- langchain_core/output_parsers/xml.py +9 -17
- langchain_core/prompt_values.py +8 -112
- langchain_core/prompts/chat.py +1 -3
- langchain_core/runnables/base.py +500 -451
- langchain_core/runnables/branch.py +1 -1
- langchain_core/runnables/fallbacks.py +4 -4
- langchain_core/runnables/history.py +1 -1
- langchain_core/runnables/passthrough.py +3 -3
- langchain_core/runnables/retry.py +1 -1
- langchain_core/runnables/router.py +1 -1
- langchain_core/structured_query.py +3 -7
- langchain_core/tools/base.py +14 -41
- langchain_core/tools/convert.py +2 -22
- langchain_core/tools/retriever.py +1 -8
- langchain_core/tools/structured.py +2 -10
- langchain_core/tracers/_streaming.py +6 -7
- langchain_core/tracers/base.py +7 -14
- langchain_core/tracers/core.py +4 -27
- langchain_core/tracers/event_stream.py +4 -15
- langchain_core/tracers/langchain.py +3 -14
- langchain_core/tracers/log_stream.py +2 -3
- langchain_core/utils/_merge.py +45 -7
- langchain_core/utils/function_calling.py +22 -9
- langchain_core/utils/utils.py +29 -0
- langchain_core/version.py +1 -1
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a2.dist-info}/METADATA +7 -9
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a2.dist-info}/RECORD +71 -64
- langchain_core/v1/__init__.py +0 -1
- langchain_core/v1/chat_models.py +0 -1047
- langchain_core/v1/messages.py +0 -755
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a2.dist-info}/WHEEL +0 -0
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a2.dist-info}/entry_points.txt +0 -0
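The headline structural changes in this manifest: the provisional `langchain_core.v1` package (`chat_models.py`, `messages.py`) is deleted outright, a new `langchain_core.messages.block_translators` package appears with per-provider modules, and `content_blocks.py` is renamed to `content.py`. For downstream code the v1 removal amounts to an import-path migration, and the `base.py` diff below shows the corresponding change. A minimal before/after sketch, assuming only the import paths that appear in this diff:

```python
# Before (0.4.0.dev0): message classes were also exposed under the
# provisional v1 namespace.
# from langchain_core.v1.messages import AIMessage as AIMessageV1

# After (1.0.0a2): the v1 package is gone; import from the stable location,
# as the base.py hunk below now does.
from langchain_core.messages import AIMessage

msg = AIMessage("hello")  # positional content; construction shown for illustration only
print(msg.content)
```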
```diff
--- a/langchain_core/language_models/base.py
+++ b/langchain_core/language_models/base.py
@@ -23,6 +23,7 @@ from langchain_core._api import deprecated
 from langchain_core.caches import BaseCache
 from langchain_core.callbacks import Callbacks
 from langchain_core.messages import (
+    AIMessage,
     AnyMessage,
     BaseMessage,
     MessageLikeRepresentation,
@@ -31,7 +32,6 @@ from langchain_core.messages import (
 from langchain_core.prompt_values import PromptValue
 from langchain_core.runnables import Runnable, RunnableSerializable
 from langchain_core.utils import get_pydantic_field_names
-from langchain_core.v1.messages import AIMessage as AIMessageV1
 
 if TYPE_CHECKING:
     from langchain_core.outputs import LLMResult
@@ -58,8 +58,8 @@ class LangSmithParams(TypedDict, total=False):
 def get_tokenizer() -> Any:
     """Get a GPT-2 tokenizer instance.
 
-    This function is cached to avoid re-loading the tokenizer
-
+    This function is cached to avoid re-loading the tokenizer every time it is called.
+
     """
     try:
         from transformers import GPT2TokenizerFast  # type: ignore[import-not-found]
@@ -86,9 +86,7 @@ def _get_token_ids_default_method(text: str) -> list[int]:
 LanguageModelInput = Union[PromptValue, str, Sequence[MessageLikeRepresentation]]
 LanguageModelOutput = Union[BaseMessage, str]
 LanguageModelLike = Runnable[LanguageModelInput, LanguageModelOutput]
-LanguageModelOutputVar = TypeVar(
-    "LanguageModelOutputVar", BaseMessage, str, AIMessageV1
-)
+LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", AIMessage, str)
 
 
 def _get_verbosity() -> bool:
```
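The last hunk above narrows `LanguageModelOutputVar` from three constraints (`BaseMessage`, `str`, `AIMessageV1`) to two (`AIMessage`, `str`). Because this is a constrained `TypeVar` rather than a bound one, every generic use must resolve to exactly one of the listed types. A self-contained sketch of that behavior, using a stand-in class instead of the real `AIMessage`:

```python
from typing import Generic, TypeVar


class AIMessage:  # stand-in for langchain_core.messages.AIMessage
    def __init__(self, content: str) -> None:
        self.content = content


# Constrained TypeVar: each parameterization must be exactly AIMessage or str.
LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", AIMessage, str)


class Model(Generic[LanguageModelOutputVar]):
    def invoke(self, prompt: str) -> LanguageModelOutputVar: ...


chat: Model[AIMessage]  # accepted by type checkers
text: Model[str]        # accepted
# Model[bytes] would be rejected: bytes is not one of the declared constraints,
# and after this change neither is the removed AIMessageV1.
```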
```diff
@@ -102,7 +100,8 @@ class BaseLanguageModel(
 ):
     """Abstract base class for interfacing with language models.
 
-    All language model wrappers inherited from BaseLanguageModel
+    All language model wrappers inherited from ``BaseLanguageModel``.
+
     """
 
     cache: Union[BaseCache, bool, None] = Field(default=None, exclude=True)
@@ -111,9 +110,10 @@ class BaseLanguageModel(
     * If true, will use the global cache.
     * If false, will not use a cache
     * If None, will use the global cache if it's set, otherwise no cache.
-    * If instance of BaseCache
+    * If instance of ``BaseCache``, will use the provided cache.
 
     Caching is not currently supported for streaming methods of models.
+
     """
     verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False)
     """Whether to print out response text."""
@@ -143,6 +143,7 @@ class BaseLanguageModel(
 
         Returns:
             The verbosity setting to use.
+
         """
         if verbose is None:
             return _get_verbosity()
@@ -198,7 +199,8 @@ class BaseLanguageModel(
 
         Returns:
             An LLMResult, which contains a list of candidate Generations for each input
-
+                prompt and additional model provider-specific output.
+
         """
 
     @abstractmethod
@@ -232,8 +234,9 @@ class BaseLanguageModel(
             to the model provider API call.
 
         Returns:
-            An LLMResult
-
+            An ``LLMResult``, which contains a list of candidate Generations for each
+                input prompt and additional model provider-specific output.
+
         """
 
     def with_structured_output(
@@ -251,8 +254,8 @@ class BaseLanguageModel(
     ) -> str:
         """Pass a single string input to the model and return a string.
 
-
-
+        Use this method when passing in raw text. If you want to pass in specific types
+            of chat messages, use predict_messages.
 
         Args:
             text: String input to pass to the model.
@@ -263,6 +266,7 @@ class BaseLanguageModel(
 
         Returns:
             Top model prediction as a string.
+
         """
 
     @deprecated("0.1.7", alternative="invoke", removal="1.0")
```
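The `@deprecated("0.1.7", alternative="invoke", removal="1.0")` decorators surrounding these docstring hunks mark `predict` and `predict_messages` (plus the async variants in the hunks that follow) for removal in exactly this 1.0 line. A hedged migration sketch; `FakeListChatModel` is used as a stand-in model, and its availability under this import path in 1.0.0a2 is an assumption based on `fake_chat_models.py` surviving in the manifest above:

```python
from langchain_core.language_models import FakeListChatModel
from langchain_core.messages import HumanMessage

model = FakeListChatModel(responses=["pong"])

# Before: model.predict("ping") and model.predict_messages([HumanMessage("ping")])
# After: invoke() accepts raw text or a message sequence and returns a message.
print(model.invoke("ping").content)
print(model.invoke([HumanMessage("ping")]).content)
```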
```diff
@@ -277,7 +281,7 @@ class BaseLanguageModel(
         """Pass a message sequence to the model and return a message.
 
         Use this method when passing in chat messages. If you want to pass in raw text,
-
+            use predict.
 
         Args:
             messages: A sequence of chat messages corresponding to a single model input.
@@ -288,6 +292,7 @@ class BaseLanguageModel(
 
         Returns:
             Top model prediction as a message.
+
         """
 
     @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
@@ -298,7 +303,7 @@ class BaseLanguageModel(
         """Asynchronously pass a string to the model and return a string.
 
         Use this method when calling pure text generation models and only the top
-
+            candidate generation is needed.
 
         Args:
             text: String input to pass to the model.
@@ -309,6 +314,7 @@ class BaseLanguageModel(
 
         Returns:
             Top model prediction as a string.
+
         """
 
     @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
@@ -322,8 +328,8 @@ class BaseLanguageModel(
     ) -> BaseMessage:
         """Asynchronously pass messages to the model and return a message.
 
-        Use this method when calling chat models and only the top
-
+        Use this method when calling chat models and only the top candidate generation
+            is needed.
 
         Args:
             messages: A sequence of chat messages corresponding to a single model input.
@@ -334,6 +340,7 @@ class BaseLanguageModel(
 
         Returns:
             Top model prediction as a message.
+
         """
 
     @property
@@ -349,7 +356,8 @@ class BaseLanguageModel(
 
         Returns:
             A list of ids corresponding to the tokens in the text, in order they occur
-
+                in the text.
+
         """
         if self.custom_get_token_ids is not None:
             return self.custom_get_token_ids(text)
@@ -365,6 +373,7 @@ class BaseLanguageModel(
 
         Returns:
             The integer number of tokens in the text.
+
         """
         return len(self.get_token_ids(text))
 
@@ -377,16 +386,18 @@ class BaseLanguageModel(
 
         Useful for checking if an input fits in a model's context window.
 
-
-
+        .. note::
+            The base implementation of ``get_num_tokens_from_messages`` ignores tool
+            schemas.
 
         Args:
             messages: The message inputs to tokenize.
-            tools: If provided, sequence of dict, BaseModel
-                to be converted to tool schemas.
+            tools: If provided, sequence of dict, ``BaseModel``, function, or
+                ``BaseTools`` to be converted to tool schemas.
 
         Returns:
             The sum of the number of tokens across the messages.
+
         """
         if tools is not None:
             warnings.warn(
@@ -399,6 +410,7 @@ class BaseLanguageModel(
     def _all_required_field_names(cls) -> set:
         """DEPRECATED: Kept for backwards compatibility.
 
-        Use get_pydantic_field_names
+        Use ``get_pydantic_field_names``.
+
         """
         return get_pydantic_field_names(cls)
```