langchain-core 1.0.0a6__py3-none-any.whl → 1.0.0a8__py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
This version of langchain-core has been flagged as a potentially problematic release.
- langchain_core/_api/__init__.py +3 -3
- langchain_core/_api/beta_decorator.py +6 -6
- langchain_core/_api/deprecation.py +21 -29
- langchain_core/_api/path.py +3 -6
- langchain_core/_import_utils.py +2 -3
- langchain_core/agents.py +10 -11
- langchain_core/caches.py +7 -7
- langchain_core/callbacks/base.py +91 -91
- langchain_core/callbacks/file.py +11 -11
- langchain_core/callbacks/manager.py +86 -89
- langchain_core/callbacks/stdout.py +8 -8
- langchain_core/callbacks/usage.py +4 -4
- langchain_core/chat_history.py +5 -5
- langchain_core/document_loaders/base.py +2 -2
- langchain_core/document_loaders/langsmith.py +15 -15
- langchain_core/documents/base.py +16 -16
- langchain_core/documents/compressor.py +4 -4
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/example_selectors/semantic_similarity.py +17 -19
- langchain_core/exceptions.py +3 -3
- langchain_core/globals.py +3 -151
- langchain_core/indexing/api.py +44 -43
- langchain_core/indexing/base.py +30 -30
- langchain_core/indexing/in_memory.py +3 -3
- langchain_core/language_models/_utils.py +5 -7
- langchain_core/language_models/base.py +18 -132
- langchain_core/language_models/chat_models.py +118 -227
- langchain_core/language_models/fake.py +11 -11
- langchain_core/language_models/fake_chat_models.py +35 -29
- langchain_core/language_models/llms.py +91 -201
- langchain_core/load/dump.py +1 -1
- langchain_core/load/load.py +11 -12
- langchain_core/load/mapping.py +2 -4
- langchain_core/load/serializable.py +2 -4
- langchain_core/messages/ai.py +17 -20
- langchain_core/messages/base.py +23 -25
- langchain_core/messages/block_translators/__init__.py +2 -5
- langchain_core/messages/block_translators/anthropic.py +3 -3
- langchain_core/messages/block_translators/bedrock_converse.py +2 -2
- langchain_core/messages/block_translators/langchain_v0.py +2 -2
- langchain_core/messages/block_translators/openai.py +6 -6
- langchain_core/messages/content.py +120 -124
- langchain_core/messages/human.py +7 -7
- langchain_core/messages/system.py +7 -7
- langchain_core/messages/tool.py +24 -24
- langchain_core/messages/utils.py +67 -79
- langchain_core/output_parsers/base.py +12 -14
- langchain_core/output_parsers/json.py +4 -4
- langchain_core/output_parsers/list.py +3 -5
- langchain_core/output_parsers/openai_functions.py +3 -3
- langchain_core/output_parsers/openai_tools.py +3 -3
- langchain_core/output_parsers/pydantic.py +2 -2
- langchain_core/output_parsers/transform.py +13 -15
- langchain_core/output_parsers/xml.py +7 -9
- langchain_core/outputs/chat_generation.py +4 -4
- langchain_core/outputs/chat_result.py +1 -3
- langchain_core/outputs/generation.py +2 -2
- langchain_core/outputs/llm_result.py +5 -5
- langchain_core/prompts/__init__.py +1 -5
- langchain_core/prompts/base.py +10 -15
- langchain_core/prompts/chat.py +31 -82
- langchain_core/prompts/dict.py +2 -2
- langchain_core/prompts/few_shot.py +5 -5
- langchain_core/prompts/few_shot_with_templates.py +4 -4
- langchain_core/prompts/loading.py +3 -5
- langchain_core/prompts/prompt.py +4 -16
- langchain_core/prompts/string.py +2 -1
- langchain_core/prompts/structured.py +16 -23
- langchain_core/rate_limiters.py +3 -4
- langchain_core/retrievers.py +14 -14
- langchain_core/runnables/base.py +928 -1042
- langchain_core/runnables/branch.py +36 -40
- langchain_core/runnables/config.py +27 -35
- langchain_core/runnables/configurable.py +108 -124
- langchain_core/runnables/fallbacks.py +76 -72
- langchain_core/runnables/graph.py +39 -45
- langchain_core/runnables/graph_ascii.py +9 -11
- langchain_core/runnables/graph_mermaid.py +18 -19
- langchain_core/runnables/graph_png.py +8 -9
- langchain_core/runnables/history.py +114 -127
- langchain_core/runnables/passthrough.py +113 -139
- langchain_core/runnables/retry.py +43 -48
- langchain_core/runnables/router.py +23 -28
- langchain_core/runnables/schema.py +42 -44
- langchain_core/runnables/utils.py +28 -31
- langchain_core/stores.py +9 -13
- langchain_core/structured_query.py +8 -8
- langchain_core/tools/base.py +62 -115
- langchain_core/tools/convert.py +31 -35
- langchain_core/tools/render.py +1 -1
- langchain_core/tools/retriever.py +4 -4
- langchain_core/tools/simple.py +13 -17
- langchain_core/tools/structured.py +12 -15
- langchain_core/tracers/base.py +62 -64
- langchain_core/tracers/context.py +17 -35
- langchain_core/tracers/core.py +49 -53
- langchain_core/tracers/evaluation.py +11 -11
- langchain_core/tracers/event_stream.py +58 -60
- langchain_core/tracers/langchain.py +13 -13
- langchain_core/tracers/log_stream.py +22 -24
- langchain_core/tracers/root_listeners.py +14 -14
- langchain_core/tracers/run_collector.py +2 -4
- langchain_core/tracers/schemas.py +8 -8
- langchain_core/tracers/stdout.py +2 -1
- langchain_core/utils/__init__.py +0 -3
- langchain_core/utils/_merge.py +2 -2
- langchain_core/utils/aiter.py +24 -28
- langchain_core/utils/env.py +4 -4
- langchain_core/utils/function_calling.py +31 -41
- langchain_core/utils/html.py +3 -4
- langchain_core/utils/input.py +3 -3
- langchain_core/utils/iter.py +15 -19
- langchain_core/utils/json.py +3 -2
- langchain_core/utils/json_schema.py +6 -6
- langchain_core/utils/mustache.py +3 -5
- langchain_core/utils/pydantic.py +16 -18
- langchain_core/utils/usage.py +1 -1
- langchain_core/utils/utils.py +29 -29
- langchain_core/vectorstores/base.py +18 -21
- langchain_core/vectorstores/in_memory.py +14 -87
- langchain_core/vectorstores/utils.py +2 -2
- langchain_core/version.py +1 -1
- {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.0a8.dist-info}/METADATA +10 -21
- langchain_core-1.0.0a8.dist-info/RECORD +176 -0
- {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.0a8.dist-info}/WHEEL +1 -1
- langchain_core/messages/block_translators/ollama.py +0 -47
- langchain_core/prompts/pipeline.py +0 -138
- langchain_core/tracers/langchain_v1.py +0 -31
- langchain_core/utils/loading.py +0 -35
- langchain_core-1.0.0a6.dist-info/RECORD +0 -181
- langchain_core-1.0.0a6.dist-info/entry_points.txt +0 -4
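For readers who want to reproduce this kind of comparison locally, a minimal sketch follows; the wheel filenames match this page's header, the target path is one of the files listed above, and the helper itself is illustrative rather than part of any diff service.

```python
# Illustrative sketch: diff one module across two locally downloaded wheels.
# Assumes the .whl files were fetched beforehand, e.g. with
# `pip download langchain-core==1.0.0a6 --no-deps` (and likewise for 1.0.0a8).
import difflib
import zipfile

OLD_WHEEL = "langchain_core-1.0.0a6-py3-none-any.whl"
NEW_WHEEL = "langchain_core-1.0.0a8-py3-none-any.whl"
TARGET = "langchain_core/language_models/llms.py"


def read_member(wheel_path: str, member: str) -> list[str]:
    """Read one file out of a wheel (a wheel is just a zip archive)."""
    with zipfile.ZipFile(wheel_path) as wheel:
        return wheel.read(member).decode("utf-8").splitlines(keepends=True)


diff = difflib.unified_diff(
    read_member(OLD_WHEEL, TARGET),
    read_member(NEW_WHEEL, TARGET),
    fromfile=f"a/{TARGET}",
    tofile=f"b/{TARGET}",
)
print("".join(diff))
```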
langchain_core/language_models/llms.py CHANGED

@@ -7,21 +7,17 @@ import functools
 import inspect
 import json
 import logging
-import warnings
 from abc import ABC, abstractmethod
-from collections.abc import AsyncIterator, Iterator, Sequence
+from collections.abc import AsyncIterator, Callable, Iterator, Sequence
 from pathlib import Path
 from typing import (
     TYPE_CHECKING,
     Any,
-    Callable,
-    Optional,
-    Union,
     cast,
 )

 import yaml
-from pydantic import ConfigDict, Field, model_validator
+from pydantic import ConfigDict
 from tenacity import (
     RetryCallState,
     before_sleep_log,
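Most of the churn in this file is a mechanical typing cleanup: `Optional[X]` and `Union[X, Y]` annotations are rewritten in PEP 604 form (`X | None`, `X | Y`), `Callable` now comes from `collections.abc`, and the `typing` import list shrinks accordingly. A before/after sketch of the pattern (illustrative names, not taken from the diff):

```python
# Before (1.0.0a6 style): typing-module spellings.
from typing import Callable, Optional, Union


def run(handler: Callable[[str], str], stop: Optional[list[str]] = None,
        retries: Union[int, float] = 1) -> Optional[str]:
    ...


# After (1.0.0a8 style): PEP 604 unions and collections.abc.Callable.
from collections.abc import Callable  # noqa: E402


def run(handler: Callable[[str], str], stop: list[str] | None = None,
        retries: int | float = 1) -> str | None:
    ...
```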
@@ -33,7 +29,6 @@ from tenacity import (
 )
 from typing_extensions import override

-from langchain_core._api import deprecated
 from langchain_core.caches import BaseCache
 from langchain_core.callbacks import (
     AsyncCallbackManager,
@@ -51,10 +46,7 @@ from langchain_core.language_models.base import (
 )
 from langchain_core.load import dumpd
 from langchain_core.messages import (
-    AIMessage,
-    BaseMessage,
     convert_to_messages,
-    get_buffer_string,
 )
 from langchain_core.outputs import Generation, GenerationChunk, LLMResult, RunInfo
 from langchain_core.prompt_values import ChatPromptValue, PromptValue, StringPromptValue
@@ -76,9 +68,7 @@ def _log_error_once(msg: str) -> None:
 def create_base_retry_decorator(
     error_types: list[type[BaseException]],
     max_retries: int = 1,
-    run_manager: Optional[
-        Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
-    ] = None,
+    run_manager: AsyncCallbackManagerForLLMRun | CallbackManagerForLLMRun | None = None,
 ) -> Callable[[Any], Any]:
     """Create a retry decorator for a given LLM and provided a list of error types.

@@ -129,9 +119,9 @@ def create_base_retry_decorator(
     )


-def _resolve_cache(*, cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
+def _resolve_cache(*, cache: BaseCache | bool | None) -> BaseCache | None:
     """Resolve the cache."""
-    llm_cache: Optional[BaseCache]
+    llm_cache: BaseCache | None
     if isinstance(cache, BaseCache):
         llm_cache = cache
     elif cache is None:
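Only the annotations of `_resolve_cache` change; the resolution behaviour stays the same: an explicit `BaseCache` instance wins, `None` defers to the globally configured LLM cache, and a boolean switches caching on or off. A simplified sketch of that logic (not the verbatim implementation; it assumes, as in current releases, that `get_llm_cache` is still exposed from `langchain_core.globals` and that `cache=True` without a configured global cache is an error):

```python
from langchain_core.caches import BaseCache
from langchain_core.globals import get_llm_cache


def resolve_cache_sketch(cache: BaseCache | bool | None) -> BaseCache | None:
    """Simplified stand-in for _resolve_cache, for illustration only."""
    if isinstance(cache, BaseCache):
        return cache  # an explicit cache instance is used as-is
    if cache is False:
        return None  # caching explicitly disabled for this model
    global_cache = get_llm_cache()
    if cache is True and global_cache is None:
        msg = "cache=True was passed, but no global cache has been configured"
        raise ValueError(msg)
    return global_cache  # cache is None or True: fall back to the global cache
```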
@@ -156,7 +146,7 @@ def _resolve_cache(*, cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
 def get_prompts(
     params: dict[str, Any],
     prompts: list[str],
-    cache: Union[BaseCache, bool, None] = None,  # noqa: FBT001
+    cache: BaseCache | bool | None = None,  # noqa: FBT001
 ) -> tuple[dict[int, list], str, list[int], list[str]]:
     """Get prompts that are already cached.

@@ -192,7 +182,7 @@ def get_prompts(
 async def aget_prompts(
     params: dict[str, Any],
     prompts: list[str],
-    cache: Union[BaseCache, bool, None] = None,  # noqa: FBT001
+    cache: BaseCache | bool | None = None,  # noqa: FBT001
 ) -> tuple[dict[int, list], str, list[int], list[str]]:
     """Get prompts that are already cached. Async version.

@@ -225,13 +215,13 @@ async def aget_prompts(


 def update_cache(
-    cache: Union[BaseCache, bool, None],  # noqa: FBT001
+    cache: BaseCache | bool | None,  # noqa: FBT001
     existing_prompts: dict[int, list],
     llm_string: str,
     missing_prompt_idxs: list[int],
     new_results: LLMResult,
     prompts: list[str],
-) -> Optional[dict]:
+) -> dict | None:
     """Update the cache and get the LLM output.

     Args:
@@ -258,13 +248,13 @@ def update_cache(


 async def aupdate_cache(
-    cache: Union[BaseCache, bool, None],  # noqa: FBT001
+    cache: BaseCache | bool | None,  # noqa: FBT001
     existing_prompts: dict[int, list],
     llm_string: str,
     missing_prompt_idxs: list[int],
     new_results: LLMResult,
     prompts: list[str],
-) -> Optional[dict]:
+) -> dict | None:
     """Update the cache and get the LLM output. Async version.

     Args:
@@ -296,26 +286,10 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     It should take in a prompt and return a string.
     """

-    callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
-    """[DEPRECATED]"""
-
     model_config = ConfigDict(
         arbitrary_types_allowed=True,
     )

-    @model_validator(mode="before")
-    @classmethod
-    def raise_deprecation(cls, values: dict) -> Any:
-        """Raise deprecation warning if callback_manager is used."""
-        if values.get("callback_manager") is not None:
-            warnings.warn(
-                "callback_manager is deprecated. Please use callbacks instead.",
-                DeprecationWarning,
-                stacklevel=5,
-            )
-            values["callbacks"] = values.pop("callback_manager", None)
-        return values
-
     @functools.cached_property
     def _serialized(self) -> dict[str, Any]:
         return dumpd(self)
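The `callback_manager` field and its `raise_deprecation` validator are deleted outright, so constructor calls using `callback_manager=...` are no longer silently rerouted to `callbacks` with a `DeprecationWarning`; handlers must be passed via `callbacks`. A small sketch of the replacement pattern, using the fake LLM bundled with `langchain_core` purely for illustration:

```python
from langchain_core.callbacks import StdOutCallbackHandler
from langchain_core.language_models.fake import FakeListLLM

# Pre-1.0: FakeListLLM(..., callback_manager=...) was accepted and remapped.
# 1.0.0a8: the field is gone, so attach handlers through `callbacks` instead.
llm = FakeListLLM(
    responses=["hello"],
    callbacks=[StdOutCallbackHandler()],
)
print(llm.invoke("hi"))  # -> "hello"
```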
@@ -343,7 +317,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):

     def _get_ls_params(
         self,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> LangSmithParams:
         """Get standard params for tracing."""
@@ -382,9 +356,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     def invoke(
         self,
         input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
+        config: RunnableConfig | None = None,
         *,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> str:
         config = ensure_config(config)
@@ -407,9 +381,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     async def ainvoke(
         self,
         input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
+        config: RunnableConfig | None = None,
         *,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> str:
         config = ensure_config(config)
@@ -429,7 +403,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     def batch(
         self,
         inputs: list[LanguageModelInput],
-        config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
+        config: RunnableConfig | list[RunnableConfig] | None = None,
         *,
         return_exceptions: bool = False,
         **kwargs: Any,
@@ -476,7 +450,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     async def abatch(
         self,
         inputs: list[LanguageModelInput],
-        config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
+        config: RunnableConfig | list[RunnableConfig] | None = None,
         *,
         return_exceptions: bool = False,
         **kwargs: Any,
@@ -522,9 +496,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     def stream(
         self,
         input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
+        config: RunnableConfig | None = None,
         *,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> Iterator[str]:
         if type(self)._stream == BaseLLM._stream:  # noqa: SLF001
@@ -559,7 +533,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             run_id=config.pop("run_id", None),
             batch_size=1,
         )
-        generation: Optional[GenerationChunk] = None
+        generation: GenerationChunk | None = None
         try:
             for chunk in self._stream(
                 prompt, stop=stop, run_manager=run_manager, **kwargs
@@ -589,9 +563,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     async def astream(
         self,
         input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
+        config: RunnableConfig | None = None,
         *,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> AsyncIterator[str]:
         if (
@@ -629,7 +603,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             run_id=config.pop("run_id", None),
             batch_size=1,
         )
-        generation: Optional[GenerationChunk] = None
+        generation: GenerationChunk | None = None
         try:
             async for chunk in self._astream(
                 prompt,
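The `invoke` / `ainvoke` / `batch` / `abatch` / `stream` / `astream` hunks above only modernize annotations; the runtime calling convention is unchanged. For reference, a short usage sketch against the streaming fake model shipped in this package (assumed to behave as in earlier releases):

```python
from langchain_core.language_models.fake import FakeStreamingListLLM

llm = FakeStreamingListLLM(responses=["streamed answer"])

print(llm.invoke("question"))         # -> "streamed answer"
print(llm.batch(["q1", "q2"]))        # -> one string per input prompt

for chunk in llm.stream("question"):  # yields the response piece by piece
    print(chunk, end="", flush=True)
print()
```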
@@ -662,8 +636,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     def _generate(
         self,
         prompts: list[str],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> LLMResult:
         """Run the LLM on the given prompts.
@@ -682,8 +656,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     async def _agenerate(
         self,
         prompts: list[str],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> LLMResult:
         """Run the LLM on the given prompts.
@@ -710,8 +684,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     def _stream(
         self,
         prompt: str,
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> Iterator[GenerationChunk]:
         """Stream the LLM on the given prompt.
@@ -738,8 +712,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     async def _astream(
         self,
         prompt: str,
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> AsyncIterator[GenerationChunk]:
         """An async version of the _stream method.
@@ -783,8 +757,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     def generate_prompt(
         self,
         prompts: list[PromptValue],
-        stop: Optional[list[str]] = None,
-        callbacks: Optional[Union[Callbacks, list[Callbacks]]] = None,
+        stop: list[str] | None = None,
+        callbacks: Callbacks | list[Callbacks] | None = None,
         **kwargs: Any,
     ) -> LLMResult:
         prompt_strings = [p.to_string() for p in prompts]
@@ -794,8 +768,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     async def agenerate_prompt(
         self,
         prompts: list[PromptValue],
-        stop: Optional[list[str]] = None,
-        callbacks: Optional[Union[Callbacks, list[Callbacks]]] = None,
+        stop: list[str] | None = None,
+        callbacks: Callbacks | list[Callbacks] | None = None,
         **kwargs: Any,
     ) -> LLMResult:
         prompt_strings = [p.to_string() for p in prompts]
@@ -806,7 +780,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     def _generate_helper(
         self,
         prompts: list[str],
-        stop: Optional[list[str]],
+        stop: list[str] | None,
         run_managers: list[CallbackManagerForLLMRun],
         *,
         new_arg_supported: bool,
@@ -829,7 +803,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                 run_manager.on_llm_error(e, response=LLMResult(generations=[]))
             raise
         flattened_outputs = output.flatten()
-        for manager, flattened_output in zip(run_managers, flattened_outputs):
+        for manager, flattened_output in zip(
+            run_managers, flattened_outputs, strict=False
+        ):
             manager.on_llm_end(flattened_output)
         if run_managers:
             output.run = [
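The `zip(...)` call sites in the run-manager bookkeeping gain an explicit `strict=False`, pinning down the pre-existing silent-truncation behaviour now that Python 3.10+ lets callers opt into length checking instead. The difference in isolation:

```python
run_managers = ["rm1", "rm2", "rm3"]
flattened_outputs = ["out1", "out2"]

# strict=False (what the diff spells out): iteration stops at the shorter input.
print(list(zip(run_managers, flattened_outputs, strict=False)))
# [('rm1', 'out1'), ('rm2', 'out2')]

# strict=True would raise instead of silently dropping the extra item.
try:
    list(zip(run_managers, flattened_outputs, strict=True))
except ValueError as err:
    print(err)  # zip() argument 2 is shorter than argument 1
```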
@@ -840,13 +816,13 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     def generate(
         self,
         prompts: list[str],
-        stop: Optional[list[str]] = None,
-        callbacks: Optional[Union[Callbacks, list[Callbacks]]] = None,
+        stop: list[str] | None = None,
+        callbacks: Callbacks | list[Callbacks] | None = None,
         *,
-        tags: Optional[Union[list[str], list[list[str]]]] = None,
-        metadata: Optional[Union[dict[str, Any], list[dict[str, Any]]]] = None,
-        run_name: Optional[Union[str, list[str]]] = None,
-        run_id: Optional[Union[uuid.UUID, list[Optional[uuid.UUID]]]] = None,
+        tags: list[str] | list[list[str]] | None = None,
+        metadata: dict[str, Any] | list[dict[str, Any]] | None = None,
+        run_name: str | list[str] | None = None,
+        run_id: uuid.UUID | list[uuid.UUID | None] | None = None,
         **kwargs: Any,
     ) -> LLMResult:
         """Pass a sequence of prompts to a model and return generations.
@@ -936,14 +912,12 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                 msg = "run_name must be a list of the same length as prompts"
                 raise ValueError(msg)
             callbacks = cast("list[Callbacks]", callbacks)
-            tags_list = cast(
-                "list[Optional[list[str]]]", tags or ([None] * len(prompts))
-            )
+            tags_list = cast("list[list[str] | None]", tags or ([None] * len(prompts)))
             metadata_list = cast(
-                "list[Optional[dict[str, Any]]]", metadata or ([{}] * len(prompts))
+                "list[dict[str, Any] | None]", metadata or ([{}] * len(prompts))
             )
             run_name_list = run_name or cast(
-                "list[Optional[str]]", ([None] * len(prompts))
+                "list[str | None]", ([None] * len(prompts))
             )
             callback_managers = [
                 CallbackManager.configure(
@@ -955,7 +929,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                     meta,
                     self.metadata,
                 )
-                for callback, tag, meta in zip(callbacks, tags_list, metadata_list)
+                for callback, tag, meta in zip(
+                    callbacks, tags_list, metadata_list, strict=False
+                )
             ]
         else:
             # We've received a single callbacks arg to apply to all inputs
@@ -970,7 +946,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                     self.metadata,
                 )
             ] * len(prompts)
-            run_name_list = [cast("Optional[str]", run_name)] * len(prompts)
+            run_name_list = [cast("str | None", run_name)] * len(prompts)
         run_ids_list = self._get_run_ids_list(run_id, prompts)
         params = self.dict()
         params["stop"] = stop
@@ -996,7 +972,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                     run_id=run_id_,
                 )[0]
                 for callback_manager, prompt, run_name, run_id_ in zip(
-                    callback_managers, prompts, run_name_list, run_ids_list
+                    callback_managers,
+                    prompts,
+                    run_name_list,
+                    run_ids_list,
+                    strict=False,
                 )
             ]
             return self._generate_helper(
@@ -1046,7 +1026,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):

     @staticmethod
     def _get_run_ids_list(
-        run_id: Optional[Union[uuid.UUID, list[Optional[uuid.UUID]]]], prompts: list
+        run_id: uuid.UUID | list[uuid.UUID | None] | None, prompts: list
     ) -> list:
         if run_id is None:
             return [None] * len(prompts)
@@ -1063,7 +1043,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     async def _agenerate_helper(
         self,
         prompts: list[str],
-        stop: Optional[list[str]],
+        stop: list[str] | None,
         run_managers: list[AsyncCallbackManagerForLLMRun],
         *,
         new_arg_supported: bool,
@@ -1093,7 +1073,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             *[
                 run_manager.on_llm_end(flattened_output)
                 for run_manager, flattened_output in zip(
-                    run_managers, flattened_outputs
+                    run_managers, flattened_outputs, strict=False
                 )
             ]
         )
@@ -1106,13 +1086,13 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     async def agenerate(
         self,
         prompts: list[str],
-        stop: Optional[list[str]] = None,
-        callbacks: Optional[Union[Callbacks, list[Callbacks]]] = None,
+        stop: list[str] | None = None,
+        callbacks: Callbacks | list[Callbacks] | None = None,
         *,
-        tags: Optional[Union[list[str], list[list[str]]]] = None,
-        metadata: Optional[Union[dict[str, Any], list[dict[str, Any]]]] = None,
-        run_name: Optional[Union[str, list[str]]] = None,
-        run_id: Optional[Union[uuid.UUID, list[Optional[uuid.UUID]]]] = None,
+        tags: list[str] | list[list[str]] | None = None,
+        metadata: dict[str, Any] | list[dict[str, Any]] | None = None,
+        run_name: str | list[str] | None = None,
+        run_id: uuid.UUID | list[uuid.UUID | None] | None = None,
         **kwargs: Any,
     ) -> LLMResult:
         """Asynchronously pass a sequence of prompts to a model and return generations.
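`agenerate` keeps its batching behaviour: `prompts` is a list, and `tags`, `metadata`, `run_name`, and `run_id` can be supplied once for the whole batch or, together with per-prompt callbacks, as one entry per prompt; only the annotation spelling changes. A minimal async usage sketch, again leaning on the bundled fake model as a stand-in for a real provider:

```python
import asyncio

from langchain_core.language_models.fake import FakeListLLM


async def main() -> None:
    llm = FakeListLLM(responses=["first", "second"])
    # One shared tag/metadata set applied to both prompts in the batch.
    result = await llm.agenerate(
        ["prompt one", "prompt two"],
        tags=["diff-example"],
        metadata={"source": "release-notes"},
    )
    for generations in result.generations:
        print(generations[0].text)


asyncio.run(main())
```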
@@ -1191,14 +1171,12 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                 msg = "run_name must be a list of the same length as prompts"
                 raise ValueError(msg)
             callbacks = cast("list[Callbacks]", callbacks)
-            tags_list = cast(
-                "list[Optional[list[str]]]", tags or ([None] * len(prompts))
-            )
+            tags_list = cast("list[list[str] | None]", tags or ([None] * len(prompts)))
             metadata_list = cast(
-                "list[Optional[dict[str, Any]]]", metadata or ([{}] * len(prompts))
+                "list[dict[str, Any] | None]", metadata or ([{}] * len(prompts))
             )
             run_name_list = run_name or cast(
-                "list[Optional[str]]", ([None] * len(prompts))
+                "list[str | None]", ([None] * len(prompts))
             )
             callback_managers = [
                 AsyncCallbackManager.configure(
@@ -1210,7 +1188,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                     meta,
                     self.metadata,
                 )
-                for callback, tag, meta in zip(callbacks, tags_list, metadata_list)
+                for callback, tag, meta in zip(
+                    callbacks, tags_list, metadata_list, strict=False
+                )
             ]
         else:
             # We've received a single callbacks arg to apply to all inputs
@@ -1225,7 +1205,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                     self.metadata,
                 )
             ] * len(prompts)
-            run_name_list = [cast("Optional[str]", run_name)] * len(prompts)
+            run_name_list = [cast("str | None", run_name)] * len(prompts)
         run_ids_list = self._get_run_ids_list(run_id, prompts)
         params = self.dict()
         params["stop"] = stop
@@ -1255,7 +1235,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
                         run_id=run_id_,
                     )
                     for callback_manager, prompt, run_name, run_id_ in zip(
-                        callback_managers, prompts, run_name_list, run_ids_list
+                        callback_managers,
+                        prompts,
+                        run_name_list,
+                        run_ids_list,
+                        strict=False,
                     )
                 ]
             )
@@ -1308,64 +1292,14 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         generations = [existing_prompts[i] for i in range(len(prompts))]
         return LLMResult(generations=generations, llm_output=llm_output, run=run_info)

-    @deprecated("0.1.7", alternative="invoke", removal="1.0")
-    def __call__(
-        self,
-        prompt: str,
-        stop: Optional[list[str]] = None,
-        callbacks: Callbacks = None,
-        *,
-        tags: Optional[list[str]] = None,
-        metadata: Optional[dict[str, Any]] = None,
-        **kwargs: Any,
-    ) -> str:
-        """Check Cache and run the LLM on the given prompt and input.
-
-        Args:
-            prompt: The prompt to generate from.
-            stop: Stop words to use when generating. Model output is cut off at the
-                first occurrence of any of these substrings.
-            callbacks: Callbacks to pass through. Used for executing additional
-                functionality, such as logging or streaming, throughout generation.
-            tags: List of tags to associate with the prompt.
-            metadata: Metadata to associate with the prompt.
-            **kwargs: Arbitrary additional keyword arguments. These are usually passed
-                to the model provider API call.
-
-        Returns:
-            The generated text.
-
-        Raises:
-            ValueError: If the prompt is not a string.
-        """
-        if not isinstance(prompt, str):
-            msg = (
-                "Argument `prompt` is expected to be a string. Instead found "
-                f"{type(prompt)}. If you want to run the LLM on multiple prompts, use "
-                "`generate` instead."
-            )
-            raise ValueError(msg)  # noqa: TRY004
-        return (
-            self.generate(
-                [prompt],
-                stop=stop,
-                callbacks=callbacks,
-                tags=tags,
-                metadata=metadata,
-                **kwargs,
-            )
-            .generations[0][0]
-            .text
-        )
-
     async def _call_async(
         self,
         prompt: str,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         callbacks: Callbacks = None,
         *,
-        tags: Optional[list[str]] = None,
-        metadata: Optional[dict[str, Any]] = None,
+        tags: list[str] | None = None,
+        metadata: dict[str, Any] | None = None,
         **kwargs: Any,
     ) -> str:
         """Check Cache and run the LLM on the given prompt and input."""
@@ -1379,50 +1313,6 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         )
         return result.generations[0][0].text

-    @deprecated("0.1.7", alternative="invoke", removal="1.0")
-    @override
-    def predict(
-        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
-    ) -> str:
-        stop_ = None if stop is None else list(stop)
-        return self(text, stop=stop_, **kwargs)
-
-    @deprecated("0.1.7", alternative="invoke", removal="1.0")
-    @override
-    def predict_messages(
-        self,
-        messages: list[BaseMessage],
-        *,
-        stop: Optional[Sequence[str]] = None,
-        **kwargs: Any,
-    ) -> BaseMessage:
-        text = get_buffer_string(messages)
-        stop_ = None if stop is None else list(stop)
-        content = self(text, stop=stop_, **kwargs)
-        return AIMessage(content=content)
-
-    @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
-    @override
-    async def apredict(
-        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
-    ) -> str:
-        stop_ = None if stop is None else list(stop)
-        return await self._call_async(text, stop=stop_, **kwargs)
-
-    @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
-    @override
-    async def apredict_messages(
-        self,
-        messages: list[BaseMessage],
-        *,
-        stop: Optional[Sequence[str]] = None,
-        **kwargs: Any,
-    ) -> BaseMessage:
-        text = get_buffer_string(messages)
-        stop_ = None if stop is None else list(stop)
-        content = await self._call_async(text, stop=stop_, **kwargs)
-        return AIMessage(content=content)
-
     def __str__(self) -> str:
         """Return a string representation of the object for printing."""
         cls_name = f"\033[1m{self.__class__.__name__}\033[0m"
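The deprecated `__call__`, `predict`, `predict_messages`, `apredict`, and `apredict_messages` entry points, all slated since 0.1.7 for removal in 1.0, are now gone, so code that still calls an LLM like a function must move to the `invoke` family. A migration sketch (the fake model stands in for any `BaseLLM` subclass):

```python
from langchain_core.language_models.fake import FakeListLLM

llm = FakeListLLM(responses=["Why did the chicken cross the road?"])

# Removed in 1.0.0a8 (previously emitted DeprecationWarning):
#   text = llm("Tell me a joke", stop=["\n"])
#   text = llm.predict("Tell me a joke")
#   message = llm.predict_messages([...])  # returned an AIMessage

# Replacement: the Runnable-style entry points.
text = llm.invoke("Tell me a joke", stop=["\n"])
print(text)

# Async code migrates from apredict()/apredict_messages() to ainvoke().
```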
@@ -1440,7 +1330,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         starter_dict["_type"] = self._llm_type
         return starter_dict

-    def save(self, file_path: Union[Path, str]) -> None:
+    def save(self, file_path: Path | str) -> None:
         """Save the LLM.

         Args:
@@ -1510,8 +1400,8 @@ class LLM(BaseLLM):
     def _call(
         self,
         prompt: str,
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> str:
         """Run the LLM on the given input.
@@ -1534,8 +1424,8 @@ class LLM(BaseLLM):
     async def _acall(
         self,
         prompt: str,
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> str:
         """Async version of the _call method.
@@ -1568,8 +1458,8 @@ class LLM(BaseLLM):
     def _generate(
         self,
         prompts: list[str],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> LLMResult:
         # TODO: add caching here.
@@ -1587,8 +1477,8 @@ class LLM(BaseLLM):
     async def _agenerate(
         self,
         prompts: list[str],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> LLMResult:
         generations = []
langchain_core/load/dump.py CHANGED

@@ -72,7 +72,7 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
 def dumpd(obj: Any) -> Any:
     """Return a dict representation of an object.

-    Note:
+    !!! note
         Unfortunately this function is not as efficient as it could be because it first
         dumps the object to a json string and then loads it back into a dictionary.
|