langchain_core-1.0.0a5-py3-none-any.whl → langchain_core-1.0.0a7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of langchain-core might be problematic.
- langchain_core/_api/__init__.py +3 -3
- langchain_core/_api/beta_decorator.py +6 -6
- langchain_core/_api/deprecation.py +21 -29
- langchain_core/_api/path.py +3 -6
- langchain_core/_import_utils.py +2 -3
- langchain_core/agents.py +10 -11
- langchain_core/caches.py +7 -7
- langchain_core/callbacks/base.py +91 -91
- langchain_core/callbacks/file.py +11 -11
- langchain_core/callbacks/manager.py +86 -89
- langchain_core/callbacks/stdout.py +8 -8
- langchain_core/callbacks/usage.py +4 -4
- langchain_core/chat_history.py +1 -37
- langchain_core/document_loaders/base.py +2 -2
- langchain_core/document_loaders/langsmith.py +15 -15
- langchain_core/documents/base.py +16 -16
- langchain_core/documents/compressor.py +4 -4
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/example_selectors/semantic_similarity.py +17 -19
- langchain_core/exceptions.py +3 -3
- langchain_core/globals.py +3 -151
- langchain_core/indexing/api.py +44 -43
- langchain_core/indexing/base.py +30 -30
- langchain_core/indexing/in_memory.py +3 -3
- langchain_core/language_models/_utils.py +5 -7
- langchain_core/language_models/base.py +18 -132
- langchain_core/language_models/chat_models.py +118 -227
- langchain_core/language_models/fake.py +11 -11
- langchain_core/language_models/fake_chat_models.py +35 -29
- langchain_core/language_models/llms.py +91 -201
- langchain_core/load/dump.py +1 -1
- langchain_core/load/load.py +11 -12
- langchain_core/load/mapping.py +2 -4
- langchain_core/load/serializable.py +2 -4
- langchain_core/messages/ai.py +17 -20
- langchain_core/messages/base.py +28 -26
- langchain_core/messages/block_translators/__init__.py +17 -7
- langchain_core/messages/block_translators/anthropic.py +3 -3
- langchain_core/messages/block_translators/bedrock_converse.py +2 -2
- langchain_core/messages/block_translators/google_genai.py +502 -20
- langchain_core/messages/block_translators/langchain_v0.py +2 -2
- langchain_core/messages/block_translators/openai.py +6 -6
- langchain_core/messages/content.py +120 -124
- langchain_core/messages/human.py +7 -7
- langchain_core/messages/system.py +7 -7
- langchain_core/messages/tool.py +24 -24
- langchain_core/messages/utils.py +67 -79
- langchain_core/output_parsers/base.py +12 -14
- langchain_core/output_parsers/json.py +4 -4
- langchain_core/output_parsers/list.py +3 -5
- langchain_core/output_parsers/openai_functions.py +3 -3
- langchain_core/output_parsers/openai_tools.py +3 -3
- langchain_core/output_parsers/pydantic.py +2 -2
- langchain_core/output_parsers/transform.py +13 -15
- langchain_core/output_parsers/xml.py +7 -9
- langchain_core/outputs/chat_generation.py +4 -4
- langchain_core/outputs/chat_result.py +1 -3
- langchain_core/outputs/generation.py +2 -2
- langchain_core/outputs/llm_result.py +5 -5
- langchain_core/prompts/__init__.py +1 -5
- langchain_core/prompts/base.py +10 -15
- langchain_core/prompts/chat.py +31 -82
- langchain_core/prompts/dict.py +2 -2
- langchain_core/prompts/few_shot.py +5 -5
- langchain_core/prompts/few_shot_with_templates.py +4 -4
- langchain_core/prompts/loading.py +3 -5
- langchain_core/prompts/prompt.py +4 -16
- langchain_core/prompts/string.py +2 -1
- langchain_core/prompts/structured.py +16 -23
- langchain_core/rate_limiters.py +3 -4
- langchain_core/retrievers.py +14 -14
- langchain_core/runnables/base.py +938 -1054
- langchain_core/runnables/branch.py +36 -40
- langchain_core/runnables/config.py +27 -35
- langchain_core/runnables/configurable.py +108 -124
- langchain_core/runnables/fallbacks.py +76 -72
- langchain_core/runnables/graph.py +39 -45
- langchain_core/runnables/graph_ascii.py +9 -11
- langchain_core/runnables/graph_mermaid.py +18 -19
- langchain_core/runnables/graph_png.py +8 -9
- langchain_core/runnables/history.py +114 -127
- langchain_core/runnables/passthrough.py +113 -139
- langchain_core/runnables/retry.py +43 -48
- langchain_core/runnables/router.py +23 -28
- langchain_core/runnables/schema.py +42 -44
- langchain_core/runnables/utils.py +28 -31
- langchain_core/stores.py +9 -13
- langchain_core/structured_query.py +8 -8
- langchain_core/tools/base.py +63 -115
- langchain_core/tools/convert.py +31 -35
- langchain_core/tools/render.py +1 -1
- langchain_core/tools/retriever.py +4 -4
- langchain_core/tools/simple.py +13 -17
- langchain_core/tools/structured.py +12 -15
- langchain_core/tracers/base.py +62 -64
- langchain_core/tracers/context.py +17 -35
- langchain_core/tracers/core.py +49 -53
- langchain_core/tracers/evaluation.py +11 -11
- langchain_core/tracers/event_stream.py +58 -60
- langchain_core/tracers/langchain.py +13 -13
- langchain_core/tracers/log_stream.py +22 -24
- langchain_core/tracers/root_listeners.py +14 -14
- langchain_core/tracers/run_collector.py +2 -4
- langchain_core/tracers/schemas.py +8 -8
- langchain_core/tracers/stdout.py +2 -1
- langchain_core/utils/__init__.py +0 -3
- langchain_core/utils/_merge.py +2 -2
- langchain_core/utils/aiter.py +24 -28
- langchain_core/utils/env.py +4 -4
- langchain_core/utils/function_calling.py +31 -41
- langchain_core/utils/html.py +3 -4
- langchain_core/utils/input.py +3 -3
- langchain_core/utils/iter.py +15 -19
- langchain_core/utils/json.py +3 -2
- langchain_core/utils/json_schema.py +6 -6
- langchain_core/utils/mustache.py +3 -5
- langchain_core/utils/pydantic.py +16 -18
- langchain_core/utils/usage.py +1 -1
- langchain_core/utils/utils.py +29 -29
- langchain_core/vectorstores/base.py +18 -21
- langchain_core/vectorstores/in_memory.py +14 -87
- langchain_core/vectorstores/utils.py +2 -2
- langchain_core/version.py +1 -1
- {langchain_core-1.0.0a5.dist-info → langchain_core-1.0.0a7.dist-info}/METADATA +10 -31
- langchain_core-1.0.0a7.dist-info/RECORD +176 -0
- {langchain_core-1.0.0a5.dist-info → langchain_core-1.0.0a7.dist-info}/WHEEL +1 -1
- langchain_core/messages/block_translators/ollama.py +0 -47
- langchain_core/prompts/pipeline.py +0 -138
- langchain_core/tracers/langchain_v1.py +0 -31
- langchain_core/utils/loading.py +0 -35
- langchain_core-1.0.0a5.dist-info/RECORD +0 -181
- langchain_core-1.0.0a5.dist-info/entry_points.txt +0 -4
langchain_core/language_models/chat_models.py

@@ -6,22 +6,19 @@ import asyncio
 import inspect
 import json
 import typing
-import warnings
 from abc import ABC, abstractmethod
-from collections.abc import AsyncIterator, Iterator, Sequence
+from collections.abc import AsyncIterator, Callable, Iterator, Sequence
 from functools import cached_property
 from operator import itemgetter
-from typing import TYPE_CHECKING, Any, Literal, Optional, Union, cast
+from typing import TYPE_CHECKING, Any, Literal, cast

-from pydantic import BaseModel, ConfigDict, Field, model_validator
+from pydantic import BaseModel, ConfigDict, Field
 from typing_extensions import override

-from langchain_core._api import deprecated
 from langchain_core.caches import BaseCache
 from langchain_core.callbacks import (
     AsyncCallbackManager,
     AsyncCallbackManagerForLLMRun,
-    BaseCallbackManager,
     CallbackManager,
     CallbackManagerForLLMRun,
     Callbacks,
@@ -42,11 +39,11 @@ from langchain_core.messages import (
     AIMessageChunk,
     AnyMessage,
     BaseMessage,
-    HumanMessage,
     convert_to_messages,
     is_data_content_block,
     message_chunk_to_message,
 )
+from langchain_core.messages import content as types
 from langchain_core.messages.block_translators.openai import (
     convert_to_openai_image_block,
 )
@@ -223,7 +220,7 @@ async def agenerate_from_stream(
     return await run_in_executor(None, generate_from_stream, iter(chunks))


-def _format_ls_structured_output(ls_structured_output_format: Optional[dict]) -> dict:
+def _format_ls_structured_output(ls_structured_output_format: dict | None) -> dict:
     if ls_structured_output_format:
         try:
             ls_structured_output_format_dict = {
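Most of the churn in this file is mechanical typing cleanup: `Optional[X]` and `Union[X, Y]` annotations become PEP 604 `X | None` and `X | Y`, which is what lets the `Optional`/`Union` imports above disappear. A minimal sketch of the equivalence (the two spellings compare equal at runtime on Python 3.10+):

    # PEP 604 union syntax next to the typing spellings it replaces.
    from typing import Optional, Union

    def old_style(stop: Optional[list[str]] = None) -> Union[dict, None]: ...
    def new_style(stop: list[str] | None = None) -> dict | None: ...

    assert Optional[int] == (int | None)
    assert Union[dict, None] == (dict | None)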
@@ -319,20 +316,10 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

     """ # noqa: E501

-    callback_manager: Optional[BaseCallbackManager] = deprecated(
-        name="callback_manager", since="0.1.7", removal="1.0", alternative="callbacks"
-    )(
-        Field(
-            default=None,
-            exclude=True,
-            description="Callback manager to add to the run trace.",
-        )
-    )
-
-    rate_limiter: Optional[BaseRateLimiter] = Field(default=None, exclude=True)
+    rate_limiter: BaseRateLimiter | None = Field(default=None, exclude=True)
     "An optional rate limiter to use for limiting the number of requests."

-    disable_streaming: Union[bool, Literal["tool_calling"]] = False
+    disable_streaming: bool | Literal["tool_calling"] = False
     """Whether to disable streaming for this model.

     If streaming is bypassed, then ``stream()``/``astream()``/``astream_events()`` will
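The deprecated `callback_manager` field is gone (its companion `raise_deprecation` pre-validator is deleted a few hunks down); handlers now go through `callbacks` only, as the deprecation notice had advised since 0.1.7. A migration sketch, using the fake chat model that ships with langchain-core (any `BaseChatModel` subclass behaves the same):

    from langchain_core.callbacks import BaseCallbackHandler
    from langchain_core.language_models import FakeListChatModel

    class EndLogger(BaseCallbackHandler):
        def on_llm_end(self, response, **kwargs):
            # Fires for chat models too; response is an LLMResult.
            print("model finished:", response.generations[0][0].text)

    # Before (no longer supported): callback_manager=CallbackManager([EndLogger()])
    model = FakeListChatModel(responses=["hi"], callbacks=[EndLogger()])
    model.invoke("hello")  # prints: model finished: hi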
@@ -351,7 +338,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

     """

-    output_version: Optional[str] = Field(
+    output_version: str | None = Field(
         default_factory=from_env("LC_OUTPUT_VERSION", default=None)
     )
     """Version of ``AIMessage`` output format to store in message content.
@@ -369,31 +356,10 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     Partner packages (e.g., ``langchain-openai``) can also use this field to roll out
     new content formats in a backward-compatible way.

-    .. versionadded:: 1.0
+    !!! version-added "Added in version 1.0"

     """

-    @model_validator(mode="before")
-    @classmethod
-    def raise_deprecation(cls, values: dict) -> Any:
-        """Emit deprecation warning if ``callback_manager`` is used.
-
-        Args:
-            values (Dict): Values to validate.
-
-        Returns:
-            Dict: Validated values.
-
-        """
-        if values.get("callback_manager") is not None:
-            warnings.warn(
-                "callback_manager is deprecated. Please use callbacks instead.",
-                DeprecationWarning,
-                stacklevel=5,
-            )
-            values["callbacks"] = values.pop("callback_manager", None)
-        return values
-
     model_config = ConfigDict(
         arbitrary_types_allowed=True,
     )
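`output_version` itself is unchanged here apart from its annotation: it still defaults from the `LC_OUTPUT_VERSION` environment variable (read when the model is instantiated), and `"v1"` opts message content into the typed content blocks handled by the streaming hunks below. A sketch of both ways to set it:

    import os

    from langchain_core.language_models import FakeListChatModel

    os.environ["LC_OUTPUT_VERSION"] = "v1"  # process-wide default, read at model init
    model = FakeListChatModel(responses=["hi"], output_version="v1")  # explicit per-model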
@@ -427,9 +393,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     def invoke(
         self,
         input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
+        config: RunnableConfig | None = None,
         *,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> AIMessage:
         config = ensure_config(config)
@@ -454,9 +420,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     async def ainvoke(
         self,
         input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
+        config: RunnableConfig | None = None,
         *,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> AIMessage:
         config = ensure_config(config)
@@ -478,9 +444,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
         self,
         *,
         async_api: bool,
-        run_manager: Optional[
-            Union[CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun]
-        ] = None,
+        run_manager: CallbackManagerForLLMRun
+        | AsyncCallbackManagerForLLMRun
+        | None = None,
         **kwargs: Any,
     ) -> bool:
         """Determine if a given model call should hit the streaming API."""
@@ -505,6 +471,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
         if "stream" in kwargs:
             return kwargs["stream"]

+        if getattr(self, "streaming", False):
+            return True
+
         # Check if any streaming callback handlers have been passed in.
         handlers = run_manager.handlers if run_manager else []
         return any(isinstance(h, _StreamingCallbackHandler) for h in handlers)
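New here is the `getattr(self, "streaming", False)` check: integrations that expose a `streaming` field (several provider packages do) now route non-streaming calls such as `invoke()` through the streaming path automatically. A simplified, self-contained sketch of the resulting decision order (an assumption about the surrounding checks; the real method also handles `disable_streaming="tool_calling"` and the async variants):

    def should_stream(
        has_stream_impl: bool,        # subclass overrides _stream()
        disable_streaming: bool | str,
        streaming_attr: bool,         # provider-level `streaming` field
        kwargs: dict,
        has_streaming_handler: bool,  # e.g. astream_events() is listening
    ) -> bool:
        if not has_stream_impl or disable_streaming is True:
            return False
        if "stream" in kwargs:
            return kwargs["stream"]  # explicit caller override
        if streaming_attr:
            return True              # new in this release
        return has_streaming_handler

    print(should_stream(True, False, True, {}, False))                 # True
    print(should_stream(True, False, True, {"stream": False}, False))  # False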
@@ -513,9 +482,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     def stream(
         self,
         input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
+        config: RunnableConfig | None = None,
         *,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> Iterator[AIMessageChunk]:
         if not self._should_stream(async_api=False, **{**kwargs, "stream": True}):
@@ -568,6 +537,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
             input_messages = _normalize_messages(messages)
             run_id = "-".join((LC_ID_PREFIX, str(run_manager.run_id)))
             yielded = False
+            index = -1
+            index_type = ""
             for chunk in self._stream(input_messages, stop=stop, **kwargs):
                 if chunk.message.id is None:
                     chunk.message.id = run_id
@@ -577,6 +548,14 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
                     chunk.message = _update_message_content_to_blocks(
                         chunk.message, "v1"
                     )
+                    for block in cast(
+                        "list[types.ContentBlock]", chunk.message.content
+                    ):
+                        if block["type"] != index_type:
+                            index_type = block["type"]
+                            index = index + 1
+                        if "index" not in block:
+                            block["index"] = index
                 run_manager.on_llm_new_token(
                     cast("str", chunk.message.content), chunk=chunk
                 )
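The added loop assigns a streaming `index` to each v1 content block: the counter bumps whenever the block type changes, so consecutive chunks of the same type share one index, and a block that already carries an `index` keeps it. The same rule, run standalone over a flattened chunk sequence:

    blocks = [
        {"type": "reasoning", "reasoning": "thinking..."},
        {"type": "text", "text": "Hello"},
        {"type": "text", "text": " world"},  # same type as the previous block
    ]
    index, index_type = -1, ""
    for block in blocks:
        if block["type"] != index_type:
            index_type = block["type"]
            index += 1
        if "index" not in block:
            block["index"] = index
    print([(b["type"], b["index"]) for b in blocks])
    # [('reasoning', 0), ('text', 1), ('text', 1)]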
@@ -591,7 +570,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
                 and isinstance(chunk.message, AIMessageChunk)
                 and not chunk.message.chunk_position
             ):
-                empty_content: Union[str, list] = (
+                empty_content: str | list = (
                     "" if isinstance(chunk.message.content, str) else []
                 )
                 msg_chunk = AIMessageChunk(
@@ -629,9 +608,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     async def astream(
         self,
         input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
+        config: RunnableConfig | None = None,
         *,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> AsyncIterator[AIMessageChunk]:
         if not self._should_stream(async_api=True, **{**kwargs, "stream": True}):
@@ -686,6 +665,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
             input_messages = _normalize_messages(messages)
             run_id = "-".join((LC_ID_PREFIX, str(run_manager.run_id)))
             yielded = False
+            index = -1
+            index_type = ""
             async for chunk in self._astream(
                 input_messages,
                 stop=stop,
@@ -699,6 +680,14 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
                     chunk.message = _update_message_content_to_blocks(
                         chunk.message, "v1"
                     )
+                    for block in cast(
+                        "list[types.ContentBlock]", chunk.message.content
+                    ):
+                        if block["type"] != index_type:
+                            index_type = block["type"]
+                            index = index + 1
+                        if "index" not in block:
+                            block["index"] = index
                 await run_manager.on_llm_new_token(
                     cast("str", chunk.message.content), chunk=chunk
                 )
@@ -712,7 +701,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
                 and isinstance(chunk.message, AIMessageChunk)
                 and not chunk.message.chunk_position
             ):
-                empty_content: Union[str, list] = (
+                empty_content: str | list = (
                     "" if isinstance(chunk.message.content, str) else []
                 )
                 msg_chunk = AIMessageChunk(
@@ -747,7 +736,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

     # --- Custom methods ---

-    def _combine_llm_outputs(self, llm_outputs: list[Optional[dict]]) -> dict:  # noqa: ARG002
+    def _combine_llm_outputs(self, llm_outputs: list[dict | None]) -> dict:  # noqa: ARG002
         return {}

     def _convert_cached_generations(self, cache_val: list) -> list[ChatGeneration]:
@@ -791,7 +780,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

     def _get_invocation_params(
         self,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> dict:
         params = self.dict()
@@ -800,7 +789,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

     def _get_ls_params(
         self,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> LangSmithParams:
         """Get standard params for tracing."""
@@ -838,7 +827,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

         return ls_params

-    def _get_llm_string(self, stop: Optional[list[str]] = None, **kwargs: Any) -> str:
+    def _get_llm_string(self, stop: list[str] | None = None, **kwargs: Any) -> str:
         if self.is_lc_serializable():
             params = {**kwargs, "stop": stop}
             param_string = str(sorted(params.items()))
@@ -855,13 +844,13 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     def generate(
         self,
         messages: list[list[BaseMessage]],
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         callbacks: Callbacks = None,
         *,
-        tags: Optional[list[str]] = None,
-        metadata: Optional[dict[str, Any]] = None,
-        run_name: Optional[str] = None,
-        run_id: Optional[uuid.UUID] = None,
+        tags: list[str] | None = None,
+        metadata: dict[str, Any] | None = None,
+        run_name: str | None = None,
+        run_id: uuid.UUID | None = None,
         **kwargs: Any,
     ) -> LLMResult:
         """Pass a sequence of prompts to the model and return model generations.
@@ -962,7 +951,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
         output = LLMResult(generations=generations, llm_output=llm_output)
         if run_managers:
             run_infos = []
-            for manager, flattened_output in zip(run_managers, flattened_outputs):
+            for manager, flattened_output in zip(
+                run_managers, flattened_outputs, strict=False
+            ):
                 manager.on_llm_end(flattened_output)
                 run_infos.append(RunInfo(run_id=manager.run_id))
             output.run = run_infos
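The `strict=False` arguments added to `zip()` here and in the hunks below just spell out the long-standing default (the keyword exists since Python 3.10, and linters such as ruff's B905 require it to be explicit); behavior is unchanged:

    # strict=False keeps the silent-truncation default; strict=True would
    # raise ValueError if the two iterables ever differed in length.
    print(list(zip([1, 2, 3], ["a", "b"], strict=False)))  # [(1, 'a'), (2, 'b')]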
@@ -971,13 +962,13 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     async def agenerate(
         self,
         messages: list[list[BaseMessage]],
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         callbacks: Callbacks = None,
         *,
-        tags: Optional[list[str]] = None,
-        metadata: Optional[dict[str, Any]] = None,
-        run_name: Optional[str] = None,
-        run_id: Optional[uuid.UUID] = None,
+        tags: list[str] | None = None,
+        metadata: dict[str, Any] | None = None,
+        run_name: str | None = None,
+        run_id: uuid.UUID | None = None,
         **kwargs: Any,
     ) -> LLMResult:
         """Asynchronously pass a sequence of prompts to a model and return generations.
@@ -1084,7 +1075,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
                     llm_output=res.llm_output,  # type: ignore[union-attr]
                 )
             )
-            for run_manager, res in zip(run_managers, results)
+            for run_manager, res in zip(run_managers, results, strict=False)
            if not isinstance(res, Exception)
         ]
     )
@@ -1100,7 +1091,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
         *[
             run_manager.on_llm_end(flattened_output)
             for run_manager, flattened_output in zip(
-                run_managers, flattened_outputs
+                run_managers, flattened_outputs, strict=False
             )
         ]
     )
@@ -1114,7 +1105,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     def generate_prompt(
         self,
         prompts: list[PromptValue],
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         callbacks: Callbacks = None,
         **kwargs: Any,
     ) -> LLMResult:
@@ -1125,7 +1116,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     async def agenerate_prompt(
         self,
         prompts: list[PromptValue],
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         callbacks: Callbacks = None,
         **kwargs: Any,
     ) -> LLMResult:
@@ -1137,8 +1128,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     def _generate_with_cache(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         llm_cache = self.cache if isinstance(self.cache, BaseCache) else get_llm_cache()
@@ -1174,10 +1165,12 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
             **kwargs,
         ):
             chunks: list[ChatGenerationChunk] = []
-            run_id: Optional[str] = (
+            run_id: str | None = (
                 f"{LC_ID_PREFIX}-{run_manager.run_id}" if run_manager else None
             )
             yielded = False
+            index = -1
+            index_type = ""
             for chunk in self._stream(messages, stop=stop, **kwargs):
                 chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk)
                 if self.output_version == "v1":
@@ -1185,6 +1178,14 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
                     chunk.message = _update_message_content_to_blocks(
                         chunk.message, "v1"
                     )
+                    for block in cast(
+                        "list[types.ContentBlock]", chunk.message.content
+                    ):
+                        if block["type"] != index_type:
+                            index_type = block["type"]
+                            index = index + 1
+                        if "index" not in block:
+                            block["index"] = index
                 if run_manager:
                     if chunk.message.id is None:
                         chunk.message.id = run_id
@@ -1200,7 +1201,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
                 and isinstance(chunk.message, AIMessageChunk)
                 and not chunk.message.chunk_position
             ):
-                empty_content: Union[str, list] = (
+                empty_content: str | list = (
                     "" if isinstance(chunk.message.content, str) else []
                 )
                 chunk = ChatGenerationChunk(
@@ -1245,8 +1246,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     async def _agenerate_with_cache(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         llm_cache = self.cache if isinstance(self.cache, BaseCache) else get_llm_cache()
@@ -1282,10 +1283,12 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
             **kwargs,
         ):
             chunks: list[ChatGenerationChunk] = []
-            run_id: Optional[str] = (
+            run_id: str | None = (
                 f"{LC_ID_PREFIX}-{run_manager.run_id}" if run_manager else None
             )
             yielded = False
+            index = -1
+            index_type = ""
             async for chunk in self._astream(messages, stop=stop, **kwargs):
                 chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk)
                 if self.output_version == "v1":
@@ -1293,6 +1296,14 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
                     chunk.message = _update_message_content_to_blocks(
                         chunk.message, "v1"
                     )
+                    for block in cast(
+                        "list[types.ContentBlock]", chunk.message.content
+                    ):
+                        if block["type"] != index_type:
+                            index_type = block["type"]
+                            index = index + 1
+                        if "index" not in block:
+                            block["index"] = index
                 if run_manager:
                     if chunk.message.id is None:
                         chunk.message.id = run_id
@@ -1308,7 +1319,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
                 and isinstance(chunk.message, AIMessageChunk)
                 and not chunk.message.chunk_position
             ):
-                empty_content: Union[str, list] = (
+                empty_content: str | list = (
                     "" if isinstance(chunk.message.content, str) else []
                 )
                 chunk = ChatGenerationChunk(
@@ -1354,8 +1365,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     def _generate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         """Generate the result.
@@ -1373,8 +1384,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     async def _agenerate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         """Generate the result.
@@ -1400,8 +1411,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     def _stream(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         """Stream the output of the model.
@@ -1420,8 +1431,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     async def _astream(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
         """Stream the output of the model.
@@ -1455,44 +1466,10 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
                 break
             yield item  # type: ignore[misc]

-    @deprecated("0.1.7", alternative="invoke", removal="1.0")
-    def __call__(
-        self,
-        messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        callbacks: Callbacks = None,
-        **kwargs: Any,
-    ) -> BaseMessage:
-        """Call the model.
-
-        Args:
-            messages: List of messages.
-            stop: Stop words to use when generating. Model output is cut off at the
-                first occurrence of any of these substrings.
-            callbacks: Callbacks to pass through. Used for executing additional
-                functionality, such as logging or streaming, throughout generation.
-            **kwargs: Arbitrary additional keyword arguments. These are usually passed
-                to the model provider API call.
-
-        Raises:
-            ValueError: If the generation is not a chat generation.
-
-        Returns:
-            The model output message.
-
-        """
-        generation = self.generate(
-            [messages], stop=stop, callbacks=callbacks, **kwargs
-        ).generations[0][0]
-        if isinstance(generation, ChatGeneration):
-            return generation.message
-        msg = "Unexpected generation type"
-        raise ValueError(msg)
-
     async def _call_async(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         callbacks: Callbacks = None,
         **kwargs: Any,
     ) -> BaseMessage:
@@ -1505,91 +1482,6 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
         msg = "Unexpected generation type"
         raise ValueError(msg)

-    @deprecated("0.1.7", alternative="invoke", removal="1.0")
-    def call_as_llm(
-        self, message: str, stop: Optional[list[str]] = None, **kwargs: Any
-    ) -> str:
-        """Call the model.
-
-        Args:
-            message: The input message.
-            stop: Stop words to use when generating. Model output is cut off at the
-                first occurrence of any of these substrings.
-            **kwargs: Arbitrary additional keyword arguments. These are usually passed
-                to the model provider API call.
-
-        Returns:
-            The model output string.
-
-        """
-        return self.predict(message, stop=stop, **kwargs)
-
-    @deprecated("0.1.7", alternative="invoke", removal="1.0")
-    @override
-    def predict(
-        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
-    ) -> str:
-        """Predict the next message.
-
-        Args:
-            text: The input message.
-            stop: Stop words to use when generating. Model output is cut off at the
-                first occurrence of any of these substrings.
-            **kwargs: Arbitrary additional keyword arguments. These are usually passed
-                to the model provider API call.
-
-        Raises:
-            ValueError: If the output is not a string.
-
-        Returns:
-            The predicted output string.
-
-        """
-        stop_ = None if stop is None else list(stop)
-        result = self([HumanMessage(content=text)], stop=stop_, **kwargs)
-        if isinstance(result.content, str):
-            return result.content
-        msg = "Cannot use predict when output is not a string."
-        raise ValueError(msg)
-
-    @deprecated("0.1.7", alternative="invoke", removal="1.0")
-    @override
-    def predict_messages(
-        self,
-        messages: list[BaseMessage],
-        *,
-        stop: Optional[Sequence[str]] = None,
-        **kwargs: Any,
-    ) -> BaseMessage:
-        stop_ = None if stop is None else list(stop)
-        return self(messages, stop=stop_, **kwargs)
-
-    @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
-    @override
-    async def apredict(
-        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
-    ) -> str:
-        stop_ = None if stop is None else list(stop)
-        result = await self._call_async(
-            [HumanMessage(content=text)], stop=stop_, **kwargs
-        )
-        if isinstance(result.content, str):
-            return result.content
-        msg = "Cannot use predict when output is not a string."
-        raise ValueError(msg)
-
-    @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
-    @override
-    async def apredict_messages(
-        self,
-        messages: list[BaseMessage],
-        *,
-        stop: Optional[Sequence[str]] = None,
-        **kwargs: Any,
-    ) -> BaseMessage:
-        stop_ = None if stop is None else list(stop)
-        return await self._call_async(messages, stop=stop_, **kwargs)
-
     @property
     @abstractmethod
     def _llm_type(self) -> str:
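These two hunks remove the call paths deprecated since 0.1.7: `__call__`, `call_as_llm`, `predict`, `predict_messages`, and their async counterparts (which is also why the `HumanMessage` import disappeared at the top of the file). Migration is mechanical:

    from langchain_core.language_models import FakeListChatModel
    from langchain_core.messages import HumanMessage

    model = FakeListChatModel(responses=["four"])  # stand-in for a provider model

    # Removed: model([HumanMessage(content="2 + 2?")])
    # Removed: model.predict("2 + 2?")       # returned a str
    # Removed: model.call_as_llm("2 + 2?")   # returned a str
    message = model.invoke([HumanMessage(content="2 + 2?")])
    print(message.content)                   # four

    # Removed: await model.apredict("2 + 2?")
    # Async equivalent: message = await model.ainvoke("2 + 2?")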
@@ -1605,10 +1497,10 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     def bind_tools(
         self,
         tools: Sequence[
-            Union[typing.Dict[str, Any], type, Callable, BaseTool]  # noqa: UP006
+            typing.Dict[str, Any] | type | Callable | BaseTool  # noqa: UP006
         ],
         *,
-        tool_choice: Optional[str] = None,
+        tool_choice: str | None = None,
         **kwargs: Any,
     ) -> Runnable[LanguageModelInput, AIMessage]:
         """Bind tools to the model.
@@ -1625,11 +1517,11 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):

     def with_structured_output(
         self,
-        schema: Union[typing.Dict, type],  # noqa: UP006
+        schema: typing.Dict | type,  # noqa: UP006
         *,
         include_raw: bool = False,
         **kwargs: Any,
-    ) -> Runnable[LanguageModelInput, Union[typing.Dict, BaseModel]]:  # noqa: UP006
+    ) -> Runnable[LanguageModelInput, typing.Dict | BaseModel]:  # noqa: UP006
         """Model wrapper that returns outputs formatted to match the given schema.

         Args:
@@ -1643,7 +1535,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
             If ``schema`` is a Pydantic class then the model output will be a
             Pydantic instance of that class, and the model-generated fields will be
             validated by the Pydantic class. Otherwise the model output will be a
-            dict and will not be validated. See :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`
+            dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool`
             for more on how to properly specify types and descriptions of
             schema fields when specifying a Pydantic or TypedDict class.

@@ -1661,7 +1553,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
             ``with_structured_output()``.

         Returns:
-            A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.
+            A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`.

             If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs
             an instance of ``schema`` (i.e., a Pydantic object).
@@ -1752,8 +1644,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
             # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
             # }

-        .. versionchanged:: 0.2.26
-
+        !!! warning "Behavior changed in 0.2.26"
             Added support for TypedDict class.

         """ # noqa: E501
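Beyond the annotation changes, the `bind_tools`/`with_structured_output` hunks above mostly swap Sphinx cross-references and directives for MkDocs-style links and admonitions. For orientation, a usage sketch of `with_structured_output` (assumes `model` is a tool-calling chat model from a provider package; the base implementation raises without one):

    from pydantic import BaseModel

    class AnswerWithJustification(BaseModel):
        answer: str
        justification: str

    structured = model.with_structured_output(AnswerWithJustification)
    result = structured.invoke("Which weighs more, a pound of bricks or a pound of feathers?")
    print(result.justification)  # validated AnswerWithJustification instance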
@@ -1799,7 +1690,7 @@
 class SimpleChatModel(BaseChatModel):
     """Simplified implementation for a chat model to inherit from.

-    .. note::
+    !!! note
         This implementation is primarily here for backwards compatibility. For new
         implementations, please use ``BaseChatModel`` directly.

@@ -1808,8 +1699,8 @@ class SimpleChatModel(BaseChatModel):
     def _generate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs)
@@ -1821,8 +1712,8 @@ class SimpleChatModel(BaseChatModel):
     def _call(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> str:
         """Simpler interface."""
@@ -1830,8 +1721,8 @@ class SimpleChatModel(BaseChatModel):
     async def _agenerate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         return await run_in_executor(
@@ -1845,7 +1736,7 @@ class SimpleChatModel(BaseChatModel):


 def _gen_info_and_msg_metadata(
-    generation: Union[ChatGeneration, ChatGenerationChunk],
+    generation: ChatGeneration | ChatGenerationChunk,
 ) -> dict:
     return {
         **(generation.generation_info or {}),