langchain-core 0.3.75__py3-none-any.whl → 0.3.76__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/_api/beta_decorator.py +17 -40
- langchain_core/_api/deprecation.py +19 -6
- langchain_core/_api/path.py +19 -2
- langchain_core/_import_utils.py +7 -0
- langchain_core/agents.py +10 -6
- langchain_core/beta/runnables/context.py +1 -2
- langchain_core/callbacks/base.py +11 -4
- langchain_core/callbacks/manager.py +81 -69
- langchain_core/callbacks/usage.py +4 -2
- langchain_core/chat_history.py +4 -6
- langchain_core/document_loaders/base.py +34 -9
- langchain_core/document_loaders/langsmith.py +3 -0
- langchain_core/documents/base.py +35 -10
- langchain_core/documents/transformers.py +4 -2
- langchain_core/embeddings/fake.py +8 -5
- langchain_core/env.py +2 -3
- langchain_core/example_selectors/base.py +12 -0
- langchain_core/exceptions.py +7 -0
- langchain_core/globals.py +17 -28
- langchain_core/indexing/api.py +56 -44
- langchain_core/indexing/base.py +5 -8
- langchain_core/indexing/in_memory.py +23 -3
- langchain_core/language_models/__init__.py +3 -2
- langchain_core/language_models/base.py +31 -20
- langchain_core/language_models/chat_models.py +94 -25
- langchain_core/language_models/fake_chat_models.py +5 -7
- langchain_core/language_models/llms.py +49 -17
- langchain_core/load/dump.py +2 -3
- langchain_core/load/load.py +15 -1
- langchain_core/load/serializable.py +38 -43
- langchain_core/memory.py +7 -3
- langchain_core/messages/ai.py +36 -19
- langchain_core/messages/base.py +13 -6
- langchain_core/messages/content_blocks.py +23 -2
- langchain_core/messages/human.py +2 -6
- langchain_core/messages/system.py +2 -6
- langchain_core/messages/tool.py +33 -13
- langchain_core/messages/utils.py +182 -72
- langchain_core/output_parsers/base.py +5 -2
- langchain_core/output_parsers/json.py +4 -4
- langchain_core/output_parsers/list.py +7 -22
- langchain_core/output_parsers/openai_functions.py +3 -0
- langchain_core/output_parsers/openai_tools.py +6 -1
- langchain_core/output_parsers/pydantic.py +4 -0
- langchain_core/output_parsers/string.py +5 -1
- langchain_core/output_parsers/xml.py +19 -19
- langchain_core/outputs/chat_generation.py +18 -7
- langchain_core/outputs/generation.py +14 -3
- langchain_core/outputs/llm_result.py +8 -1
- langchain_core/prompt_values.py +10 -4
- langchain_core/prompts/base.py +4 -9
- langchain_core/prompts/chat.py +87 -58
- langchain_core/prompts/dict.py +16 -8
- langchain_core/prompts/few_shot.py +9 -11
- langchain_core/prompts/few_shot_with_templates.py +5 -1
- langchain_core/prompts/image.py +12 -5
- langchain_core/prompts/message.py +5 -6
- langchain_core/prompts/pipeline.py +13 -8
- langchain_core/prompts/prompt.py +22 -8
- langchain_core/prompts/string.py +18 -10
- langchain_core/prompts/structured.py +7 -2
- langchain_core/rate_limiters.py +2 -2
- langchain_core/retrievers.py +7 -6
- langchain_core/runnables/base.py +402 -183
- langchain_core/runnables/branch.py +14 -19
- langchain_core/runnables/config.py +9 -15
- langchain_core/runnables/configurable.py +34 -19
- langchain_core/runnables/fallbacks.py +20 -13
- langchain_core/runnables/graph.py +44 -37
- langchain_core/runnables/graph_ascii.py +40 -17
- langchain_core/runnables/graph_mermaid.py +27 -15
- langchain_core/runnables/graph_png.py +27 -31
- langchain_core/runnables/history.py +55 -58
- langchain_core/runnables/passthrough.py +44 -21
- langchain_core/runnables/retry.py +9 -5
- langchain_core/runnables/router.py +9 -8
- langchain_core/runnables/schema.py +2 -0
- langchain_core/runnables/utils.py +51 -89
- langchain_core/stores.py +13 -25
- langchain_core/sys_info.py +9 -8
- langchain_core/tools/base.py +30 -23
- langchain_core/tools/convert.py +24 -13
- langchain_core/tools/simple.py +35 -3
- langchain_core/tools/structured.py +25 -2
- langchain_core/tracers/base.py +2 -2
- langchain_core/tracers/context.py +5 -1
- langchain_core/tracers/core.py +109 -39
- langchain_core/tracers/evaluation.py +22 -26
- langchain_core/tracers/event_stream.py +40 -27
- langchain_core/tracers/langchain.py +12 -3
- langchain_core/tracers/langchain_v1.py +10 -2
- langchain_core/tracers/log_stream.py +56 -17
- langchain_core/tracers/root_listeners.py +4 -20
- langchain_core/tracers/run_collector.py +6 -16
- langchain_core/tracers/schemas.py +5 -1
- langchain_core/utils/aiter.py +14 -6
- langchain_core/utils/env.py +3 -0
- langchain_core/utils/function_calling.py +37 -20
- langchain_core/utils/interactive_env.py +6 -2
- langchain_core/utils/iter.py +11 -3
- langchain_core/utils/json.py +5 -2
- langchain_core/utils/json_schema.py +15 -5
- langchain_core/utils/loading.py +5 -1
- langchain_core/utils/mustache.py +24 -15
- langchain_core/utils/pydantic.py +32 -4
- langchain_core/utils/utils.py +24 -8
- langchain_core/vectorstores/base.py +7 -20
- langchain_core/vectorstores/in_memory.py +18 -12
- langchain_core/vectorstores/utils.py +18 -12
- langchain_core/version.py +1 -1
- langchain_core-0.3.76.dist-info/METADATA +77 -0
- langchain_core-0.3.76.dist-info/RECORD +174 -0
- langchain_core-0.3.75.dist-info/METADATA +0 -106
- langchain_core-0.3.75.dist-info/RECORD +0 -174
- {langchain_core-0.3.75.dist-info → langchain_core-0.3.76.dist-info}/WHEEL +0 -0
- {langchain_core-0.3.75.dist-info → langchain_core-0.3.76.dist-info}/entry_points.txt +0 -0
langchain_core/language_models/fake_chat_models.py CHANGED
@@ -75,12 +75,13 @@ class FakeListChatModel(SimpleChatModel):
     @override
     def _call(
         self,
-        messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        *args: Any,
         **kwargs: Any,
     ) -> str:
-        """
+        """Return the next response in the list.
+
+        Cycle back to the start if at the end.
+        """
         if self.sleep is not None:
             time.sleep(self.sleep)
         response = self.responses[self.i]
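The new `_call` docstring documents the cycling behavior. A minimal sketch of what that looks like in use (the in-core fake model needs no provider; `responses` is the only required field):

from langchain_core.language_models import FakeListChatModel

model = FakeListChatModel(responses=["first", "second"])

print(model.invoke("hi").content)  # "first"
print(model.invoke("hi").content)  # "second"
print(model.invoke("hi").content)  # cycles back to "first"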
@@ -239,7 +240,6 @@ class GenericFakeChatModel(BaseChatModel):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        """Top Level call."""
         message = next(self.messages)
         message_ = AIMessage(content=message) if isinstance(message, str) else message
         generation = ChatGeneration(message=message_)
@@ -252,7 +252,6 @@ class GenericFakeChatModel(BaseChatModel):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
-        """Stream the output of the model."""
         chat_result = self._generate(
             messages, stop=stop, run_manager=run_manager, **kwargs
         )
@@ -362,7 +361,6 @@ class ParrotFakeChatModel(BaseChatModel):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        """Top Level call."""
         return ChatResult(generations=[ChatGeneration(message=messages[-1])])

     @property
langchain_core/language_models/llms.py CHANGED
@@ -131,6 +131,7 @@ def create_base_retry_decorator(

 def _resolve_cache(*, cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
     """Resolve the cache."""
+    llm_cache: Optional[BaseCache]
     if isinstance(cache, BaseCache):
         llm_cache = cache
     elif cache is None:
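`_resolve_cache` is private, but the attribute it resolves is the public `cache` field on language models: a `BaseCache` instance is used directly, while `True`/`None` fall back to the global cache and `False` disables caching. A hedged sketch of wiring up that global cache via the public helper:

from langchain_core.caches import InMemoryCache
from langchain_core.globals import set_llm_cache

# Models with cache=True (or cache=None, when a global cache is set) will
# consult this in-memory cache; cache=False opts a model out entirely.
set_llm_cache(InMemoryCache())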
@@ -663,7 +664,18 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Run the LLM on the given prompts."""
+        """Run the LLM on the given prompts.
+
+        Args:
+            prompts: The prompts to generate from.
+            stop: Stop words to use when generating. Model output is cut off at the
+                first occurrence of any of the stop substrings.
+                If stop tokens are not supported consider raising NotImplementedError.
+            run_manager: Callback manager for the run.
+
+        Returns:
+            The LLM result.
+        """

     async def _agenerate(
         self,
@@ -672,7 +684,18 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Run the LLM on the given prompts."""
+        """Run the LLM on the given prompts.
+
+        Args:
+            prompts: The prompts to generate from.
+            stop: Stop words to use when generating. Model output is cut off at the
+                first occurrence of any of the stop substrings.
+                If stop tokens are not supported consider raising NotImplementedError.
+            run_manager: Callback manager for the run.
+
+        Returns:
+            The LLM result.
+        """
         return await run_in_executor(
             None,
             self._generate,
@@ -705,8 +728,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.

-        Returns:
-            An iterator of GenerationChunks.
+        Yields:
+            Generation chunks.
         """
         raise NotImplementedError

@@ -731,8 +754,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.

-        Returns:
-            An iterator of GenerationChunks.
+        Yields:
+            Generation chunks.
         """
         iterator = await run_in_executor(
             None,
@@ -830,10 +853,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             API.

         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).

         Args:
             prompts: List of string prompts.
@@ -853,6 +877,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.

+        Raises:
+            ValueError: If prompts is not a list.
+            ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
+                ``run_name`` (if provided) does not match the length of prompts.
+
         Returns:
             An LLMResult, which contains a list of candidate Generations for each input
                 prompt and additional model provider-specific output.
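Since the rewritten docstring stresses batched calls, here is a small illustration that runs without any provider, using the in-core fake LLM (it simply hands back canned responses in order):

from langchain_core.language_models import FakeListLLM

llm = FakeListLLM(responses=["alpha", "beta"])
result = llm.generate(["prompt one", "prompt two"])

# One list of candidate Generations per input prompt.
for generations in result.generations:
    print(generations[0].text)  # "alpha", then "beta"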
@@ -1090,10 +1119,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             API.

         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).

         Args:
             prompts: List of string prompts.
@@ -1113,6 +1143,10 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.

+        Raises:
+            ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
+                ``run_name`` (if provided) does not match the length of prompts.
+
         Returns:
             An LLMResult, which contains a list of candidate Generations for each input
                 prompt and additional model provider-specific output.
@@ -1388,7 +1422,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
         return AIMessage(content=content)

     def __str__(self) -> str:
-        """
+        """Return a string representation of the object for printing."""
         cls_name = f"\033[1m{self.__class__.__name__}\033[0m"
         return f"{cls_name}\nParams: {self._identifying_params}"

@@ -1536,7 +1570,6 @@ class LLM(BaseLLM):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Run the LLM on the given prompt and input."""
         # TODO: add caching here.
         generations = []
         new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
@@ -1556,7 +1589,6 @@ class LLM(BaseLLM):
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
-        """Async run the LLM on the given prompt and input."""
         generations = []
         new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
         for prompt in prompts:
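The `LLM._generate`/`_agenerate` wrappers above exist so that subclasses only implement `_call`. A minimal sketch of such a subclass (the `EchoLLM` name is illustrative, not part of the package):

from typing import Any, Optional

from langchain_core.language_models.llms import LLM


class EchoLLM(LLM):
    """Hypothetical model that echoes its prompt back."""

    def _call(
        self,
        prompt: str,
        stop: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> str:
        # The base class inspects this signature, so omitting run_manager is fine.
        return prompt

    @property
    def _llm_type(self) -> str:
        return "echo"


print(EchoLLM().invoke("hello"))  # -> "hello"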
langchain_core/load/dump.py CHANGED
@@ -6,6 +6,8 @@ from typing import Any
 from pydantic import BaseModel

 from langchain_core.load.serializable import Serializable, to_json_not_implemented
+from langchain_core.messages import AIMessage
+from langchain_core.outputs import ChatGeneration


 def default(obj: Any) -> Any:
@@ -23,9 +25,6 @@ def default(obj: Any) -> Any:


 def _dump_pydantic_models(obj: Any) -> Any:
-    from langchain_core.messages import AIMessage
-    from langchain_core.outputs import ChatGeneration
-
     if (
         isinstance(obj, ChatGeneration)
         and isinstance(obj.message, AIMessage)
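The change above only hoists the `AIMessage`/`ChatGeneration` imports to module level; serialization output is unchanged. For reference, a quick probe of the public `dumpd` entry point:

from langchain_core.load import dumpd
from langchain_core.messages import AIMessage

serialized = dumpd(AIMessage(content="hi"))
print(serialized["type"])  # "constructor"
print(serialized["id"])    # ["langchain", "schema", "messages", "AIMessage"]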
langchain_core/load/load.py CHANGED
@@ -95,7 +95,21 @@ class Reviver:
         self.ignore_unserializable_fields = ignore_unserializable_fields

     def __call__(self, value: dict[str, Any]) -> Any:
-        """Revive the value."""
+        """Revive the value.
+
+        Args:
+            value: The value to revive.
+
+        Returns:
+            The revived value.
+
+        Raises:
+            ValueError: If the namespace is invalid.
+            ValueError: If trying to deserialize something that cannot
+                be deserialized in the current version of langchain-core.
+            NotImplementedError: If the object is not implemented and
+                ``ignore_unserializable_fields`` is False.
+        """
         if (
             value.get("lc") == 1
             and value.get("type") == "secret"
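The documented Reviver paths back the public `loads` entry point. A round-trip sketch (note that `loads` is still marked beta and may warn):

from langchain_core.load import dumps, loads
from langchain_core.messages import HumanMessage

text = dumps(HumanMessage(content="hello"))
revived = loads(text)  # may emit a beta warning
assert revived == HumanMessage(content="hello")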
langchain_core/load/serializable.py CHANGED
@@ -20,53 +20,41 @@ logger = logging.getLogger(__name__)


 class BaseSerialized(TypedDict):
-    """Base class for serialized objects.
-
-    Parameters:
-        lc: The version of the serialization format.
-        id: The unique identifier of the object.
-        name: The name of the object. Optional.
-        graph: The graph of the object. Optional.
-    """
+    """Base class for serialized objects."""

     lc: int
+    """The version of the serialization format."""
     id: list[str]
+    """The unique identifier of the object."""
     name: NotRequired[str]
+    """The name of the object. Optional."""
     graph: NotRequired[dict[str, Any]]
+    """The graph of the object. Optional."""


 class SerializedConstructor(BaseSerialized):
-    """Serialized constructor.
-
-    Parameters:
-        type: The type of the object. Must be "constructor".
-        kwargs: The constructor arguments.
-    """
+    """Serialized constructor."""

     type: Literal["constructor"]
+    """The type of the object. Must be ``'constructor'``."""
     kwargs: dict[str, Any]
+    """The constructor arguments."""


 class SerializedSecret(BaseSerialized):
-    """Serialized secret.
-
-    Parameters:
-        type: The type of the object. Must be "secret".
-    """
+    """Serialized secret."""

     type: Literal["secret"]
+    """The type of the object. Must be ``'secret'``."""


 class SerializedNotImplemented(BaseSerialized):
-    """Serialized not implemented.
-
-    Parameters:
-        type: The type of the object. Must be "not_implemented".
-        repr: The representation of the object. Optional.
-    """
+    """Serialized not implemented."""

     type: Literal["not_implemented"]
+    """The type of the object. Must be ``'not_implemented'``."""
     repr: Optional[str]
+    """The representation of the object. Optional."""


 def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
@@ -79,9 +67,6 @@ def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:

     Returns:
         Whether the value is different from the default.
-
-    Raises:
-        Exception: If the key is not in the model.
     """
     field = type(model).model_fields[key]
     return _try_neq_default(value, field)
@@ -109,19 +94,19 @@ class Serializable(BaseModel, ABC):

     It relies on the following methods and properties:

-    - `is_lc_serializable`: Is this class serializable?
-        By design, even if a class inherits from Serializable, it is not serializable by
-        default. This is to prevent accidental serialization of objects that should not
-        be serialized.
-    - `get_lc_namespace`: Get the namespace of the langchain object.
-        During deserialization, this namespace is used to identify
-        the correct class to instantiate.
-        Please see the `Reviver` class in `langchain_core.load.load` for more details.
-        During deserialization an additional mapping is handle
-        classes that have moved or been renamed across package versions.
-    - `lc_secrets`: A map of constructor argument names to secret ids.
-    - `lc_attributes`: List of additional attribute names that should be included
-        as part of the serialized representation.
+    - ``is_lc_serializable``: Is this class serializable?
+      By design, even if a class inherits from Serializable, it is not serializable by
+      default. This is to prevent accidental serialization of objects that should not
+      be serialized.
+    - ``get_lc_namespace``: Get the namespace of the langchain object.
+      During deserialization, this namespace is used to identify
+      the correct class to instantiate.
+      Please see the ``Reviver`` class in ``langchain_core.load.load`` for more details.
+      During deserialization an additional mapping is handle
+      classes that have moved or been renamed across package versions.
+    - ``lc_secrets``: A map of constructor argument names to secret ids.
+    - ``lc_attributes``: List of additional attribute names that should be included
+      as part of the serialized representation.
     """

     # Remove default BaseModel init docstring.
@@ -148,6 +133,9 @@ class Serializable(BaseModel, ABC):

         For example, if the class is `langchain.llms.openai.OpenAI`, then the
         namespace is ["langchain", "llms", "openai"]
+
+        Returns:
+            The namespace as a list of strings.
         """
         return cls.__module__.split(".")

@@ -171,7 +159,7 @@ class Serializable(BaseModel, ABC):

     @classmethod
     def lc_id(cls) -> list[str]:
-        """
+        """Return a unique identifier for this class for serialization purposes.

         The unique identifier is a list of strings that describes the path
         to the object.
@@ -203,6 +191,9 @@ class Serializable(BaseModel, ABC):
     def to_json(self) -> Union[SerializedConstructor, SerializedNotImplemented]:
         """Serialize the object to JSON.

+        Raises:
+            ValueError: If the class has deprecated attributes.
+
         Returns:
             A json serializable object or a SerializedNotImplemented object.
         """
@@ -276,7 +267,11 @@ class Serializable(BaseModel, ABC):
         }

     def to_json_not_implemented(self) -> SerializedNotImplemented:
-        """Serialize a "not implemented" object."""
+        """Serialize a "not implemented" object.
+
+        Returns:
+            SerializedNotImplemented.
+        """
         return to_json_not_implemented(self)
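A compact sketch of the contract the reworked `Serializable` docstring describes: subclasses opt in via `is_lc_serializable`, after which `to_json()` produces a constructor payload (the `MyThing` class is illustrative, not part of the package):

from langchain_core.load.serializable import Serializable


class MyThing(Serializable):
    """Hypothetical serializable object."""

    value: int = 0

    @classmethod
    def is_lc_serializable(cls) -> bool:
        # Opt in explicitly; inheriting from Serializable alone is not enough.
        return True


print(MyThing(value=1).to_json()["type"])  # "constructor"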
langchain_core/memory.py CHANGED
@@ -45,16 +45,20 @@ class BaseMemory(Serializable, ABC):
             def memory_variables(self) -> list[str]:
                 return list(self.memories.keys())

-            def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
+            def load_memory_variables(
+                self, inputs: dict[str, Any]
+            ) -> dict[str, str]:
                 return self.memories

-            def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
+            def save_context(
+                self, inputs: dict[str, Any], outputs: dict[str, str]
+            ) -> None:
                 pass

             def clear(self) -> None:
                 pass

-    """
+    """

     model_config = ConfigDict(
         arbitrary_types_allowed=True,
langchain_core/messages/ai.py CHANGED
@@ -124,7 +124,7 @@ class UsageMetadata(TypedDict):
                 "output_token_details": {
                     "audio": 10,
                     "reasoning": 200,
-                }
+                },
             }

     .. versionchanged:: 0.3.9
@@ -310,14 +310,8 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
     def init_tool_calls(self) -> Self:
         """Initialize tool calls from tool call chunks.

-        Args:
-            values: The values to validate.
-
         Returns:
-            The values with tool calls initialized.
-
-        Raises:
-            ValueError: If the tool call chunks are malformed.
+            This ``AIMessageChunk``.
         """
         if not self.tool_call_chunks:
             if self.tool_calls:
@@ -358,10 +352,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):

         for chunk in self.tool_call_chunks:
             try:
-                if chunk["args"]:
-                    args_ = parse_partial_json(chunk["args"])
-                else:
-                    args_ = {}
+                args_ = parse_partial_json(chunk["args"]) if chunk["args"] else {}
                 if isinstance(args_, dict):
                     tool_calls.append(
                         create_tool_call(
@@ -392,7 +383,19 @@
 def add_ai_message_chunks(
     left: AIMessageChunk, *others: AIMessageChunk
 ) -> AIMessageChunk:
-    """Add multiple
+    """Add multiple ``AIMessageChunk``s together.
+
+    Args:
+        left: The first ``AIMessageChunk``.
+        *others: Other ``AIMessageChunk``s to add.
+
+    Raises:
+        ValueError: If the example values of the chunks are not the same.
+
+    Returns:
+        The resulting ``AIMessageChunk``.
+
+    """
     if any(left.example != o.example for o in others):
         msg = "Cannot concatenate AIMessageChunks with different example values."
         raise ValueError(msg)
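`add_ai_message_chunks` is what backs the `+` operator on streamed chunks; a one-line sanity check:

from langchain_core.messages import AIMessageChunk

merged = AIMessageChunk(content="Hello, ") + AIMessageChunk(content="world")
print(merged.content)  # "Hello, world"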
@@ -468,13 +471,13 @@ def add_usage(
             input_tokens=5,
             output_tokens=0,
             total_tokens=5,
-            input_token_details=InputTokenDetails(cache_read=3)
+            input_token_details=InputTokenDetails(cache_read=3),
         )
         right = UsageMetadata(
             input_tokens=0,
             output_tokens=10,
             total_tokens=10,
-            output_token_details=OutputTokenDetails(reasoning=4)
+            output_token_details=OutputTokenDetails(reasoning=4),
         )

         add_usage(left, right)
@@ -488,9 +491,16 @@ def add_usage(
             output_tokens=10,
             total_tokens=15,
             input_token_details=InputTokenDetails(cache_read=3),
-            output_token_details=OutputTokenDetails(reasoning=4)
+            output_token_details=OutputTokenDetails(reasoning=4),
         )

+    Args:
+        left: The first ``UsageMetadata`` object.
+        right: The second ``UsageMetadata`` object.
+
+    Returns:
+        The sum of the two ``UsageMetadata`` objects.
+
     """
     if not (left or right):
         return UsageMetadata(input_tokens=0, output_tokens=0, total_tokens=0)
@@ -525,13 +535,13 @@ def subtract_usage(
             input_tokens=5,
             output_tokens=10,
             total_tokens=15,
-            input_token_details=InputTokenDetails(cache_read=4)
+            input_token_details=InputTokenDetails(cache_read=4),
         )
         right = UsageMetadata(
             input_tokens=3,
             output_tokens=8,
             total_tokens=11,
-            output_token_details=OutputTokenDetails(reasoning=4)
+            output_token_details=OutputTokenDetails(reasoning=4),
         )

         subtract_usage(left, right)
@@ -545,9 +555,16 @@ def subtract_usage(
             output_tokens=2,
             total_tokens=4,
             input_token_details=InputTokenDetails(cache_read=4),
-            output_token_details=OutputTokenDetails(reasoning=0)
+            output_token_details=OutputTokenDetails(reasoning=0),
         )

+    Args:
+        left: The first ``UsageMetadata`` object.
+        right: The second ``UsageMetadata`` object.
+
+    Returns:
+        The resulting ``UsageMetadata`` after subtraction.
+
     """
     if not (left or right):
         return UsageMetadata(input_tokens=0, output_tokens=0, total_tokens=0)
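The new Args/Returns sections describe plain field-wise arithmetic; a quick check that mirrors the docstring examples:

from langchain_core.messages.ai import UsageMetadata, add_usage, subtract_usage

left = UsageMetadata(input_tokens=5, output_tokens=0, total_tokens=5)
right = UsageMetadata(input_tokens=0, output_tokens=10, total_tokens=10)

total = add_usage(left, right)
print(total["total_tokens"])  # 15
print(subtract_usage(total, right)["total_tokens"])  # 5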
langchain_core/messages/base.py CHANGED
@@ -84,7 +84,8 @@ class BaseMessage(Serializable):
     def get_lc_namespace(cls) -> list[str]:
         """Get the namespace of the langchain object.

-        Default is ["langchain", "schema", "messages"].
+        Returns:
+            ``["langchain", "schema", "messages"]``
         """
         return ["langchain", "schema", "messages"]

@@ -109,8 +110,16 @@ class BaseMessage(Serializable):
         )

     def __add__(self, other: Any) -> ChatPromptTemplate:
-        """Concatenate this message with another message."""
-        from langchain_core.prompts.chat import ChatPromptTemplate
+        """Concatenate this message with another message.
+
+        Args:
+            other: Another message to concatenate with this one.
+
+        Returns:
+            A ChatPromptTemplate containing both messages.
+        """
+        # Import locally to prevent circular imports.
+        from langchain_core.prompts.chat import ChatPromptTemplate  # noqa: PLC0415

         prompt = ChatPromptTemplate(messages=[self])
         return prompt + other
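The expanded `__add__` docstring above in practice: adding two messages yields a `ChatPromptTemplate`:

from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.prompts.chat import ChatPromptTemplate

prompt = SystemMessage(content="You are terse.") + HumanMessage(content="Hi!")
assert isinstance(prompt, ChatPromptTemplate)
print(len(prompt.messages))  # 2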
@@ -171,9 +180,7 @@ def merge_content(
     elif merged and isinstance(merged[-1], str):
         merged[-1] += content
     # If second content is an empty string, treat as a no-op
-    elif content == "":
-        pass
-    else:
+    elif content:
         # Otherwise, add the second content as a new element of the list
         merged.append(content)
     return merged
langchain_core/messages/content_blocks.py CHANGED
@@ -88,7 +88,18 @@ def is_data_content_block(


 def convert_to_openai_image_block(content_block: dict[str, Any]) -> dict:
-    """Convert image content block to format expected by OpenAI Chat Completions API."""
+    """Convert image content block to format expected by OpenAI Chat Completions API.
+
+    Args:
+        content_block: The content block to convert.
+
+    Raises:
+        ValueError: If the source type is not supported or if ``mime_type`` is missing
+            for base64 data.
+
+    Returns:
+        A dictionary formatted for OpenAI's API.
+    """
     if content_block["source_type"] == "url":
         return {
             "type": "image_url",
@@ -112,7 +123,17 @@ def convert_to_openai_image_block(content_block: dict[str, Any]) -> dict:


 def convert_to_openai_data_block(block: dict) -> dict:
-    """Format standard data content block to format expected by OpenAI."""
+    """Format standard data content block to format expected by OpenAI.
+
+    Args:
+        block: A data content block.
+
+    Raises:
+        ValueError: If the block type or source type is not supported.
+
+    Returns:
+        A dictionary formatted for OpenAI's API.
+    """
     if block["type"] == "image":
         formatted_block = convert_to_openai_image_block(block)

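The documented url branch of `convert_to_openai_image_block`, end to end (module path inferred from the file list above; the block shape follows the function's own docstring):

from langchain_core.messages.content_blocks import convert_to_openai_image_block

block = {"type": "image", "source_type": "url", "url": "https://example.com/cat.png"}
print(convert_to_openai_image_block(block))
# {'type': 'image_url', 'image_url': {'url': 'https://example.com/cat.png'}}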
langchain_core/messages/human.py CHANGED
@@ -17,12 +17,8 @@ class HumanMessage(BaseMessage):
         from langchain_core.messages import HumanMessage, SystemMessage

         messages = [
-            SystemMessage(
-                content="You are a helpful assistant! Your name is Bob."
-            ),
-            HumanMessage(
-                content="What is your name?"
-            )
+            SystemMessage(content="You are a helpful assistant! Your name is Bob."),
+            HumanMessage(content="What is your name?"),
         ]

         # Instantiate a chat model and invoke it with the messages
langchain_core/messages/system.py CHANGED
@@ -18,12 +18,8 @@ class SystemMessage(BaseMessage):
         from langchain_core.messages import HumanMessage, SystemMessage

         messages = [
-            SystemMessage(
-                content="You are a helpful assistant! Your name is Bob."
-            ),
-            HumanMessage(
-                content="What is your name?"
-            )
+            SystemMessage(content="You are a helpful assistant! Your name is Bob."),
+            HumanMessage(content="What is your name?"),
         ]

         # Define a chat model and invoke it with the messages
|