langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/__init__.py +1 -1
- langchain_core/_api/__init__.py +3 -4
- langchain_core/_api/beta_decorator.py +45 -70
- langchain_core/_api/deprecation.py +80 -80
- langchain_core/_api/path.py +22 -8
- langchain_core/_import_utils.py +10 -4
- langchain_core/agents.py +25 -21
- langchain_core/caches.py +53 -63
- langchain_core/callbacks/__init__.py +1 -8
- langchain_core/callbacks/base.py +341 -348
- langchain_core/callbacks/file.py +55 -44
- langchain_core/callbacks/manager.py +546 -683
- langchain_core/callbacks/stdout.py +29 -30
- langchain_core/callbacks/streaming_stdout.py +35 -36
- langchain_core/callbacks/usage.py +65 -70
- langchain_core/chat_history.py +48 -55
- langchain_core/document_loaders/base.py +46 -21
- langchain_core/document_loaders/langsmith.py +39 -36
- langchain_core/documents/__init__.py +0 -1
- langchain_core/documents/base.py +96 -74
- langchain_core/documents/compressor.py +12 -9
- langchain_core/documents/transformers.py +29 -28
- langchain_core/embeddings/fake.py +56 -57
- langchain_core/env.py +2 -3
- langchain_core/example_selectors/base.py +12 -0
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/example_selectors/semantic_similarity.py +21 -25
- langchain_core/exceptions.py +15 -9
- langchain_core/globals.py +4 -163
- langchain_core/indexing/api.py +132 -125
- langchain_core/indexing/base.py +64 -67
- langchain_core/indexing/in_memory.py +26 -6
- langchain_core/language_models/__init__.py +15 -27
- langchain_core/language_models/_utils.py +267 -117
- langchain_core/language_models/base.py +92 -177
- langchain_core/language_models/chat_models.py +547 -407
- langchain_core/language_models/fake.py +11 -11
- langchain_core/language_models/fake_chat_models.py +72 -118
- langchain_core/language_models/llms.py +168 -242
- langchain_core/load/dump.py +8 -11
- langchain_core/load/load.py +32 -28
- langchain_core/load/mapping.py +2 -4
- langchain_core/load/serializable.py +50 -56
- langchain_core/messages/__init__.py +36 -51
- langchain_core/messages/ai.py +377 -150
- langchain_core/messages/base.py +239 -47
- langchain_core/messages/block_translators/__init__.py +111 -0
- langchain_core/messages/block_translators/anthropic.py +470 -0
- langchain_core/messages/block_translators/bedrock.py +94 -0
- langchain_core/messages/block_translators/bedrock_converse.py +297 -0
- langchain_core/messages/block_translators/google_genai.py +530 -0
- langchain_core/messages/block_translators/google_vertexai.py +21 -0
- langchain_core/messages/block_translators/groq.py +143 -0
- langchain_core/messages/block_translators/langchain_v0.py +301 -0
- langchain_core/messages/block_translators/openai.py +1010 -0
- langchain_core/messages/chat.py +2 -3
- langchain_core/messages/content.py +1423 -0
- langchain_core/messages/function.py +7 -7
- langchain_core/messages/human.py +44 -38
- langchain_core/messages/modifier.py +3 -2
- langchain_core/messages/system.py +40 -27
- langchain_core/messages/tool.py +160 -58
- langchain_core/messages/utils.py +527 -638
- langchain_core/output_parsers/__init__.py +1 -14
- langchain_core/output_parsers/base.py +68 -104
- langchain_core/output_parsers/json.py +13 -17
- langchain_core/output_parsers/list.py +11 -33
- langchain_core/output_parsers/openai_functions.py +56 -74
- langchain_core/output_parsers/openai_tools.py +68 -109
- langchain_core/output_parsers/pydantic.py +15 -13
- langchain_core/output_parsers/string.py +6 -2
- langchain_core/output_parsers/transform.py +17 -60
- langchain_core/output_parsers/xml.py +34 -44
- langchain_core/outputs/__init__.py +1 -1
- langchain_core/outputs/chat_generation.py +26 -11
- langchain_core/outputs/chat_result.py +1 -3
- langchain_core/outputs/generation.py +17 -6
- langchain_core/outputs/llm_result.py +15 -8
- langchain_core/prompt_values.py +29 -123
- langchain_core/prompts/__init__.py +3 -27
- langchain_core/prompts/base.py +48 -63
- langchain_core/prompts/chat.py +259 -288
- langchain_core/prompts/dict.py +19 -11
- langchain_core/prompts/few_shot.py +84 -90
- langchain_core/prompts/few_shot_with_templates.py +14 -12
- langchain_core/prompts/image.py +19 -14
- langchain_core/prompts/loading.py +6 -8
- langchain_core/prompts/message.py +7 -8
- langchain_core/prompts/prompt.py +42 -43
- langchain_core/prompts/string.py +37 -16
- langchain_core/prompts/structured.py +43 -46
- langchain_core/rate_limiters.py +51 -60
- langchain_core/retrievers.py +52 -192
- langchain_core/runnables/base.py +1727 -1683
- langchain_core/runnables/branch.py +52 -73
- langchain_core/runnables/config.py +89 -103
- langchain_core/runnables/configurable.py +128 -130
- langchain_core/runnables/fallbacks.py +93 -82
- langchain_core/runnables/graph.py +127 -127
- langchain_core/runnables/graph_ascii.py +63 -41
- langchain_core/runnables/graph_mermaid.py +87 -70
- langchain_core/runnables/graph_png.py +31 -36
- langchain_core/runnables/history.py +145 -161
- langchain_core/runnables/passthrough.py +141 -144
- langchain_core/runnables/retry.py +84 -68
- langchain_core/runnables/router.py +33 -37
- langchain_core/runnables/schema.py +79 -72
- langchain_core/runnables/utils.py +95 -139
- langchain_core/stores.py +85 -131
- langchain_core/structured_query.py +11 -15
- langchain_core/sys_info.py +31 -32
- langchain_core/tools/__init__.py +1 -14
- langchain_core/tools/base.py +221 -247
- langchain_core/tools/convert.py +144 -161
- langchain_core/tools/render.py +10 -10
- langchain_core/tools/retriever.py +12 -19
- langchain_core/tools/simple.py +52 -29
- langchain_core/tools/structured.py +56 -60
- langchain_core/tracers/__init__.py +1 -9
- langchain_core/tracers/_streaming.py +6 -7
- langchain_core/tracers/base.py +103 -112
- langchain_core/tracers/context.py +29 -48
- langchain_core/tracers/core.py +142 -105
- langchain_core/tracers/evaluation.py +30 -34
- langchain_core/tracers/event_stream.py +162 -117
- langchain_core/tracers/langchain.py +34 -36
- langchain_core/tracers/log_stream.py +87 -49
- langchain_core/tracers/memory_stream.py +3 -3
- langchain_core/tracers/root_listeners.py +18 -34
- langchain_core/tracers/run_collector.py +8 -20
- langchain_core/tracers/schemas.py +0 -125
- langchain_core/tracers/stdout.py +3 -3
- langchain_core/utils/__init__.py +1 -4
- langchain_core/utils/_merge.py +47 -9
- langchain_core/utils/aiter.py +70 -66
- langchain_core/utils/env.py +12 -9
- langchain_core/utils/function_calling.py +139 -206
- langchain_core/utils/html.py +7 -8
- langchain_core/utils/input.py +6 -6
- langchain_core/utils/interactive_env.py +6 -2
- langchain_core/utils/iter.py +48 -45
- langchain_core/utils/json.py +14 -4
- langchain_core/utils/json_schema.py +159 -43
- langchain_core/utils/mustache.py +32 -25
- langchain_core/utils/pydantic.py +67 -40
- langchain_core/utils/strings.py +5 -5
- langchain_core/utils/usage.py +1 -1
- langchain_core/utils/utils.py +104 -62
- langchain_core/vectorstores/base.py +131 -179
- langchain_core/vectorstores/in_memory.py +113 -182
- langchain_core/vectorstores/utils.py +23 -17
- langchain_core/version.py +1 -1
- langchain_core-1.0.0.dist-info/METADATA +68 -0
- langchain_core-1.0.0.dist-info/RECORD +172 -0
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
- langchain_core/beta/__init__.py +0 -1
- langchain_core/beta/runnables/__init__.py +0 -1
- langchain_core/beta/runnables/context.py +0 -448
- langchain_core/memory.py +0 -116
- langchain_core/messages/content_blocks.py +0 -1435
- langchain_core/prompts/pipeline.py +0 -133
- langchain_core/pydantic_v1/__init__.py +0 -30
- langchain_core/pydantic_v1/dataclasses.py +0 -23
- langchain_core/pydantic_v1/main.py +0 -23
- langchain_core/tracers/langchain_v1.py +0 -23
- langchain_core/utils/loading.py +0 -31
- langchain_core/v1/__init__.py +0 -1
- langchain_core/v1/chat_models.py +0 -1047
- langchain_core/v1/messages.py +0 -755
- langchain_core-0.4.0.dev0.dist-info/METADATA +0 -108
- langchain_core-0.4.0.dev0.dist-info/RECORD +0 -177
- langchain_core-0.4.0.dev0.dist-info/entry_points.txt +0 -4
langchain_core/callbacks/stdout.py
CHANGED

@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any

 from typing_extensions import override

@@ -16,11 +16,11 @@ if TYPE_CHECKING:
 class StdOutCallbackHandler(BaseCallbackHandler):
     """Callback Handler that prints to std out."""

-    def __init__(self, color: Optional[str] = None) -> None:
+    def __init__(self, color: str | None = None) -> None:
         """Initialize callback handler.

         Args:
-            color: The color to use for the text. …
+            color: The color to use for the text.
         """
         self.color = color

@@ -31,9 +31,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):
         """Print out that we are entering a chain.

         Args:
-            serialized …
-            inputs …
-            **kwargs …
+            serialized: The serialized chain.
+            inputs: The inputs to the chain.
+            **kwargs: Additional keyword arguments.
         """
         if "name" in kwargs:
             name = kwargs["name"]
@@ -48,21 +48,21 @@ class StdOutCallbackHandler(BaseCallbackHandler):
         """Print out that we finished a chain.

         Args:
-            outputs …
-            **kwargs …
+            outputs: The outputs of the chain.
+            **kwargs: Additional keyword arguments.
         """
         print("\n\033[1m> Finished chain.\033[0m")  # noqa: T201

     @override
     def on_agent_action(
-        self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
+        self, action: AgentAction, color: str | None = None, **kwargs: Any
     ) -> Any:
         """Run on agent action.

         Args:
-            action …
-            color …
-            **kwargs …
+            action: The agent action.
+            color: The color to use for the text.
+            **kwargs: Additional keyword arguments.
         """
         print_text(action.log, color=color or self.color)

@@ -70,20 +70,19 @@ class StdOutCallbackHandler(BaseCallbackHandler):
     def on_tool_end(
         self,
         output: Any,
-        color: Optional[str] = None,
-        observation_prefix: Optional[str] = None,
-        llm_prefix: Optional[str] = None,
+        color: str | None = None,
+        observation_prefix: str | None = None,
+        llm_prefix: str | None = None,
         **kwargs: Any,
     ) -> None:
         """If not the final action, print out observation.

         Args:
-            output …
-            color …
-            observation_prefix …
-            …
-            …
-            **kwargs (Any): Additional keyword arguments.
+            output: The output to print.
+            color: The color to use for the text.
+            observation_prefix: The observation prefix.
+            llm_prefix: The LLM prefix.
+            **kwargs: Additional keyword arguments.
         """
         output = str(output)
         if observation_prefix is not None:
@@ -96,29 +95,29 @@ class StdOutCallbackHandler(BaseCallbackHandler):
     def on_text(
         self,
         text: str,
-        color: Optional[str] = None,
+        color: str | None = None,
         end: str = "",
         **kwargs: Any,
     ) -> None:
         """Run when the agent ends.

         Args:
-            text …
-            color …
-            end …
-            **kwargs …
+            text: The text to print.
+            color: The color to use for the text.
+            end: The end character to use.
+            **kwargs: Additional keyword arguments.
         """
         print_text(text, color=color or self.color, end=end)

     @override
     def on_agent_finish(
-        self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
+        self, finish: AgentFinish, color: str | None = None, **kwargs: Any
     ) -> None:
         """Run on the agent end.

         Args:
-            finish …
-            color …
-            **kwargs …
+            finish: The agent finish.
+            color: The color to use for the text.
+            **kwargs: Additional keyword arguments.
         """
         print_text(finish.log, color=color or self.color, end="\n")
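The net effect on callers is small but visible to type checkers: `color` is now annotated with a PEP 604 union (`str | None`) instead of `Optional[str]`. A minimal usage sketch against the new signatures shown above (assumes langchain-core 1.0.0 is installed; the import path is taken from the file list):

```python
# Sketch against the 1.0.0 signatures shown in this diff; assumes
# langchain-core 1.0.0 is installed.
from langchain_core.callbacks.stdout import StdOutCallbackHandler

handler = StdOutCallbackHandler(color="green")  # color: str | None = None
handler.on_chain_end(outputs={"result": "done"})  # prints "> Finished chain."
```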
langchain_core/callbacks/streaming_stdout.py
CHANGED

@@ -3,7 +3,7 @@
 from __future__ import annotations

 import sys
-from typing import TYPE_CHECKING, Any, …
+from typing import TYPE_CHECKING, Any

 from typing_extensions import override

@@ -13,7 +13,6 @@ if TYPE_CHECKING:
     from langchain_core.agents import AgentAction, AgentFinish
     from langchain_core.messages import BaseMessage
     from langchain_core.outputs import LLMResult
-    from langchain_core.v1.messages import AIMessage, MessageV1


 class StreamingStdOutCallbackHandler(BaseCallbackHandler):
@@ -25,23 +24,23 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
         """Run when LLM starts running.

         Args:
-            serialized …
-            prompts …
-            **kwargs …
+            serialized: The serialized LLM.
+            prompts: The prompts to run.
+            **kwargs: Additional keyword arguments.
         """

     def on_chat_model_start(
         self,
         serialized: dict[str, Any],
-        messages: …
+        messages: list[list[BaseMessage]],
         **kwargs: Any,
     ) -> None:
         """Run when LLM starts running.

         Args:
-            serialized …
-            messages …
-            **kwargs …
+            serialized: The serialized LLM.
+            messages: The messages to run.
+            **kwargs: Additional keyword arguments.
         """

     @override
@@ -49,26 +48,26 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
         """Run on new LLM token. Only available when streaming is enabled.

         Args:
-            token …
-            **kwargs …
+            token: The new token.
+            **kwargs: Additional keyword arguments.
         """
         sys.stdout.write(token)
         sys.stdout.flush()

-    def on_llm_end(self, response: …
+    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
         """Run when LLM ends running.

         Args:
-            response …
-            **kwargs …
+            response: The response from the LLM.
+            **kwargs: Additional keyword arguments.
         """

     def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
         """Run when LLM errors.

         Args:
-            error …
-            **kwargs …
+            error: The error that occurred.
+            **kwargs: Additional keyword arguments.
         """

     def on_chain_start(
@@ -77,25 +76,25 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
         """Run when a chain starts running.

         Args:
-            serialized …
-            inputs …
-            **kwargs …
+            serialized: The serialized chain.
+            inputs: The inputs to the chain.
+            **kwargs: Additional keyword arguments.
         """

     def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
         """Run when a chain ends running.

         Args:
-            outputs …
-            **kwargs …
+            outputs: The outputs of the chain.
+            **kwargs: Additional keyword arguments.
         """

     def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
         """Run when chain errors.

         Args:
-            error …
-            **kwargs …
+            error: The error that occurred.
+            **kwargs: Additional keyword arguments.
         """

     def on_tool_start(
@@ -104,47 +103,47 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
         """Run when the tool starts running.

         Args:
-            serialized …
-            input_str …
-            **kwargs …
+            serialized: The serialized tool.
+            input_str: The input string.
+            **kwargs: Additional keyword arguments.
         """

     def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
         """Run on agent action.

         Args:
-            action …
-            **kwargs …
+            action: The agent action.
+            **kwargs: Additional keyword arguments.
         """

     def on_tool_end(self, output: Any, **kwargs: Any) -> None:
         """Run when tool ends running.

         Args:
-            output …
-            **kwargs …
+            output: The output of the tool.
+            **kwargs: Additional keyword arguments.
         """

     def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
         """Run when tool errors.

         Args:
-            error …
-            **kwargs …
+            error: The error that occurred.
+            **kwargs: Additional keyword arguments.
         """

     def on_text(self, text: str, **kwargs: Any) -> None:
         """Run on an arbitrary text.

         Args:
-            text …
-            **kwargs …
+            text: The text to print.
+            **kwargs: Additional keyword arguments.
         """

     def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
         """Run on the agent end.

         Args:
-            finish …
-            **kwargs …
+            finish: The agent finish.
+            **kwargs: Additional keyword arguments.
         """
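The streaming handler's behavior is unchanged by the retyping: `on_llm_new_token` still writes each token straight to stdout and flushes immediately. A minimal sketch of that path (assumes langchain-core 1.0.0 is installed):

```python
# Drives on_llm_new_token directly to show the write-and-flush behavior
# from the diff above; assumes langchain-core 1.0.0 is installed.
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

handler = StreamingStdOutCallbackHandler()
for token in ["Hello", ", ", "world", "!", "\n"]:
    handler.on_llm_new_token(token)  # each token appears immediately
```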
langchain_core/callbacks/usage.py
CHANGED

@@ -4,48 +4,46 @@ import threading
 from collections.abc import Generator
 from contextlib import contextmanager
 from contextvars import ContextVar
-from typing import Any, Optional, Union
+from typing import Any

 from typing_extensions import override

 from langchain_core.callbacks import BaseCallbackHandler
 from langchain_core.messages import AIMessage
 from langchain_core.messages.ai import UsageMetadata, add_usage
-from langchain_core.messages.utils import convert_from_v1_message
 from langchain_core.outputs import ChatGeneration, LLMResult
-from langchain_core.…
+from langchain_core.tracers.context import register_configure_hook


 class UsageMetadataCallbackHandler(BaseCallbackHandler):
     """Callback Handler that tracks AIMessage.usage_metadata.

     Example:
-        [… old reST-formatted example, elided]
-        .. versionadded:: 0.3.49
+        ```python
+        from langchain.chat_models import init_chat_model
+        from langchain_core.callbacks import UsageMetadataCallbackHandler
+
+        llm_1 = init_chat_model(model="openai:gpt-4o-mini")
+        llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")
+
+        callback = UsageMetadataCallbackHandler()
+        result_1 = llm_1.invoke("Hello", config={"callbacks": [callback]})
+        result_2 = llm_2.invoke("Hello", config={"callbacks": [callback]})
+        callback.usage_metadata
+        ```
+        ```txt
+        {'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
+          'output_tokens': 10,
+          'total_tokens': 18,
+          'input_token_details': {'audio': 0, 'cache_read': 0},
+          'output_token_details': {'audio': 0, 'reasoning': 0}},
+         'claude-3-5-haiku-20241022': {'input_tokens': 8,
+          'output_tokens': 21,
+          'total_tokens': 29,
+          'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
+        ```
+
+    !!! version-added "Added in version 0.3.49"

     """

@@ -60,17 +58,9 @@ class UsageMetadataCallbackHandler(BaseCallbackHandler):
         return str(self.usage_metadata)

     @override
-    def on_llm_end(
-        self, response: Union[LLMResult, AIMessageV1], **kwargs: Any
-    ) -> None:
+    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
         """Collect token usage."""
         # Check for usage_metadata (langchain-core >= 0.2.2)
-        if isinstance(response, AIMessageV1):
-            response = LLMResult(
-                generations=[
-                    [ChatGeneration(message=convert_from_v1_message(response))]
-                ]
-            )
         try:
             generation = response.generations[0][0]
         except IndexError:
@@ -105,44 +95,49 @@ def get_usage_metadata_callback(
     """Get usage metadata callback.

     Get context manager for tracking usage metadata across chat model calls using
-    …
+    `AIMessage.usage_metadata`.

     Args:
-        name …
-            ``'usage_metadata_callback'``.
-
-    Example:
-        .. code-block:: python
-
-            from langchain.chat_models import init_chat_model
-            from langchain_core.callbacks import get_usage_metadata_callback
-
-            llm_1 = init_chat_model(model="openai:gpt-4o-mini")
-            llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")
-
-            with get_usage_metadata_callback() as cb:
-                llm_1.invoke("Hello")
-                llm_2.invoke("Hello")
-                print(cb.usage_metadata)
+        name: The name of the context variable.

-    …
+    Yields:
+        The usage metadata callback.

-        [… old reST-formatted example output, elided]
+    Example:
+        ```python
+        from langchain.chat_models import init_chat_model
+        from langchain_core.callbacks import get_usage_metadata_callback
+
+        llm_1 = init_chat_model(model="openai:gpt-4o-mini")
+        llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")
+
+        with get_usage_metadata_callback() as cb:
+            llm_1.invoke("Hello")
+            llm_2.invoke("Hello")
+            print(cb.usage_metadata)
+        ```
+        ```txt
+        {
+            "gpt-4o-mini-2024-07-18": {
+                "input_tokens": 8,
+                "output_tokens": 10,
+                "total_tokens": 18,
+                "input_token_details": {"audio": 0, "cache_read": 0},
+                "output_token_details": {"audio": 0, "reasoning": 0},
+            },
+            "claude-3-5-haiku-20241022": {
+                "input_tokens": 8,
+                "output_tokens": 21,
+                "total_tokens": 29,
+                "input_token_details": {"cache_read": 0, "cache_creation": 0},
+            },
+        }
+        ```
+
+    !!! version-added "Added in version 0.3.49"

     """
-
-
-    usage_metadata_callback_var: ContextVar[Optional[UsageMetadataCallbackHandler]] = (
+    usage_metadata_callback_var: ContextVar[UsageMetadataCallbackHandler | None] = (
         ContextVar(name, default=None)
     )
     register_configure_hook(usage_metadata_callback_var, inheritable=True)
langchain_core/chat_history.py
CHANGED

@@ -1,23 +1,9 @@
-"""**Chat message history** stores a history of the message interactions in a chat.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseChatMessageHistory --> <name>ChatMessageHistory  # Examples: FileChatMessageHistory, PostgresChatMessageHistory
-
-**Main helpers:**
-
-.. code-block::
-
-    AIMessage, HumanMessage, BaseMessage
-
-"""  # noqa: E501
+"""**Chat message history** stores a history of the message interactions in a chat."""

 from __future__ import annotations

 from abc import ABC, abstractmethod
-from typing import TYPE_CHECKING, Union
+from typing import TYPE_CHECKING

 from pydantic import BaseModel, Field

@@ -27,6 +13,7 @@ from langchain_core.messages import (
     HumanMessage,
     get_buffer_string,
 )
+from langchain_core.runnables.config import run_in_executor

 if TYPE_CHECKING:
     from collections.abc import Sequence
@@ -62,36 +49,45 @@ class BaseChatMessageHistory(ABC):

     Example: Shows a default implementation.

-        …
+        ```python
+        import json
+        import os
+        from langchain_core.messages import messages_from_dict, message_to_dict
+

-        …
-        …
-        …
+        class FileChatMessageHistory(BaseChatMessageHistory):
+            storage_path: str
+            session_id: str

-        …
-        …
+            @property
+            def messages(self) -> list[BaseMessage]:
+                try:
                     with open(
-                        os.path.join(storage_path, session_id),
+                        os.path.join(self.storage_path, self.session_id),
                         "r",
                         encoding="utf-8",
                     ) as f:
-                        …
-                        return messages_from_dict( …
-                        [… rest of the old example, elided]
+                        messages_data = json.load(f)
+                        return messages_from_dict(messages_data)
+                except FileNotFoundError:
+                    return []
+
+            def add_messages(self, messages: Sequence[BaseMessage]) -> None:
+                all_messages = list(self.messages)  # Existing messages
+                all_messages.extend(messages)  # Add new messages
+
+                serialized = [message_to_dict(message) for message in all_messages]
+                file_path = os.path.join(self.storage_path, self.session_id)
+                os.makedirs(os.path.dirname(file_path), exist_ok=True)
+                with open(file_path, "w", encoding="utf-8") as f:
+                    json.dump(serialized, f)
+
+            def clear(self) -> None:
+                file_path = os.path.join(self.storage_path, self.session_id)
+                os.makedirs(os.path.dirname(file_path), exist_ok=True)
+                with open(file_path, "w", encoding="utf-8") as f:
+                    json.dump([], f)
+        ```
     """

     messages: list[BaseMessage]
@@ -109,17 +105,18 @@ class BaseChatMessageHistory(ABC):

         In general, fetching messages may involve IO to the underlying
         persistence layer.
-        """
-        from langchain_core.runnables.config import run_in_executor

+        Returns:
+            The messages.
+        """
         return await run_in_executor(None, lambda: self.messages)

-    def add_user_message(self, message: Union[HumanMessage, str]) -> None:
+    def add_user_message(self, message: HumanMessage | str) -> None:
         """Convenience method for adding a human message string to the store.

-        …
-        …
-        …
+        !!! note
+            This is a convenience method. Code should favor the bulk `add_messages`
+            interface instead to save on round-trips to the persistence layer.

         This method may be deprecated in a future release.

@@ -131,12 +128,12 @@ class BaseChatMessageHistory(ABC):
         else:
             self.add_message(HumanMessage(content=message))

-    def add_ai_message(self, message: Union[AIMessage, str]) -> None:
+    def add_ai_message(self, message: AIMessage | str) -> None:
         """Convenience method for adding an AI message string to the store.

-        …
-        …
-        …
+        !!! note
+            This is a convenience method. Code should favor the bulk `add_messages`
+            interface instead to save on round-trips to the persistence layer.

         This method may be deprecated in a future release.

@@ -156,7 +153,7 @@ class BaseChatMessageHistory(ABC):

         Raises:
             NotImplementedError: If the sub-class has not implemented an efficient
-                add_messages method.
+                `add_messages` method.
         """
         if type(self).add_messages != BaseChatMessageHistory.add_messages:
             # This means that the sub-class has implemented an efficient add_messages
@@ -187,8 +184,6 @@ class BaseChatMessageHistory(ABC):
         Args:
             messages: A sequence of BaseMessage objects to store.
         """
-        from langchain_core.runnables.config import run_in_executor
-
         await run_in_executor(None, self.add_messages, messages)

     @abstractmethod
@@ -197,8 +192,6 @@ class BaseChatMessageHistory(ABC):

     async def aclear(self) -> None:
         """Async remove all messages from the store."""
-        from langchain_core.runnables.config import run_in_executor
-
         await run_in_executor(None, self.clear)

     def __str__(self) -> str:
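The convenience methods shown above wrap a bare string in `HumanMessage` / `AIMessage` and delegate to `add_message`, which forwards to `add_messages` when a subclass overrides it. A minimal sketch with a hypothetical in-memory subclass (`InMemoryHistory` is illustrative, not part of this diff; assumes langchain-core 1.0.0 is installed):

```python
# Hypothetical in-memory implementation of the BaseChatMessageHistory
# contract from the diff above; assumes langchain-core 1.0.0 is installed.
from collections.abc import Sequence

from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import BaseMessage


class InMemoryHistory(BaseChatMessageHistory):
    def __init__(self) -> None:
        self.messages: list[BaseMessage] = []

    def add_messages(self, messages: Sequence[BaseMessage]) -> None:
        self.messages.extend(messages)

    def clear(self) -> None:
        self.messages = []


history = InMemoryHistory()
history.add_user_message("hi")         # wrapped in a HumanMessage
history.add_ai_message("hello there")  # wrapped in an AIMessage
print([type(m).__name__ for m in history.messages])
# ['HumanMessage', 'AIMessage']
```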