langchain-core 1.0.0a7__py3-none-any.whl → 1.0.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/__init__.py +1 -1
- langchain_core/_api/__init__.py +0 -1
- langchain_core/_api/beta_decorator.py +17 -20
- langchain_core/_api/deprecation.py +30 -35
- langchain_core/_import_utils.py +1 -1
- langchain_core/agents.py +7 -6
- langchain_core/caches.py +4 -10
- langchain_core/callbacks/__init__.py +1 -8
- langchain_core/callbacks/base.py +232 -243
- langchain_core/callbacks/file.py +33 -33
- langchain_core/callbacks/manager.py +353 -416
- langchain_core/callbacks/stdout.py +21 -22
- langchain_core/callbacks/streaming_stdout.py +32 -32
- langchain_core/callbacks/usage.py +54 -51
- langchain_core/chat_history.py +76 -55
- langchain_core/document_loaders/langsmith.py +21 -21
- langchain_core/documents/__init__.py +0 -1
- langchain_core/documents/base.py +37 -40
- langchain_core/documents/transformers.py +28 -29
- langchain_core/embeddings/fake.py +46 -52
- langchain_core/exceptions.py +5 -5
- langchain_core/indexing/api.py +11 -11
- langchain_core/indexing/base.py +24 -24
- langchain_core/language_models/__init__.py +0 -2
- langchain_core/language_models/_utils.py +51 -53
- langchain_core/language_models/base.py +23 -24
- langchain_core/language_models/chat_models.py +121 -144
- langchain_core/language_models/fake_chat_models.py +5 -5
- langchain_core/language_models/llms.py +10 -12
- langchain_core/load/dump.py +1 -1
- langchain_core/load/load.py +16 -16
- langchain_core/load/serializable.py +35 -34
- langchain_core/messages/__init__.py +1 -16
- langchain_core/messages/ai.py +105 -104
- langchain_core/messages/base.py +26 -26
- langchain_core/messages/block_translators/__init__.py +17 -17
- langchain_core/messages/block_translators/anthropic.py +2 -2
- langchain_core/messages/block_translators/bedrock_converse.py +2 -2
- langchain_core/messages/block_translators/google_genai.py +2 -2
- langchain_core/messages/block_translators/groq.py +117 -21
- langchain_core/messages/block_translators/langchain_v0.py +2 -2
- langchain_core/messages/block_translators/openai.py +4 -4
- langchain_core/messages/chat.py +1 -1
- langchain_core/messages/content.py +189 -193
- langchain_core/messages/function.py +5 -5
- langchain_core/messages/human.py +15 -17
- langchain_core/messages/modifier.py +1 -1
- langchain_core/messages/system.py +12 -14
- langchain_core/messages/tool.py +45 -49
- langchain_core/messages/utils.py +384 -396
- langchain_core/output_parsers/__init__.py +1 -14
- langchain_core/output_parsers/base.py +22 -23
- langchain_core/output_parsers/json.py +3 -3
- langchain_core/output_parsers/list.py +1 -1
- langchain_core/output_parsers/openai_functions.py +46 -44
- langchain_core/output_parsers/openai_tools.py +7 -7
- langchain_core/output_parsers/pydantic.py +10 -11
- langchain_core/output_parsers/string.py +1 -1
- langchain_core/output_parsers/transform.py +2 -2
- langchain_core/output_parsers/xml.py +1 -1
- langchain_core/outputs/__init__.py +1 -1
- langchain_core/outputs/chat_generation.py +14 -14
- langchain_core/outputs/generation.py +5 -5
- langchain_core/outputs/llm_result.py +5 -5
- langchain_core/prompt_values.py +5 -5
- langchain_core/prompts/__init__.py +3 -23
- langchain_core/prompts/base.py +32 -37
- langchain_core/prompts/chat.py +216 -222
- langchain_core/prompts/dict.py +2 -2
- langchain_core/prompts/few_shot.py +76 -83
- langchain_core/prompts/few_shot_with_templates.py +6 -8
- langchain_core/prompts/image.py +11 -13
- langchain_core/prompts/loading.py +1 -1
- langchain_core/prompts/message.py +2 -2
- langchain_core/prompts/prompt.py +14 -16
- langchain_core/prompts/string.py +19 -7
- langchain_core/prompts/structured.py +24 -25
- langchain_core/rate_limiters.py +36 -38
- langchain_core/retrievers.py +41 -182
- langchain_core/runnables/base.py +565 -590
- langchain_core/runnables/branch.py +7 -7
- langchain_core/runnables/config.py +37 -44
- langchain_core/runnables/configurable.py +8 -9
- langchain_core/runnables/fallbacks.py +8 -8
- langchain_core/runnables/graph.py +28 -27
- langchain_core/runnables/graph_ascii.py +19 -18
- langchain_core/runnables/graph_mermaid.py +20 -31
- langchain_core/runnables/graph_png.py +7 -7
- langchain_core/runnables/history.py +20 -20
- langchain_core/runnables/passthrough.py +8 -8
- langchain_core/runnables/retry.py +3 -3
- langchain_core/runnables/router.py +1 -1
- langchain_core/runnables/schema.py +33 -33
- langchain_core/runnables/utils.py +30 -34
- langchain_core/stores.py +72 -102
- langchain_core/sys_info.py +27 -29
- langchain_core/tools/__init__.py +1 -14
- langchain_core/tools/base.py +63 -63
- langchain_core/tools/convert.py +92 -92
- langchain_core/tools/render.py +9 -9
- langchain_core/tools/retriever.py +1 -1
- langchain_core/tools/simple.py +6 -7
- langchain_core/tools/structured.py +17 -18
- langchain_core/tracers/__init__.py +1 -9
- langchain_core/tracers/base.py +35 -35
- langchain_core/tracers/context.py +12 -17
- langchain_core/tracers/event_stream.py +3 -3
- langchain_core/tracers/langchain.py +8 -8
- langchain_core/tracers/log_stream.py +17 -18
- langchain_core/tracers/memory_stream.py +2 -2
- langchain_core/tracers/schemas.py +0 -129
- langchain_core/utils/aiter.py +31 -31
- langchain_core/utils/env.py +5 -5
- langchain_core/utils/function_calling.py +48 -120
- langchain_core/utils/html.py +4 -4
- langchain_core/utils/input.py +2 -2
- langchain_core/utils/interactive_env.py +1 -1
- langchain_core/utils/iter.py +19 -19
- langchain_core/utils/json.py +1 -1
- langchain_core/utils/json_schema.py +2 -2
- langchain_core/utils/mustache.py +5 -5
- langchain_core/utils/pydantic.py +17 -17
- langchain_core/utils/strings.py +4 -4
- langchain_core/utils/utils.py +25 -28
- langchain_core/vectorstores/base.py +43 -64
- langchain_core/vectorstores/in_memory.py +83 -85
- langchain_core/version.py +1 -1
- {langchain_core-1.0.0a7.dist-info → langchain_core-1.0.0rc1.dist-info}/METADATA +23 -11
- langchain_core-1.0.0rc1.dist-info/RECORD +172 -0
- langchain_core/memory.py +0 -120
- langchain_core/pydantic_v1/__init__.py +0 -30
- langchain_core/pydantic_v1/dataclasses.py +0 -23
- langchain_core/pydantic_v1/main.py +0 -23
- langchain_core-1.0.0a7.dist-info/RECORD +0 -176
- {langchain_core-1.0.0a7.dist-info → langchain_core-1.0.0rc1.dist-info}/WHEEL +0 -0
langchain_core/callbacks/stdout.py CHANGED

@@ -20,7 +20,7 @@ class StdOutCallbackHandler(BaseCallbackHandler):
         """Initialize callback handler.
 
         Args:
-            color: The color to use for the text.
+            color: The color to use for the text.
         """
         self.color = color
 
@@ -31,9 +31,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):
         """Print out that we are entering a chain.
 
         Args:
-            serialized
-            inputs
-            **kwargs
+            serialized: The serialized chain.
+            inputs: The inputs to the chain.
+            **kwargs: Additional keyword arguments.
         """
         if "name" in kwargs:
             name = kwargs["name"]
@@ -48,8 +48,8 @@ class StdOutCallbackHandler(BaseCallbackHandler):
         """Print out that we finished a chain.
 
         Args:
-            outputs
-            **kwargs
+            outputs: The outputs of the chain.
+            **kwargs: Additional keyword arguments.
         """
         print("\n\033[1m> Finished chain.\033[0m")  # noqa: T201
 
@@ -60,9 +60,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):
         """Run on agent action.
 
         Args:
-            action
-            color
-            **kwargs
+            action: The agent action.
+            color: The color to use for the text.
+            **kwargs: Additional keyword arguments.
         """
         print_text(action.log, color=color or self.color)
 
@@ -78,12 +78,11 @@ class StdOutCallbackHandler(BaseCallbackHandler):
         """If not the final action, print out observation.
 
         Args:
-            output
-            color
-            observation_prefix
-
-
-            **kwargs (Any): Additional keyword arguments.
+            output: The output to print.
+            color: The color to use for the text.
+            observation_prefix: The observation prefix.
+            llm_prefix: The LLM prefix.
+            **kwargs: Additional keyword arguments.
         """
         output = str(output)
         if observation_prefix is not None:
@@ -103,10 +102,10 @@ class StdOutCallbackHandler(BaseCallbackHandler):
         """Run when the agent ends.
 
         Args:
-            text
-            color
-            end
-            **kwargs
+            text: The text to print.
+            color: The color to use for the text.
+            end: The end character to use. Defaults to "".
+            **kwargs: Additional keyword arguments.
         """
         print_text(text, color=color or self.color, end=end)
 
@@ -117,8 +116,8 @@ class StdOutCallbackHandler(BaseCallbackHandler):
         """Run on the agent end.
 
         Args:
-            finish
-            color
-            **kwargs
+            finish: The agent finish.
+            color: The color to use for the text.
+            **kwargs: Additional keyword arguments.
         """
         print_text(finish.log, color=color or self.color, end="\n")
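For orientation, here is a minimal sketch (not part of the diff) of how the handler documented above is typically attached to a runnable; the fake chat model, prompt text, and color are illustrative choices, not anything introduced in this release:

```python
from langchain_core.callbacks import StdOutCallbackHandler
from langchain_core.language_models.fake_chat_models import FakeListChatModel
from langchain_core.prompts import ChatPromptTemplate

# Handler that prints chain entry/exit and agent logs to stdout.
handler = StdOutCallbackHandler(color="green")

prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
chain = prompt | FakeListChatModel(responses=["Why did the bear cross the road?"])

# Passing the handler via config triggers on_chain_start / on_chain_end,
# which print the "> Entering new ... chain..." / "> Finished chain." banners.
chain.invoke({"topic": "bears"}, config={"callbacks": [handler]})
```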
langchain_core/callbacks/streaming_stdout.py CHANGED

@@ -24,9 +24,9 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
         """Run when LLM starts running.
 
         Args:
-            serialized
-            prompts
-            **kwargs
+            serialized: The serialized LLM.
+            prompts: The prompts to run.
+            **kwargs: Additional keyword arguments.
         """
 
     def on_chat_model_start(
@@ -38,9 +38,9 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
         """Run when LLM starts running.
 
         Args:
-            serialized
-            messages
-            **kwargs
+            serialized: The serialized LLM.
+            messages: The messages to run.
+            **kwargs: Additional keyword arguments.
         """
 
     @override
@@ -48,8 +48,8 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
         """Run on new LLM token. Only available when streaming is enabled.
 
         Args:
-            token
-            **kwargs
+            token: The new token.
+            **kwargs: Additional keyword arguments.
         """
         sys.stdout.write(token)
         sys.stdout.flush()
@@ -58,16 +58,16 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
         """Run when LLM ends running.
 
         Args:
-            response
-            **kwargs
+            response: The response from the LLM.
+            **kwargs: Additional keyword arguments.
         """
 
     def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
         """Run when LLM errors.
 
         Args:
-            error
-            **kwargs
+            error: The error that occurred.
+            **kwargs: Additional keyword arguments.
         """
 
     def on_chain_start(
@@ -76,25 +76,25 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
         """Run when a chain starts running.
 
         Args:
-            serialized
-            inputs
-            **kwargs
+            serialized: The serialized chain.
+            inputs: The inputs to the chain.
+            **kwargs: Additional keyword arguments.
         """
 
     def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
         """Run when a chain ends running.
 
         Args:
-            outputs
-            **kwargs
+            outputs: The outputs of the chain.
+            **kwargs: Additional keyword arguments.
         """
 
     def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
         """Run when chain errors.
 
         Args:
-            error
-            **kwargs
+            error: The error that occurred.
+            **kwargs: Additional keyword arguments.
         """
 
     def on_tool_start(
@@ -103,47 +103,47 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
         """Run when the tool starts running.
 
         Args:
-            serialized
-            input_str
-            **kwargs
+            serialized: The serialized tool.
+            input_str: The input string.
+            **kwargs: Additional keyword arguments.
         """
 
     def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
         """Run on agent action.
 
         Args:
-            action
-            **kwargs
+            action: The agent action.
+            **kwargs: Additional keyword arguments.
         """
 
     def on_tool_end(self, output: Any, **kwargs: Any) -> None:
         """Run when tool ends running.
 
         Args:
-            output
-            **kwargs
+            output: The output of the tool.
+            **kwargs: Additional keyword arguments.
         """
 
     def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
         """Run when tool errors.
 
         Args:
-            error
-            **kwargs
+            error: The error that occurred.
+            **kwargs: Additional keyword arguments.
         """
 
     def on_text(self, text: str, **kwargs: Any) -> None:
         """Run on an arbitrary text.
 
         Args:
-            text
-            **kwargs
+            text: The text to print.
+            **kwargs: Additional keyword arguments.
         """
 
     def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
         """Run on the agent end.
 
         Args:
-            finish
-            **kwargs
+            finish: The agent finish.
+            **kwargs: Additional keyword arguments.
         """
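Similarly, a minimal sketch (not part of the diff) of the streaming handler above. A fake chat model keeps the example self-contained; with a real provider, `on_llm_new_token` writes each token to stdout as it arrives:

```python
from langchain_core.callbacks import StreamingStdOutCallbackHandler
from langchain_core.language_models.fake_chat_models import FakeListChatModel

# The handler mirrors every new token to sys.stdout and flushes immediately.
llm = FakeListChatModel(
    responses=["Hello there!"],
    callbacks=[StreamingStdOutCallbackHandler()],
)

# stream() yields chunks; on_llm_new_token fires once per chunk.
for _chunk in llm.stream("Hi"):
    pass
```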
langchain_core/callbacks/usage.py CHANGED

@@ -19,30 +19,29 @@ class UsageMetadataCallbackHandler(BaseCallbackHandler):
     """Callback Handler that tracks AIMessage.usage_metadata.
 
     Example:
-        [removed example lines not rendered in this diff view]
-          'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
+        ```python
+        from langchain.chat_models import init_chat_model
+        from langchain_core.callbacks import UsageMetadataCallbackHandler
+
+        llm_1 = init_chat_model(model="openai:gpt-4o-mini")
+        llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")
+
+        callback = UsageMetadataCallbackHandler()
+        result_1 = llm_1.invoke("Hello", config={"callbacks": [callback]})
+        result_2 = llm_2.invoke("Hello", config={"callbacks": [callback]})
+        callback.usage_metadata
+        ```
+        ```txt
+        {'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
+          'output_tokens': 10,
+          'total_tokens': 18,
+          'input_token_details': {'audio': 0, 'cache_read': 0},
+          'output_token_details': {'audio': 0, 'reasoning': 0}},
+         'claude-3-5-haiku-20241022': {'input_tokens': 8,
+          'output_tokens': 21,
+          'total_tokens': 29,
+          'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
+        ```
 
     !!! version-added "Added in version 0.3.49"
 
@@ -96,40 +95,44 @@ def get_usage_metadata_callback(
     """Get usage metadata callback.
 
     Get context manager for tracking usage metadata across chat model calls using
-
+    `AIMessage.usage_metadata`.
 
     Args:
-        name
-            ``'usage_metadata_callback'``.
+        name: The name of the context variable.
 
     Yields:
         The usage metadata callback.
 
     Example:
-        [removed example lines not rendered in this diff view]
+        ```python
+        from langchain.chat_models import init_chat_model
+        from langchain_core.callbacks import get_usage_metadata_callback
+
+        llm_1 = init_chat_model(model="openai:gpt-4o-mini")
+        llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")
+
+        with get_usage_metadata_callback() as cb:
+            llm_1.invoke("Hello")
+            llm_2.invoke("Hello")
+            print(cb.usage_metadata)
+        ```
+        ```txt
+        {
+            "gpt-4o-mini-2024-07-18": {
+                "input_tokens": 8,
+                "output_tokens": 10,
+                "total_tokens": 18,
+                "input_token_details": {"audio": 0, "cache_read": 0},
+                "output_token_details": {"audio": 0, "reasoning": 0},
+            },
+            "claude-3-5-haiku-20241022": {
+                "input_tokens": 8,
+                "output_tokens": 21,
+                "total_tokens": 29,
+                "input_token_details": {"cache_read": 0, "cache_creation": 0},
+            },
+        }
+        ```
 
     !!! version-added "Added in version 0.3.49"
 
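As a small companion to the examples restored above, a sketch (not from the diff) of aggregating the per-model counts that `UsageMetadataCallbackHandler` collects; the summing logic is illustrative:

```python
from langchain_core.callbacks import UsageMetadataCallbackHandler

callback = UsageMetadataCallbackHandler()

# ... invoke one or more chat models with config={"callbacks": [callback]} ...

# usage_metadata is keyed by model name; each value follows the UsageMetadata
# shape (input_tokens / output_tokens / total_tokens plus detail dicts).
total = sum(usage["total_tokens"] for usage in callback.usage_metadata.values())
print(f"Total tokens across all models: {total}")
```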
langchain_core/chat_history.py CHANGED

@@ -1,18 +1,4 @@
-"""**Chat message history** stores a history of the message interactions in a chat.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseChatMessageHistory --> <name>ChatMessageHistory  # Examples: FileChatMessageHistory, PostgresChatMessageHistory
-
-**Main helpers:**
-
-.. code-block::
-
-    AIMessage, HumanMessage, BaseMessage
-
-"""  # noqa: E501
+"""**Chat message history** stores a history of the message interactions in a chat."""
 
 from __future__ import annotations
 
@@ -22,7 +8,9 @@ from typing import TYPE_CHECKING
 from pydantic import BaseModel, Field
 
 from langchain_core.messages import (
+    AIMessage,
     BaseMessage,
+    HumanMessage,
     get_buffer_string,
 )
 from langchain_core.runnables.config import run_in_executor
@@ -61,46 +49,45 @@ class BaseChatMessageHistory(ABC):
 
     Example: Shows a default implementation.
 
-        [removed example lines not rendered in this diff view]
+        ```python
+        import json
+        import os
+        from langchain_core.messages import messages_from_dict, message_to_dict
+
+
+        class FileChatMessageHistory(BaseChatMessageHistory):
+            storage_path: str
+            session_id: str
+
+            @property
+            def messages(self) -> list[BaseMessage]:
+                try:
+                    with open(
+                        os.path.join(self.storage_path, self.session_id),
+                        "r",
+                        encoding="utf-8",
+                    ) as f:
+                        messages_data = json.load(f)
+                    return messages_from_dict(messages_data)
+                except FileNotFoundError:
+                    return []
+
+            def add_messages(self, messages: Sequence[BaseMessage]) -> None:
+                all_messages = list(self.messages)  # Existing messages
+                all_messages.extend(messages)  # Add new messages
+
+                serialized = [message_to_dict(message) for message in all_messages]
+                file_path = os.path.join(self.storage_path, self.session_id)
+                os.makedirs(os.path.dirname(file_path), exist_ok=True)
+                with open(file_path, "w", encoding="utf-8") as f:
+                    json.dump(serialized, f)
+
+            def clear(self) -> None:
+                file_path = os.path.join(self.storage_path, self.session_id)
+                os.makedirs(os.path.dirname(file_path), exist_ok=True)
+                with open(file_path, "w", encoding="utf-8") as f:
+                    json.dump([], f)
+        ```
     """
 
     messages: list[BaseMessage]
@@ -124,6 +111,40 @@ class BaseChatMessageHistory(ABC):
         """
         return await run_in_executor(None, lambda: self.messages)
 
+    def add_user_message(self, message: HumanMessage | str) -> None:
+        """Convenience method for adding a human message string to the store.
+
+        !!! note
+            This is a convenience method. Code should favor the bulk `add_messages`
+            interface instead to save on round-trips to the persistence layer.
+
+            This method may be deprecated in a future release.
+
+        Args:
+            message: The human message to add to the store.
+        """
+        if isinstance(message, HumanMessage):
+            self.add_message(message)
+        else:
+            self.add_message(HumanMessage(content=message))
+
+    def add_ai_message(self, message: AIMessage | str) -> None:
+        """Convenience method for adding an AI message string to the store.
+
+        !!! note
+            This is a convenience method. Code should favor the bulk `add_messages`
+            interface instead to save on round-trips to the persistence layer.
+
+            This method may be deprecated in a future release.
+
+        Args:
+            message: The AI message to add.
+        """
+        if isinstance(message, AIMessage):
+            self.add_message(message)
+        else:
+            self.add_message(AIMessage(content=message))
+
     def add_message(self, message: BaseMessage) -> None:
         """Add a Message object to the store.
 
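For reference, a short sketch (not part of the diff) of the restored `add_user_message` / `add_ai_message` convenience methods, assuming `InMemoryChatMessageHistory` remains available from `langchain_core.chat_history`:

```python
from langchain_core.chat_history import InMemoryChatMessageHistory

history = InMemoryChatMessageHistory()

# Plain strings are wrapped in HumanMessage / AIMessage automatically.
history.add_user_message("Hi, my name is Bob.")
history.add_ai_message("Hello Bob! How can I help you today?")

for message in history.messages:
    print(f"{message.type}: {message.content}")
```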
langchain_core/document_loaders/langsmith.py CHANGED

@@ -22,22 +22,22 @@ class LangSmithLoader(BaseLoader):
 
     ??? note "Lazy load"
 
-
+        ```python
+        from langchain_core.document_loaders import LangSmithLoader
 
-
+        loader = LangSmithLoader(dataset_id="...", limit=100)
+        docs = []
+        for doc in loader.lazy_load():
+            docs.append(doc)
+        ```
 
-
-
-
-            docs.append(doc)
-
-        .. code-block:: python
-
-            # -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...]
+        ```python
+        # -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...]
+        ```
 
     !!! version-added "Added in version 0.2.34"
 
-    """
+    """
 
     def __init__(
         self,
@@ -60,15 +60,15 @@ class LangSmithLoader(BaseLoader):
         """Create a LangSmith loader.
 
         Args:
-            dataset_id: The ID of the dataset to filter by.
-            dataset_name: The name of the dataset to filter by.
-            content_key: The inputs key to set as Document page content.
-                are interpreted as nested keys. E.g.
+            dataset_id: The ID of the dataset to filter by.
+            dataset_name: The name of the dataset to filter by.
+            content_key: The inputs key to set as Document page content. `'.'` characters
+                are interpreted as nested keys. E.g. `content_key="first.second"` will
                 result in
-
+                `Document(page_content=format_content(example.inputs["first"]["second"]))`
             format_content: Function for converting the content extracted from the example
                 inputs into a string. Defaults to JSON-encoding the contents.
-            example_ids: The IDs of the examples to filter by.
+            example_ids: The IDs of the examples to filter by.
             as_of: The dataset version tag OR
                 timestamp to retrieve the examples as of.
                 Response examples will only be those that were present at the time
@@ -76,17 +76,17 @@ class LangSmithLoader(BaseLoader):
             splits: A list of dataset splits, which are
                 divisions of your dataset such as 'train', 'test', or 'validation'.
                 Returns examples only from the specified splits.
-            inline_s3_urls: Whether to inline S3 URLs. Defaults to True
+            inline_s3_urls: Whether to inline S3 URLs. Defaults to `True`.
             offset: The offset to start from. Defaults to 0.
             limit: The maximum number of examples to return.
-            metadata: Metadata to filter by.
+            metadata: Metadata to filter by.
             filter: A structured filter string to apply to the examples.
             client: LangSmith Client. If not provided will be initialized from below args.
             client_kwargs: Keyword args to pass to LangSmith client init. Should only be
-                specified if
+                specified if `client` isn't.
 
         Raises:
-            ValueError: If both
+            ValueError: If both `client` and `client_kwargs` are provided.
         """  # noqa: E501
         if client and client_kwargs:
             raise ValueError