langchain-core 0.3.72__py3-none-any.whl → 0.4.0.dev0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/_api/beta_decorator.py +1 -0
- langchain_core/_api/deprecation.py +2 -0
- langchain_core/beta/runnables/context.py +1 -0
- langchain_core/callbacks/base.py +23 -14
- langchain_core/callbacks/file.py +1 -0
- langchain_core/callbacks/manager.py +145 -19
- langchain_core/callbacks/streaming_stdout.py +4 -3
- langchain_core/callbacks/usage.py +15 -3
- langchain_core/chat_history.py +1 -0
- langchain_core/document_loaders/langsmith.py +2 -1
- langchain_core/documents/base.py +2 -0
- langchain_core/embeddings/fake.py +2 -0
- langchain_core/indexing/api.py +10 -0
- langchain_core/language_models/_utils.py +37 -0
- langchain_core/language_models/base.py +4 -1
- langchain_core/language_models/chat_models.py +48 -27
- langchain_core/language_models/fake_chat_models.py +71 -1
- langchain_core/language_models/llms.py +1 -0
- langchain_core/memory.py +1 -0
- langchain_core/messages/__init__.py +54 -0
- langchain_core/messages/ai.py +31 -18
- langchain_core/messages/content_blocks.py +1349 -69
- langchain_core/messages/human.py +1 -0
- langchain_core/messages/modifier.py +1 -1
- langchain_core/messages/tool.py +8 -83
- langchain_core/messages/utils.py +221 -6
- langchain_core/output_parsers/base.py +51 -14
- langchain_core/output_parsers/json.py +5 -2
- langchain_core/output_parsers/list.py +7 -2
- langchain_core/output_parsers/openai_functions.py +29 -5
- langchain_core/output_parsers/openai_tools.py +90 -47
- langchain_core/output_parsers/pydantic.py +3 -2
- langchain_core/output_parsers/transform.py +53 -12
- langchain_core/output_parsers/xml.py +14 -5
- langchain_core/outputs/llm_result.py +4 -1
- langchain_core/prompt_values.py +111 -7
- langchain_core/prompts/base.py +4 -0
- langchain_core/prompts/chat.py +3 -0
- langchain_core/prompts/few_shot.py +1 -0
- langchain_core/prompts/few_shot_with_templates.py +1 -0
- langchain_core/prompts/image.py +1 -0
- langchain_core/prompts/pipeline.py +1 -0
- langchain_core/prompts/prompt.py +1 -0
- langchain_core/prompts/structured.py +1 -0
- langchain_core/rate_limiters.py +1 -0
- langchain_core/retrievers.py +3 -0
- langchain_core/runnables/base.py +75 -57
- langchain_core/runnables/branch.py +1 -0
- langchain_core/runnables/config.py +2 -2
- langchain_core/runnables/configurable.py +2 -1
- langchain_core/runnables/fallbacks.py +3 -7
- langchain_core/runnables/graph.py +5 -3
- langchain_core/runnables/graph_ascii.py +1 -0
- langchain_core/runnables/graph_mermaid.py +1 -0
- langchain_core/runnables/history.py +1 -0
- langchain_core/runnables/passthrough.py +3 -0
- langchain_core/runnables/retry.py +1 -0
- langchain_core/runnables/router.py +1 -0
- langchain_core/runnables/schema.py +1 -0
- langchain_core/stores.py +3 -0
- langchain_core/tools/base.py +43 -11
- langchain_core/tools/convert.py +25 -3
- langchain_core/tools/retriever.py +8 -1
- langchain_core/tools/structured.py +10 -1
- langchain_core/tracers/base.py +14 -7
- langchain_core/tracers/context.py +1 -1
- langchain_core/tracers/core.py +27 -4
- langchain_core/tracers/event_stream.py +14 -3
- langchain_core/tracers/langchain.py +14 -3
- langchain_core/tracers/log_stream.py +4 -1
- langchain_core/utils/aiter.py +5 -0
- langchain_core/utils/function_calling.py +2 -1
- langchain_core/utils/iter.py +1 -0
- langchain_core/v1/__init__.py +1 -0
- langchain_core/v1/chat_models.py +1047 -0
- langchain_core/v1/messages.py +755 -0
- langchain_core/vectorstores/base.py +1 -0
- langchain_core/version.py +1 -1
- {langchain_core-0.3.72.dist-info → langchain_core-0.4.0.dev0.dist-info}/METADATA +1 -1
- {langchain_core-0.3.72.dist-info → langchain_core-0.4.0.dev0.dist-info}/RECORD +82 -79
- {langchain_core-0.3.72.dist-info → langchain_core-0.4.0.dev0.dist-info}/WHEEL +0 -0
- {langchain_core-0.3.72.dist-info → langchain_core-0.4.0.dev0.dist-info}/entry_points.txt +0 -0
langchain_core/tools/base.py
CHANGED
@@ -68,6 +68,7 @@ from langchain_core.utils.pydantic import (
     is_pydantic_v1_subclass,
     is_pydantic_v2_subclass,
 )
+from langchain_core.v1.messages import ToolMessage as ToolMessageV1

 if TYPE_CHECKING:
     import uuid
@@ -443,9 +444,7 @@ class ChildTool(BaseTool):
     Args schema should be either:

     - A subclass of pydantic.BaseModel.
-    or
     - A subclass of pydantic.v1.BaseModel if accessing v1 namespace in pydantic 2
-    or
     - a JSON schema dict
     """
     return_direct: bool = False
@@ -500,6 +499,14 @@ class ChildTool(BaseTool):
     two-tuple corresponding to the (content, artifact) of a ToolMessage.
     """

+    message_version: Literal["v0", "v1"] = "v0"
+    """Version of ToolMessage to return given
+    :class:`~langchain_core.messages.content_blocks.ToolCall` input.
+
+    If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
+    If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`.
+    """
+
     def __init__(self, **kwargs: Any) -> None:
         """Initialize the tool."""
         if (
@@ -837,7 +844,7 @@ class ChildTool(BaseTool):

         content = None
         artifact = None
-        status = "success"
+        status: Literal["success", "error"] = "success"
         error_to_raise: Union[Exception, KeyboardInterrupt, None] = None
         try:
             child_config = patch_config(config, callbacks=run_manager.get_child())
@@ -881,7 +888,14 @@ class ChildTool(BaseTool):
         if error_to_raise:
             run_manager.on_tool_error(error_to_raise)
             raise error_to_raise
-        output = _format_output(content, artifact, tool_call_id, self.name, status)
+        output = _format_output(
+            content,
+            artifact,
+            tool_call_id,
+            self.name,
+            status,
+            message_version=self.message_version,
+        )
         run_manager.on_tool_end(output, color=color, name=self.name, **kwargs)
         return output

@@ -947,7 +961,7 @@ class ChildTool(BaseTool):
         )
         content = None
         artifact = None
-        status = "success"
+        status: Literal["success", "error"] = "success"
         error_to_raise: Optional[Union[Exception, KeyboardInterrupt]] = None
         try:
             tool_args, tool_kwargs = self._to_args_and_kwargs(tool_input, tool_call_id)
@@ -995,7 +1009,14 @@ class ChildTool(BaseTool):
             await run_manager.on_tool_error(error_to_raise)
             raise error_to_raise

-        output = _format_output(content, artifact, tool_call_id, self.name, status)
+        output = _format_output(
+            content,
+            artifact,
+            tool_call_id,
+            self.name,
+            status,
+            message_version=self.message_version,
+        )
         await run_manager.on_tool_end(output, color=color, name=self.name, **kwargs)
         return output

@@ -1133,7 +1154,9 @@ def _format_output(
     artifact: Any,
     tool_call_id: Optional[str],
     name: str,
-    status: str,
+    status: Literal["success", "error"],
+    *,
+    message_version: Literal["v0", "v1"] = "v0",
 ) -> Union[ToolOutputMixin, Any]:
     """Format tool output as a ToolMessage if appropriate.

@@ -1143,6 +1166,7 @@ def _format_output(
         tool_call_id: The ID of the tool call.
         name: The name of the tool.
         status: The execution status.
+        message_version: The version of the ToolMessage to return.

     Returns:
         The formatted output, either as a ToolMessage or the original content.
@@ -1151,7 +1175,15 @@ def _format_output(
         return content
     if not _is_message_content_type(content):
         content = _stringify(content)
-    return ToolMessage(
+    if message_version == "v0":
+        return ToolMessage(
+            content,
+            artifact=artifact,
+            tool_call_id=tool_call_id,
+            name=name,
+            status=status,
+        )
+    return ToolMessageV1(
         content,
         artifact=artifact,
         tool_call_id=tool_call_id,
@@ -1258,8 +1290,8 @@ class InjectedToolCallId(InjectedToolArg):
     This annotation is used to mark a tool parameter that should receive
     the tool call ID at runtime.

-
-
+    .. code-block:: python
+
         from typing_extensions import Annotated
         from langchain_core.messages import ToolMessage
         from langchain_core.tools import tool, InjectedToolCallId
@@ -1275,7 +1307,7 @@ class InjectedToolCallId(InjectedToolArg):
                 name="foo",
                 tool_call_id=tool_call_id
             )
-
+
     """

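Usage sketch (not part of the diff): the new `message_version` field on `BaseTool` feeds `_format_output`, which now picks between the legacy `ToolMessage` and `langchain_core.v1.messages.ToolMessage`. `EchoTool` below is a hypothetical example, assuming the 0.4.0.dev0 APIs shown above.

    # Sketch only: EchoTool is hypothetical; APIs per this 0.4.0.dev0 diff.
    from langchain_core.tools import BaseTool


    class EchoTool(BaseTool):
        name: str = "echo"
        description: str = "Echo the input back."

        def _run(self, text: str) -> str:
            return text


    # message_version is a regular pydantic field, so it can be set at init.
    tool = EchoTool(message_version="v1")

    # Invoking with a ToolCall dict routes the result through _format_output,
    # which returns a v1 ToolMessage here instead of the legacy v0 one.
    result = tool.invoke(
        {"name": "echo", "args": {"text": "hi"}, "id": "call-1", "type": "tool_call"}
    )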
langchain_core/tools/convert.py
CHANGED
@@ -22,6 +22,7 @@ def tool(
     response_format: Literal["content", "content_and_artifact"] = "content",
     parse_docstring: bool = False,
     error_on_invalid_docstring: bool = True,
+    message_version: Literal["v0", "v1"] = "v0",
 ) -> Callable[[Union[Callable, Runnable]], BaseTool]: ...


@@ -37,6 +38,7 @@ def tool(
     response_format: Literal["content", "content_and_artifact"] = "content",
     parse_docstring: bool = False,
     error_on_invalid_docstring: bool = True,
+    message_version: Literal["v0", "v1"] = "v0",
 ) -> BaseTool: ...


@@ -51,6 +53,7 @@ def tool(
     response_format: Literal["content", "content_and_artifact"] = "content",
     parse_docstring: bool = False,
     error_on_invalid_docstring: bool = True,
+    message_version: Literal["v0", "v1"] = "v0",
 ) -> BaseTool: ...


@@ -65,6 +68,7 @@ def tool(
     response_format: Literal["content", "content_and_artifact"] = "content",
     parse_docstring: bool = False,
     error_on_invalid_docstring: bool = True,
+    message_version: Literal["v0", "v1"] = "v0",
 ) -> Callable[[Union[Callable, Runnable]], BaseTool]: ...


@@ -79,6 +83,7 @@ def tool(
     response_format: Literal["content", "content_and_artifact"] = "content",
     parse_docstring: bool = False,
     error_on_invalid_docstring: bool = True,
+    message_version: Literal["v0", "v1"] = "v0",
 ) -> Union[
     BaseTool,
     Callable[[Union[Callable, Runnable]], BaseTool],
@@ -118,6 +123,11 @@ def tool(
         error_on_invalid_docstring: if ``parse_docstring`` is provided, configure
             whether to raise ValueError on invalid Google Style docstrings.
             Defaults to True.
+        message_version: Version of ToolMessage to return given
+            :class:`~langchain_core.messages.content_blocks.ToolCall` input.
+
+            If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
+            If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`.

     Returns:
         The tool.
@@ -144,7 +154,8 @@ def tool(
             return "partial json of results", {"full": "object of results"}

     .. versionadded:: 0.2.14
-
+
+    Parse Google-style docstrings:

     .. code-block:: python

@@ -214,7 +225,8 @@ def tool(
             monkey: The baz.
             \"\"\"
             return bar
-    """
+
+    """  # noqa: D214, D410, D411, E501

     def _create_tool_factory(
         tool_name: str,
@@ -272,6 +284,7 @@ def tool(
                 response_format=response_format,
                 parse_docstring=parse_docstring,
                 error_on_invalid_docstring=error_on_invalid_docstring,
+                message_version=message_version,
             )
         # If someone doesn't want a schema applied, we must treat it as
         # a simple string->string function
@@ -288,6 +301,7 @@ def tool(
             return_direct=return_direct,
             coroutine=coroutine,
             response_format=response_format,
+            message_version=message_version,
         )

     return _tool_factory
@@ -381,6 +395,7 @@ def convert_runnable_to_tool(
     name: Optional[str] = None,
     description: Optional[str] = None,
     arg_types: Optional[dict[str, type]] = None,
+    message_version: Literal["v0", "v1"] = "v0",
 ) -> BaseTool:
     """Convert a Runnable into a BaseTool.

@@ -390,10 +405,15 @@ def convert_runnable_to_tool(
         name: The name of the tool. Defaults to None.
         description: The description of the tool. Defaults to None.
         arg_types: The types of the arguments. Defaults to None.
+        message_version: Version of ToolMessage to return given
+            :class:`~langchain_core.messages.content_blocks.ToolCall` input.
+
+            If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
+            If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`.

     Returns:
         The tool.
-    """
+    """  # noqa: E501
     if args_schema:
         runnable = runnable.with_types(input_type=args_schema)
     description = description or _get_description_from_runnable(runnable)
@@ -406,6 +426,7 @@ def convert_runnable_to_tool(
         func=runnable.invoke,
         coroutine=runnable.ainvoke,
         description=description,
+        message_version=message_version,
     )

     async def ainvoke_wrapper(
@@ -433,4 +454,5 @@ def convert_runnable_to_tool(
         coroutine=ainvoke_wrapper,
         description=description,
         args_schema=args_schema,
+        message_version=message_version,
     )
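Usage sketch (not part of the diff): every `@tool` overload gains the same `message_version` keyword and forwards it to the underlying tool class, so opting a decorated function into v1 output is a single argument. Assumes the 0.4.0.dev0 APIs above.

    # Sketch only: assumes the message_version keyword added in this diff.
    from langchain_core.tools import tool


    @tool(message_version="v1")
    def multiply(a: int, b: int) -> int:
        """Multiply two numbers."""
        return a * b


    # A ToolCall-shaped input now yields a v1 ToolMessage rather than a v0 one.
    msg = multiply.invoke(
        {"name": "multiply", "args": {"a": 2, "b": 3}, "id": "call-1", "type": "tool_call"}
    )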
langchain_core/tools/retriever.py
CHANGED

@@ -72,6 +72,7 @@ def create_retriever_tool(
     document_prompt: Optional[BasePromptTemplate] = None,
     document_separator: str = "\n\n",
     response_format: Literal["content", "content_and_artifact"] = "content",
+    message_version: Literal["v0", "v1"] = "v1",
 ) -> Tool:
     r"""Create a tool to do retrieval of documents.

@@ -88,10 +89,15 @@ def create_retriever_tool(
             "content_and_artifact" then the output is expected to be a two-tuple
             corresponding to the (content, artifact) of a ToolMessage (artifact
             being a list of documents in this case). Defaults to "content".
+        message_version: Version of ToolMessage to return given
+            :class:`~langchain_core.messages.content_blocks.ToolCall` input.
+
+            If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
+            If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`.

     Returns:
         Tool class to pass to an agent.
-    """
+    """  # noqa: E501
     document_prompt = document_prompt or PromptTemplate.from_template("{page_content}")
     func = partial(
         _get_relevant_documents,
@@ -114,4 +120,5 @@ def create_retriever_tool(
         coroutine=afunc,
         args_schema=RetrieverInput,
         response_format=response_format,
+        message_version=message_version,
     )
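Note the asymmetry: `create_retriever_tool` defaults `message_version` to `"v1"`, while the `tool` decorator and `StructuredTool.from_function` default to `"v0"`. A hedged sketch; `StaticRetriever` is a hypothetical stand-in retriever:

    # Sketch only: StaticRetriever is a made-up minimal retriever.
    from langchain_core.documents import Document
    from langchain_core.retrievers import BaseRetriever
    from langchain_core.tools import create_retriever_tool


    class StaticRetriever(BaseRetriever):
        def _get_relevant_documents(self, query, *, run_manager):
            return [Document(page_content=f"stub result for {query!r}")]


    retriever_tool = create_retriever_tool(
        StaticRetriever(),
        "search_docs",
        "Search the project documentation.",
        message_version="v0",  # override the "v1" default this helper uses
    )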
langchain_core/tools/structured.py
CHANGED

@@ -129,6 +129,7 @@ class StructuredTool(BaseTool):
         response_format: Literal["content", "content_and_artifact"] = "content",
         parse_docstring: bool = False,
         error_on_invalid_docstring: bool = False,
+        message_version: Literal["v0", "v1"] = "v0",
         **kwargs: Any,
     ) -> StructuredTool:
         """Create tool from a given function.
@@ -157,6 +158,12 @@ class StructuredTool(BaseTool):
             error_on_invalid_docstring: if ``parse_docstring`` is provided, configure
                 whether to raise ValueError on invalid Google Style docstrings.
                 Defaults to False.
+            message_version: Version of ToolMessage to return given
+                :class:`~langchain_core.messages.content_blocks.ToolCall` input.
+
+                If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`.
+                If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`.
+
             kwargs: Additional arguments to pass to the tool

         Returns:
@@ -174,7 +181,8 @@ class StructuredTool(BaseTool):
                 return a + b
                 tool = StructuredTool.from_function(add)
                 tool.run(1, 2) # 3
-        """
+
+        """  # noqa: E501
         if func is not None:
             source_function = func
         elif coroutine is not None:
@@ -231,6 +239,7 @@ class StructuredTool(BaseTool):
             description=description_,
             return_direct=return_direct,
             response_format=response_format,
+            message_version=message_version,
             **kwargs,
         )

langchain_core/tracers/base.py
CHANGED
@@ -17,6 +17,7 @@ from typing_extensions import override
 from langchain_core.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
 from langchain_core.exceptions import TracerException  # noqa: F401
 from langchain_core.tracers.core import _TracerCore
+from langchain_core.v1.messages import AIMessage, AIMessageChunk, MessageV1

 if TYPE_CHECKING:
     from collections.abc import Sequence
@@ -54,7 +55,7 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC):
     def on_chat_model_start(
         self,
         serialized: dict[str, Any],
-        messages: list[list[BaseMessage]],
+        messages: Union[list[list[BaseMessage]], list[MessageV1]],
         *,
         run_id: UUID,
         tags: Optional[list[str]] = None,
@@ -138,7 +139,9 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC):
         self,
         token: str,
         *,
-        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
+        chunk: Optional[
+            Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]
+        ] = None,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,
@@ -190,7 +193,9 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC):
         )

     @override
-    def on_llm_end(self, response: LLMResult, *, run_id: UUID, **kwargs: Any) -> Run:
+    def on_llm_end(
+        self, response: Union[LLMResult, AIMessage], *, run_id: UUID, **kwargs: Any
+    ) -> Run:
         """End a trace for an LLM run.

         Args:
@@ -562,7 +567,7 @@ class AsyncBaseTracer(_TracerCore, AsyncCallbackHandler, ABC):
     async def on_chat_model_start(
         self,
         serialized: dict[str, Any],
-        messages: list[list[BaseMessage]],
+        messages: Union[list[list[BaseMessage]], list[MessageV1]],
         *,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
@@ -617,7 +622,9 @@ class AsyncBaseTracer(_TracerCore, AsyncCallbackHandler, ABC):
         self,
         token: str,
         *,
-        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
+        chunk: Optional[
+            Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]
+        ] = None,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,
@@ -646,7 +653,7 @@ class AsyncBaseTracer(_TracerCore, AsyncCallbackHandler, ABC):
     @override
     async def on_llm_end(
         self,
-        response: LLMResult,
+        response: Union[LLMResult, AIMessage],
         *,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
@@ -882,7 +889,7 @@ class AsyncBaseTracer(_TracerCore, AsyncCallbackHandler, ABC):
         self,
         run: Run,
         token: str,
-        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]],
+        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]],
     ) -> None:
         """Process new LLM token."""

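For custom tracer authors, the practical effect is that the overridable hooks are now typed to accept v1 objects as well; `BaseTracer` normalizes them before runs are persisted. A minimal sketch (`LoggingTracer` is hypothetical; signatures as in the diff):

    # Sketch only: LoggingTracer is hypothetical; widened types per this diff.
    from typing import Any, Union
    from uuid import UUID

    from langchain_core.outputs import LLMResult
    from langchain_core.tracers.base import BaseTracer
    from langchain_core.tracers.schemas import Run
    from langchain_core.v1.messages import AIMessage


    class LoggingTracer(BaseTracer):
        def _persist_run(self, run: Run) -> None:
            print(run.name, run.run_type)

        def on_llm_end(
            self, response: Union[LLMResult, AIMessage], *, run_id: UUID, **kwargs: Any
        ) -> Run:
            # The base class converts a v1 AIMessage to an LLMResult internally.
            return super().on_llm_end(response, run_id=run_id, **kwargs)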
langchain_core/tracers/context.py
CHANGED

@@ -62,7 +62,7 @@ def tracing_v2_enabled(

     Args:
         project_name (str, optional): The name of the project.
-            Defaults to "default".
+            Defaults to ``'default'``.
         example_id (str or UUID, optional): The ID of the example.
             Defaults to None.
         tags (list[str], optional): The tags to add to the run.
langchain_core/tracers/core.py
CHANGED
@@ -18,6 +18,7 @@ from typing import (

 from langchain_core.exceptions import TracerException
 from langchain_core.load import dumpd
+from langchain_core.messages.utils import convert_from_v1_message
 from langchain_core.outputs import (
     ChatGeneration,
     ChatGenerationChunk,
@@ -25,6 +26,12 @@ from langchain_core.outputs import (
     LLMResult,
 )
 from langchain_core.tracers.schemas import Run
+from langchain_core.v1.messages import (
+    AIMessage,
+    AIMessageChunk,
+    MessageV1,
+    MessageV1Types,
+)

 if TYPE_CHECKING:
     from collections.abc import Coroutine, Sequence
@@ -156,7 +163,7 @@ class _TracerCore(ABC):
     def _create_chat_model_run(
         self,
         serialized: dict[str, Any],
-        messages: list[list[BaseMessage]],
+        messages: Union[list[list[BaseMessage]], list[MessageV1]],
         run_id: UUID,
         tags: Optional[list[str]] = None,
         parent_run_id: Optional[UUID] = None,
@@ -181,6 +188,12 @@ class _TracerCore(ABC):
         start_time = datetime.now(timezone.utc)
         if metadata:
             kwargs.update({"metadata": metadata})
+        if isinstance(messages[0], MessageV1Types):
+            # Convert from v1 messages to BaseMessage
+            messages = [
+                [convert_from_v1_message(msg) for msg in messages]  # type: ignore[arg-type]
+            ]
+        messages = cast("list[list[BaseMessage]]", messages)
         return Run(
             id=run_id,
             parent_run_id=parent_run_id,
@@ -230,7 +243,9 @@ class _TracerCore(ABC):
         self,
         token: str,
         run_id: UUID,
-        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
+        chunk: Optional[
+            Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]
+        ] = None,
         parent_run_id: Optional[UUID] = None,  # noqa: ARG002
     ) -> Run:
         """Append token event to LLM run and return the run."""
@@ -276,7 +291,15 @@ class _TracerCore(ABC):
         )
         return llm_run

-    def _complete_llm_run(self, response: LLMResult, run_id: UUID) -> Run:
+    def _complete_llm_run(
+        self, response: Union[LLMResult, AIMessage], run_id: UUID
+    ) -> Run:
+        if isinstance(response, AIMessage):
+            response = LLMResult(
+                generations=[
+                    [ChatGeneration(message=convert_from_v1_message(response))]
+                ]
+            )
         llm_run = self._get_run(run_id, run_type={"llm", "chat_model"})
         if getattr(llm_run, "outputs", None) is None:
             llm_run.outputs = {}
@@ -558,7 +581,7 @@ class _TracerCore(ABC):
         self,
         run: Run,  # noqa: ARG002
         token: str,  # noqa: ARG002
-        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]],  # noqa: ARG002
+        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]],  # noqa: ARG002
     ) -> Union[None, Coroutine[Any, Any, None]]:
         """Process new LLM token."""
         return None
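The normalization added to `_create_chat_model_run` and `_complete_llm_run` follows one pattern: detect v1 objects, convert them with `convert_from_v1_message`, then proceed with the legacy shapes. Restated as standalone helpers (a sketch, assuming the v1 helpers behave as shown in this diff):

    # Sketch only: restates the tracer-core normalization pattern above.
    from langchain_core.messages.utils import convert_from_v1_message
    from langchain_core.outputs import ChatGeneration, LLMResult
    from langchain_core.v1.messages import AIMessage, MessageV1Types


    def normalize_response(response):
        """Coerce a v1 AIMessage into the LLMResult the tracer core expects."""
        if isinstance(response, AIMessage):
            return LLMResult(
                generations=[[ChatGeneration(message=convert_from_v1_message(response))]]
            )
        return response


    def normalize_messages(messages):
        """Wrap a flat list of v1 messages into the legacy list-of-lists shape."""
        if messages and isinstance(messages[0], MessageV1Types):
            return [[convert_from_v1_message(m) for m in messages]]
        return messages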
langchain_core/tracers/event_stream.py
CHANGED

@@ -38,6 +38,7 @@ from langchain_core.runnables.utils import (
 from langchain_core.tracers._streaming import _StreamingCallbackHandler
 from langchain_core.tracers.memory_stream import _MemoryStream
 from langchain_core.utils.aiter import aclosing, py_anext
+from langchain_core.v1.messages import MessageV1

 if TYPE_CHECKING:
     from collections.abc import AsyncIterator, Iterator, Sequence
@@ -45,6 +46,8 @@ if TYPE_CHECKING:
     from langchain_core.documents import Document
     from langchain_core.runnables import Runnable, RunnableConfig
     from langchain_core.tracers.log_stream import LogEntry
+    from langchain_core.v1.messages import AIMessage as AIMessageV1
+    from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1

 logger = logging.getLogger(__name__)

@@ -297,7 +300,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
     async def on_chat_model_start(
         self,
         serialized: dict[str, Any],
-        messages: list[list[BaseMessage]],
+        messages: Union[list[list[BaseMessage]], list[MessageV1]],
         *,
         run_id: UUID,
         tags: Optional[list[str]] = None,
@@ -307,6 +310,8 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
         **kwargs: Any,
     ) -> None:
         """Start a trace for an LLM run."""
+        # below cast is because type is converted in handle_event
+        messages = cast("list[list[BaseMessage]]", messages)
         name_ = _assign_name(name, serialized)
         run_type = "chat_model"

@@ -407,13 +412,18 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
         self,
         token: str,
         *,
-        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
+        chunk: Optional[
+            Union[GenerationChunk, ChatGenerationChunk, AIMessageChunkV1]
+        ] = None,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,
     ) -> None:
         """Run on new LLM token. Only available when streaming is enabled."""
         run_info = self.run_map.get(run_id)
+        chunk = cast(
+            "Optional[Union[GenerationChunk, ChatGenerationChunk]]", chunk
+        )  # converted in handle_event
         chunk_: Union[GenerationChunk, BaseMessageChunk]

         if run_info is None:
@@ -456,9 +466,10 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand

     @override
     async def on_llm_end(
-        self, response: LLMResult, *, run_id: UUID, **kwargs: Any
+        self, response: Union[LLMResult, AIMessageV1], *, run_id: UUID, **kwargs: Any
     ) -> None:
         """End a trace for an LLM run."""
+        response = cast("LLMResult", response)  # converted in handle_event
         run_info = self.run_map.pop(run_id)
         inputs_ = run_info["inputs"]

langchain_core/tracers/langchain.py
CHANGED

@@ -5,7 +5,7 @@ from __future__ import annotations
 import logging
 from concurrent.futures import ThreadPoolExecutor
 from datetime import datetime, timezone
-from typing import TYPE_CHECKING, Any, Optional, Union
+from typing import TYPE_CHECKING, Any, Optional, Union, cast
 from uuid import UUID

 from langsmith import Client
@@ -21,12 +21,15 @@ from typing_extensions import override

 from langchain_core.env import get_runtime_environment
 from langchain_core.load import dumpd
+from langchain_core.messages.utils import convert_from_v1_message
 from langchain_core.tracers.base import BaseTracer
 from langchain_core.tracers.schemas import Run
+from langchain_core.v1.messages import MessageV1Types

 if TYPE_CHECKING:
     from langchain_core.messages import BaseMessage
     from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
+    from langchain_core.v1.messages import AIMessageChunk, MessageV1

 logger = logging.getLogger(__name__)
 _LOGGED = set()
@@ -113,7 +116,7 @@ class LangChainTracer(BaseTracer):
     def on_chat_model_start(
         self,
         serialized: dict[str, Any],
-        messages: list[list[BaseMessage]],
+        messages: Union[list[list[BaseMessage]], list[MessageV1]],
         *,
         run_id: UUID,
         tags: Optional[list[str]] = None,
@@ -140,6 +143,12 @@ class LangChainTracer(BaseTracer):
         start_time = datetime.now(timezone.utc)
         if metadata:
             kwargs.update({"metadata": metadata})
+        if isinstance(messages[0], MessageV1Types):
+            # Convert from v1 messages to BaseMessage
+            messages = [
+                [convert_from_v1_message(msg) for msg in messages]  # type: ignore[arg-type]
+            ]
+        messages = cast("list[list[BaseMessage]]", messages)
         chat_model_run = Run(
             id=run_id,
             parent_run_id=parent_run_id,
@@ -232,7 +241,9 @@ class LangChainTracer(BaseTracer):
         self,
         token: str,
         run_id: UUID,
-        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
+        chunk: Optional[
+            Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]
+        ] = None,
         parent_run_id: Optional[UUID] = None,
     ) -> Run:
         """Append token event to LLM run and return the run."""
langchain_core/tracers/log_stream.py
CHANGED

@@ -34,6 +34,7 @@ if TYPE_CHECKING:

     from langchain_core.runnables.utils import Input, Output
     from langchain_core.tracers.schemas import Run
+    from langchain_core.v1.messages import AIMessageChunk


 class LogEntry(TypedDict):
@@ -210,7 +211,9 @@ class LogStreamCallbackHandler(BaseTracer, _StreamingCallbackHandler):
         exclude_tags: Exclude runs from Runnables with matching tags.
         _schema_format: Primarily changes how the inputs and outputs are
             handled.
+
             **For internal use only. This API will change.**
+
             - 'original' is the format used by all current tracers.
               This format is slightly inconsistent with respect to inputs
               and outputs.
@@ -483,7 +486,7 @@ class LogStreamCallbackHandler(BaseTracer, _StreamingCallbackHandler):
         self,
         run: Run,
         token: str,
-        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]],
+        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]],
     ) -> None:
         """Process new LLM token."""
         index = self._key_map_by_run_id.get(run.id)
langchain_core/utils/aiter.py
CHANGED
@@ -189,6 +189,7 @@ class Tee(Generic[T]):
     To enforce sequential use of ``anext``, provide a ``lock``
     - e.g. an :py:class:`asyncio.Lock` instance in an :py:mod:`asyncio` application -
     and access is automatically synchronised.
+
     """

     def __init__(
@@ -266,11 +267,15 @@ class aclosing(AbstractAsyncContextManager):  # noqa: N801

     Code like this:

+    .. code-block:: python
+
         async with aclosing(<module>.fetch(<arguments>)) as agen:
             <block>

     is equivalent to this:

+    .. code-block:: python
+
         agen = <module>.fetch(<arguments>)
         try:
             <block>
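The docstring fix above only adds the missing `.. code-block:: python` directives; the pattern itself is unchanged. A runnable illustration of `aclosing`:

    # Runnable illustration of the aclosing pattern documented above.
    import asyncio

    from langchain_core.utils.aiter import aclosing


    async def numbers():
        for i in range(3):
            yield i


    async def main() -> None:
        # aclosing guarantees agen.aclose() runs even if the block exits early.
        async with aclosing(numbers()) as agen:
            async for n in agen:
                print(n)


    asyncio.run(main())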
langchain_core/utils/function_calling.py
CHANGED

@@ -616,7 +616,7 @@ def convert_to_json_schema(

 @beta()
 def tool_example_to_messages(
-    input: str,
+    input: str,
     tool_calls: list[BaseModel],
     tool_outputs: Optional[list[str]] = None,
     *,
@@ -687,6 +687,7 @@ def tool_example_to_messages(
         messages.extend(
             tool_example_to_messages(txt, [tool_call])
         )
+
     """
     messages: list[BaseMessage] = [HumanMessage(content=input)]
     openai_tool_calls = [