langchain-core 0.3.76__py3-none-any.whl → 0.3.78__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/_api/beta_decorator.py +6 -5
- langchain_core/_api/deprecation.py +11 -11
- langchain_core/callbacks/base.py +17 -11
- langchain_core/callbacks/manager.py +2 -2
- langchain_core/callbacks/usage.py +2 -2
- langchain_core/chat_history.py +26 -16
- langchain_core/document_loaders/langsmith.py +1 -1
- langchain_core/indexing/api.py +31 -31
- langchain_core/language_models/chat_models.py +4 -2
- langchain_core/language_models/fake_chat_models.py +5 -2
- langchain_core/language_models/llms.py +3 -1
- langchain_core/load/serializable.py +1 -1
- langchain_core/messages/ai.py +22 -10
- langchain_core/messages/base.py +30 -16
- langchain_core/messages/chat.py +4 -1
- langchain_core/messages/function.py +9 -5
- langchain_core/messages/human.py +11 -4
- langchain_core/messages/modifier.py +1 -0
- langchain_core/messages/system.py +9 -2
- langchain_core/messages/tool.py +27 -16
- langchain_core/messages/utils.py +97 -83
- langchain_core/outputs/chat_generation.py +10 -6
- langchain_core/prompt_values.py +6 -2
- langchain_core/prompts/chat.py +6 -3
- langchain_core/prompts/few_shot.py +4 -1
- langchain_core/runnables/base.py +14 -13
- langchain_core/runnables/graph.py +4 -1
- langchain_core/runnables/graph_ascii.py +1 -1
- langchain_core/runnables/graph_mermaid.py +27 -10
- langchain_core/runnables/retry.py +35 -18
- langchain_core/stores.py +6 -6
- langchain_core/tools/base.py +7 -5
- langchain_core/tools/convert.py +2 -2
- langchain_core/tools/simple.py +1 -5
- langchain_core/tools/structured.py +0 -10
- langchain_core/tracers/event_stream.py +13 -15
- langchain_core/utils/aiter.py +1 -1
- langchain_core/utils/function_calling.py +13 -8
- langchain_core/utils/iter.py +1 -1
- langchain_core/utils/json.py +7 -1
- langchain_core/utils/json_schema.py +145 -39
- langchain_core/utils/pydantic.py +6 -5
- langchain_core/utils/utils.py +1 -1
- langchain_core/vectorstores/in_memory.py +5 -5
- langchain_core/version.py +1 -1
- {langchain_core-0.3.76.dist-info → langchain_core-0.3.78.dist-info}/METADATA +8 -18
- {langchain_core-0.3.76.dist-info → langchain_core-0.3.78.dist-info}/RECORD +49 -49
- {langchain_core-0.3.76.dist-info → langchain_core-0.3.78.dist-info}/WHEEL +0 -0
- {langchain_core-0.3.76.dist-info → langchain_core-0.3.78.dist-info}/entry_points.txt +0 -0

langchain_core/_api/beta_decorator.py
CHANGED

@@ -174,6 +174,7 @@ def beta(
         def finalize(_wrapper: Callable[..., Any], new_doc: str) -> Any:
             """Finalize the property."""
             return property(fget=_fget, fset=_fset, fdel=_fdel, doc=new_doc)
+
     else:
         _name = _name or obj.__qualname__
         if not _obj_type:

@@ -226,17 +227,17 @@ def warn_beta(
 ) -> None:
     """Display a standardized beta annotation.

-
-    message
+    Args:
+        message:
            Override the default beta message. The
            %(name)s, %(obj_type)s, %(addendum)s
            format specifiers will be replaced by the
            values of the respective arguments passed to this function.
-    name
+        name:
            The name of the annotated object.
-    obj_type
+        obj_type:
            The object type being annotated.
-    addendum
+        addendum:
            Additional text appended directly to the final message.
     """
     if not message:

langchain_core/_api/deprecation.py
CHANGED

@@ -431,35 +431,35 @@ def warn_deprecated(
 ) -> None:
     """Display a standardized deprecation.

-
-    since
+    Args:
+        since:
            The release at which this API became deprecated.
-    message
+        message:
            Override the default deprecation message. The %(since)s,
            %(name)s, %(alternative)s, %(obj_type)s, %(addendum)s,
            and %(removal)s format specifiers will be replaced by the
            values of the respective arguments passed to this function.
-    name
+        name:
            The name of the deprecated object.
-    alternative
+        alternative:
            An alternative API that the user may use in place of the
            deprecated API. The deprecation warning will tell the user
            about this alternative if provided.
-    alternative_import:
+        alternative_import:
            An alternative import that the user may use instead.
-    pending
+        pending:
            If True, uses a PendingDeprecationWarning instead of a
            DeprecationWarning. Cannot be used together with removal.
-    obj_type
+        obj_type:
            The object type being deprecated.
-    addendum
+        addendum:
            Additional text appended directly to the final message.
-    removal
+        removal:
            The expected removal version. With the default (an empty
            string), a removal version is automatically computed from
            since. Set to other Falsy values to not schedule a removal
            date. Cannot be used together with pending.
-    package:
+        package:
            The package of the deprecated object.
     """
     if not pending:

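For reference, a minimal sketch of how the documented arguments are typically passed to warn_deprecated; all values below are illustrative placeholders, not taken from this release.

from langchain_core._api.deprecation import warn_deprecated

# Emits a standardized DeprecationWarning; argument names follow the
# docstring above. The values here are illustrative only.
warn_deprecated(
    since="0.3.0",
    name="OldHelper",
    obj_type="class",
    alternative="NewHelper",
    removal="1.0",
    addendum="See the migration guide for details.",
)
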
langchain_core/callbacks/base.py
CHANGED

@@ -71,7 +71,9 @@ class LLMManagerMixin:
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,
     ) -> Any:
-        """Run on new
+        """Run on new output token. Only available when streaming is enabled.
+
+        For both chat models and non-chat models (legacy LLMs).

         Args:
             token (str): The new token.

@@ -243,7 +245,7 @@ class CallbackManagerMixin:
     ) -> Any:
         """Run when LLM starts running.

-        ..
+        .. warning::
            This method is called for non-chat models (regular LLMs). If you're
            implementing a handler for a chat model, you should use
            ``on_chat_model_start`` instead.

@@ -271,8 +273,9 @@ class CallbackManagerMixin:
     ) -> Any:
         """Run when a chat model starts running.

-
-
+        .. warning::
+            This method is called for chat models. If you're implementing a handler for
+            a non-chat model, you should use ``on_llm_start`` instead.

         Args:
             serialized (dict[str, Any]): The serialized chat model.

@@ -489,9 +492,9 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         metadata: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> None:
-        """Run when
+        """Run when the model starts running.

-        ..
+        .. warning::
            This method is called for non-chat models (regular LLMs). If you're
            implementing a handler for a chat model, you should use
            ``on_chat_model_start`` instead.

@@ -519,8 +522,9 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     ) -> Any:
         """Run when a chat model starts running.

-
-
+        .. warning::
+            This method is called for chat models. If you're implementing a handler for
+            a non-chat model, you should use ``on_llm_start`` instead.

         Args:
             serialized (dict[str, Any]): The serialized chat model.

@@ -546,7 +550,9 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         tags: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> None:
-        """Run on new
+        """Run on new output token. Only available when streaming is enabled.
+
+        For both chat models and non-chat models (legacy LLMs).

         Args:
             token (str): The new token.

@@ -567,7 +573,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         tags: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> None:
-        """Run when
+        """Run when the model ends running.

         Args:
             response (LLMResult): The response which was generated.

@@ -867,7 +873,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         metadata: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> None:
-        """Override to define a handler for
+        """Override to define a handler for custom events.

         Args:
             name: The name of the custom event.

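To illustrate the split these docstrings describe between chat-model and legacy-LLM hooks, a minimal handler sketch; the class name and print statements are illustrative.

from typing import Any

from langchain_core.callbacks import BaseCallbackHandler


class LoggingHandler(BaseCallbackHandler):
    """Illustrative handler: reacts to chat model starts and streamed tokens."""

    def on_chat_model_start(self, serialized: dict[str, Any], messages: list, **kwargs: Any) -> Any:
        # Fired for chat models; plain LLMs call on_llm_start instead.
        print(f"chat model starting with {len(messages)} message batch(es)")

    def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
        # Fired for both chat and non-chat models when streaming is enabled.
        print(token, end="", flush=True)

An instance can be passed at invocation time via config={"callbacks": [LoggingHandler()]}.
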
langchain_core/callbacks/manager.py
CHANGED

@@ -92,7 +92,7 @@ def trace_as_chain_group(
         metadata (dict[str, Any], optional): The metadata to apply to all runs.
             Defaults to None.

-    .. note
+    .. note::
        Must have ``LANGCHAIN_TRACING_V2`` env var set to true to see the trace in
        LangSmith.

@@ -179,7 +179,7 @@ async def atrace_as_chain_group(
     Yields:
         The async callback manager for the chain group.

-    .. note
+    .. note::
        Must have ``LANGCHAIN_TRACING_V2`` env var set to true to see the trace in
        LangSmith.

langchain_core/callbacks/usage.py
CHANGED

@@ -32,7 +32,7 @@ class UsageMetadataCallbackHandler(BaseCallbackHandler):
            result_2 = llm_2.invoke("Hello", config={"callbacks": [callback]})
            callback.usage_metadata

-        .. code-block::
+        .. code-block::

            {'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
                'output_tokens': 10,

@@ -119,7 +119,7 @@ def get_usage_metadata_callback(
            llm_2.invoke("Hello")
            print(cb.usage_metadata)

-        .. code-block::
+        .. code-block::

            {'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
                'output_tokens': 10,

langchain_core/chat_history.py
CHANGED

@@ -65,33 +65,43 @@ class BaseChatMessageHistory(ABC):

     .. code-block:: python

+        import json
+        import os
+        from langchain_core.messages import messages_from_dict, message_to_dict
+
+
         class FileChatMessageHistory(BaseChatMessageHistory):
             storage_path: str
             session_id: str

             @property
-            def messages(self):
-
-
-
-
-
-
-
+            def messages(self) -> list[BaseMessage]:
+                try:
+                    with open(
+                        os.path.join(self.storage_path, self.session_id),
+                        "r",
+                        encoding="utf-8",
+                    ) as f:
+                        messages_data = json.load(f)
+                    return messages_from_dict(messages_data)
+                except FileNotFoundError:
+                    return []

             def add_messages(self, messages: Sequence[BaseMessage]) -> None:
                 all_messages = list(self.messages) # Existing messages
                 all_messages.extend(messages) # Add new messages

                 serialized = [message_to_dict(message) for message in all_messages]
-
-
-                with open(
-                    json.dump(
-
-            def clear(self):
-
-
+                file_path = os.path.join(self.storage_path, self.session_id)
+                os.makedirs(os.path.dirname(file_path), exist_ok=True)
+                with open(file_path, "w", encoding="utf-8") as f:
+                    json.dump(serialized, f)
+
+            def clear(self) -> None:
+                file_path = os.path.join(self.storage_path, self.session_id)
+                os.makedirs(os.path.dirname(file_path), exist_ok=True)
+                with open(file_path, "w", encoding="utf-8") as f:
+                    json.dump([], f)

     """

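For comparison, the same BaseChatMessageHistory interface exercised with the in-memory implementation that ships with langchain-core; the message contents are illustrative.

from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.messages import AIMessage, HumanMessage

# Append, read back, and clear messages through the documented interface.
history = InMemoryChatMessageHistory()
history.add_messages([HumanMessage("hi!"), AIMessage("hello, how can I help?")])
print(history.messages)  # [HumanMessage(...), AIMessage(...)]
history.clear()
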
langchain_core/indexing/api.py
CHANGED

@@ -56,7 +56,7 @@ def _warn_about_sha1() -> None:
         "that map to the same fingerprint. If this matters in your "
         "threat model, switch to a stronger algorithm such "
         "as 'blake2b', 'sha256', or 'sha512' by specifying "
-        " `key_encoder` parameter in the
+        " `key_encoder` parameter in the `index` or `aindex` function. ",
         category=UserWarning,
         stacklevel=2,
     )

@@ -296,7 +296,11 @@ def index(
     For the time being, documents are indexed using their hashes, and users
     are not able to specify the uid of the document.

-
+    .. versionchanged:: 0.3.25
+        Added ``scoped_full`` cleanup mode.
+
+    .. important::
+
        * In full mode, the loader should be returning
          the entire dataset, and not just a subset of the dataset.
          Otherwise, the auto_cleanup will remove documents that it is not

@@ -309,7 +313,7 @@ def index(
          chunks, and we index them using a batch size of 5, we'll have 3 batches
          all with the same source id. In general, to avoid doing too much
          redundant work select as big a batch size as possible.
-       * The
+       * The ``scoped_full`` mode is suitable if determining an appropriate batch size
          is challenging or if your data loader cannot return the entire dataset at
          once. This mode keeps track of source IDs in memory, which should be fine
          for most use cases. If your dataset is large (10M+ docs), you will likely

@@ -378,10 +382,6 @@ def index(
         TypeError: If ``vectorstore`` is not a VectorStore or a DocumentIndex.
         AssertionError: If ``source_id`` is None when cleanup mode is incremental.
             (should be unreachable code).
-
-    .. version_modified:: 0.3.25
-
-    * Added `scoped_full` cleanup mode.
     """
     # Behavior is deprecated, but we keep it for backwards compatibility.
     # # Warn only once per process.

@@ -636,26 +636,30 @@ async def aindex(
         documents were deleted, which documents should be skipped.

     For the time being, documents are indexed using their hashes, and users
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    are not able to specify the uid of the document.
+
+    .. versionchanged:: 0.3.25
+        Added ``scoped_full`` cleanup mode.
+
+    .. important::
+
+       * In full mode, the loader should be returning
+         the entire dataset, and not just a subset of the dataset.
+         Otherwise, the auto_cleanup will remove documents that it is not
+         supposed to.
+       * In incremental mode, if documents associated with a particular
+         source id appear across different batches, the indexing API
+         will do some redundant work. This will still result in the
+         correct end state of the index, but will unfortunately not be
+         100% efficient. For example, if a given document is split into 15
+         chunks, and we index them using a batch size of 5, we'll have 3 batches
+         all with the same source id. In general, to avoid doing too much
+         redundant work select as big a batch size as possible.
+       * The ``scoped_full`` mode is suitable if determining an appropriate batch size
+         is challenging or if your data loader cannot return the entire dataset at
+         once. This mode keeps track of source IDs in memory, which should be fine
+         for most use cases. If your dataset is large (10M+ docs), you will likely
+         need to parallelize the indexing process regardless.

     Args:
         docs_source: Data loader or iterable of documents to index.

@@ -720,10 +724,6 @@ async def aindex(
         TypeError: If ``vector_store`` is not a VectorStore or DocumentIndex.
         AssertionError: If ``source_id_key`` is None when cleanup mode is
             incremental or ``scoped_full`` (should be unreachable).
-
-    .. version_modified:: 0.3.25
-
-    * Added `scoped_full` cleanup mode.
     """
     # Behavior is deprecated, but we keep it for backwards compatibility.
     # # Warn only once per process.

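A small, self-contained sketch of the index API these docstrings document, using in-memory components and the scoped_full cleanup mode; the namespace, embedding size, and document contents are illustrative.

from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.indexing import InMemoryRecordManager, index
from langchain_core.vectorstores import InMemoryVectorStore

# In-memory record manager and vector store so the sketch runs without external services.
record_manager = InMemoryRecordManager(namespace="demo/docs")
record_manager.create_schema()
vector_store = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=16))

docs = [
    Document(page_content="hello", metadata={"source": "a.txt"}),
    Document(page_content="world", metadata={"source": "b.txt"}),
]

# scoped_full: only sources seen in this run are considered for cleanup.
result = index(docs, record_manager, vector_store, cleanup="scoped_full", source_id_key="source")
print(result)  # counts of added / updated / skipped / deleted documents
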
langchain_core/language_models/chat_models.py
CHANGED

@@ -471,7 +471,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         **kwargs: Any,
     ) -> Iterator[BaseMessageChunk]:
         if not self._should_stream(async_api=False, **{**kwargs, "stream": True}):
-            #
+            # Model doesn't implement streaming, so use default implementation
             yield cast(
                 "BaseMessageChunk",
                 self.invoke(input, config=config, stop=stop, **kwargs),

@@ -720,7 +720,9 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             ls_params["ls_stop"] = stop

         # model
-        if
+        if "model" in kwargs and isinstance(kwargs["model"], str):
+            ls_params["ls_model_name"] = kwargs["model"]
+        elif hasattr(self, "model") and isinstance(self.model, str):
             ls_params["ls_model_name"] = self.model
         elif hasattr(self, "model_name") and isinstance(self.model_name, str):
             ls_params["ls_model_name"] = self.model_name

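The change above makes a call-time model kwarg take precedence over the instance's model / model_name attributes when populating ls_model_name for tracing. A hypothetical standalone sketch of that precedence; this helper is illustrative and not part of the library.

from typing import Any, Optional


def resolve_model_name(model_obj: Any, kwargs: dict[str, Any]) -> Optional[str]:
    """Mirror the precedence used when filling ``ls_model_name``: call-time
    ``model`` kwarg first, then ``model``/``model_name`` attributes."""
    if isinstance(kwargs.get("model"), str):
        return kwargs["model"]
    if isinstance(getattr(model_obj, "model", None), str):
        return model_obj.model
    if isinstance(getattr(model_obj, "model_name", None), str):
        return model_obj.model_name
    return None
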
langchain_core/language_models/fake_chat_models.py
CHANGED

@@ -19,7 +19,7 @@ from langchain_core.runnables import RunnableConfig


 class FakeMessagesListChatModel(BaseChatModel):
-    """Fake ChatModel for testing purposes."""
+    """Fake ``ChatModel`` for testing purposes."""

     responses: list[BaseMessage]
     """List of responses to **cycle** through in order."""

@@ -212,10 +212,11 @@ class GenericFakeChatModel(BaseChatModel):
     """Generic fake chat model that can be used to test the chat model interface.

     * Chat model should be usable in both sync and async tests
-    * Invokes on_llm_new_token to allow for testing of callback related code for new
+    * Invokes ``on_llm_new_token`` to allow for testing of callback related code for new
       tokens.
     * Includes logic to break messages into message chunk to facilitate testing of
       streaming.
+
     """

     messages: Iterator[Union[AIMessage, str]]

@@ -230,6 +231,7 @@ class GenericFakeChatModel(BaseChatModel):
     .. warning::
         Streaming is not implemented yet. We should try to implement it in the future by
         delegating to invoke and then breaking the resulting output into message chunks.
+
     """

     @override

@@ -351,6 +353,7 @@ class ParrotFakeChatModel(BaseChatModel):
     """Generic fake chat model that can be used to test the chat model interface.

     * Chat model should be usable in both sync and async tests
+
     """

     @override

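As a usage sketch of the fake model documented above; the canned replies are illustrative.

from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
from langchain_core.messages import AIMessage

# Replays the supplied messages in order; handy for callback and streaming tests.
model = GenericFakeChatModel(
    messages=iter([AIMessage(content="hello world"), AIMessage(content="streamed reply")])
)
print(model.invoke("hi").content)  # "hello world"

# Streaming breaks the next canned message into chunks and fires on_llm_new_token.
for chunk in model.stream("hi"):
    print(chunk.content, end="|")
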
langchain_core/language_models/llms.py
CHANGED

@@ -357,7 +357,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
             ls_params["ls_stop"] = stop

         # model
-        if
+        if "model" in kwargs and isinstance(kwargs["model"], str):
+            ls_params["ls_model_name"] = kwargs["model"]
+        elif hasattr(self, "model") and isinstance(self.model, str):
             ls_params["ls_model_name"] = self.model
         elif hasattr(self, "model_name") and isinstance(self.model_name, str):
             ls_params["ls_model_name"] = self.model_name

langchain_core/load/serializable.py
CHANGED

@@ -111,7 +111,7 @@ class Serializable(BaseModel, ABC):

     # Remove default BaseModel init docstring.
     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        """""" # noqa: D419
+        """""" # noqa: D419 # Intentional blank docstring
         super().__init__(*args, **kwargs)

     @classmethod

langchain_core/messages/ai.py
CHANGED

@@ -45,7 +45,6 @@ class InputTokenDetails(TypedDict, total=False):
     Does *not* need to sum to full input token count. Does *not* need to have all keys.

     Example:
-
         .. code-block:: python

             {

@@ -72,6 +71,7 @@ class InputTokenDetails(TypedDict, total=False):

     Since there was a cache hit, the tokens were read from the cache. More precisely,
     the model state given these tokens was read from the cache.
+
     """


@@ -81,7 +81,6 @@ class OutputTokenDetails(TypedDict, total=False):
     Does *not* need to sum to full output token count. Does *not* need to have all keys.

     Example:
-
         .. code-block:: python

             {

@@ -100,6 +99,7 @@ class OutputTokenDetails(TypedDict, total=False):

     Tokens generated by the model in a chain of thought process (i.e. by OpenAI's o1
     models) that are not returned as part of model output.
+
     """


@@ -109,7 +109,6 @@ class UsageMetadata(TypedDict):
     This is a standard representation of token usage that is consistent across models.

     Example:
-
         .. code-block:: python

             {

@@ -148,6 +147,7 @@ class UsageMetadata(TypedDict):
     """Breakdown of output token counts.

     Does *not* need to sum to full output token count. Does *not* need to have all keys.
+
     """


@@ -159,12 +159,14 @@ class AIMessage(BaseMessage):
     This message represents the output of the model and consists of both
     the raw output as returned by the model together standardized fields
     (e.g., tool calls, usage metadata) added by the LangChain framework.
+
     """

     example: bool = False
     """Use to denote that a message is part of an example conversation.

     At the moment, this is ignored by most models. Usage is discouraged.
+
     """

     tool_calls: list[ToolCall] = []

@@ -175,15 +177,18 @@ class AIMessage(BaseMessage):
     """If provided, usage metadata for a message, such as token counts.

     This is a standard representation of token usage that is consistent across models.
+
     """

     type: Literal["ai"] = "ai"
-    """The type of the message (used for deserialization). Defaults to
+    """The type of the message (used for deserialization). Defaults to ``'ai'``."""

     def __init__(
-        self,
+        self,
+        content: Union[str, list[Union[str, dict]]],
+        **kwargs: Any,
     ) -> None:
-        """
+        """Initialize ``AIMessage``.

         Args:
             content: The content of the message.

@@ -254,6 +259,7 @@ class AIMessage(BaseMessage):

         Returns:
             A pretty representation of the message.
+
         """
         base = super().pretty_repr(html=html)
         lines = []

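A brief sketch constructing an AIMessage with the fields documented above; the values are illustrative.

from langchain_core.messages import AIMessage

# Content is the first positional argument; usage_metadata follows the
# standardized UsageMetadata shape shown earlier in this file.
msg = AIMessage(
    content="The answer is 42.",
    usage_metadata={"input_tokens": 10, "output_tokens": 8, "total_tokens": 18},
)
print(msg.type)            # 'ai'
print(msg.usage_metadata)  # standardized token counts
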

@@ -293,7 +299,10 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
     # non-chunk variant.
     type: Literal["AIMessageChunk"] = "AIMessageChunk"  # type: ignore[assignment]
     """The type of the message (used for deserialization).
-
+
+    Defaults to ``AIMessageChunk``.
+
+    """

     tool_call_chunks: list[ToolCallChunk] = []
     """If provided, tool call chunks associated with the message."""

@@ -311,7 +320,10 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
         """Initialize tool calls from tool call chunks.

         Returns:
-
+            The values with tool calls initialized.
+
+        Raises:
+            ValueError: If the tool call chunks are malformed.
         """
         if not self.tool_call_chunks:
             if self.tool_calls:

@@ -522,9 +534,9 @@ def add_usage(
 def subtract_usage(
     left: Optional[UsageMetadata], right: Optional[UsageMetadata]
 ) -> UsageMetadata:
-    """Recursively subtract two UsageMetadata objects.
+    """Recursively subtract two ``UsageMetadata`` objects.

-    Token counts cannot be negative so the actual operation is max(left - right, 0)
+    Token counts cannot be negative so the actual operation is ``max(left - right, 0)``.

     Example:
         .. code-block:: python