langchain-core 0.3.75__py3-none-any.whl → 0.3.76__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (116)
  1. langchain_core/_api/beta_decorator.py +17 -40
  2. langchain_core/_api/deprecation.py +19 -6
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/beta/runnables/context.py +1 -2
  7. langchain_core/callbacks/base.py +11 -4
  8. langchain_core/callbacks/manager.py +81 -69
  9. langchain_core/callbacks/usage.py +4 -2
  10. langchain_core/chat_history.py +4 -6
  11. langchain_core/document_loaders/base.py +34 -9
  12. langchain_core/document_loaders/langsmith.py +3 -0
  13. langchain_core/documents/base.py +35 -10
  14. langchain_core/documents/transformers.py +4 -2
  15. langchain_core/embeddings/fake.py +8 -5
  16. langchain_core/env.py +2 -3
  17. langchain_core/example_selectors/base.py +12 -0
  18. langchain_core/exceptions.py +7 -0
  19. langchain_core/globals.py +17 -28
  20. langchain_core/indexing/api.py +56 -44
  21. langchain_core/indexing/base.py +5 -8
  22. langchain_core/indexing/in_memory.py +23 -3
  23. langchain_core/language_models/__init__.py +3 -2
  24. langchain_core/language_models/base.py +31 -20
  25. langchain_core/language_models/chat_models.py +94 -25
  26. langchain_core/language_models/fake_chat_models.py +5 -7
  27. langchain_core/language_models/llms.py +49 -17
  28. langchain_core/load/dump.py +2 -3
  29. langchain_core/load/load.py +15 -1
  30. langchain_core/load/serializable.py +38 -43
  31. langchain_core/memory.py +7 -3
  32. langchain_core/messages/ai.py +36 -19
  33. langchain_core/messages/base.py +13 -6
  34. langchain_core/messages/content_blocks.py +23 -2
  35. langchain_core/messages/human.py +2 -6
  36. langchain_core/messages/system.py +2 -6
  37. langchain_core/messages/tool.py +33 -13
  38. langchain_core/messages/utils.py +182 -72
  39. langchain_core/output_parsers/base.py +5 -2
  40. langchain_core/output_parsers/json.py +4 -4
  41. langchain_core/output_parsers/list.py +7 -22
  42. langchain_core/output_parsers/openai_functions.py +3 -0
  43. langchain_core/output_parsers/openai_tools.py +6 -1
  44. langchain_core/output_parsers/pydantic.py +4 -0
  45. langchain_core/output_parsers/string.py +5 -1
  46. langchain_core/output_parsers/xml.py +19 -19
  47. langchain_core/outputs/chat_generation.py +18 -7
  48. langchain_core/outputs/generation.py +14 -3
  49. langchain_core/outputs/llm_result.py +8 -1
  50. langchain_core/prompt_values.py +10 -4
  51. langchain_core/prompts/base.py +4 -9
  52. langchain_core/prompts/chat.py +87 -58
  53. langchain_core/prompts/dict.py +16 -8
  54. langchain_core/prompts/few_shot.py +9 -11
  55. langchain_core/prompts/few_shot_with_templates.py +5 -1
  56. langchain_core/prompts/image.py +12 -5
  57. langchain_core/prompts/message.py +5 -6
  58. langchain_core/prompts/pipeline.py +13 -8
  59. langchain_core/prompts/prompt.py +22 -8
  60. langchain_core/prompts/string.py +18 -10
  61. langchain_core/prompts/structured.py +7 -2
  62. langchain_core/rate_limiters.py +2 -2
  63. langchain_core/retrievers.py +7 -6
  64. langchain_core/runnables/base.py +402 -183
  65. langchain_core/runnables/branch.py +14 -19
  66. langchain_core/runnables/config.py +9 -15
  67. langchain_core/runnables/configurable.py +34 -19
  68. langchain_core/runnables/fallbacks.py +20 -13
  69. langchain_core/runnables/graph.py +44 -37
  70. langchain_core/runnables/graph_ascii.py +40 -17
  71. langchain_core/runnables/graph_mermaid.py +27 -15
  72. langchain_core/runnables/graph_png.py +27 -31
  73. langchain_core/runnables/history.py +55 -58
  74. langchain_core/runnables/passthrough.py +44 -21
  75. langchain_core/runnables/retry.py +9 -5
  76. langchain_core/runnables/router.py +9 -8
  77. langchain_core/runnables/schema.py +2 -0
  78. langchain_core/runnables/utils.py +51 -89
  79. langchain_core/stores.py +13 -25
  80. langchain_core/sys_info.py +9 -8
  81. langchain_core/tools/base.py +30 -23
  82. langchain_core/tools/convert.py +24 -13
  83. langchain_core/tools/simple.py +35 -3
  84. langchain_core/tools/structured.py +25 -2
  85. langchain_core/tracers/base.py +2 -2
  86. langchain_core/tracers/context.py +5 -1
  87. langchain_core/tracers/core.py +109 -39
  88. langchain_core/tracers/evaluation.py +22 -26
  89. langchain_core/tracers/event_stream.py +40 -27
  90. langchain_core/tracers/langchain.py +12 -3
  91. langchain_core/tracers/langchain_v1.py +10 -2
  92. langchain_core/tracers/log_stream.py +56 -17
  93. langchain_core/tracers/root_listeners.py +4 -20
  94. langchain_core/tracers/run_collector.py +6 -16
  95. langchain_core/tracers/schemas.py +5 -1
  96. langchain_core/utils/aiter.py +14 -6
  97. langchain_core/utils/env.py +3 -0
  98. langchain_core/utils/function_calling.py +37 -20
  99. langchain_core/utils/interactive_env.py +6 -2
  100. langchain_core/utils/iter.py +11 -3
  101. langchain_core/utils/json.py +5 -2
  102. langchain_core/utils/json_schema.py +15 -5
  103. langchain_core/utils/loading.py +5 -1
  104. langchain_core/utils/mustache.py +24 -15
  105. langchain_core/utils/pydantic.py +32 -4
  106. langchain_core/utils/utils.py +24 -8
  107. langchain_core/vectorstores/base.py +7 -20
  108. langchain_core/vectorstores/in_memory.py +18 -12
  109. langchain_core/vectorstores/utils.py +18 -12
  110. langchain_core/version.py +1 -1
  111. langchain_core-0.3.76.dist-info/METADATA +77 -0
  112. langchain_core-0.3.76.dist-info/RECORD +174 -0
  113. langchain_core-0.3.75.dist-info/METADATA +0 -106
  114. langchain_core-0.3.75.dist-info/RECORD +0 -174
  115. {langchain_core-0.3.75.dist-info → langchain_core-0.3.76.dist-info}/WHEEL +0 -0
  116. {langchain_core-0.3.75.dist-info → langchain_core-0.3.76.dist-info}/entry_points.txt +0 -0
langchain_core/indexing/api.py

@@ -185,6 +185,9 @@ def _get_document_with_hash(
         When changing the key encoder, you must change the
         index as well to avoid duplicated documents in the cache.
 
+    Raises:
+        ValueError: If the metadata cannot be serialized using json.
+
     Returns:
         Document with a unique identifier based on the hash of the content and metadata.
     """
@@ -291,21 +294,21 @@ def index(
     documents were deleted, which documents should be skipped.
 
     For the time being, documents are indexed using their hashes, and users
-        are not able to specify the uid of the document.
+    are not able to specify the uid of the document.
 
     Important:
-        * In full mode, the loader should be returning
-          the entire dataset, and not just a subset of the dataset.
-          Otherwise, the auto_cleanup will remove documents that it is not
-          supposed to.
-        * In incremental mode, if documents associated with a particular
-          source id appear across different batches, the indexing API
-          will do some redundant work. This will still result in the
-          correct end state of the index, but will unfortunately not be
-          100% efficient. For example, if a given document is split into 15
-          chunks, and we index them using a batch size of 5, we'll have 3 batches
-          all with the same source id. In general, to avoid doing too much
-          redundant work select as big a batch size as possible.
+    * In full mode, the loader should be returning
+      the entire dataset, and not just a subset of the dataset.
+      Otherwise, the auto_cleanup will remove documents that it is not
+      supposed to.
+    * In incremental mode, if documents associated with a particular
+      source id appear across different batches, the indexing API
+      will do some redundant work. This will still result in the
+      correct end state of the index, but will unfortunately not be
+      100% efficient. For example, if a given document is split into 15
+      chunks, and we index them using a batch size of 5, we'll have 3 batches
+      all with the same source id. In general, to avoid doing too much
+      redundant work select as big a batch size as possible.
    * The `scoped_full` mode is suitable if determining an appropriate batch size
      is challenging or if your data loader cannot return the entire dataset at
      once. This mode keeps track of source IDs in memory, which should be fine
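
The full-mode caveat above is easy to trip over. A hedged, self-contained sketch (all names are placeholders) of how passing only a subset in full mode deletes the missing documents:

    from langchain_core.documents import Document
    from langchain_core.embeddings import DeterministicFakeEmbedding
    from langchain_core.indexing import InMemoryRecordManager, index
    from langchain_core.vectorstores import InMemoryVectorStore

    store = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=8))
    manager = InMemoryRecordManager(namespace="demo")
    manager.create_schema()

    dataset = [Document(page_content="a"), Document(page_content="b")]
    index(dataset, manager, store, cleanup="full")          # indexes both documents
    result = index(dataset[:1], manager, store, cleanup="full")
    print(result["num_deleted"])  # 1 -- "b" is auto-cleaned because the loader
                                  # returned only a subset of the dataset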
@@ -315,23 +318,22 @@ def index(
     Args:
         docs_source: Data loader or iterable of documents to index.
         record_manager: Timestamped set to keep track of which documents were
-                        updated.
+            updated.
         vector_store: VectorStore or DocumentIndex to index the documents into.
         batch_size: Batch size to use when indexing. Default is 100.
         cleanup: How to handle clean up of documents. Default is None.
+
            - incremental: Cleans up all documents that haven't been updated AND
-                          that are associated with source ids that were seen
-                          during indexing.
-                          Clean up is done continuously during indexing helping
-                          to minimize the probability of users seeing duplicated
-                          content.
+              that are associated with source ids that were seen during indexing.
+              Clean up is done continuously during indexing helping to minimize the
+              probability of users seeing duplicated content.
            - full: Delete all documents that have not been returned by the loader
-                   during this run of indexing.
-                   Clean up runs after all documents have been indexed.
-                   This means that users may see duplicated content during indexing.
+              during this run of indexing.
+              Clean up runs after all documents have been indexed.
+              This means that users may see duplicated content during indexing.
            - scoped_full: Similar to Full, but only deletes all documents
-                          that haven't been updated AND that are associated with
-                          source ids that were seen during indexing.
+              that haven't been updated AND that are associated with
+              source ids that were seen during indexing.
            - None: Do not delete any documents.
         source_id_key: Optional key that helps identify the original source
             of the document. Default is None.
@@ -358,10 +360,9 @@ def index(
         When changing the key encoder, you must change the
         index as well to avoid duplicated documents in the cache.
         upsert_kwargs: Additional keyword arguments to pass to the add_documents
-                       method of the VectorStore or the upsert method of the
-                       DocumentIndex. For example, you can use this to
-                       specify a custom vector_field:
-                       upsert_kwargs={"vector_field": "embedding"}
+            method of the VectorStore or the upsert method of the DocumentIndex.
+            For example, you can use this to specify a custom vector_field:
+            upsert_kwargs={"vector_field": "embedding"}
 
    .. versionadded:: 0.3.10
 
    Returns:
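
A hedged usage sketch of these arguments (placeholder names throughout; upsert_kwargs values are vector-store specific, so the example only hints at them in a comment):

    from langchain_core.documents import Document
    from langchain_core.embeddings import DeterministicFakeEmbedding
    from langchain_core.indexing import InMemoryRecordManager, index
    from langchain_core.vectorstores import InMemoryVectorStore

    store = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=8))
    manager = InMemoryRecordManager(namespace="my_docs")
    manager.create_schema()

    docs = [Document(page_content="hello", metadata={"source": "a.txt"})]
    result = index(
        docs,
        manager,
        store,
        cleanup="incremental",   # incremental/scoped_full require source_id_key
        source_id_key="source",
        # upsert_kwargs={"vector_field": "embedding"},  # provider-specific
    )
    print(result)  # {'num_added': 1, 'num_updated': 0, 'num_skipped': 0, 'num_deleted': 0}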
@@ -374,6 +375,9 @@ def index(
         ValueError: If vectorstore does not have
             "delete" and "add_documents" required methods.
         ValueError: If source_id_key is not None, but is not a string or callable.
+        TypeError: If ``vectorstore`` is not a VectorStore or a DocumentIndex.
+        AssertionError: If ``source_id`` is None when cleanup mode is incremental.
+            (should be unreachable code).
 
    .. version_modified:: 0.3.25
 
@@ -656,22 +660,22 @@ async def aindex(
     Args:
         docs_source: Data loader or iterable of documents to index.
         record_manager: Timestamped set to keep track of which documents were
-                        updated.
+            updated.
         vector_store: VectorStore or DocumentIndex to index the documents into.
         batch_size: Batch size to use when indexing. Default is 100.
         cleanup: How to handle clean up of documents. Default is None.
+
            - incremental: Cleans up all documents that haven't been updated AND
-                          that are associated with source ids that were seen
-                          during indexing.
-                          Clean up is done continuously during indexing helping
-                          to minimize the probability of users seeing duplicated
-                          content.
-           - full: Delete all documents that haven to been returned by the loader.
-                   Clean up runs after all documents have been indexed.
-                   This means that users may see duplicated content during indexing.
+              that are associated with source ids that were seen during indexing.
+              Clean up is done continuously during indexing helping to minimize the
+              probability of users seeing duplicated content.
+           - full: Delete all documents that have not been returned by the loader
+              during this run of indexing.
+              Clean up runs after all documents have been indexed.
+              This means that users may see duplicated content during indexing.
            - scoped_full: Similar to Full, but only deletes all documents
-                          that haven't been updated AND that are associated with
-                          source ids that were seen during indexing.
+              that haven't been updated AND that are associated with
+              source ids that were seen during indexing.
            - None: Do not delete any documents.
         source_id_key: Optional key that helps identify the original source
             of the document. Default is None.
@@ -680,6 +684,12 @@ async def aindex(
         force_update: Force update documents even if they are present in the
             record manager. Useful if you are re-indexing with updated embeddings.
             Default is False.
+        key_encoder: Hashing algorithm to use for hashing the document content and
+            metadata. Default is "sha1".
+            Other options include "blake2b", "sha256", and "sha512".
+
+            .. versionadded:: 0.3.66
+
        key_encoder: Hashing algorithm to use for hashing the document.
            If not provided, a default encoder using SHA-1 will be used.
            SHA-1 is not collision-resistant, and a motivated attacker
@@ -691,11 +701,10 @@ async def aindex(
 
         When changing the key encoder, you must change the
         index as well to avoid duplicated documents in the cache.
-        upsert_kwargs: Additional keyword arguments to pass to the aadd_documents
-                       method of the VectorStore or the aupsert method of the
-                       DocumentIndex. For example, you can use this to
-                       specify a custom vector_field:
-                       upsert_kwargs={"vector_field": "embedding"}
+        upsert_kwargs: Additional keyword arguments to pass to the add_documents
+            method of the VectorStore or the upsert method of the DocumentIndex.
+            For example, you can use this to specify a custom vector_field:
+            upsert_kwargs={"vector_field": "embedding"}
 
    .. versionadded:: 0.3.10
 
    Returns:
@@ -708,6 +717,9 @@ async def aindex(
         ValueError: If vectorstore does not have
             "adelete" and "aadd_documents" required methods.
         ValueError: If source_id_key is not None, but is not a string or callable.
+        TypeError: If ``vector_store`` is not a VectorStore or DocumentIndex.
+        AssertionError: If ``source_id_key`` is None when cleanup mode is
+            incremental or ``scoped_full`` (should be unreachable).
 
    .. version_modified:: 0.3.25
 
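The async variant mirrors the sync API. A hedged sketch (placeholder names), relying on the default async wrappers that VectorStore provides:

    import asyncio

    from langchain_core.documents import Document
    from langchain_core.embeddings import DeterministicFakeEmbedding
    from langchain_core.indexing import InMemoryRecordManager, aindex
    from langchain_core.vectorstores import InMemoryVectorStore

    async def main() -> None:
        store = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=8))
        manager = InMemoryRecordManager(namespace="demo")
        await manager.acreate_schema()
        docs = [Document(page_content="hello", metadata={"source": "a.txt"})]
        result = await aindex(
            docs, manager, store, cleanup="incremental", source_id_key="source"
        )
        print(result)

    asyncio.run(main())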
langchain_core/indexing/base.py

@@ -7,6 +7,8 @@ import time
 from abc import ABC, abstractmethod
 from typing import TYPE_CHECKING, Any, Optional, TypedDict
 
+from typing_extensions import override
+
 from langchain_core._api import beta
 from langchain_core.retrievers import BaseRetriever
 from langchain_core.runnables import run_in_executor
@@ -254,14 +256,14 @@ class InMemoryRecordManager(RecordManager):
         """In-memory schema creation is simply ensuring the structure is initialized."""
 
     async def acreate_schema(self) -> None:
-        """Async in-memory schema creation is simply ensuring the structure is initialized."""  # noqa: E501
+        """In-memory schema creation is simply ensuring the structure is initialized."""
 
+    @override
     def get_time(self) -> float:
-        """Get the current server time as a high resolution timestamp!"""
         return time.time()
 
+    @override
     async def aget_time(self) -> float:
-        """Async get the current server time as a high resolution timestamp!"""
         return self.get_time()
 
     def update(
@@ -322,11 +324,6 @@ class InMemoryRecordManager(RecordManager):
             raise an error.
             This is meant to help prevent time-drift issues since
             time may not be monotonically increasing!
-
-        Raises:
-            ValueError: If the length of keys doesn't match the length of group
-                ids.
-            ValueError: If time_at_least is in the future.
         """
         self.update(keys, group_ids=group_ids, time_at_least=time_at_least)
 
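The newly applied @override decorator is purely a static-analysis aid. A minimal sketch of what it buys (illustrative classes, not from the diff):

    import time

    from typing_extensions import override

    class RecordManagerLike:
        def get_time(self) -> float:
            raise NotImplementedError

    class InMemory(RecordManagerLike):
        @override  # a type checker errors here if the base class has no
        def get_time(self) -> float:  # matching method to override
            return time.time()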
langchain_core/indexing/in_memory.py

@@ -32,7 +32,17 @@ class InMemoryDocumentIndex(DocumentIndex):
 
     @override
     def upsert(self, items: Sequence[Document], /, **kwargs: Any) -> UpsertResponse:
-        """Upsert items into the index."""
+        """Upsert documents into the index.
+
+        Args:
+            items: Sequence of documents to add to the index.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            A response object that contains the list of IDs that were
+            successfully added or updated in the index and the list of IDs that
+            failed to be added or updated.
+        """
         ok_ids = []
 
         for item in items:
@@ -51,7 +61,18 @@ class InMemoryDocumentIndex(DocumentIndex):
 
     @override
     def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
-        """Delete by ID."""
+        """Delete by IDs.
+
+        Args:
+            ids: List of ids to delete.
+
+        Raises:
+            ValueError: If ids is None.
+
+        Returns:
+            A response object that contains the list of IDs that were successfully
+            deleted and the list of IDs that failed to be deleted.
+        """
         if ids is None:
             msg = "IDs must be provided for deletion"
             raise ValueError(msg)
@@ -69,7 +90,6 @@ class InMemoryDocumentIndex(DocumentIndex):
 
     @override
     def get(self, ids: Sequence[str], /, **kwargs: Any) -> list[Document]:
-        """Get by ids."""
        return [self.store[id_] for id_ in ids if id_ in self.store]
 
    @override
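
A quick round trip against this (beta) in-memory index, as a hedged sketch:

    from langchain_core.documents import Document
    from langchain_core.indexing.in_memory import InMemoryDocumentIndex

    idx = InMemoryDocumentIndex()
    up = idx.upsert([Document(id="1", page_content="hello")])
    print(up["succeeded"])            # ['1']
    print(idx.get(["1", "missing"]))  # only the stored document comes back
    print(idx.delete(["1"]))          # response lists succeeded/failed IDs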
langchain_core/language_models/__init__.py

@@ -26,7 +26,8 @@ https://python.langchain.com/docs/how_to/custom_chat_model/
 **LLMs**
 
 Language models that takes a string as input and returns a string.
-These are traditionally older models (newer models generally are Chat Models, see below).
+These are traditionally older models (newer models generally are Chat Models,
+see below).
 
 Although the underlying models are string in, string out, the LangChain wrappers
 also allow these models to take messages as input. This gives them the same interface
@@ -39,7 +40,7 @@ Please see the following guide for more information on how to implement a custom
 https://python.langchain.com/docs/how_to/custom_llm/
 
 
-"""  # noqa: E501
+"""
 
 from typing import TYPE_CHECKING
 
langchain_core/language_models/base.py

@@ -22,19 +22,31 @@ from typing_extensions import TypeAlias, TypedDict, override
 from langchain_core._api import deprecated
 from langchain_core.caches import BaseCache
 from langchain_core.callbacks import Callbacks
+from langchain_core.globals import get_verbose
 from langchain_core.messages import (
     AnyMessage,
     BaseMessage,
     MessageLikeRepresentation,
     get_buffer_string,
 )
-from langchain_core.prompt_values import PromptValue
+from langchain_core.prompt_values import (
+    ChatPromptValueConcrete,
+    PromptValue,
+    StringPromptValue,
+)
 from langchain_core.runnables import Runnable, RunnableSerializable
 from langchain_core.utils import get_pydantic_field_names
 
 if TYPE_CHECKING:
     from langchain_core.outputs import LLMResult
 
+try:
+    from transformers import GPT2TokenizerFast  # type: ignore[import-not-found]
+
+    _HAS_TRANSFORMERS = True
+except ImportError:
+    _HAS_TRANSFORMERS = False
+
 
 class LangSmithParams(TypedDict, total=False):
     """LangSmith parameters for tracing."""
@@ -59,16 +71,20 @@ def get_tokenizer() -> Any:
 
     This function is cached to avoid re-loading the tokenizer every time it is called.
 
+    Raises:
+        ImportError: If the transformers package is not installed.
+
+    Returns:
+        The GPT-2 tokenizer instance.
+
     """
-    try:
-        from transformers import GPT2TokenizerFast  # type: ignore[import-not-found]
-    except ImportError as e:
+    if not _HAS_TRANSFORMERS:
         msg = (
             "Could not import transformers python package. "
             "This is needed in order to calculate get_token_ids. "
             "Please install it with `pip install transformers`."
         )
-        raise ImportError(msg) from e
+        raise ImportError(msg)
     # create a GPT-2 tokenizer instance
     return GPT2TokenizerFast.from_pretrained("gpt2")
 
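The release replaces a per-call import with a module-level guard, so the optional dependency is probed once at import time. A hedged sketch of the general pattern (the @lru_cache is an assumption about how the documented caching is done; the diff itself does not show it):

    from functools import lru_cache

    try:
        from transformers import GPT2TokenizerFast  # probed once, at import time

        _HAS_TRANSFORMERS = True
    except ImportError:
        _HAS_TRANSFORMERS = False

    @lru_cache(maxsize=None)  # "cached to avoid re-loading the tokenizer"
    def get_tokenizer():
        if not _HAS_TRANSFORMERS:
            raise ImportError("Please install it with `pip install transformers`.")
        return GPT2TokenizerFast.from_pretrained("gpt2")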
@@ -89,8 +105,6 @@ LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", BaseMessage, str)
 
 
 def _get_verbosity() -> bool:
-    from langchain_core.globals import get_verbose
-
     return get_verbose()
 
 
@@ -152,11 +166,6 @@ class BaseLanguageModel(
     @override
     def InputType(self) -> TypeAlias:
         """Get the input type for this runnable."""
-        from langchain_core.prompt_values import (
-            ChatPromptValueConcrete,
-            StringPromptValue,
-        )
-
         # This is a version of LanguageModelInput which replaces the abstract
         # base class BaseMessage with a union of its subclasses, which makes
         # for a much better schema.
@@ -180,10 +189,11 @@
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-               type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
             prompts: List of PromptValues. A PromptValue is an object that can be
@@ -216,10 +226,11 @@
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-               type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
             prompts: List of PromptValues. A PromptValue is an object that can be
langchain_core/language_models/chat_models.py

@@ -46,6 +46,10 @@ from langchain_core.messages import (
     message_chunk_to_message,
 )
 from langchain_core.messages.ai import _LC_ID_PREFIX
+from langchain_core.output_parsers.openai_tools import (
+    JsonOutputKeyToolsParser,
+    PydanticToolsParser,
+)
 from langchain_core.outputs import (
     ChatGeneration,
     ChatGenerationChunk,
@@ -148,8 +152,6 @@ def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]:
                     "type": key,
                     key: block[key],
                 }
-        else:
-            pass
         messages_to_trace.append(message_to_trace)
 
     return messages_to_trace
@@ -161,6 +163,9 @@ def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
     Args:
         stream: Iterator of ``ChatGenerationChunk``.
 
+    Raises:
+        ValueError: If no generations are found in the stream.
+
     Returns:
         ChatResult: Chat result.
 
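A short usage sketch of generate_from_stream (chunk contents are made up for illustration):

    from langchain_core.language_models.chat_models import generate_from_stream
    from langchain_core.messages import AIMessageChunk
    from langchain_core.outputs import ChatGenerationChunk

    chunks = iter(
        [
            ChatGenerationChunk(message=AIMessageChunk(content="Hello, ")),
            ChatGenerationChunk(message=AIMessageChunk(content="world!")),
        ]
    )
    result = generate_from_stream(chunks)
    print(result.generations[0].message.content)  # "Hello, world!"
    # An empty iterator raises the ValueError documented above.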
@@ -328,7 +333,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
     @model_validator(mode="before")
     @classmethod
     def raise_deprecation(cls, values: dict) -> Any:
-        """Raise deprecation warning if ``callback_manager`` is used.
+        """Emit deprecation warning if ``callback_manager`` is used.
 
         Args:
             values (Dict): Values to validate.
@@ -336,9 +341,6 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         Returns:
             Dict: Validated values.
 
-        Raises:
-            DeprecationWarning: If ``callback_manager`` is used.
-
         """
         if values.get("callback_manager") is not None:
             warnings.warn(
@@ -769,10 +771,11 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-               type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
             messages: List of list of messages.
@@ -884,10 +887,11 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-               type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
             messages: List of list of messages.
@@ -1185,7 +1189,17 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        """Top Level call."""
+        """Generate the result.
+
+        Args:
+            messages: The messages to generate from.
+            stop: Optional list of stop words to use when generating.
+            run_manager: Optional callback manager to use for this call.
+            **kwargs: Additional keyword arguments to pass to the model.
+
+        Returns:
+            The chat result.
+        """
 
     async def _agenerate(
         self,
@@ -1194,7 +1208,17 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        """Top Level call."""
+        """Generate the result.
+
+        Args:
+            messages: The messages to generate from.
+            stop: Optional list of stop words to use when generating.
+            run_manager: Optional callback manager to use for this call.
+            **kwargs: Additional keyword arguments to pass to the model.
+
+        Returns:
+            The chat result.
+        """
         return await run_in_executor(
             None,
             self._generate,
@@ -1211,6 +1235,17 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
+        """Stream the output of the model.
+
+        Args:
+            messages: The messages to generate from.
+            stop: Optional list of stop words to use when generating.
+            run_manager: Optional callback manager to use for this call.
+            **kwargs: Additional keyword arguments to pass to the model.
+
+        Yields:
+            The chat generation chunks.
+        """
         raise NotImplementedError
 
     async def _astream(
@@ -1220,6 +1255,17 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
+        """Stream the output of the model.
+
+        Args:
+            messages: The messages to generate from.
+            stop: Optional list of stop words to use when generating.
+            run_manager: Optional callback manager to use for this call.
+            **kwargs: Additional keyword arguments to pass to the model.
+
+        Yields:
+            The chat generation chunks.
+        """
         iterator = await run_in_executor(
             None,
             self._stream,
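
These are the hooks a custom chat model implements, per the custom chat model guide linked earlier. A minimal, self-contained sketch (EchoChatModel is a hypothetical toy model, not from the diff):

    from collections.abc import Iterator
    from typing import Any, Optional

    from langchain_core.callbacks import CallbackManagerForLLMRun
    from langchain_core.language_models import BaseChatModel
    from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
    from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult

    class EchoChatModel(BaseChatModel):
        """Toy model that echoes the last message back."""

        @property
        def _llm_type(self) -> str:
            return "echo"

        def _generate(
            self,
            messages: list[BaseMessage],
            stop: Optional[list[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
        ) -> ChatResult:
            text = str(messages[-1].content)
            return ChatResult(
                generations=[ChatGeneration(message=AIMessage(content=text))]
            )

        def _stream(
            self,
            messages: list[BaseMessage],
            stop: Optional[list[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
        ) -> Iterator[ChatGenerationChunk]:
            # Yield one chunk per whitespace-separated token.
            for token in str(messages[-1].content).split():
                yield ChatGenerationChunk(message=AIMessageChunk(content=token + " "))

    print(EchoChatModel().invoke("hello world").content)  # "hello world"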
@@ -1259,6 +1305,9 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         **kwargs: Arbitrary additional keyword arguments. These are usually passed
             to the model provider API call.
 
+        Raises:
+            ValueError: If the generation is not a chat generation.
+
         Returns:
             The model output message.
 
@@ -1320,6 +1369,9 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         **kwargs: Arbitrary additional keyword arguments. These are usually passed
             to the model provider API call.
 
+        Raises:
+            ValueError: If the output is not a string.
+
         Returns:
             The predicted output string.
 
@@ -1434,6 +1486,11 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         will be caught and returned as well. The final output is always a dict
         with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``.
 
+        Raises:
+            ValueError: If there are any unsupported ``kwargs``.
+            NotImplementedError: If the model does not implement
+                ``with_structured_output()``.
+
         Returns:
             A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.
 
@@ -1453,15 +1510,20 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
 
             from pydantic import BaseModel
 
+
             class AnswerWithJustification(BaseModel):
                 '''An answer to the user question along with justification for the answer.'''
+
                 answer: str
                 justification: str
 
+
             llm = ChatModel(model="model-name", temperature=0)
             structured_llm = llm.with_structured_output(AnswerWithJustification)
 
-            structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
+            structured_llm.invoke(
+                "What weighs more a pound of bricks or a pound of feathers"
+            )
 
             # -> AnswerWithJustification(
             #     answer='They weigh the same',
@@ -1473,15 +1535,22 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
 
             from pydantic import BaseModel
 
+
             class AnswerWithJustification(BaseModel):
                 '''An answer to the user question along with justification for the answer.'''
+
                 answer: str
                 justification: str
 
+
             llm = ChatModel(model="model-name", temperature=0)
-            structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)
+            structured_llm = llm.with_structured_output(
+                AnswerWithJustification, include_raw=True
+            )
 
-            structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
+            structured_llm.invoke(
+                "What weighs more a pound of bricks or a pound of feathers"
+            )
             # -> {
             #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
             #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
@@ -1494,16 +1563,21 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             from pydantic import BaseModel
             from langchain_core.utils.function_calling import convert_to_openai_tool
 
+
             class AnswerWithJustification(BaseModel):
                 '''An answer to the user question along with justification for the answer.'''
+
                 answer: str
                 justification: str
 
+
             dict_schema = convert_to_openai_tool(AnswerWithJustification)
             llm = ChatModel(model="model-name", temperature=0)
             structured_llm = llm.with_structured_output(dict_schema)
 
-            structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
+            structured_llm.invoke(
+                "What weighs more a pound of bricks or a pound of feathers"
+            )
             # -> {
             #     'answer': 'They weigh the same',
             #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
@@ -1520,11 +1594,6 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         msg = f"Received unsupported arguments {kwargs}"
         raise ValueError(msg)
 
-    from langchain_core.output_parsers.openai_tools import (
-        JsonOutputKeyToolsParser,
-        PydanticToolsParser,
-    )
-
     if type(self).bind_tools is BaseChatModel.bind_tools:
         msg = "with_structured_output is not implemented for this model."
         raise NotImplementedError(msg)
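
The `type(self).bind_tools is BaseChatModel.bind_tools` guard above detects whether a subclass overrides bind_tools: looking the attribute up on the class skips instance binding, so an un-overridden method is the same function object as the base class's. A minimal sketch of the idiom (illustrative classes, not from the diff):

    class Base:
        def bind_tools(self) -> None: ...

    class Child(Base):
        def bind_tools(self) -> None: ...

    class Plain(Base):
        pass

    print(type(Child()).bind_tools is Base.bind_tools)  # False -> overridden
    print(type(Plain()).bind_tools is Base.bind_tools)  # True  -> not overridden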