langchain-core 1.0.0a2__py3-none-any.whl → 1.0.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain-core might be problematic.
Files changed (130)
  1. langchain_core/_api/beta_decorator.py +17 -40
  2. langchain_core/_api/deprecation.py +20 -7
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/callbacks/base.py +28 -15
  7. langchain_core/callbacks/manager.py +81 -69
  8. langchain_core/callbacks/usage.py +4 -2
  9. langchain_core/chat_history.py +29 -21
  10. langchain_core/document_loaders/base.py +34 -9
  11. langchain_core/document_loaders/langsmith.py +3 -0
  12. langchain_core/documents/base.py +35 -10
  13. langchain_core/documents/transformers.py +4 -2
  14. langchain_core/embeddings/fake.py +8 -5
  15. langchain_core/env.py +2 -3
  16. langchain_core/example_selectors/base.py +12 -0
  17. langchain_core/exceptions.py +7 -0
  18. langchain_core/globals.py +17 -28
  19. langchain_core/indexing/api.py +57 -45
  20. langchain_core/indexing/base.py +5 -8
  21. langchain_core/indexing/in_memory.py +23 -3
  22. langchain_core/language_models/__init__.py +6 -2
  23. langchain_core/language_models/_utils.py +27 -5
  24. langchain_core/language_models/base.py +33 -21
  25. langchain_core/language_models/chat_models.py +104 -31
  26. langchain_core/language_models/fake_chat_models.py +5 -7
  27. langchain_core/language_models/llms.py +54 -20
  28. langchain_core/load/dump.py +2 -3
  29. langchain_core/load/load.py +15 -1
  30. langchain_core/load/serializable.py +38 -43
  31. langchain_core/memory.py +7 -3
  32. langchain_core/messages/__init__.py +1 -1
  33. langchain_core/messages/ai.py +41 -34
  34. langchain_core/messages/base.py +20 -7
  35. langchain_core/messages/block_translators/__init__.py +10 -8
  36. langchain_core/messages/block_translators/anthropic.py +11 -7
  37. langchain_core/messages/block_translators/bedrock.py +76 -27
  38. langchain_core/messages/block_translators/bedrock_converse.py +259 -23
  39. langchain_core/messages/block_translators/google_genai.py +3 -1
  40. langchain_core/messages/block_translators/google_vertexai.py +3 -1
  41. langchain_core/messages/block_translators/groq.py +3 -1
  42. langchain_core/messages/block_translators/ollama.py +3 -1
  43. langchain_core/messages/block_translators/openai.py +50 -20
  44. langchain_core/messages/content.py +23 -13
  45. langchain_core/messages/human.py +2 -13
  46. langchain_core/messages/system.py +2 -6
  47. langchain_core/messages/tool.py +34 -14
  48. langchain_core/messages/utils.py +186 -73
  49. langchain_core/output_parsers/base.py +5 -2
  50. langchain_core/output_parsers/json.py +4 -4
  51. langchain_core/output_parsers/list.py +7 -22
  52. langchain_core/output_parsers/openai_functions.py +3 -0
  53. langchain_core/output_parsers/openai_tools.py +6 -1
  54. langchain_core/output_parsers/pydantic.py +4 -0
  55. langchain_core/output_parsers/string.py +5 -1
  56. langchain_core/output_parsers/xml.py +19 -19
  57. langchain_core/outputs/chat_generation.py +18 -7
  58. langchain_core/outputs/generation.py +14 -3
  59. langchain_core/outputs/llm_result.py +8 -1
  60. langchain_core/prompt_values.py +10 -4
  61. langchain_core/prompts/base.py +6 -11
  62. langchain_core/prompts/chat.py +88 -60
  63. langchain_core/prompts/dict.py +16 -8
  64. langchain_core/prompts/few_shot.py +9 -11
  65. langchain_core/prompts/few_shot_with_templates.py +5 -1
  66. langchain_core/prompts/image.py +12 -5
  67. langchain_core/prompts/loading.py +2 -2
  68. langchain_core/prompts/message.py +5 -6
  69. langchain_core/prompts/pipeline.py +13 -8
  70. langchain_core/prompts/prompt.py +22 -8
  71. langchain_core/prompts/string.py +18 -10
  72. langchain_core/prompts/structured.py +7 -2
  73. langchain_core/rate_limiters.py +2 -2
  74. langchain_core/retrievers.py +7 -6
  75. langchain_core/runnables/base.py +387 -246
  76. langchain_core/runnables/branch.py +11 -28
  77. langchain_core/runnables/config.py +20 -17
  78. langchain_core/runnables/configurable.py +34 -19
  79. langchain_core/runnables/fallbacks.py +20 -13
  80. langchain_core/runnables/graph.py +48 -38
  81. langchain_core/runnables/graph_ascii.py +40 -17
  82. langchain_core/runnables/graph_mermaid.py +54 -25
  83. langchain_core/runnables/graph_png.py +27 -31
  84. langchain_core/runnables/history.py +55 -58
  85. langchain_core/runnables/passthrough.py +44 -21
  86. langchain_core/runnables/retry.py +44 -23
  87. langchain_core/runnables/router.py +9 -8
  88. langchain_core/runnables/schema.py +9 -0
  89. langchain_core/runnables/utils.py +53 -90
  90. langchain_core/stores.py +19 -31
  91. langchain_core/sys_info.py +9 -8
  92. langchain_core/tools/base.py +36 -27
  93. langchain_core/tools/convert.py +25 -14
  94. langchain_core/tools/simple.py +36 -8
  95. langchain_core/tools/structured.py +25 -12
  96. langchain_core/tracers/base.py +2 -2
  97. langchain_core/tracers/context.py +5 -1
  98. langchain_core/tracers/core.py +110 -46
  99. langchain_core/tracers/evaluation.py +22 -26
  100. langchain_core/tracers/event_stream.py +97 -42
  101. langchain_core/tracers/langchain.py +12 -3
  102. langchain_core/tracers/langchain_v1.py +10 -2
  103. langchain_core/tracers/log_stream.py +56 -17
  104. langchain_core/tracers/root_listeners.py +4 -20
  105. langchain_core/tracers/run_collector.py +6 -16
  106. langchain_core/tracers/schemas.py +5 -1
  107. langchain_core/utils/aiter.py +14 -6
  108. langchain_core/utils/env.py +3 -0
  109. langchain_core/utils/function_calling.py +46 -20
  110. langchain_core/utils/interactive_env.py +6 -2
  111. langchain_core/utils/iter.py +12 -5
  112. langchain_core/utils/json.py +12 -3
  113. langchain_core/utils/json_schema.py +156 -40
  114. langchain_core/utils/loading.py +5 -1
  115. langchain_core/utils/mustache.py +25 -16
  116. langchain_core/utils/pydantic.py +38 -9
  117. langchain_core/utils/utils.py +25 -9
  118. langchain_core/vectorstores/base.py +7 -20
  119. langchain_core/vectorstores/in_memory.py +20 -14
  120. langchain_core/vectorstores/utils.py +18 -12
  121. langchain_core/version.py +1 -1
  122. langchain_core-1.0.0a4.dist-info/METADATA +77 -0
  123. langchain_core-1.0.0a4.dist-info/RECORD +181 -0
  124. langchain_core/beta/__init__.py +0 -1
  125. langchain_core/beta/runnables/__init__.py +0 -1
  126. langchain_core/beta/runnables/context.py +0 -448
  127. langchain_core-1.0.0a2.dist-info/METADATA +0 -106
  128. langchain_core-1.0.0a2.dist-info/RECORD +0 -184
  129. {langchain_core-1.0.0a2.dist-info → langchain_core-1.0.0a4.dist-info}/WHEEL +0 -0
  130. {langchain_core-1.0.0a2.dist-info → langchain_core-1.0.0a4.dist-info}/entry_points.txt +0 -0
@@ -26,7 +26,8 @@ https://python.langchain.com/docs/how_to/custom_chat_model/
 **LLMs**
 
 Language models that takes a string as input and returns a string.
-These are traditionally older models (newer models generally are Chat Models, see below).
+These are traditionally older models (newer models generally are Chat Models,
+see below).
 
 Although the underlying models are string in, string out, the LangChain wrappers
 also allow these models to take messages as input. This gives them the same interface
@@ -39,11 +40,12 @@ Please see the following guide for more information on how to implement a custom
 https://python.langchain.com/docs/how_to/custom_llm/
 
 
-""" # noqa: E501
+"""
 
 from typing import TYPE_CHECKING
 
 from langchain_core._import_utils import import_attr
+from langchain_core.language_models._utils import is_openai_data_block
 
 if TYPE_CHECKING:
     from langchain_core.language_models.base import (
@@ -84,6 +86,7 @@ __all__ = (
     "ParrotFakeChatModel",
     "SimpleChatModel",
     "get_tokenizer",
+    "is_openai_data_block",
 )
 
 _dynamic_imports = {
@@ -103,6 +106,7 @@ _dynamic_imports = {
     "ParrotFakeChatModel": "fake_chat_models",
     "LLM": "llms",
     "BaseLLM": "llms",
+    "is_openai_data_block": "_utils",
 }
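The `_dynamic_imports` entries above feed langchain-core's lazy module loading: a module-level `__getattr__` (PEP 562) looks up the requested name's home submodule in this mapping and imports it on first access via the `import_attr` helper. A minimal sketch of the pattern (the `__getattr__` body below is illustrative, not the package's exact code):

```python
# Sketch of the PEP 562 pattern `_dynamic_imports` feeds. The real package
# resolves names through its `import_attr` helper; this body is illustrative.
from importlib import import_module
from typing import Any

_dynamic_imports = {"is_openai_data_block": "_utils"}  # exported name -> submodule


def __getattr__(name: str) -> Any:
    """Import the submodule holding `name` on first attribute access."""
    if name in _dynamic_imports:
        submodule = import_module(f".{_dynamic_imports[name]}", package=__package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```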
@@ -16,16 +16,34 @@ from langchain_core.messages.content import (
 )
 
 
-def _is_openai_data_block(block: dict) -> bool:
-    """Check if the block contains multimodal data in OpenAI Chat Completions format.
+def is_openai_data_block(
+    block: dict, filter_: Union[Literal["image", "audio", "file"], None] = None
+) -> bool:
+    """Check whether a block contains multimodal data in OpenAI Chat Completions format.
 
     Supports both data and ID-style blocks (e.g. ``'file_data'`` and ``'file_id'``)
 
     If additional keys are present, they are ignored / will not affect outcome as long
     as the required keys are present and valid.
 
+    Args:
+        block: The content block to check.
+        filter_: If provided, only return True for blocks matching this specific type.
+            - "image": Only match image_url blocks
+            - "audio": Only match input_audio blocks
+            - "file": Only match file blocks
+            If None, match any valid OpenAI data block type. Note that this means that
+            if the block has a valid OpenAI data type but the filter_ is set to a
+            different type, this function will return False.
+
+    Returns:
+        True if the block is a valid OpenAI data block and matches the filter_
+        (if provided).
+
     """
    if block.get("type") == "image_url":
+        if filter_ is not None and filter_ != "image":
+            return False
        if (
            (set(block.keys()) <= {"type", "image_url", "detail"})
            and (image_url := block.get("image_url"))
@@ -38,6 +56,8 @@ def _is_openai_data_block(block: dict) -> bool:
            # Ignore `'detail'` since it's optional and specific to OpenAI
 
    elif block.get("type") == "input_audio":
+        if filter_ is not None and filter_ != "audio":
+            return False
        if (audio := block.get("input_audio")) and isinstance(audio, dict):
            audio_data = audio.get("data")
            audio_format = audio.get("format")
@@ -46,6 +66,8 @@ def _is_openai_data_block(block: dict) -> bool:
                return True
 
    elif block.get("type") == "file":
+        if filter_ is not None and filter_ != "file":
+            return False
        if (file := block.get("file")) and isinstance(file, dict):
            file_data = file.get("file_data")
            file_id = file.get("file_id")
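Based on the docstring added above, a quick sketch of how the renamed, now-public `is_openai_data_block` behaves with the new `filter_` argument (block contents are illustrative):

```python
# Illustrative use of the new `filter_` parameter, per the docstring above.
from langchain_core.language_models import is_openai_data_block

audio_block = {
    "type": "input_audio",
    "input_audio": {"data": "<base64>", "format": "wav"},
}

is_openai_data_block(audio_block)                   # True: valid OpenAI audio block
is_openai_data_block(audio_block, filter_="audio")  # True: matches the filter
is_openai_data_block(audio_block, filter_="file")   # False: valid type, wrong filter
```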
@@ -212,10 +234,10 @@ def _normalize_messages(
     }
 
     """
-    from langchain_core.messages.block_translators.langchain_v0 import (
+    from langchain_core.messages.block_translators.langchain_v0 import (  # noqa: PLC0415
        _convert_legacy_v0_content_block_to_v1,
    )
-    from langchain_core.messages.block_translators.openai import (
+    from langchain_core.messages.block_translators.openai import (  # noqa: PLC0415
        _convert_openai_format_to_data_block,
    )
 
@@ -232,7 +254,7 @@ def _normalize_messages(
                isinstance(block, dict)
                and block.get("type") in {"input_audio", "file"}
                # Discriminate between OpenAI/LC format since they share `'type'`
-                and _is_openai_data_block(block)
+                and is_openai_data_block(block)
            ):
                formatted_message = _ensure_message_copy(message, formatted_message)
 
@@ -12,16 +12,18 @@ from typing import (
     Callable,
     Literal,
     Optional,
+    TypeAlias,
     TypeVar,
     Union,
 )
 
 from pydantic import BaseModel, ConfigDict, Field, field_validator
-from typing_extensions import TypeAlias, TypedDict, override
+from typing_extensions import TypedDict, override
 
 from langchain_core._api import deprecated
 from langchain_core.caches import BaseCache
 from langchain_core.callbacks import Callbacks
+from langchain_core.globals import get_verbose
 from langchain_core.messages import (
     AIMessage,
     AnyMessage,
@@ -29,13 +31,24 @@ from langchain_core.messages import (
     MessageLikeRepresentation,
     get_buffer_string,
 )
-from langchain_core.prompt_values import PromptValue
+from langchain_core.prompt_values import (
+    ChatPromptValueConcrete,
+    PromptValue,
+    StringPromptValue,
+)
 from langchain_core.runnables import Runnable, RunnableSerializable
 from langchain_core.utils import get_pydantic_field_names
 
 if TYPE_CHECKING:
     from langchain_core.outputs import LLMResult
 
+try:
+    from transformers import GPT2TokenizerFast  # type: ignore[import-not-found]
+
+    _HAS_TRANSFORMERS = True
+except ImportError:
+    _HAS_TRANSFORMERS = False
+
 
 class LangSmithParams(TypedDict, total=False):
     """LangSmith parameters for tracing."""
@@ -60,16 +73,20 @@ def get_tokenizer() -> Any:
 
     This function is cached to avoid re-loading the tokenizer every time it is called.
 
+    Raises:
+        ImportError: If the transformers package is not installed.
+
+    Returns:
+        The GPT-2 tokenizer instance.
+
     """
-    try:
-        from transformers import GPT2TokenizerFast  # type: ignore[import-not-found]
-    except ImportError as e:
+    if not _HAS_TRANSFORMERS:
        msg = (
            "Could not import transformers python package. "
            "This is needed in order to calculate get_token_ids. "
            "Please install it with `pip install transformers`."
        )
-        raise ImportError(msg) from e
+        raise ImportError(msg)
    # create a GPT-2 tokenizer instance
    return GPT2TokenizerFast.from_pretrained("gpt2")
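Note the behavioral shift: the `transformers` import now happens once at module import time, and `get_tokenizer()` only consults the `_HAS_TRANSFORMERS` flag. A minimal usage sketch, assuming `transformers` is installed:

```python
# Minimal usage sketch; requires `pip install transformers`, otherwise
# get_tokenizer() raises the ImportError documented above.
from langchain_core.language_models import get_tokenizer

tokenizer = get_tokenizer()                  # GPT2TokenizerFast, loaded once and cached
token_ids = tokenizer.encode("hello world")  # standard Hugging Face tokenizer API
print(len(token_ids))
```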
@@ -90,8 +107,6 @@ LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", AIMessage, str)
 
 
 def _get_verbosity() -> bool:
-    from langchain_core.globals import get_verbose
-
    return get_verbose()
 
 
@@ -153,11 +168,6 @@ class BaseLanguageModel(
     @override
     def InputType(self) -> TypeAlias:
         """Get the input type for this runnable."""
-        from langchain_core.prompt_values import (
-            ChatPromptValueConcrete,
-            StringPromptValue,
-        )
-
        # This is a version of LanguageModelInput which replaces the abstract
        # base class BaseMessage with a union of its subclasses, which makes
        # for a much better schema.
@@ -181,10 +191,11 @@ class BaseLanguageModel(
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
             prompts: List of PromptValues. A PromptValue is an object that can be
@@ -217,10 +228,11 @@ class BaseLanguageModel(
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
             prompts: List of PromptValues. A PromptValue is an object that can be
@@ -48,9 +48,12 @@ from langchain_core.messages import (
     message_chunk_to_message,
 )
 from langchain_core.messages.block_translators.openai import (
-    convert_to_openai_data_block,
    convert_to_openai_image_block,
 )
+from langchain_core.output_parsers.openai_tools import (
+    JsonOutputKeyToolsParser,
+    PydanticToolsParser,
+)
 from langchain_core.outputs import (
     ChatGeneration,
     ChatGenerationChunk,
@@ -143,17 +146,20 @@ def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]:
             )
         elif (
             block.get("type") == "file"
-            and is_data_content_block(block)
+            and is_data_content_block(block)  # v0 (image/audio/file) or v1
             and "base64" in block
+            # Backward compat: convert v1 base64 blocks to v0
         ):
            if message_to_trace is message:
                # Shallow copy
                message_to_trace = message.model_copy()
                message_to_trace.content = list(message_to_trace.content)
 
-            message_to_trace.content[idx] = convert_to_openai_data_block(  # type: ignore[index]
-                block
-            )
+            message_to_trace.content[idx] = {  # type: ignore[index]
+                **{k: v for k, v in block.items() if k != "base64"},
+                "data": block["base64"],
+                "source_type": "base64",
+            }
         elif len(block) == 1 and "type" not in block:
            # Tracing assumes all content blocks have a "type" key. Here
            # we add this key if it is missing, and there's an obvious
@@ -167,8 +173,6 @@ def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]:
                    "type": key,
                    key: block[key],
                }
-        else:
-            pass
         messages_to_trace.append(message_to_trace)
 
     return messages_to_trace
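Tracing no longer routes v1 base64 blocks through `convert_to_openai_data_block`; it rewrites them to the v0 shape inline. Applied to a sample block, the replacement dict above does the following (values illustrative):

```python
# What the new inline conversion above does to a v1 base64 file block.
v1_block = {
    "type": "file",
    "base64": "JVBERi0xLjQ...",
    "mime_type": "application/pdf",
}

v0_block = {
    **{k: v for k, v in v1_block.items() if k != "base64"},  # keep type, mime_type
    "data": v1_block["base64"],      # rename key: base64 -> data
    "source_type": "base64",         # mark the encoding explicitly
}
# -> {"type": "file", "mime_type": "application/pdf",
#     "data": "JVBERi0xLjQ...", "source_type": "base64"}
```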
@@ -180,6 +184,9 @@ def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
     Args:
         stream: Iterator of ``ChatGenerationChunk``.
 
+    Raises:
+        ValueError: If no generations are found in the stream.
+
     Returns:
         ChatResult: Chat result.
 
@@ -369,7 +376,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     @model_validator(mode="before")
     @classmethod
     def raise_deprecation(cls, values: dict) -> Any:
-        """Raise deprecation warning if ``callback_manager`` is used.
+        """Emit deprecation warning if ``callback_manager`` is used.
 
         Args:
             values (Dict): Values to validate.
@@ -377,9 +384,6 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
         Returns:
             Dict: Validated values.
 
-        Raises:
-            DeprecationWarning: If ``callback_manager`` is used.
-
         """
        if values.get("callback_manager") is not None:
            warnings.warn(
@@ -813,7 +817,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
             ls_params["ls_stop"] = stop
 
         # model
-        if hasattr(self, "model") and isinstance(self.model, str):
+        if "model" in kwargs and isinstance(kwargs["model"], str):
+            ls_params["ls_model_name"] = kwargs["model"]
+        elif hasattr(self, "model") and isinstance(self.model, str):
            ls_params["ls_model_name"] = self.model
         elif hasattr(self, "model_name") and isinstance(self.model_name, str):
            ls_params["ls_model_name"] = self.model_name
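The `ls_model_name` resolution now checks per-call kwargs before instance attributes. The precedence, restated as a standalone sketch (`resolve_ls_model_name` is a hypothetical helper, not package code):

```python
from typing import Any, Optional


def resolve_ls_model_name(model_obj: Any, kwargs: dict) -> Optional[str]:
    """Sketch of the new precedence; hypothetical helper, not package code."""
    if isinstance(kwargs.get("model"), str):                     # 1. per-call kwarg wins
        return kwargs["model"]
    if isinstance(getattr(model_obj, "model", None), str):       # 2. `model` attribute
        return model_obj.model
    if isinstance(getattr(model_obj, "model_name", None), str):  # 3. `model_name` attribute
        return model_obj.model_name
    return None
```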
@@ -864,10 +870,11 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
             messages: List of list of messages.
@@ -979,10 +986,11 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
         API.
 
         Use this method when you want to:
-            1. take advantage of batched calls,
-            2. need more output from the model than just the top generated value,
-            3. are building chains that are agnostic to the underlying language model
-                type (e.g., pure text completion models vs chat models).
+
+        1. Take advantage of batched calls,
+        2. Need more output from the model than just the top generated value,
+        3. Are building chains that are agnostic to the underlying language model
+           type (e.g., pure text completion models vs chat models).
 
         Args:
             messages: List of list of messages.
@@ -1350,7 +1358,17 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        """Top Level call."""
+        """Generate the result.
+
+        Args:
+            messages: The messages to generate from.
+            stop: Optional list of stop words to use when generating.
+            run_manager: Optional callback manager to use for this call.
+            **kwargs: Additional keyword arguments to pass to the model.
+
+        Returns:
+            The chat result.
+        """
 
     async def _agenerate(
         self,
@@ -1359,7 +1377,17 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        """Top Level call."""
+        """Generate the result.
+
+        Args:
+            messages: The messages to generate from.
+            stop: Optional list of stop words to use when generating.
+            run_manager: Optional callback manager to use for this call.
+            **kwargs: Additional keyword arguments to pass to the model.
+
+        Returns:
+            The chat result.
+        """
        return await run_in_executor(
            None,
            self._generate,
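As the hunk above shows, the default `_agenerate` delegates to the synchronous `_generate` via `run_in_executor`, so a subclass only has to implement `_generate` to get both sync and async entry points. A minimal sketch (class name and echo behavior are hypothetical):

```python
# Minimal sketch: only `_generate` is implemented; the inherited `_agenerate`
# shown above delegates to it on an executor thread.
from typing import Any

from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage
from langchain_core.outputs import ChatGeneration, ChatResult


class EchoChatModel(BaseChatModel):
    @property
    def _llm_type(self) -> str:
        return "echo"

    def _generate(self, messages, stop=None, run_manager=None, **kwargs: Any) -> ChatResult:
        # Echo the last message's content back as the AI reply.
        last = messages[-1].content
        text = last if isinstance(last, str) else str(last)
        return ChatResult(generations=[ChatGeneration(message=AIMessage(content=text))])
```

With that in place, `await EchoChatModel().ainvoke("hi")` works without any async code in the subclass.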
@@ -1376,6 +1404,17 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
+        """Stream the output of the model.
+
+        Args:
+            messages: The messages to generate from.
+            stop: Optional list of stop words to use when generating.
+            run_manager: Optional callback manager to use for this call.
+            **kwargs: Additional keyword arguments to pass to the model.
+
+        Yields:
+            The chat generation chunks.
+        """
        raise NotImplementedError
 
     async def _astream(
@@ -1385,6 +1424,17 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
         self,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
+        """Stream the output of the model.
+
+        Args:
+            messages: The messages to generate from.
+            stop: Optional list of stop words to use when generating.
+            run_manager: Optional callback manager to use for this call.
+            **kwargs: Additional keyword arguments to pass to the model.
+
+        Yields:
+            The chat generation chunks.
+        """
        iterator = await run_in_executor(
            None,
            self._stream,
@@ -1424,6 +1474,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.
 
+        Raises:
+            ValueError: If the generation is not a chat generation.
+
         Returns:
             The model output message.
 
@@ -1485,6 +1538,9 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
             **kwargs: Arbitrary additional keyword arguments. These are usually passed
                 to the model provider API call.
 
+        Raises:
+            ValueError: If the output is not a string.
+
         Returns:
             The predicted output string.
 
@@ -1599,6 +1655,11 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
             will be caught and returned as well. The final output is always a dict
             with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``.
 
+        Raises:
+            ValueError: If there are any unsupported ``kwargs``.
+            NotImplementedError: If the model does not implement
+                ``with_structured_output()``.
+
         Returns:
             A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`.
 
@@ -1618,15 +1679,20 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
 
             from pydantic import BaseModel
 
+
             class AnswerWithJustification(BaseModel):
                 '''An answer to the user question along with justification for the answer.'''
+
                 answer: str
                 justification: str
 
+
             llm = ChatModel(model="model-name", temperature=0)
             structured_llm = llm.with_structured_output(AnswerWithJustification)
 
-            structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
+            structured_llm.invoke(
+                "What weighs more a pound of bricks or a pound of feathers"
+            )
 
             # -> AnswerWithJustification(
             #     answer='They weigh the same',
@@ -1638,15 +1704,22 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
 
             from pydantic import BaseModel
 
+
             class AnswerWithJustification(BaseModel):
                 '''An answer to the user question along with justification for the answer.'''
+
                 answer: str
                 justification: str
 
+
             llm = ChatModel(model="model-name", temperature=0)
-            structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)
+            structured_llm = llm.with_structured_output(
+                AnswerWithJustification, include_raw=True
+            )
 
-            structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
+            structured_llm.invoke(
+                "What weighs more a pound of bricks or a pound of feathers"
+            )
             # -> {
             #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
             #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
@@ -1659,16 +1732,21 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
 
             from pydantic import BaseModel
             from langchain_core.utils.function_calling import convert_to_openai_tool
 
+
             class AnswerWithJustification(BaseModel):
                 '''An answer to the user question along with justification for the answer.'''
+
                 answer: str
                 justification: str
 
+
             dict_schema = convert_to_openai_tool(AnswerWithJustification)
             llm = ChatModel(model="model-name", temperature=0)
             structured_llm = llm.with_structured_output(dict_schema)
 
-            structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
+            structured_llm.invoke(
+                "What weighs more a pound of bricks or a pound of feathers"
+            )
             # -> {
             #     'answer': 'They weigh the same',
             #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
@@ -1685,11 +1763,6 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
             msg = f"Received unsupported arguments {kwargs}"
             raise ValueError(msg)
 
-        from langchain_core.output_parsers.openai_tools import (
-            JsonOutputKeyToolsParser,
-            PydanticToolsParser,
-        )
-
         if type(self).bind_tools is BaseChatModel.bind_tools:
            msg = "with_structured_output is not implemented for this model."
            raise NotImplementedError(msg)
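The `type(self).bind_tools is BaseChatModel.bind_tools` comparison is an override-detection idiom: it checks whether the subclass still inherits the base implementation. The idiom in isolation (classes hypothetical):

```python
# The override-detection idiom used above, in isolation.
class Base:
    def bind_tools(self): ...


class WithTools(Base):
    def bind_tools(self): ...  # overrides the base implementation


class WithoutTools(Base):
    pass  # inherits the base implementation unchanged


print(type(WithTools()).bind_tools is Base.bind_tools)     # False: overridden
print(type(WithoutTools()).bind_tools is Base.bind_tools)  # True: inherited default
```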
@@ -75,12 +75,13 @@ class FakeListChatModel(SimpleChatModel):
     @override
     def _call(
         self,
-        messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        *args: Any,
         **kwargs: Any,
     ) -> str:
-        """First try to lookup in queries, else return 'foo' or 'bar'."""
+        """Return the next response in the list.
+
+        Cycle back to the start if at the end.
+        """
        if self.sleep is not None:
            time.sleep(self.sleep)
        response = self.responses[self.i]
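Per the new docstring, the fake model walks `responses` with an index that wraps around. The index update itself is outside this hunk, so the sketch below is an assumption about the cycling behavior, not the package's exact code:

```python
# Assumed wrap-around indexing per the docstring above.
responses = ["first", "second"]
i = 0
for _ in range(5):
    print(responses[i])           # first, second, first, second, first
    i = (i + 1) % len(responses)  # cycle back to the start at the end
```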
@@ -249,7 +250,6 @@ class GenericFakeChatModel(BaseChatModel):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        """Top Level call."""
        message = next(self.messages)
        message_ = AIMessage(content=message) if isinstance(message, str) else message
        generation = ChatGeneration(message=message_)
@@ -262,7 +262,6 @@ class GenericFakeChatModel(BaseChatModel):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
-        """Stream the output of the model."""
        chat_result = self._generate(
            messages, stop=stop, run_manager=run_manager, **kwargs
        )
@@ -378,7 +377,6 @@ class ParrotFakeChatModel(BaseChatModel):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        """Top Level call."""
        return ChatResult(generations=[ChatGeneration(message=messages[-1])])
 
     @property