langchain-core 1.0.0a8__py3-none-any.whl → 1.0.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain-core might be problematic.
Files changed (142)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +0 -1
  3. langchain_core/_api/beta_decorator.py +17 -20
  4. langchain_core/_api/deprecation.py +30 -35
  5. langchain_core/_import_utils.py +1 -1
  6. langchain_core/agents.py +10 -9
  7. langchain_core/caches.py +46 -56
  8. langchain_core/callbacks/__init__.py +1 -8
  9. langchain_core/callbacks/base.py +232 -243
  10. langchain_core/callbacks/file.py +33 -33
  11. langchain_core/callbacks/manager.py +353 -416
  12. langchain_core/callbacks/stdout.py +21 -22
  13. langchain_core/callbacks/streaming_stdout.py +32 -32
  14. langchain_core/callbacks/usage.py +54 -51
  15. langchain_core/chat_history.py +43 -58
  16. langchain_core/document_loaders/base.py +21 -21
  17. langchain_core/document_loaders/langsmith.py +22 -22
  18. langchain_core/documents/__init__.py +0 -1
  19. langchain_core/documents/base.py +46 -49
  20. langchain_core/documents/transformers.py +28 -29
  21. langchain_core/embeddings/fake.py +50 -54
  22. langchain_core/example_selectors/semantic_similarity.py +4 -6
  23. langchain_core/exceptions.py +7 -8
  24. langchain_core/indexing/api.py +19 -25
  25. langchain_core/indexing/base.py +24 -24
  26. langchain_core/language_models/__init__.py +11 -27
  27. langchain_core/language_models/_utils.py +53 -54
  28. langchain_core/language_models/base.py +30 -24
  29. langchain_core/language_models/chat_models.py +123 -148
  30. langchain_core/language_models/fake_chat_models.py +7 -7
  31. langchain_core/language_models/llms.py +14 -16
  32. langchain_core/load/dump.py +3 -4
  33. langchain_core/load/load.py +7 -16
  34. langchain_core/load/serializable.py +37 -36
  35. langchain_core/messages/__init__.py +1 -16
  36. langchain_core/messages/ai.py +122 -123
  37. langchain_core/messages/base.py +31 -31
  38. langchain_core/messages/block_translators/__init__.py +17 -17
  39. langchain_core/messages/block_translators/anthropic.py +3 -3
  40. langchain_core/messages/block_translators/bedrock_converse.py +3 -3
  41. langchain_core/messages/block_translators/google_genai.py +5 -4
  42. langchain_core/messages/block_translators/google_vertexai.py +4 -32
  43. langchain_core/messages/block_translators/groq.py +117 -21
  44. langchain_core/messages/block_translators/langchain_v0.py +3 -3
  45. langchain_core/messages/block_translators/openai.py +5 -5
  46. langchain_core/messages/chat.py +2 -6
  47. langchain_core/messages/content.py +222 -209
  48. langchain_core/messages/function.py +6 -10
  49. langchain_core/messages/human.py +17 -24
  50. langchain_core/messages/modifier.py +2 -2
  51. langchain_core/messages/system.py +12 -22
  52. langchain_core/messages/tool.py +53 -69
  53. langchain_core/messages/utils.py +399 -417
  54. langchain_core/output_parsers/__init__.py +1 -14
  55. langchain_core/output_parsers/base.py +46 -47
  56. langchain_core/output_parsers/json.py +3 -4
  57. langchain_core/output_parsers/list.py +2 -2
  58. langchain_core/output_parsers/openai_functions.py +46 -44
  59. langchain_core/output_parsers/openai_tools.py +11 -16
  60. langchain_core/output_parsers/pydantic.py +10 -11
  61. langchain_core/output_parsers/string.py +2 -2
  62. langchain_core/output_parsers/transform.py +2 -2
  63. langchain_core/output_parsers/xml.py +1 -1
  64. langchain_core/outputs/__init__.py +1 -1
  65. langchain_core/outputs/chat_generation.py +14 -14
  66. langchain_core/outputs/generation.py +6 -6
  67. langchain_core/outputs/llm_result.py +5 -5
  68. langchain_core/prompt_values.py +11 -11
  69. langchain_core/prompts/__init__.py +3 -23
  70. langchain_core/prompts/base.py +33 -38
  71. langchain_core/prompts/chat.py +222 -229
  72. langchain_core/prompts/dict.py +3 -3
  73. langchain_core/prompts/few_shot.py +76 -83
  74. langchain_core/prompts/few_shot_with_templates.py +7 -9
  75. langchain_core/prompts/image.py +12 -14
  76. langchain_core/prompts/loading.py +1 -1
  77. langchain_core/prompts/message.py +3 -3
  78. langchain_core/prompts/prompt.py +20 -23
  79. langchain_core/prompts/string.py +20 -8
  80. langchain_core/prompts/structured.py +26 -27
  81. langchain_core/rate_limiters.py +50 -58
  82. langchain_core/retrievers.py +41 -182
  83. langchain_core/runnables/base.py +565 -597
  84. langchain_core/runnables/branch.py +8 -8
  85. langchain_core/runnables/config.py +37 -44
  86. langchain_core/runnables/configurable.py +9 -10
  87. langchain_core/runnables/fallbacks.py +9 -9
  88. langchain_core/runnables/graph.py +46 -50
  89. langchain_core/runnables/graph_ascii.py +19 -18
  90. langchain_core/runnables/graph_mermaid.py +20 -31
  91. langchain_core/runnables/graph_png.py +7 -7
  92. langchain_core/runnables/history.py +22 -22
  93. langchain_core/runnables/passthrough.py +11 -11
  94. langchain_core/runnables/retry.py +3 -3
  95. langchain_core/runnables/router.py +2 -2
  96. langchain_core/runnables/schema.py +33 -33
  97. langchain_core/runnables/utils.py +30 -34
  98. langchain_core/stores.py +72 -102
  99. langchain_core/sys_info.py +27 -29
  100. langchain_core/tools/__init__.py +1 -14
  101. langchain_core/tools/base.py +70 -71
  102. langchain_core/tools/convert.py +100 -104
  103. langchain_core/tools/render.py +9 -9
  104. langchain_core/tools/retriever.py +7 -7
  105. langchain_core/tools/simple.py +6 -7
  106. langchain_core/tools/structured.py +18 -24
  107. langchain_core/tracers/__init__.py +1 -9
  108. langchain_core/tracers/base.py +35 -35
  109. langchain_core/tracers/context.py +12 -17
  110. langchain_core/tracers/event_stream.py +3 -3
  111. langchain_core/tracers/langchain.py +8 -8
  112. langchain_core/tracers/log_stream.py +17 -18
  113. langchain_core/tracers/memory_stream.py +3 -3
  114. langchain_core/tracers/root_listeners.py +2 -2
  115. langchain_core/tracers/schemas.py +0 -129
  116. langchain_core/tracers/stdout.py +1 -2
  117. langchain_core/utils/__init__.py +1 -1
  118. langchain_core/utils/aiter.py +32 -32
  119. langchain_core/utils/env.py +5 -5
  120. langchain_core/utils/function_calling.py +59 -154
  121. langchain_core/utils/html.py +4 -4
  122. langchain_core/utils/input.py +3 -3
  123. langchain_core/utils/interactive_env.py +1 -1
  124. langchain_core/utils/iter.py +20 -20
  125. langchain_core/utils/json.py +1 -1
  126. langchain_core/utils/json_schema.py +2 -2
  127. langchain_core/utils/mustache.py +5 -5
  128. langchain_core/utils/pydantic.py +17 -17
  129. langchain_core/utils/strings.py +5 -5
  130. langchain_core/utils/utils.py +25 -28
  131. langchain_core/vectorstores/base.py +55 -87
  132. langchain_core/vectorstores/in_memory.py +83 -85
  133. langchain_core/vectorstores/utils.py +2 -2
  134. langchain_core/version.py +1 -1
  135. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc2.dist-info}/METADATA +23 -11
  136. langchain_core-1.0.0rc2.dist-info/RECORD +172 -0
  137. langchain_core/memory.py +0 -120
  138. langchain_core/pydantic_v1/__init__.py +0 -30
  139. langchain_core/pydantic_v1/dataclasses.py +0 -23
  140. langchain_core/pydantic_v1/main.py +0 -23
  141. langchain_core-1.0.0a8.dist-info/RECORD +0 -176
  142. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc2.dist-info}/WHEEL +0 -0
langchain_core/language_models/chat_models.py
@@ -108,11 +108,11 @@ def _generate_response_from_error(error: BaseException) -> list[ChatGeneration]:
 
 
  def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]:
- """Format messages for tracing in ``on_chat_model_start``.
+ """Format messages for tracing in `on_chat_model_start`.
 
  - Update image content blocks to OpenAI Chat Completions format (backward
  compatibility).
- - Add ``type`` key to content blocks that have a single key.
+ - Add `type` key to content blocks that have a single key.
 
  Args:
  messages: List of messages to format.
@@ -179,13 +179,13 @@ def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
  """Generate from a stream.
 
  Args:
- stream: Iterator of ``ChatGenerationChunk``.
+ stream: Iterator of `ChatGenerationChunk`.
 
  Raises:
  ValueError: If no generations are found in the stream.
 
  Returns:
- ChatResult: Chat result.
+ Chat result.
 
  """
  generation = next(stream, None)
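The helper documented in this hunk can be exercised on its own. A minimal sketch (not part of the diff; the chunk texts are invented) of collapsing a chunk iterator into a single `ChatResult`:

```python
from langchain_core.language_models.chat_models import generate_from_stream
from langchain_core.messages import AIMessageChunk
from langchain_core.outputs import ChatGenerationChunk

# Two hand-built chunks stand in for a model's streamed output.
chunks = iter(
    [
        ChatGenerationChunk(message=AIMessageChunk(content="Hello, ")),
        ChatGenerationChunk(message=AIMessageChunk(content="world!")),
    ]
)

result = generate_from_stream(chunks)  # merges all chunks into one generation
print(result.generations[0].message.content)  # -> "Hello, world!"
```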
@@ -210,10 +210,10 @@ async def agenerate_from_stream(
  """Async generate from a stream.
 
  Args:
- stream: Iterator of ``ChatGenerationChunk``.
+ stream: Iterator of `ChatGenerationChunk`.
 
  Returns:
- ChatResult: Chat result.
+ Chat result.
 
  """
  chunks = [chunk async for chunk in stream]
@@ -240,79 +240,52 @@ def _format_ls_structured_output(ls_structured_output_format: dict | None) -> dict:
 
 
  class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
- """Base class for chat models.
+ r"""Base class for chat models.
 
  Key imperative methods:
  Methods that actually call the underlying model.
 
- +-----------------------+--------------------------------------------------------------+-----------------------------------------------------------+-----------------------------------------------------------------------------------+
- | Method                | Input                                                        | Output                                                    | Description                                                                       |
- +=======================+==============================================================+===========================================================+===================================================================================+
- | `invoke`              | str | list[dict | tuple | BaseMessage] | PromptValue        | BaseMessage                                               | A single chat model call.                                                         |
- +-----------------------+--------------------------------------------------------------+-----------------------------------------------------------+-----------------------------------------------------------------------------------+
- | `ainvoke`             | '''                                                          | BaseMessage                                               | Defaults to running invoke in an async executor.                                  |
- +-----------------------+--------------------------------------------------------------+-----------------------------------------------------------+-----------------------------------------------------------------------------------+
- | `stream`              | '''                                                          | Iterator[BaseMessageChunk]                                | Defaults to yielding output of invoke.                                            |
- +-----------------------+--------------------------------------------------------------+-----------------------------------------------------------+-----------------------------------------------------------------------------------+
- | `astream`             | '''                                                          | AsyncIterator[BaseMessageChunk]                           | Defaults to yielding output of ainvoke.                                           |
- +-----------------------+--------------------------------------------------------------+-----------------------------------------------------------+-----------------------------------------------------------------------------------+
- | `astream_events`      | '''                                                          | AsyncIterator[StreamEvent]                                | Event types: 'on_chat_model_start', 'on_chat_model_stream', 'on_chat_model_end'.  |
- +-----------------------+--------------------------------------------------------------+-----------------------------------------------------------+-----------------------------------------------------------------------------------+
- | `batch`               | list[''']                                                    | list[BaseMessage]                                         | Defaults to running invoke in concurrent threads.                                 |
- +-----------------------+--------------------------------------------------------------+-----------------------------------------------------------+-----------------------------------------------------------------------------------+
- | `abatch`              | list[''']                                                    | list[BaseMessage]                                         | Defaults to running ainvoke in concurrent threads.                                |
- +-----------------------+--------------------------------------------------------------+-----------------------------------------------------------+-----------------------------------------------------------------------------------+
- | `batch_as_completed`  | list[''']                                                    | Iterator[tuple[int, Union[BaseMessage, Exception]]]       | Defaults to running invoke in concurrent threads.                                 |
- +-----------------------+--------------------------------------------------------------+-----------------------------------------------------------+-----------------------------------------------------------------------------------+
- | `abatch_as_completed` | list[''']                                                    | AsyncIterator[tuple[int, Union[BaseMessage, Exception]]]  | Defaults to running ainvoke in concurrent threads.                                |
- +-----------------------+--------------------------------------------------------------+-----------------------------------------------------------+-----------------------------------------------------------------------------------+
-
- This table provides a brief overview of the main imperative methods. Please see the base Runnable reference for full documentation.
+ This table provides a brief overview of the main imperative methods. Please see the base `Runnable` reference for full documentation.
+
+ | Method | Input | Output | Description |
+ | ---------------------- | ------------------------------------------------------------ | ---------------------------------------------------------- | -------------------------------------------------------------------------------- |
+ | `invoke` | `str` \| `list[dict | tuple | BaseMessage]` \| `PromptValue` | `BaseMessage` | A single chat model call. |
+ | `ainvoke` | `'''` | `BaseMessage` | Defaults to running `invoke` in an async executor. |
+ | `stream` | `'''` | `Iterator[BaseMessageChunk]` | Defaults to yielding output of `invoke`. |
+ | `astream` | `'''` | `AsyncIterator[BaseMessageChunk]` | Defaults to yielding output of `ainvoke`. |
+ | `astream_events` | `'''` | `AsyncIterator[StreamEvent]` | Event types: `on_chat_model_start`, `on_chat_model_stream`, `on_chat_model_end`. |
+ | `batch` | `list[''']` | `list[BaseMessage]` | Defaults to running `invoke` in concurrent threads. |
+ | `abatch` | `list[''']` | `list[BaseMessage]` | Defaults to running `ainvoke` in concurrent threads. |
+ | `batch_as_completed` | `list[''']` | `Iterator[tuple[int, Union[BaseMessage, Exception]]]` | Defaults to running `invoke` in concurrent threads. |
+ | `abatch_as_completed` | `list[''']` | `AsyncIterator[tuple[int, Union[BaseMessage, Exception]]]` | Defaults to running `ainvoke` in concurrent threads. |
 
  Key declarative methods:
- Methods for creating another Runnable using the ChatModel.
-
- +-----------------------------+-------------------------------------------------------------------------------------------+
- | Method                      | Description                                                                               |
- +=============================+===========================================================================================+
- | `bind_tools`                | Create ChatModel that can call tools.                                                     |
- +-----------------------------+-------------------------------------------------------------------------------------------+
- | `with_structured_output`    | Create wrapper that structures model output using schema.                                 |
- +-----------------------------+-------------------------------------------------------------------------------------------+
- | `with_retry`                | Create wrapper that retries model calls on failure.                                       |
- +-----------------------------+-------------------------------------------------------------------------------------------+
- | `with_fallbacks`            | Create wrapper that falls back to other models on failure.                                |
- +-----------------------------+-------------------------------------------------------------------------------------------+
- | `configurable_fields`       | Specify init args of the model that can be configured at runtime via the RunnableConfig.  |
- +-----------------------------+-------------------------------------------------------------------------------------------+
- | `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the RunnableConfig.     |
- +-----------------------------+-------------------------------------------------------------------------------------------+
+ Methods for creating another `Runnable` using the chat model.
 
  This table provides a brief overview of the main declarative methods. Please see the reference for each method for full documentation.
 
+ | Method | Description |
+ | ---------------------------- | ------------------------------------------------------------------------------------------ |
+ | `bind_tools` | Create chat model that can call tools. |
+ | `with_structured_output` | Create wrapper that structures model output using schema. |
+ | `with_retry` | Create wrapper that retries model calls on failure. |
+ | `with_fallbacks` | Create wrapper that falls back to other models on failure. |
+ | `configurable_fields` | Specify init args of the model that can be configured at runtime via the `RunnableConfig`. |
+ | `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the `RunnableConfig`. |
+
  Creating custom chat model:
  Custom chat model implementations should inherit from this class.
  Please reference the table below for information about which
  methods and properties are required or optional for implementations.
 
- +----------------------------------+--------------------------------------------------------------------+-------------------+
- | Method/Property                  | Description                                                        | Required/Optional |
- +==================================+====================================================================+===================+
+ | Method/Property | Description | Required |
+ | -------------------------------- | ------------------------------------------------------------------ | ----------------- |
  | `_generate` | Use to generate a chat result from a prompt | Required |
- +----------------------------------+--------------------------------------------------------------------+-------------------+
  | `_llm_type` (property) | Used to uniquely identify the type of the model. Used for logging. | Required |
- +----------------------------------+--------------------------------------------------------------------+-------------------+
  | `_identifying_params` (property) | Represent model parameterization for tracing purposes. | Optional |
- +----------------------------------+--------------------------------------------------------------------+-------------------+
  | `_stream` | Use to implement streaming | Optional |
- +----------------------------------+--------------------------------------------------------------------+-------------------+
  | `_agenerate` | Use to implement a native async method | Optional |
- +----------------------------------+--------------------------------------------------------------------+-------------------+
  | `_astream` | Use to implement async version of `_stream` | Optional |
- +----------------------------------+--------------------------------------------------------------------+-------------------+
-
- Follow the guide for more information on how to implement a custom Chat Model:
- [Guide](https://python.langchain.com/docs/how_to/custom_chat_model/).
 
  """ # noqa: E501
 
@@ -322,39 +295,39 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
  disable_streaming: bool | Literal["tool_calling"] = False
  """Whether to disable streaming for this model.
 
- If streaming is bypassed, then ``stream()``/``astream()``/``astream_events()`` will
- defer to ``invoke()``/``ainvoke()``.
+ If streaming is bypassed, then `stream`/`astream`/`astream_events` will
+ defer to `invoke`/`ainvoke`.
 
- - If True, will always bypass streaming case.
- - If ``'tool_calling'``, will bypass streaming case only when the model is called
- with a ``tools`` keyword argument. In other words, LangChain will automatically
- switch to non-streaming behavior (``invoke()``) only when the tools argument is
- provided. This offers the best of both worlds.
- - If False (default), will always use streaming case if available.
+ - If `True`, will always bypass streaming case.
+ - If `'tool_calling'`, will bypass streaming case only when the model is called
+ with a `tools` keyword argument. In other words, LangChain will automatically
+ switch to non-streaming behavior (`invoke`) only when the tools argument is
+ provided. This offers the best of both worlds.
+ - If `False` (Default), will always use streaming case if available.
 
- The main reason for this flag is that code might be written using ``stream()`` and
+ The main reason for this flag is that code might be written using `stream` and
  a user may want to swap out a given model for another model whose the implementation
  does not properly support streaming.
-
  """
 
  output_version: str | None = Field(
  default_factory=from_env("LC_OUTPUT_VERSION", default=None)
  )
- """Version of ``AIMessage`` output format to store in message content.
+ """Version of `AIMessage` output format to store in message content.
 
- ``AIMessage.content_blocks`` will lazily parse the contents of ``content`` into a
+ `AIMessage.content_blocks` will lazily parse the contents of `content` into a
  standard format. This flag can be used to additionally store the standard format
  in message content, e.g., for serialization purposes.
 
  Supported values:
 
- - ``"v0"``: provider-specific format in content (can lazily-parse with
- ``.content_blocks``)
- - ``"v1"``: standardized format in content (consistent with ``.content_blocks``)
+ - `'v0'`: provider-specific format in content (can lazily-parse with
+ `content_blocks`)
+ - `'v1'`: standardized format in content (consistent with `content_blocks`)
 
- Partner packages (e.g., ``langchain-openai``) can also use this field to roll out
- new content formats in a backward-compatible way.
+ Partner packages (e.g.,
+ [`langchain-openai`](https://pypi.org/project/langchain-openai)) can also use this
+ field to roll out new content formats in a backward-compatible way.
 
  !!! version-added "Added in version 1.0"
 
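A hedged sketch of the two fields documented in this hunk, again with the package's own fake model (the values are invented). `output_version` may be set explicitly or picked up from `LC_OUTPUT_VERSION` through the `from_env` default factory shown above:

```python
import os

from langchain_core.language_models.fake_chat_models import FakeListChatModel

# Explicit: store the standardized "v1" content format in message content.
model = FakeListChatModel(responses=["hi"], output_version="v1")

# Environment default: applies to models constructed after the variable is set.
os.environ["LC_OUTPUT_VERSION"] = "v1"
assert FakeListChatModel(responses=["hi"]).output_version == "v1"

# disable_streaming="tool_calling": stream() defers to invoke() only when a
# `tools` keyword argument is passed.
model = FakeListChatModel(responses=["hi"], disable_streaming="tool_calling")
```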
@@ -373,7 +346,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
  @property
  @override
  def OutputType(self) -> Any:
- """Get the output type for this runnable."""
+ """Get the output type for this `Runnable`."""
  return AnyMessage
 
  def _convert_input(self, model_input: LanguageModelInput) -> PromptValue:
@@ -471,8 +444,10 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
  if "stream" in kwargs:
  return kwargs["stream"]
 
- if getattr(self, "streaming", False):
- return True
+ if "streaming" in self.model_fields_set:
+ streaming_value = getattr(self, "streaming", None)
+ if isinstance(streaming_value, bool):
+ return streaming_value
 
  # Check if any streaming callback handlers have been passed in.
  handlers = run_manager.handlers if run_manager else []
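The replacement logic leans on a pydantic v2 behavior worth spelling out: `model_fields_set` records only fields the caller set explicitly, so a `streaming` attribute left at its class default no longer decides the streaming path by itself. A standalone sketch (the `Cfg` model is invented for illustration):

```python
from pydantic import BaseModel


class Cfg(BaseModel):
    streaming: bool = False


print(Cfg().model_fields_set)  # set(): default value, never explicitly set
print(Cfg(streaming=False).model_fields_set)  # {'streaming'}: set explicitly
```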
@@ -1529,120 +1504,120 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
 
  - an OpenAI function/tool schema,
  - a JSON Schema,
- - a TypedDict class,
+ - a `TypedDict` class,
  - or a Pydantic class.
 
- If ``schema`` is a Pydantic class then the model output will be a
+ If `schema` is a Pydantic class then the model output will be a
  Pydantic instance of that class, and the model-generated fields will be
  validated by the Pydantic class. Otherwise the model output will be a
  dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool`
  for more on how to properly specify types and descriptions of
- schema fields when specifying a Pydantic or TypedDict class.
+ schema fields when specifying a Pydantic or `TypedDict` class.
 
  include_raw:
- If False then only the parsed structured output is returned. If
- an error occurs during model output parsing it will be raised. If True
+ If `False` then only the parsed structured output is returned. If
+ an error occurs during model output parsing it will be raised. If `True`
  then both the raw model response (a BaseMessage) and the parsed model
  response will be returned. If an error occurs during output parsing it
  will be caught and returned as well. The final output is always a dict
- with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``.
+ with keys `'raw'`, `'parsed'`, and `'parsing_error'`.
 
  Raises:
- ValueError: If there are any unsupported ``kwargs``.
+ ValueError: If there are any unsupported `kwargs`.
  NotImplementedError: If the model does not implement
- ``with_structured_output()``.
+ `with_structured_output()`.
 
  Returns:
  A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`.
 
- If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs
- an instance of ``schema`` (i.e., a Pydantic object).
+ If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs
+ an instance of `schema` (i.e., a Pydantic object).
 
- Otherwise, if ``include_raw`` is False then Runnable outputs a dict.
+ Otherwise, if `include_raw` is False then Runnable outputs a dict.
 
- If ``include_raw`` is True, then Runnable outputs a dict with keys:
+ If `include_raw` is True, then Runnable outputs a dict with keys:
 
- - ``'raw'``: BaseMessage
- - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
- - ``'parsing_error'``: Optional[BaseException]
+ - `'raw'`: BaseMessage
+ - `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above.
+ - `'parsing_error'`: BaseException | None
 
  Example: Pydantic schema (include_raw=False):
- .. code-block:: python
+ ```python
+ from pydantic import BaseModel
 
- from pydantic import BaseModel
 
+ class AnswerWithJustification(BaseModel):
+ '''An answer to the user question along with justification for the answer.'''
 
- class AnswerWithJustification(BaseModel):
- '''An answer to the user question along with justification for the answer.'''
+ answer: str
+ justification: str
 
- answer: str
- justification: str
 
+ model = ChatModel(model="model-name", temperature=0)
+ structured_model = model.with_structured_output(AnswerWithJustification)
 
- llm = ChatModel(model="model-name", temperature=0)
- structured_llm = llm.with_structured_output(AnswerWithJustification)
-
- structured_llm.invoke(
- "What weighs more a pound of bricks or a pound of feathers"
- )
+ structured_model.invoke(
+ "What weighs more a pound of bricks or a pound of feathers"
+ )
 
- # -> AnswerWithJustification(
- # answer='They weigh the same',
- # justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
- # )
+ # -> AnswerWithJustification(
+ # answer='They weigh the same',
+ # justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
+ # )
+ ```
 
  Example: Pydantic schema (include_raw=True):
- .. code-block:: python
-
- from pydantic import BaseModel
+ ```python
+ from pydantic import BaseModel
 
 
- class AnswerWithJustification(BaseModel):
- '''An answer to the user question along with justification for the answer.'''
+ class AnswerWithJustification(BaseModel):
+ '''An answer to the user question along with justification for the answer.'''
 
- answer: str
- justification: str
+ answer: str
+ justification: str
 
 
- llm = ChatModel(model="model-name", temperature=0)
- structured_llm = llm.with_structured_output(
- AnswerWithJustification, include_raw=True
- )
+ model = ChatModel(model="model-name", temperature=0)
+ structured_model = model.with_structured_output(
+ AnswerWithJustification, include_raw=True
+ )
 
- structured_llm.invoke(
- "What weighs more a pound of bricks or a pound of feathers"
- )
- # -> {
- # 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
- # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
- # 'parsing_error': None
- # }
+ structured_model.invoke(
+ "What weighs more a pound of bricks or a pound of feathers"
+ )
+ # -> {
+ # 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
+ # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
+ # 'parsing_error': None
+ # }
+ ```
 
  Example: Dict schema (include_raw=False):
- .. code-block:: python
+ ```python
+ from pydantic import BaseModel
+ from langchain_core.utils.function_calling import convert_to_openai_tool
 
- from pydantic import BaseModel
- from langchain_core.utils.function_calling import convert_to_openai_tool
 
+ class AnswerWithJustification(BaseModel):
+ '''An answer to the user question along with justification for the answer.'''
 
- class AnswerWithJustification(BaseModel):
- '''An answer to the user question along with justification for the answer.'''
+ answer: str
+ justification: str
 
- answer: str
- justification: str
 
+ dict_schema = convert_to_openai_tool(AnswerWithJustification)
+ model = ChatModel(model="model-name", temperature=0)
+ structured_model = model.with_structured_output(dict_schema)
 
- dict_schema = convert_to_openai_tool(AnswerWithJustification)
- llm = ChatModel(model="model-name", temperature=0)
- structured_llm = llm.with_structured_output(dict_schema)
-
- structured_llm.invoke(
- "What weighs more a pound of bricks or a pound of feathers"
- )
- # -> {
- # 'answer': 'They weigh the same',
- # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
- # }
+ structured_model.invoke(
+ "What weighs more a pound of bricks or a pound of feathers"
+ )
+ # -> {
+ # 'answer': 'They weigh the same',
+ # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
+ # }
+ ```
 
  !!! warning "Behavior changed in 0.2.26"
  Added support for TypedDict class.
@@ -1692,7 +1667,7 @@ class SimpleChatModel(BaseChatModel):
 
  !!! note
  This implementation is primarily here for backwards compatibility. For new
- implementations, please use ``BaseChatModel`` directly.
+ implementations, please use `BaseChatModel` directly.
 
  """
 
langchain_core/language_models/fake_chat_models.py
@@ -1,4 +1,4 @@
- """Fake ChatModel for testing purposes."""
+ """Fake chat model for testing purposes."""
 
  import asyncio
  import re
@@ -19,7 +19,7 @@ from langchain_core.runnables import RunnableConfig
 
 
  class FakeMessagesListChatModel(BaseChatModel):
- """Fake ``ChatModel`` for testing purposes."""
+ """Fake chat model for testing purposes."""
 
  responses: list[BaseMessage]
  """List of responses to **cycle** through in order."""
@@ -57,7 +57,7 @@ class FakeListChatModelError(Exception):
 
 
  class FakeListChatModel(SimpleChatModel):
- """Fake ChatModel for testing purposes."""
+ """Fake chat model for testing purposes."""
 
  responses: list[str]
  """List of responses to **cycle** through in order."""
@@ -228,10 +228,10 @@ class GenericFakeChatModel(BaseChatModel):
  """Generic fake chat model that can be used to test the chat model interface.
 
  * Chat model should be usable in both sync and async tests
- * Invokes ``on_llm_new_token`` to allow for testing of callback related code for new
- tokens.
+ * Invokes `on_llm_new_token` to allow for testing of callback related code for new
+ tokens.
  * Includes logic to break messages into message chunk to facilitate testing of
- streaming.
+ streaming.
 
  """
 
@@ -242,7 +242,7 @@ class GenericFakeChatModel(BaseChatModel):
  to make the interface more generic if needed.
 
  !!! note
- if you want to pass a list, you can use ``iter`` to convert it to an iterator.
+ if you want to pass a list, you can use `iter` to convert it to an iterator.
 
  !!! warning
  Streaming is not implemented yet. We should try to implement it in the future by
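The `iter` note above in practice, as a minimal sketch (the message texts are invented):

```python
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
from langchain_core.messages import AIMessage

responses = [AIMessage(content="first"), AIMessage(content="second")]
model = GenericFakeChatModel(messages=iter(responses))  # list -> iterator

print(model.invoke("hi").content)  # -> "first"
print(model.invoke("hi again").content)  # -> "second"
```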
langchain_core/language_models/llms.py
@@ -74,8 +74,8 @@ def create_base_retry_decorator(
 
  Args:
  error_types: List of error types to retry on.
- max_retries: Number of retries. Default is 1.
- run_manager: Callback manager for the run. Default is None.
+ max_retries: Number of retries.
+ run_manager: Callback manager for the run.
 
  Returns:
  A retry decorator.
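A hedged sketch of using the decorator this function returns (the wrapped function and error type are invented; per the docstring above, `max_retries` defaults to 1 and `run_manager` to None):

```python
from langchain_core.language_models.llms import create_base_retry_decorator

# Retry TimeoutError for up to 3 attempts, backing off between tries.
retrying = create_base_retry_decorator(error_types=[TimeoutError], max_retries=3)


@retrying
def flaky_call() -> str:
    """Stand-in for a provider API call that may raise TimeoutError."""
    return "ok"


print(flaky_call())
```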
@@ -153,7 +153,7 @@ def get_prompts(
  Args:
  params: Dictionary of parameters.
  prompts: List of prompts.
- cache: Cache object. Default is None.
+ cache: Cache object.
 
  Returns:
  A tuple of existing prompts, llm_string, missing prompt indexes,
@@ -189,7 +189,7 @@
  Args:
  params: Dictionary of parameters.
  prompts: List of prompts.
- cache: Cache object. Default is None.
+ cache: Cache object.
 
  Returns:
  A tuple of existing prompts, llm_string, missing prompt indexes,
@@ -299,7 +299,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
  @property
  @override
  def OutputType(self) -> type[str]:
- """Get the input type for this runnable."""
+ """Get the input type for this `Runnable`."""
  return str
 
  def _convert_input(self, model_input: LanguageModelInput) -> PromptValue:
@@ -835,7 +835,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
  1. Take advantage of batched calls,
  2. Need more output from the model than just the top generated value,
  3. Are building chains that are agnostic to the underlying language model
- type (e.g., pure text completion models vs chat models).
+ type (e.g., pure text completion models vs chat models).
 
  Args:
  prompts: List of string prompts.
@@ -857,8 +857,8 @@
 
  Raises:
  ValueError: If prompts is not a list.
- ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
- ``run_name`` (if provided) does not match the length of prompts.
+ ValueError: If the length of `callbacks`, `tags`, `metadata`, or
+ `run_name` (if provided) does not match the length of prompts.
 
  Returns:
  An LLMResult, which contains a list of candidate Generations for each input
@@ -1105,7 +1105,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
  1. Take advantage of batched calls,
  2. Need more output from the model than just the top generated value,
  3. Are building chains that are agnostic to the underlying language model
- type (e.g., pure text completion models vs chat models).
+ type (e.g., pure text completion models vs chat models).
 
  Args:
  prompts: List of string prompts.
@@ -1126,8 +1126,8 @@
  to the model provider API call.
 
  Raises:
- ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
- ``run_name`` (if provided) does not match the length of prompts.
+ ValueError: If the length of `callbacks`, `tags`, `metadata`, or
+ `run_name` (if provided) does not match the length of prompts.
 
  Returns:
  An LLMResult, which contains a list of candidate Generations for each input
@@ -1340,11 +1340,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
  ValueError: If the file path is not a string or Path object.
 
  Example:
-
- .. code-block:: python
-
- llm.save(file_path="path/llm.yaml")
-
+ ```python
+ llm.save(file_path="path/llm.yaml")
+ ```
  """
  # Convert file to Path object.
  save_path = Path(file_path)
langchain_core/load/dump.py
@@ -42,10 +42,9 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
 
  Args:
  obj: The object to dump.
- pretty: Whether to pretty print the json. If true, the json will be
- indented with 2 spaces (if no indent is provided as part of kwargs).
- Default is False.
- kwargs: Additional arguments to pass to json.dumps
+ pretty: Whether to pretty print the json. If `True`, the json will be
+ indented with 2 spaces (if no indent is provided as part of `kwargs`).
+ **kwargs: Additional arguments to pass to `json.dumps`
 
  Returns:
  A json string representation of the object.
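A quick sketch of the `pretty` flag and the `**kwargs` pass-through documented above (the message content is invented):

```python
from langchain_core.load import dumps
from langchain_core.messages import AIMessage

msg = AIMessage(content="hello")

print(dumps(msg, pretty=True))  # JSON indented with 2 spaces
print(dumps(msg, indent=4))  # explicit indent forwarded to json.dumps
```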