langchain-core 1.0.0a8__py3-none-any.whl → 1.0.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic. See the registry's advisory page for more details.

Files changed (135)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +0 -1
  3. langchain_core/_api/beta_decorator.py +17 -20
  4. langchain_core/_api/deprecation.py +30 -35
  5. langchain_core/_import_utils.py +1 -1
  6. langchain_core/agents.py +7 -6
  7. langchain_core/caches.py +4 -10
  8. langchain_core/callbacks/__init__.py +1 -8
  9. langchain_core/callbacks/base.py +232 -243
  10. langchain_core/callbacks/file.py +33 -33
  11. langchain_core/callbacks/manager.py +353 -416
  12. langchain_core/callbacks/stdout.py +21 -22
  13. langchain_core/callbacks/streaming_stdout.py +32 -32
  14. langchain_core/callbacks/usage.py +54 -51
  15. langchain_core/chat_history.py +42 -57
  16. langchain_core/document_loaders/langsmith.py +21 -21
  17. langchain_core/documents/__init__.py +0 -1
  18. langchain_core/documents/base.py +37 -40
  19. langchain_core/documents/transformers.py +28 -29
  20. langchain_core/embeddings/fake.py +46 -52
  21. langchain_core/exceptions.py +5 -5
  22. langchain_core/indexing/api.py +11 -11
  23. langchain_core/indexing/base.py +24 -24
  24. langchain_core/language_models/__init__.py +0 -2
  25. langchain_core/language_models/_utils.py +51 -53
  26. langchain_core/language_models/base.py +23 -24
  27. langchain_core/language_models/chat_models.py +121 -144
  28. langchain_core/language_models/fake_chat_models.py +5 -5
  29. langchain_core/language_models/llms.py +10 -12
  30. langchain_core/load/dump.py +1 -1
  31. langchain_core/load/load.py +16 -16
  32. langchain_core/load/serializable.py +35 -34
  33. langchain_core/messages/__init__.py +1 -16
  34. langchain_core/messages/ai.py +105 -104
  35. langchain_core/messages/base.py +26 -26
  36. langchain_core/messages/block_translators/__init__.py +17 -17
  37. langchain_core/messages/block_translators/anthropic.py +2 -2
  38. langchain_core/messages/block_translators/bedrock_converse.py +2 -2
  39. langchain_core/messages/block_translators/google_genai.py +2 -2
  40. langchain_core/messages/block_translators/groq.py +117 -21
  41. langchain_core/messages/block_translators/langchain_v0.py +2 -2
  42. langchain_core/messages/block_translators/openai.py +4 -4
  43. langchain_core/messages/chat.py +1 -1
  44. langchain_core/messages/content.py +189 -193
  45. langchain_core/messages/function.py +5 -5
  46. langchain_core/messages/human.py +15 -17
  47. langchain_core/messages/modifier.py +1 -1
  48. langchain_core/messages/system.py +12 -14
  49. langchain_core/messages/tool.py +45 -49
  50. langchain_core/messages/utils.py +384 -396
  51. langchain_core/output_parsers/__init__.py +1 -14
  52. langchain_core/output_parsers/base.py +22 -23
  53. langchain_core/output_parsers/json.py +3 -3
  54. langchain_core/output_parsers/list.py +1 -1
  55. langchain_core/output_parsers/openai_functions.py +46 -44
  56. langchain_core/output_parsers/openai_tools.py +7 -7
  57. langchain_core/output_parsers/pydantic.py +10 -11
  58. langchain_core/output_parsers/string.py +1 -1
  59. langchain_core/output_parsers/transform.py +2 -2
  60. langchain_core/output_parsers/xml.py +1 -1
  61. langchain_core/outputs/__init__.py +1 -1
  62. langchain_core/outputs/chat_generation.py +14 -14
  63. langchain_core/outputs/generation.py +5 -5
  64. langchain_core/outputs/llm_result.py +5 -5
  65. langchain_core/prompt_values.py +5 -5
  66. langchain_core/prompts/__init__.py +3 -23
  67. langchain_core/prompts/base.py +32 -37
  68. langchain_core/prompts/chat.py +216 -222
  69. langchain_core/prompts/dict.py +2 -2
  70. langchain_core/prompts/few_shot.py +76 -83
  71. langchain_core/prompts/few_shot_with_templates.py +6 -8
  72. langchain_core/prompts/image.py +11 -13
  73. langchain_core/prompts/loading.py +1 -1
  74. langchain_core/prompts/message.py +2 -2
  75. langchain_core/prompts/prompt.py +14 -16
  76. langchain_core/prompts/string.py +19 -7
  77. langchain_core/prompts/structured.py +24 -25
  78. langchain_core/rate_limiters.py +36 -38
  79. langchain_core/retrievers.py +41 -182
  80. langchain_core/runnables/base.py +565 -590
  81. langchain_core/runnables/branch.py +7 -7
  82. langchain_core/runnables/config.py +37 -44
  83. langchain_core/runnables/configurable.py +8 -9
  84. langchain_core/runnables/fallbacks.py +8 -8
  85. langchain_core/runnables/graph.py +28 -27
  86. langchain_core/runnables/graph_ascii.py +19 -18
  87. langchain_core/runnables/graph_mermaid.py +20 -31
  88. langchain_core/runnables/graph_png.py +7 -7
  89. langchain_core/runnables/history.py +20 -20
  90. langchain_core/runnables/passthrough.py +8 -8
  91. langchain_core/runnables/retry.py +3 -3
  92. langchain_core/runnables/router.py +1 -1
  93. langchain_core/runnables/schema.py +33 -33
  94. langchain_core/runnables/utils.py +30 -34
  95. langchain_core/stores.py +72 -102
  96. langchain_core/sys_info.py +27 -29
  97. langchain_core/tools/__init__.py +1 -14
  98. langchain_core/tools/base.py +63 -63
  99. langchain_core/tools/convert.py +92 -92
  100. langchain_core/tools/render.py +9 -9
  101. langchain_core/tools/retriever.py +1 -1
  102. langchain_core/tools/simple.py +6 -7
  103. langchain_core/tools/structured.py +17 -18
  104. langchain_core/tracers/__init__.py +1 -9
  105. langchain_core/tracers/base.py +35 -35
  106. langchain_core/tracers/context.py +12 -17
  107. langchain_core/tracers/event_stream.py +3 -3
  108. langchain_core/tracers/langchain.py +8 -8
  109. langchain_core/tracers/log_stream.py +17 -18
  110. langchain_core/tracers/memory_stream.py +2 -2
  111. langchain_core/tracers/schemas.py +0 -129
  112. langchain_core/utils/aiter.py +31 -31
  113. langchain_core/utils/env.py +5 -5
  114. langchain_core/utils/function_calling.py +48 -120
  115. langchain_core/utils/html.py +4 -4
  116. langchain_core/utils/input.py +2 -2
  117. langchain_core/utils/interactive_env.py +1 -1
  118. langchain_core/utils/iter.py +19 -19
  119. langchain_core/utils/json.py +1 -1
  120. langchain_core/utils/json_schema.py +2 -2
  121. langchain_core/utils/mustache.py +5 -5
  122. langchain_core/utils/pydantic.py +17 -17
  123. langchain_core/utils/strings.py +4 -4
  124. langchain_core/utils/utils.py +25 -28
  125. langchain_core/vectorstores/base.py +43 -64
  126. langchain_core/vectorstores/in_memory.py +83 -85
  127. langchain_core/version.py +1 -1
  128. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc1.dist-info}/METADATA +23 -11
  129. langchain_core-1.0.0rc1.dist-info/RECORD +172 -0
  130. langchain_core/memory.py +0 -120
  131. langchain_core/pydantic_v1/__init__.py +0 -30
  132. langchain_core/pydantic_v1/dataclasses.py +0 -23
  133. langchain_core/pydantic_v1/main.py +0 -23
  134. langchain_core-1.0.0a8.dist-info/RECORD +0 -176
  135. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc1.dist-info}/WHEEL +0 -0
@@ -108,11 +108,11 @@ def _generate_response_from_error(error: BaseException) -> list[ChatGeneration]:
108
108
 
109
109
 
110
110
  def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]:
111
- """Format messages for tracing in ``on_chat_model_start``.
111
+ """Format messages for tracing in `on_chat_model_start`.
112
112
 
113
113
  - Update image content blocks to OpenAI Chat Completions format (backward
114
114
  compatibility).
115
- - Add ``type`` key to content blocks that have a single key.
115
+ - Add `type` key to content blocks that have a single key.
116
116
 
117
117
  Args:
118
118
  messages: List of messages to format.
@@ -179,13 +179,13 @@ def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
179
179
  """Generate from a stream.
180
180
 
181
181
  Args:
182
- stream: Iterator of ``ChatGenerationChunk``.
182
+ stream: Iterator of `ChatGenerationChunk`.
183
183
 
184
184
  Raises:
185
185
  ValueError: If no generations are found in the stream.
186
186
 
187
187
  Returns:
188
- ChatResult: Chat result.
188
+ Chat result.
189
189
 
190
190
  """
191
191
  generation = next(stream, None)
@@ -210,10 +210,10 @@ async def agenerate_from_stream(
210
210
  """Async generate from a stream.
211
211
 
212
212
  Args:
213
- stream: Iterator of ``ChatGenerationChunk``.
213
+ stream: Iterator of `ChatGenerationChunk`.
214
214
 
215
215
  Returns:
216
- ChatResult: Chat result.
216
+ Chat result.
217
217
 
218
218
  """
219
219
  chunks = [chunk async for chunk in stream]
@@ -240,78 +240,54 @@ def _format_ls_structured_output(ls_structured_output_format: dict | None) -> di
240
240
 
241
241
 
242
242
  class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
243
- """Base class for chat models.
243
+ r"""Base class for chat models.
244
244
 
245
245
  Key imperative methods:
246
246
  Methods that actually call the underlying model.
247
247
 
248
- +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
249
- | Method | Input | Output | Description |
250
- +===========================+================================================================+=====================================================================+==================================================================================================+
251
- | `invoke` | str | list[dict | tuple | BaseMessage] | PromptValue | BaseMessage | A single chat model call. |
252
- +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
253
- | `ainvoke` | ''' | BaseMessage | Defaults to running invoke in an async executor. |
254
- +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
255
- | `stream` | ''' | Iterator[BaseMessageChunk] | Defaults to yielding output of invoke. |
256
- +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
257
- | `astream` | ''' | AsyncIterator[BaseMessageChunk] | Defaults to yielding output of ainvoke. |
258
- +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
259
- | `astream_events` | ''' | AsyncIterator[StreamEvent] | Event types: 'on_chat_model_start', 'on_chat_model_stream', 'on_chat_model_end'. |
260
- +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
261
- | `batch` | list['''] | list[BaseMessage] | Defaults to running invoke in concurrent threads. |
262
- +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
263
- | `abatch` | list['''] | list[BaseMessage] | Defaults to running ainvoke in concurrent threads. |
264
- +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
265
- | `batch_as_completed` | list['''] | Iterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running invoke in concurrent threads. |
266
- +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
267
- | `abatch_as_completed` | list['''] | AsyncIterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running ainvoke in concurrent threads. |
268
- +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+
269
-
270
- This table provides a brief overview of the main imperative methods. Please see the base Runnable reference for full documentation.
248
+ This table provides a brief overview of the main imperative methods. Please see the base `Runnable` reference for full documentation.
249
+
250
+ | Method | Input | Output | Description |
251
+ | ---------------------- | ------------------------------------------------------------ | ---------------------------------------------------------- | -------------------------------------------------------------------------------- |
252
+ | `invoke` | `str` \| `list[dict | tuple | BaseMessage]` \| `PromptValue` | `BaseMessage` | A single chat model call. |
253
+ | `ainvoke` | `'''` | `BaseMessage` | Defaults to running `invoke` in an async executor. |
254
+ | `stream` | `'''` | `Iterator[BaseMessageChunk]` | Defaults to yielding output of `invoke`. |
255
+ | `astream` | `'''` | `AsyncIterator[BaseMessageChunk]` | Defaults to yielding output of `ainvoke`. |
256
+ | `astream_events` | `'''` | `AsyncIterator[StreamEvent]` | Event types: `on_chat_model_start`, `on_chat_model_stream`, `on_chat_model_end`. |
257
+ | `batch` | `list[''']` | `list[BaseMessage]` | Defaults to running `invoke` in concurrent threads. |
258
+ | `abatch` | `list[''']` | `list[BaseMessage]` | Defaults to running `ainvoke` in concurrent threads. |
259
+ | `batch_as_completed` | `list[''']` | `Iterator[tuple[int, Union[BaseMessage, Exception]]]` | Defaults to running `invoke` in concurrent threads. |
260
+ | `abatch_as_completed` | `list[''']` | `AsyncIterator[tuple[int, Union[BaseMessage, Exception]]]` | Defaults to running `ainvoke` in concurrent threads. |
271
261
 
272
262
  Key declarative methods:
273
- Methods for creating another Runnable using the ChatModel.
274
-
275
- +----------------------------------+-----------------------------------------------------------------------------------------------------------+
276
- | Method | Description |
277
- +==================================+===========================================================================================================+
278
- | `bind_tools` | Create ChatModel that can call tools. |
279
- +----------------------------------+-----------------------------------------------------------------------------------------------------------+
280
- | `with_structured_output` | Create wrapper that structures model output using schema. |
281
- +----------------------------------+-----------------------------------------------------------------------------------------------------------+
282
- | `with_retry` | Create wrapper that retries model calls on failure. |
283
- +----------------------------------+-----------------------------------------------------------------------------------------------------------+
284
- | `with_fallbacks` | Create wrapper that falls back to other models on failure. |
285
- +----------------------------------+-----------------------------------------------------------------------------------------------------------+
286
- | `configurable_fields` | Specify init args of the model that can be configured at runtime via the RunnableConfig. |
287
- +----------------------------------+-----------------------------------------------------------------------------------------------------------+
288
- | `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the RunnableConfig. |
289
- +----------------------------------+-----------------------------------------------------------------------------------------------------------+
263
+ Methods for creating another `Runnable` using the chat model.
290
264
 
291
265
  This table provides a brief overview of the main declarative methods. Please see the reference for each method for full documentation.
292
266
 
267
+ | Method | Description |
268
+ | ---------------------------- | -------------------------------------------------------------------------------------------- |
269
+ | `bind_tools` | Create chat model that can call tools. |
270
+ | `with_structured_output` | Create wrapper that structures model output using schema. |
271
+ | `with_retry` | Create wrapper that retries model calls on failure. |
272
+ | `with_fallbacks` | Create wrapper that falls back to other models on failure. |
273
+ | `configurable_fields` | Specify init args of the model that can be configured at runtime via the `RunnableConfig`. |
274
+ | `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the `RunnableConfig`. |
275
+
293
276
  Creating custom chat model:
294
277
  Custom chat model implementations should inherit from this class.
295
278
  Please reference the table below for information about which
296
279
  methods and properties are required or optional for implementations.
297
280
 
298
- +----------------------------------+--------------------------------------------------------------------+-------------------+
299
281
  | Method/Property | Description | Required/Optional |
300
- +==================================+====================================================================+===================+
282
+ | -------------------------------- | ------------------------------------------------------------------ | ----------------- |
301
283
  | `_generate` | Use to generate a chat result from a prompt | Required |
302
- +----------------------------------+--------------------------------------------------------------------+-------------------+
303
284
  | `_llm_type` (property) | Used to uniquely identify the type of the model. Used for logging. | Required |
304
- +----------------------------------+--------------------------------------------------------------------+-------------------+
305
285
  | `_identifying_params` (property) | Represent model parameterization for tracing purposes. | Optional |
306
- +----------------------------------+--------------------------------------------------------------------+-------------------+
307
286
  | `_stream` | Use to implement streaming | Optional |
308
- +----------------------------------+--------------------------------------------------------------------+-------------------+
309
287
  | `_agenerate` | Use to implement a native async method | Optional |
310
- +----------------------------------+--------------------------------------------------------------------+-------------------+
311
288
  | `_astream` | Use to implement async version of `_stream` | Optional |
312
- +----------------------------------+--------------------------------------------------------------------+-------------------+
313
289
 
314
- Follow the guide for more information on how to implement a custom Chat Model:
290
+ Follow the guide for more information on how to implement a custom chat model:
315
291
  [Guide](https://python.langchain.com/docs/how_to/custom_chat_model/).
316
292
 
317
293
  """ # noqa: E501
@@ -322,38 +298,37 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
322
298
  disable_streaming: bool | Literal["tool_calling"] = False
323
299
  """Whether to disable streaming for this model.
324
300
 
325
- If streaming is bypassed, then ``stream()``/``astream()``/``astream_events()`` will
326
- defer to ``invoke()``/``ainvoke()``.
301
+ If streaming is bypassed, then `stream`/`astream`/`astream_events` will
302
+ defer to `invoke`/`ainvoke`.
327
303
 
328
- - If True, will always bypass streaming case.
329
- - If ``'tool_calling'``, will bypass streaming case only when the model is called
330
- with a ``tools`` keyword argument. In other words, LangChain will automatically
331
- switch to non-streaming behavior (``invoke()``) only when the tools argument is
332
- provided. This offers the best of both worlds.
333
- - If False (default), will always use streaming case if available.
304
+ - If `True`, will always bypass streaming case.
305
+ - If `'tool_calling'`, will bypass streaming case only when the model is called
306
+ with a `tools` keyword argument. In other words, LangChain will automatically
307
+ switch to non-streaming behavior (`invoke`) only when the tools argument is
308
+ provided. This offers the best of both worlds.
309
+ - If `False` (Default), will always use streaming case if available.
334
310
 
335
- The main reason for this flag is that code might be written using ``stream()`` and
311
+ The main reason for this flag is that code might be written using `stream` and
336
312
  a user may want to swap out a given model for another model whose the implementation
337
313
  does not properly support streaming.
338
-
339
314
  """
340
315
 
341
316
  output_version: str | None = Field(
342
317
  default_factory=from_env("LC_OUTPUT_VERSION", default=None)
343
318
  )
344
- """Version of ``AIMessage`` output format to store in message content.
319
+ """Version of `AIMessage` output format to store in message content.
345
320
 
346
- ``AIMessage.content_blocks`` will lazily parse the contents of ``content`` into a
321
+ `AIMessage.content_blocks` will lazily parse the contents of `content` into a
347
322
  standard format. This flag can be used to additionally store the standard format
348
323
  in message content, e.g., for serialization purposes.
349
324
 
350
325
  Supported values:
351
326
 
352
- - ``"v0"``: provider-specific format in content (can lazily-parse with
353
- ``.content_blocks``)
354
- - ``"v1"``: standardized format in content (consistent with ``.content_blocks``)
327
+ - `'v0'`: provider-specific format in content (can lazily-parse with
328
+ `.content_blocks`)
329
+ - `'v1'`: standardized format in content (consistent with `.content_blocks`)
355
330
 
356
- Partner packages (e.g., ``langchain-openai``) can also use this field to roll out
331
+ Partner packages (e.g., `langchain-openai`) can also use this field to roll out
357
332
  new content formats in a backward-compatible way.
358
333
 
359
334
  !!! version-added "Added in version 1.0"
@@ -373,7 +348,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
373
348
  @property
374
349
  @override
375
350
  def OutputType(self) -> Any:
376
- """Get the output type for this runnable."""
351
+ """Get the output type for this `Runnable`."""
377
352
  return AnyMessage
378
353
 
379
354
  def _convert_input(self, model_input: LanguageModelInput) -> PromptValue:
@@ -471,8 +446,10 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
471
446
  if "stream" in kwargs:
472
447
  return kwargs["stream"]
473
448
 
474
- if getattr(self, "streaming", False):
475
- return True
449
+ if "streaming" in self.model_fields_set:
450
+ streaming_value = getattr(self, "streaming", None)
451
+ if isinstance(streaming_value, bool):
452
+ return streaming_value
476
453
 
477
454
  # Check if any streaming callback handlers have been passed in.
478
455
  handlers = run_manager.handlers if run_manager else []
@@ -1529,120 +1506,120 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
1529
1506
 
1530
1507
  - an OpenAI function/tool schema,
1531
1508
  - a JSON Schema,
1532
- - a TypedDict class,
1509
+ - a `TypedDict` class,
1533
1510
  - or a Pydantic class.
1534
1511
 
1535
- If ``schema`` is a Pydantic class then the model output will be a
1512
+ If `schema` is a Pydantic class then the model output will be a
1536
1513
  Pydantic instance of that class, and the model-generated fields will be
1537
1514
  validated by the Pydantic class. Otherwise the model output will be a
1538
1515
  dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool`
1539
1516
  for more on how to properly specify types and descriptions of
1540
- schema fields when specifying a Pydantic or TypedDict class.
1517
+ schema fields when specifying a Pydantic or `TypedDict` class.
1541
1518
 
1542
1519
  include_raw:
1543
- If False then only the parsed structured output is returned. If
1544
- an error occurs during model output parsing it will be raised. If True
1520
+ If `False` then only the parsed structured output is returned. If
1521
+ an error occurs during model output parsing it will be raised. If `True`
1545
1522
  then both the raw model response (a BaseMessage) and the parsed model
1546
1523
  response will be returned. If an error occurs during output parsing it
1547
1524
  will be caught and returned as well. The final output is always a dict
1548
- with keys ``'raw'``, ``'parsed'``, and ``'parsing_error'``.
1525
+ with keys `'raw'`, `'parsed'`, and `'parsing_error'`.
1549
1526
 
1550
1527
  Raises:
1551
- ValueError: If there are any unsupported ``kwargs``.
1528
+ ValueError: If there are any unsupported `kwargs`.
1552
1529
  NotImplementedError: If the model does not implement
1553
- ``with_structured_output()``.
1530
+ `with_structured_output()`.
1554
1531
 
1555
1532
  Returns:
1556
1533
  A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`.
1557
1534
 
1558
- If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs
1559
- an instance of ``schema`` (i.e., a Pydantic object).
1535
+ If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs
1536
+ an instance of `schema` (i.e., a Pydantic object).
1560
1537
 
1561
- Otherwise, if ``include_raw`` is False then Runnable outputs a dict.
1538
+ Otherwise, if `include_raw` is False then Runnable outputs a dict.
1562
1539
 
1563
- If ``include_raw`` is True, then Runnable outputs a dict with keys:
1540
+ If `include_raw` is True, then Runnable outputs a dict with keys:
1564
1541
 
1565
- - ``'raw'``: BaseMessage
1566
- - ``'parsed'``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
1567
- - ``'parsing_error'``: Optional[BaseException]
1542
+ - `'raw'`: BaseMessage
1543
+ - `'parsed'`: None if there was a parsing error, otherwise the type depends on the `schema` as described above.
1544
+ - `'parsing_error'`: BaseException | None
1568
1545
 
1569
1546
  Example: Pydantic schema (include_raw=False):
1570
- .. code-block:: python
1571
-
1572
- from pydantic import BaseModel
1547
+ ```python
1548
+ from pydantic import BaseModel
1573
1549
 
1574
1550
 
1575
- class AnswerWithJustification(BaseModel):
1576
- '''An answer to the user question along with justification for the answer.'''
1551
+ class AnswerWithJustification(BaseModel):
1552
+ '''An answer to the user question along with justification for the answer.'''
1577
1553
 
1578
- answer: str
1579
- justification: str
1554
+ answer: str
1555
+ justification: str
1580
1556
 
1581
1557
 
1582
- llm = ChatModel(model="model-name", temperature=0)
1583
- structured_llm = llm.with_structured_output(AnswerWithJustification)
1558
+ model = ChatModel(model="model-name", temperature=0)
1559
+ structured_model = model.with_structured_output(AnswerWithJustification)
1584
1560
 
1585
- structured_llm.invoke(
1586
- "What weighs more a pound of bricks or a pound of feathers"
1587
- )
1561
+ structured_model.invoke(
1562
+ "What weighs more a pound of bricks or a pound of feathers"
1563
+ )
1588
1564
 
1589
- # -> AnswerWithJustification(
1590
- # answer='They weigh the same',
1591
- # justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
1592
- # )
1565
+ # -> AnswerWithJustification(
1566
+ # answer='They weigh the same',
1567
+ # justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
1568
+ # )
1569
+ ```
1593
1570
 
1594
1571
  Example: Pydantic schema (include_raw=True):
1595
- .. code-block:: python
1596
-
1597
- from pydantic import BaseModel
1572
+ ```python
1573
+ from pydantic import BaseModel
1598
1574
 
1599
1575
 
1600
- class AnswerWithJustification(BaseModel):
1601
- '''An answer to the user question along with justification for the answer.'''
1576
+ class AnswerWithJustification(BaseModel):
1577
+ '''An answer to the user question along with justification for the answer.'''
1602
1578
 
1603
- answer: str
1604
- justification: str
1579
+ answer: str
1580
+ justification: str
1605
1581
 
1606
1582
 
1607
- llm = ChatModel(model="model-name", temperature=0)
1608
- structured_llm = llm.with_structured_output(
1609
- AnswerWithJustification, include_raw=True
1610
- )
1583
+ model = ChatModel(model="model-name", temperature=0)
1584
+ structured_model = model.with_structured_output(
1585
+ AnswerWithJustification, include_raw=True
1586
+ )
1611
1587
 
1612
- structured_llm.invoke(
1613
- "What weighs more a pound of bricks or a pound of feathers"
1614
- )
1615
- # -> {
1616
- # 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
1617
- # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
1618
- # 'parsing_error': None
1619
- # }
1588
+ structured_model.invoke(
1589
+ "What weighs more a pound of bricks or a pound of feathers"
1590
+ )
1591
+ # -> {
1592
+ # 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
1593
+ # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
1594
+ # 'parsing_error': None
1595
+ # }
1596
+ ```
1620
1597
 
1621
1598
  Example: Dict schema (include_raw=False):
1622
- .. code-block:: python
1599
+ ```python
1600
+ from pydantic import BaseModel
1601
+ from langchain_core.utils.function_calling import convert_to_openai_tool
1623
1602
 
1624
- from pydantic import BaseModel
1625
- from langchain_core.utils.function_calling import convert_to_openai_tool
1626
1603
 
1604
+ class AnswerWithJustification(BaseModel):
1605
+ '''An answer to the user question along with justification for the answer.'''
1627
1606
 
1628
- class AnswerWithJustification(BaseModel):
1629
- '''An answer to the user question along with justification for the answer.'''
1607
+ answer: str
1608
+ justification: str
1630
1609
 
1631
- answer: str
1632
- justification: str
1633
1610
 
1611
+ dict_schema = convert_to_openai_tool(AnswerWithJustification)
1612
+ model = ChatModel(model="model-name", temperature=0)
1613
+ structured_model = model.with_structured_output(dict_schema)
1634
1614
 
1635
- dict_schema = convert_to_openai_tool(AnswerWithJustification)
1636
- llm = ChatModel(model="model-name", temperature=0)
1637
- structured_llm = llm.with_structured_output(dict_schema)
1638
-
1639
- structured_llm.invoke(
1640
- "What weighs more a pound of bricks or a pound of feathers"
1641
- )
1642
- # -> {
1643
- # 'answer': 'They weigh the same',
1644
- # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
1645
- # }
1615
+ structured_model.invoke(
1616
+ "What weighs more a pound of bricks or a pound of feathers"
1617
+ )
1618
+ # -> {
1619
+ # 'answer': 'They weigh the same',
1620
+ # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
1621
+ # }
1622
+ ```
1646
1623
 
1647
1624
  !!! warning "Behavior changed in 0.2.26"
1648
1625
  Added support for TypedDict class.
@@ -1692,7 +1669,7 @@ class SimpleChatModel(BaseChatModel):
1692
1669
 
1693
1670
  !!! note
1694
1671
  This implementation is primarily here for backwards compatibility. For new
1695
- implementations, please use ``BaseChatModel`` directly.
1672
+ implementations, please use `BaseChatModel` directly.
1696
1673
 
1697
1674
  """
1698
1675
 
@@ -19,7 +19,7 @@ from langchain_core.runnables import RunnableConfig
19
19
 
20
20
 
21
21
  class FakeMessagesListChatModel(BaseChatModel):
22
- """Fake ``ChatModel`` for testing purposes."""
22
+ """Fake `ChatModel` for testing purposes."""
23
23
 
24
24
  responses: list[BaseMessage]
25
25
  """List of responses to **cycle** through in order."""
@@ -228,10 +228,10 @@ class GenericFakeChatModel(BaseChatModel):
228
228
  """Generic fake chat model that can be used to test the chat model interface.
229
229
 
230
230
  * Chat model should be usable in both sync and async tests
231
- * Invokes ``on_llm_new_token`` to allow for testing of callback related code for new
232
- tokens.
231
+ * Invokes `on_llm_new_token` to allow for testing of callback related code for new
232
+ tokens.
233
233
  * Includes logic to break messages into message chunk to facilitate testing of
234
- streaming.
234
+ streaming.
235
235
 
236
236
  """
237
237
 
@@ -242,7 +242,7 @@ class GenericFakeChatModel(BaseChatModel):
242
242
  to make the interface more generic if needed.
243
243
 
244
244
  !!! note
245
- if you want to pass a list, you can use ``iter`` to convert it to an iterator.
245
+ if you want to pass a list, you can use `iter` to convert it to an iterator.
246
246
 
247
247
  !!! warning
248
248
  Streaming is not implemented yet. We should try to implement it in the future by
@@ -299,7 +299,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
299
299
  @property
300
300
  @override
301
301
  def OutputType(self) -> type[str]:
302
- """Get the input type for this runnable."""
302
+ """Get the input type for this `Runnable`."""
303
303
  return str
304
304
 
305
305
  def _convert_input(self, model_input: LanguageModelInput) -> PromptValue:
@@ -835,7 +835,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
835
835
  1. Take advantage of batched calls,
836
836
  2. Need more output from the model than just the top generated value,
837
837
  3. Are building chains that are agnostic to the underlying language model
838
- type (e.g., pure text completion models vs chat models).
838
+ type (e.g., pure text completion models vs chat models).
839
839
 
840
840
  Args:
841
841
  prompts: List of string prompts.
@@ -857,8 +857,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
857
857
 
858
858
  Raises:
859
859
  ValueError: If prompts is not a list.
860
- ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
861
- ``run_name`` (if provided) does not match the length of prompts.
860
+ ValueError: If the length of `callbacks`, `tags`, `metadata`, or
861
+ `run_name` (if provided) does not match the length of prompts.
862
862
 
863
863
  Returns:
864
864
  An LLMResult, which contains a list of candidate Generations for each input
@@ -1105,7 +1105,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
1105
1105
  1. Take advantage of batched calls,
1106
1106
  2. Need more output from the model than just the top generated value,
1107
1107
  3. Are building chains that are agnostic to the underlying language model
1108
- type (e.g., pure text completion models vs chat models).
1108
+ type (e.g., pure text completion models vs chat models).
1109
1109
 
1110
1110
  Args:
1111
1111
  prompts: List of string prompts.
@@ -1126,8 +1126,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
1126
1126
  to the model provider API call.
1127
1127
 
1128
1128
  Raises:
1129
- ValueError: If the length of ``callbacks``, ``tags``, ``metadata``, or
1130
- ``run_name`` (if provided) does not match the length of prompts.
1129
+ ValueError: If the length of `callbacks`, `tags`, `metadata`, or
1130
+ `run_name` (if provided) does not match the length of prompts.
1131
1131
 
1132
1132
  Returns:
1133
1133
  An LLMResult, which contains a list of candidate Generations for each input
@@ -1340,11 +1340,9 @@ class BaseLLM(BaseLanguageModel[str], ABC):
1340
1340
  ValueError: If the file path is not a string or Path object.
1341
1341
 
1342
1342
  Example:
1343
-
1344
- .. code-block:: python
1345
-
1346
- llm.save(file_path="path/llm.yaml")
1347
-
1343
+ ```python
1344
+ llm.save(file_path="path/llm.yaml")
1345
+ ```
1348
1346
  """
1349
1347
  # Convert file to Path object.
1350
1348
  save_path = Path(file_path)
@@ -45,7 +45,7 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
45
45
  pretty: Whether to pretty print the json. If true, the json will be
46
46
  indented with 2 spaces (if no indent is provided as part of kwargs).
47
47
  Default is False.
48
- kwargs: Additional arguments to pass to json.dumps
48
+ **kwargs: Additional arguments to pass to json.dumps
49
49
 
50
50
  Returns:
51
51
  A json string representation of the object.
@@ -63,16 +63,16 @@ class Reviver:
63
63
  Args:
64
64
  secrets_map: A map of secrets to load. If a secret is not found in
65
65
  the map, it will be loaded from the environment if `secrets_from_env`
66
- is True. Defaults to None.
66
+ is True.
67
67
  valid_namespaces: A list of additional namespaces (modules)
68
- to allow to be deserialized. Defaults to None.
68
+ to allow to be deserialized.
69
69
  secrets_from_env: Whether to load secrets from the environment.
70
- Defaults to True.
70
+ Defaults to `True`.
71
71
  additional_import_mappings: A dictionary of additional namespace mappings
72
72
  You can use this to override default mappings or add new mappings.
73
- Defaults to None.
73
+
74
74
  ignore_unserializable_fields: Whether to ignore unserializable fields.
75
- Defaults to False.
75
+ Defaults to `False`.
76
76
  """
77
77
  self.secrets_from_env = secrets_from_env
78
78
  self.secrets_map = secrets_map or {}
@@ -107,7 +107,7 @@ class Reviver:
107
107
  ValueError: If trying to deserialize something that cannot
108
108
  be deserialized in the current version of langchain-core.
109
109
  NotImplementedError: If the object is not implemented and
110
- ``ignore_unserializable_fields`` is False.
110
+ `ignore_unserializable_fields` is False.
111
111
  """
112
112
  if (
113
113
  value.get("lc") == 1
@@ -200,16 +200,16 @@ def loads(
200
200
  text: The string to load.
201
201
  secrets_map: A map of secrets to load. If a secret is not found in
202
202
  the map, it will be loaded from the environment if `secrets_from_env`
203
- is True. Defaults to None.
203
+ is True.
204
204
  valid_namespaces: A list of additional namespaces (modules)
205
- to allow to be deserialized. Defaults to None.
205
+ to allow to be deserialized.
206
206
  secrets_from_env: Whether to load secrets from the environment.
207
- Defaults to True.
207
+ Defaults to `True`.
208
208
  additional_import_mappings: A dictionary of additional namespace mappings
209
209
  You can use this to override default mappings or add new mappings.
210
- Defaults to None.
210
+
211
211
  ignore_unserializable_fields: Whether to ignore unserializable fields.
212
- Defaults to False.
212
+ Defaults to `False`.
213
213
 
214
214
  Returns:
215
215
  Revived LangChain objects.
@@ -245,16 +245,16 @@ def load(
245
245
  obj: The object to load.
246
246
  secrets_map: A map of secrets to load. If a secret is not found in
247
247
  the map, it will be loaded from the environment if `secrets_from_env`
248
- is True. Defaults to None.
248
+ is True.
249
249
  valid_namespaces: A list of additional namespaces (modules)
250
- to allow to be deserialized. Defaults to None.
250
+ to allow to be deserialized.
251
251
  secrets_from_env: Whether to load secrets from the environment.
252
- Defaults to True.
252
+ Defaults to `True`.
253
253
  additional_import_mappings: A dictionary of additional namespace mappings
254
254
  You can use this to override default mappings or add new mappings.
255
- Defaults to None.
255
+
256
256
  ignore_unserializable_fields: Whether to ignore unserializable fields.
257
- Defaults to False.
257
+ Defaults to `False`.
258
258
 
259
259
  Returns:
260
260
  Revived LangChain objects.