langchain-core 0.3.75__py3-none-any.whl → 0.3.77__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic. See the registry's advisory page for more details.

Files changed (119)
  1. langchain_core/_api/beta_decorator.py +22 -44
  2. langchain_core/_api/deprecation.py +30 -17
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/beta/runnables/context.py +1 -2
  7. langchain_core/callbacks/base.py +28 -15
  8. langchain_core/callbacks/manager.py +83 -71
  9. langchain_core/callbacks/usage.py +6 -4
  10. langchain_core/chat_history.py +29 -21
  11. langchain_core/document_loaders/base.py +34 -9
  12. langchain_core/document_loaders/langsmith.py +4 -1
  13. langchain_core/documents/base.py +35 -10
  14. langchain_core/documents/transformers.py +4 -2
  15. langchain_core/embeddings/fake.py +8 -5
  16. langchain_core/env.py +2 -3
  17. langchain_core/example_selectors/base.py +12 -0
  18. langchain_core/exceptions.py +7 -0
  19. langchain_core/globals.py +17 -28
  20. langchain_core/indexing/api.py +88 -76
  21. langchain_core/indexing/base.py +5 -8
  22. langchain_core/indexing/in_memory.py +23 -3
  23. langchain_core/language_models/__init__.py +3 -2
  24. langchain_core/language_models/base.py +31 -20
  25. langchain_core/language_models/chat_models.py +98 -27
  26. langchain_core/language_models/fake_chat_models.py +10 -9
  27. langchain_core/language_models/llms.py +52 -18
  28. langchain_core/load/dump.py +2 -3
  29. langchain_core/load/load.py +15 -1
  30. langchain_core/load/serializable.py +39 -44
  31. langchain_core/memory.py +7 -3
  32. langchain_core/messages/ai.py +53 -24
  33. langchain_core/messages/base.py +43 -22
  34. langchain_core/messages/chat.py +4 -1
  35. langchain_core/messages/content_blocks.py +23 -2
  36. langchain_core/messages/function.py +9 -5
  37. langchain_core/messages/human.py +13 -10
  38. langchain_core/messages/modifier.py +1 -0
  39. langchain_core/messages/system.py +11 -8
  40. langchain_core/messages/tool.py +60 -29
  41. langchain_core/messages/utils.py +250 -131
  42. langchain_core/output_parsers/base.py +5 -2
  43. langchain_core/output_parsers/json.py +4 -4
  44. langchain_core/output_parsers/list.py +7 -22
  45. langchain_core/output_parsers/openai_functions.py +3 -0
  46. langchain_core/output_parsers/openai_tools.py +6 -1
  47. langchain_core/output_parsers/pydantic.py +4 -0
  48. langchain_core/output_parsers/string.py +5 -1
  49. langchain_core/output_parsers/xml.py +19 -19
  50. langchain_core/outputs/chat_generation.py +25 -10
  51. langchain_core/outputs/generation.py +14 -3
  52. langchain_core/outputs/llm_result.py +8 -1
  53. langchain_core/prompt_values.py +16 -6
  54. langchain_core/prompts/base.py +4 -9
  55. langchain_core/prompts/chat.py +89 -57
  56. langchain_core/prompts/dict.py +16 -8
  57. langchain_core/prompts/few_shot.py +12 -11
  58. langchain_core/prompts/few_shot_with_templates.py +5 -1
  59. langchain_core/prompts/image.py +12 -5
  60. langchain_core/prompts/message.py +5 -6
  61. langchain_core/prompts/pipeline.py +13 -8
  62. langchain_core/prompts/prompt.py +22 -8
  63. langchain_core/prompts/string.py +18 -10
  64. langchain_core/prompts/structured.py +7 -2
  65. langchain_core/rate_limiters.py +2 -2
  66. langchain_core/retrievers.py +7 -6
  67. langchain_core/runnables/base.py +406 -186
  68. langchain_core/runnables/branch.py +14 -19
  69. langchain_core/runnables/config.py +9 -15
  70. langchain_core/runnables/configurable.py +34 -19
  71. langchain_core/runnables/fallbacks.py +20 -13
  72. langchain_core/runnables/graph.py +48 -38
  73. langchain_core/runnables/graph_ascii.py +41 -18
  74. langchain_core/runnables/graph_mermaid.py +54 -25
  75. langchain_core/runnables/graph_png.py +27 -31
  76. langchain_core/runnables/history.py +55 -58
  77. langchain_core/runnables/passthrough.py +44 -21
  78. langchain_core/runnables/retry.py +44 -23
  79. langchain_core/runnables/router.py +9 -8
  80. langchain_core/runnables/schema.py +2 -0
  81. langchain_core/runnables/utils.py +51 -89
  82. langchain_core/stores.py +19 -31
  83. langchain_core/sys_info.py +9 -8
  84. langchain_core/tools/base.py +37 -28
  85. langchain_core/tools/convert.py +26 -15
  86. langchain_core/tools/simple.py +36 -8
  87. langchain_core/tools/structured.py +25 -12
  88. langchain_core/tracers/base.py +2 -2
  89. langchain_core/tracers/context.py +5 -1
  90. langchain_core/tracers/core.py +109 -39
  91. langchain_core/tracers/evaluation.py +22 -26
  92. langchain_core/tracers/event_stream.py +45 -34
  93. langchain_core/tracers/langchain.py +12 -3
  94. langchain_core/tracers/langchain_v1.py +10 -2
  95. langchain_core/tracers/log_stream.py +56 -17
  96. langchain_core/tracers/root_listeners.py +4 -20
  97. langchain_core/tracers/run_collector.py +6 -16
  98. langchain_core/tracers/schemas.py +5 -1
  99. langchain_core/utils/aiter.py +15 -7
  100. langchain_core/utils/env.py +3 -0
  101. langchain_core/utils/function_calling.py +50 -28
  102. langchain_core/utils/interactive_env.py +6 -2
  103. langchain_core/utils/iter.py +12 -4
  104. langchain_core/utils/json.py +12 -3
  105. langchain_core/utils/json_schema.py +156 -40
  106. langchain_core/utils/loading.py +5 -1
  107. langchain_core/utils/mustache.py +24 -15
  108. langchain_core/utils/pydantic.py +38 -9
  109. langchain_core/utils/utils.py +25 -9
  110. langchain_core/vectorstores/base.py +7 -20
  111. langchain_core/vectorstores/in_memory.py +23 -17
  112. langchain_core/vectorstores/utils.py +18 -12
  113. langchain_core/version.py +1 -1
  114. langchain_core-0.3.77.dist-info/METADATA +67 -0
  115. langchain_core-0.3.77.dist-info/RECORD +174 -0
  116. langchain_core-0.3.75.dist-info/METADATA +0 -106
  117. langchain_core-0.3.75.dist-info/RECORD +0 -174
  118. {langchain_core-0.3.75.dist-info → langchain_core-0.3.77.dist-info}/WHEEL +0 -0
  119. {langchain_core-0.3.75.dist-info → langchain_core-0.3.77.dist-info}/entry_points.txt +0 -0
@@ -41,6 +41,7 @@ from pydantic import BaseModel, ConfigDict, Field, RootModel
41
41
  from typing_extensions import Literal, get_args, override
42
42
 
43
43
  from langchain_core._api import beta_decorator
44
+ from langchain_core.callbacks.manager import AsyncCallbackManager, CallbackManager
44
45
  from langchain_core.load.serializable import (
45
46
  Serializable,
46
47
  SerializedConstructor,
@@ -60,7 +61,6 @@ from langchain_core.runnables.config import (
60
61
  run_in_executor,
61
62
  set_config_context,
62
63
  )
63
- from langchain_core.runnables.graph import Graph
64
64
  from langchain_core.runnables.utils import (
65
65
  AddableDict,
66
66
  AnyConfigurableField,
@@ -81,6 +81,19 @@ from langchain_core.runnables.utils import (
81
81
  is_async_callable,
82
82
  is_async_generator,
83
83
  )
84
+ from langchain_core.tracers._streaming import _StreamingCallbackHandler
85
+ from langchain_core.tracers.event_stream import (
86
+ _astream_events_implementation_v1,
87
+ _astream_events_implementation_v2,
88
+ )
89
+ from langchain_core.tracers.log_stream import (
90
+ LogStreamCallbackHandler,
91
+ _astream_log_implementation,
92
+ )
93
+ from langchain_core.tracers.root_listeners import (
94
+ AsyncRootListenersTracer,
95
+ RootListenersTracer,
96
+ )
84
97
  from langchain_core.utils.aiter import aclosing, atee, py_anext
85
98
  from langchain_core.utils.iter import safetee
86
99
  from langchain_core.utils.pydantic import create_model_v2
@@ -94,6 +107,7 @@ if TYPE_CHECKING:
94
107
  from langchain_core.runnables.fallbacks import (
95
108
  RunnableWithFallbacks as RunnableWithFallbacksT,
96
109
  )
110
+ from langchain_core.runnables.graph import Graph
97
111
  from langchain_core.runnables.retry import ExponentialJitterParams
98
112
  from langchain_core.runnables.schema import StreamEvent
99
113
  from langchain_core.tools import BaseTool
@@ -114,12 +128,13 @@ class Runnable(ABC, Generic[Input, Output]):
114
128
  - **``invoke``/``ainvoke``**: Transforms a single input into an output.
115
129
  - **``batch``/``abatch``**: Efficiently transforms multiple inputs into outputs.
116
130
  - **``stream``/``astream``**: Streams output from a single input as it's produced.
117
- - **``astream_log``**: Streams output and selected intermediate results from an input.
131
+ - **``astream_log``**: Streams output and selected intermediate results from an
132
+ input.
118
133
 
119
134
  Built-in optimizations:
120
135
 
121
- - **Batch**: By default, batch runs invoke() in parallel using a thread pool executor.
122
- Override to optimize batching.
136
+ - **Batch**: By default, batch runs invoke() in parallel using a thread pool
137
+ executor. Override to optimize batching.
123
138
 
124
139
  - **Async**: Methods with ``'a'`` suffix are asynchronous. By default, they execute
125
140
  the sync counterpart using asyncio's thread pool.
@@ -129,14 +144,16 @@ class Runnable(ABC, Generic[Input, Output]):
129
144
  execution, add tags and metadata for tracing and debugging etc.
130
145
 
131
146
  Runnables expose schematic information about their input, output and config via
132
- the ``input_schema`` property, the ``output_schema`` property and ``config_schema`` method.
147
+ the ``input_schema`` property, the ``output_schema`` property and ``config_schema``
148
+ method.
133
149
 
134
150
  LCEL and Composition
135
151
  ====================
136
152
 
137
- The LangChain Expression Language (LCEL) is a declarative way to compose ``Runnables``
138
- into chains. Any chain constructed this way will automatically have sync, async,
139
- batch, and streaming support.
153
+ The LangChain Expression Language (LCEL) is a declarative way to compose
154
+ ``Runnables`` into chains.
155
+ Any chain constructed this way will automatically have sync, async, batch, and
156
+ streaming support.
140
157
 
141
158
  The main composition primitives are ``RunnableSequence`` and ``RunnableParallel``.
142
159
 
@@ -157,25 +174,27 @@ class Runnable(ABC, Generic[Input, Output]):
157
174
 
158
175
  # A RunnableSequence constructed using the `|` operator
159
176
  sequence = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)
160
- sequence.invoke(1) # 4
161
- sequence.batch([1, 2, 3]) # [4, 6, 8]
177
+ sequence.invoke(1) # 4
178
+ sequence.batch([1, 2, 3]) # [4, 6, 8]
162
179
 
163
180
 
164
181
  # A sequence that contains a RunnableParallel constructed using a dict literal
165
182
  sequence = RunnableLambda(lambda x: x + 1) | {
166
- 'mul_2': RunnableLambda(lambda x: x * 2),
167
- 'mul_5': RunnableLambda(lambda x: x * 5)
183
+ "mul_2": RunnableLambda(lambda x: x * 2),
184
+ "mul_5": RunnableLambda(lambda x: x * 5),
168
185
  }
169
- sequence.invoke(1) # {'mul_2': 4, 'mul_5': 10}
186
+ sequence.invoke(1) # {'mul_2': 4, 'mul_5': 10}
170
187
 
171
188
  Standard Methods
172
189
  ================
173
190
 
174
- All ``Runnable``s expose additional methods that can be used to modify their behavior
175
- (e.g., add a retry policy, add lifecycle listeners, make them configurable, etc.).
191
+ All ``Runnable``s expose additional methods that can be used to modify their
192
+ behavior (e.g., add a retry policy, add lifecycle listeners, make them
193
+ configurable, etc.).
176
194
 
177
- These methods will work on any ``Runnable``, including ``Runnable`` chains constructed
178
- by composing other ``Runnable``s. See the individual methods for details.
195
+ These methods will work on any ``Runnable``, including ``Runnable`` chains
196
+ constructed by composing other ``Runnable``s.
197
+ See the individual methods for details.
179
198
 
180
199
  For example,
181
200
 
@@ -219,6 +238,7 @@ class Runnable(ABC, Generic[Input, Output]):
219
238
  .. code-block:: python
220
239
 
221
240
  from langchain_core.globals import set_debug
241
+
222
242
  set_debug(True)
223
243
 
224
244
  Alternatively, you can pass existing or custom callbacks to any given chain:
@@ -227,14 +247,11 @@ class Runnable(ABC, Generic[Input, Output]):
227
247
 
228
248
  from langchain_core.tracers import ConsoleCallbackHandler
229
249
 
230
- chain.invoke(
231
- ...,
232
- config={'callbacks': [ConsoleCallbackHandler()]}
233
- )
250
+ chain.invoke(..., config={"callbacks": [ConsoleCallbackHandler()]})
234
251
 
235
252
  For a UI (and much more) checkout `LangSmith <https://docs.smith.langchain.com/>`__.
236
253
 
237
- """ # noqa: E501
254
+ """
238
255
 
239
256
  name: Optional[str]
240
257
  """The name of the ``Runnable``. Used for debugging and tracing."""
@@ -242,7 +259,15 @@ class Runnable(ABC, Generic[Input, Output]):
242
259
  def get_name(
243
260
  self, suffix: Optional[str] = None, *, name: Optional[str] = None
244
261
  ) -> str:
245
- """Get the name of the ``Runnable``."""
262
+ """Get the name of the ``Runnable``.
263
+
264
+ Args:
265
+ suffix: An optional suffix to append to the name.
266
+ name: An optional name to use instead of the ``Runnable``'s name.
267
+
268
+ Returns:
269
+ The name of the ``Runnable``.
270
+ """
246
271
  if name:
247
272
  name_ = name
248
273
  elif hasattr(self, "name") and self.name:
@@ -273,7 +298,13 @@ class Runnable(ABC, Generic[Input, Output]):
273
298
 
274
299
  @property
275
300
  def InputType(self) -> type[Input]: # noqa: N802
276
- """The type of input this ``Runnable`` accepts specified as a type annotation.""" # noqa: E501
301
+ """Input type.
302
+
303
+ The type of input this ``Runnable`` accepts specified as a type annotation.
304
+
305
+ Raises:
306
+ TypeError: If the input type cannot be inferred.
307
+ """
277
308
  # First loop through all parent classes and if any of them is
278
309
  # a pydantic model, we will pick up the generic parameterization
279
310
  # from that model via the __pydantic_generic_metadata__ attribute.
@@ -299,7 +330,13 @@ class Runnable(ABC, Generic[Input, Output]):
299
330
 
300
331
  @property
301
332
  def OutputType(self) -> type[Output]: # noqa: N802
302
- """The type of output this ``Runnable`` produces specified as a type annotation.""" # noqa: E501
333
+ """Output Type.
334
+
335
+ The type of output this ``Runnable`` produces specified as a type annotation.
336
+
337
+ Raises:
338
+ TypeError: If the output type cannot be inferred.
339
+ """
303
340
  # First loop through bases -- this will help generic
304
341
  # any pydantic models.
305
342
  for base in self.__class__.mro():
@@ -381,9 +418,11 @@ class Runnable(ABC, Generic[Input, Output]):
381
418
 
382
419
  from langchain_core.runnables import RunnableLambda
383
420
 
421
+
384
422
  def add_one(x: int) -> int:
385
423
  return x + 1
386
424
 
425
+
387
426
  runnable = RunnableLambda(add_one)
388
427
 
389
428
  print(runnable.get_input_jsonschema())
@@ -395,7 +434,10 @@ class Runnable(ABC, Generic[Input, Output]):
395
434
 
396
435
  @property
397
436
  def output_schema(self) -> type[BaseModel]:
398
- """The type of output this ``Runnable`` produces specified as a pydantic model.""" # noqa: E501
437
+ """Output schema.
438
+
439
+ The type of output this ``Runnable`` produces specified as a pydantic model.
440
+ """
399
441
  return self.get_output_schema()
400
442
 
401
443
  def get_output_schema(
@@ -455,9 +497,11 @@ class Runnable(ABC, Generic[Input, Output]):
455
497
 
456
498
  from langchain_core.runnables import RunnableLambda
457
499
 
500
+
458
501
  def add_one(x: int) -> int:
459
502
  return x + 1
460
503
 
504
+
461
505
  runnable = RunnableLambda(add_one)
462
506
 
463
507
  print(runnable.get_output_jsonschema())
@@ -535,6 +579,9 @@ class Runnable(ABC, Generic[Input, Output]):
535
579
 
536
580
  def get_graph(self, config: Optional[RunnableConfig] = None) -> Graph:
537
581
  """Return a graph representation of this ``Runnable``."""
582
+ # Import locally to prevent circular import
583
+ from langchain_core.runnables.graph import Graph # noqa: PLC0415
584
+
538
585
  graph = Graph()
539
586
  try:
540
587
  input_node = graph.add_node(self.get_input_schema(config))
@@ -555,7 +602,8 @@ class Runnable(ABC, Generic[Input, Output]):
555
602
  self, config: Optional[RunnableConfig] = None
556
603
  ) -> list[BasePromptTemplate]:
557
604
  """Return a list of prompts used by this ``Runnable``."""
558
- from langchain_core.prompts.base import BasePromptTemplate
605
+ # Import locally to prevent circular import
606
+ from langchain_core.prompts.base import BasePromptTemplate # noqa: PLC0415
559
607
 
560
608
  return [
561
609
  node.data
@@ -573,7 +621,17 @@ class Runnable(ABC, Generic[Input, Output]):
573
621
  Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other], Any]],
574
622
  ],
575
623
  ) -> RunnableSerializable[Input, Other]:
576
- """Compose this ``Runnable`` with another object to create a ``RunnableSequence``.""" # noqa: E501
624
+ """Runnable "or" operator.
625
+
626
+ Compose this ``Runnable`` with another object to create a
627
+ ``RunnableSequence``.
628
+
629
+ Args:
630
+ other: Another ``Runnable`` or a ``Runnable``-like object.
631
+
632
+ Returns:
633
+ A new ``Runnable``.
634
+ """
577
635
  return RunnableSequence(self, coerce_to_runnable(other))
578
636
 
579
637
  def __ror__(
@@ -586,7 +644,17 @@ class Runnable(ABC, Generic[Input, Output]):
586
644
  Mapping[str, Union[Runnable[Other, Any], Callable[[Other], Any], Any]],
587
645
  ],
588
646
  ) -> RunnableSerializable[Other, Output]:
589
- """Compose this ``Runnable`` with another object to create a ``RunnableSequence``.""" # noqa: E501
647
+ """Runnable "reverse-or" operator.
648
+
649
+ Compose this ``Runnable`` with another object to create a
650
+ ``RunnableSequence``.
651
+
652
+ Args:
653
+ other: Another ``Runnable`` or a ``Runnable``-like object.
654
+
655
+ Returns:
656
+ A new ``Runnable``.
657
+ """
590
658
  return RunnableSequence(coerce_to_runnable(other), self)
591
659
 
592
660
  def pipe(
@@ -594,21 +662,28 @@ class Runnable(ABC, Generic[Input, Output]):
594
662
  *others: Union[Runnable[Any, Other], Callable[[Any], Other]],
595
663
  name: Optional[str] = None,
596
664
  ) -> RunnableSerializable[Input, Other]:
597
- """Compose this ``Runnable`` with ``Runnable``-like objects to make a ``RunnableSequence``.
665
+ """Pipe runnables.
666
+
667
+ Compose this ``Runnable`` with ``Runnable``-like objects to make a
668
+ ``RunnableSequence``.
598
669
 
599
670
  Equivalent to ``RunnableSequence(self, *others)`` or ``self | others[0] | ...``
600
671
 
601
672
  Example:
673
+
602
674
  .. code-block:: python
603
675
 
604
676
  from langchain_core.runnables import RunnableLambda
605
677
 
678
+
606
679
  def add_one(x: int) -> int:
607
680
  return x + 1
608
681
 
682
+
609
683
  def mul_two(x: int) -> int:
610
684
  return x * 2
611
685
 
686
+
612
687
  runnable_1 = RunnableLambda(add_one)
613
688
  runnable_2 = RunnableLambda(mul_two)
614
689
  sequence = runnable_1.pipe(runnable_2)
@@ -623,13 +698,20 @@ class Runnable(ABC, Generic[Input, Output]):
623
698
  await sequence.abatch([1, 2, 3])
624
699
  # -> [4, 6, 8]
625
700
 
626
- """ # noqa: E501
701
+ Args:
702
+ *others: Other ``Runnable`` or ``Runnable``-like objects to compose
703
+ name: An optional name for the resulting ``RunnableSequence``.
704
+
705
+ Returns:
706
+ A new ``Runnable``.
707
+ """
627
708
  return RunnableSequence(self, *others, name=name)
628
709
 
629
710
  def pick(self, keys: Union[str, list[str]]) -> RunnableSerializable[Any, Any]:
630
711
  """Pick keys from the output dict of this ``Runnable``.
631
712
 
632
713
  Pick single key:
714
+
633
715
  .. code-block:: python
634
716
 
635
717
  import json
@@ -648,6 +730,7 @@ class Runnable(ABC, Generic[Input, Output]):
648
730
  # -> [1, 2, 3]
649
731
 
650
732
  Pick list of keys:
733
+
651
734
  .. code-block:: python
652
735
 
653
736
  from typing import Any
@@ -658,13 +741,14 @@ class Runnable(ABC, Generic[Input, Output]):
658
741
 
659
742
  as_str = RunnableLambda(str)
660
743
  as_json = RunnableLambda(json.loads)
744
+
745
+
661
746
  def as_bytes(x: Any) -> bytes:
662
747
  return bytes(x, "utf-8")
663
748
 
749
+
664
750
  chain = RunnableMap(
665
- str=as_str,
666
- json=as_json,
667
- bytes=RunnableLambda(as_bytes)
751
+ str=as_str, json=as_json, bytes=RunnableLambda(as_bytes)
668
752
  )
669
753
 
670
754
  chain.invoke("[1, 2, 3]")
@@ -674,8 +758,15 @@ class Runnable(ABC, Generic[Input, Output]):
674
758
  json_and_bytes_chain.invoke("[1, 2, 3]")
675
759
  # -> {"json": [1, 2, 3], "bytes": b"[1, 2, 3]"}
676
760
 
761
+ Args:
762
+ keys: A key or list of keys to pick from the output dict.
763
+
764
+ Returns:
765
+ a new ``Runnable``.
766
+
677
767
  """
678
- from langchain_core.runnables.passthrough import RunnablePick
768
+ # Import locally to prevent circular import
769
+ from langchain_core.runnables.passthrough import RunnablePick # noqa: PLC0415
679
770
 
680
771
  return self | RunnablePick(keys)
681
772
 
@@ -692,8 +783,6 @@ class Runnable(ABC, Generic[Input, Output]):
692
783
  ) -> RunnableSerializable[Any, Any]:
693
784
  """Assigns new fields to the dict output of this ``Runnable``.
694
785
 
695
- Returns a new ``Runnable``.
696
-
697
786
  .. code-block:: python
698
787
 
699
788
  from langchain_community.llms.fake import FakeStreamingListLLM
@@ -720,8 +809,16 @@ class Runnable(ABC, Generic[Input, Output]):
720
809
  {'str': {'title': 'Str',
721
810
  'type': 'string'}, 'hello': {'title': 'Hello', 'type': 'string'}}}
722
811
 
812
+ Args:
813
+ **kwargs: A mapping of keys to ``Runnable`` or ``Runnable``-like objects
814
+ that will be invoked with the entire output dict of this ``Runnable``.
815
+
816
+ Returns:
817
+ A new ``Runnable``.
818
+
723
819
  """
724
- from langchain_core.runnables.passthrough import RunnableAssign
820
+ # Import locally to prevent circular import
821
+ from langchain_core.runnables.passthrough import RunnableAssign # noqa: PLC0415
725
822
 
726
823
  return self | RunnableAssign(RunnableParallel[dict[str, Any]](kwargs))
727
824
 
@@ -755,12 +852,18 @@ class Runnable(ABC, Generic[Input, Output]):
755
852
  config: Optional[RunnableConfig] = None,
756
853
  **kwargs: Any,
757
854
  ) -> Output:
758
- """Default implementation of ``ainvoke``, calls ``invoke`` from a thread.
855
+ """Transform a single input into an output.
759
856
 
760
- The default implementation allows usage of async code even if
761
- the ``Runnable`` did not implement a native async version of ``invoke``.
857
+ Args:
858
+ input: The input to the ``Runnable``.
859
+ config: A config to use when invoking the ``Runnable``.
860
+ The config supports standard keys like ``'tags'``, ``'metadata'`` for
861
+ tracing purposes, ``'max_concurrency'`` for controlling how much work to
862
+ do in parallel, and other keys. Please refer to the ``RunnableConfig``
863
+ for more details. Defaults to None.
762
864
 
763
- Subclasses should override this method if they can run asynchronously.
865
+ Returns:
866
+ The output of the ``Runnable``.
764
867
 
765
868
  """
766
869
  return await run_in_executor(config, self.invoke, input, config, **kwargs)
@@ -780,6 +883,20 @@ class Runnable(ABC, Generic[Input, Output]):
780
883
  Subclasses should override this method if they can batch more efficiently;
781
884
  e.g., if the underlying ``Runnable`` uses an API which supports a batch mode.
782
885
 
886
+ Args:
887
+ inputs: A list of inputs to the ``Runnable``.
888
+ config: A config to use when invoking the ``Runnable``. The config supports
889
+ standard keys like ``'tags'``, ``'metadata'`` for
890
+ tracing purposes, ``'max_concurrency'`` for controlling how much work
891
+ to do in parallel, and other keys. Please refer to the
892
+ ``RunnableConfig`` for more details. Defaults to None.
893
+ return_exceptions: Whether to return exceptions instead of raising them.
894
+ Defaults to False.
895
+ **kwargs: Additional keyword arguments to pass to the ``Runnable``.
896
+
897
+ Returns:
898
+ A list of outputs from the ``Runnable``.
899
+
783
900
  """
784
901
  if not inputs:
785
902
  return []
@@ -834,6 +951,20 @@ class Runnable(ABC, Generic[Input, Output]):
834
951
 
835
952
  Yields results as they complete.
836
953
 
954
+ Args:
955
+ inputs: A list of inputs to the ``Runnable``.
956
+ config: A config to use when invoking the ``Runnable``.
957
+ The config supports standard keys like ``'tags'``, ``'metadata'`` for
958
+ tracing purposes, ``'max_concurrency'`` for controlling how much work to
959
+ do in parallel, and other keys. Please refer to the ``RunnableConfig``
960
+ for more details. Defaults to None.
961
+ return_exceptions: Whether to return exceptions instead of raising them.
962
+ Defaults to False.
963
+ **kwargs: Additional keyword arguments to pass to the ``Runnable``.
964
+
965
+ Yields:
966
+ Tuples of the index of the input and the output from the ``Runnable``.
967
+
837
968
  """
838
969
  if not inputs:
839
970
  return
@@ -898,7 +1029,7 @@ class Runnable(ABC, Generic[Input, Output]):
898
1029
  for more details. Defaults to None.
899
1030
  return_exceptions: Whether to return exceptions instead of raising them.
900
1031
  Defaults to False.
901
- kwargs: Additional keyword arguments to pass to the ``Runnable``.
1032
+ **kwargs: Additional keyword arguments to pass to the ``Runnable``.
902
1033
 
903
1034
  Returns:
904
1035
  A list of outputs from the ``Runnable``.
@@ -1120,11 +1251,6 @@ class Runnable(ABC, Generic[Input, Output]):
1120
1251
  A ``RunLogPatch`` or ``RunLog`` object.
1121
1252
 
1122
1253
  """
1123
- from langchain_core.tracers.log_stream import (
1124
- LogStreamCallbackHandler,
1125
- _astream_log_implementation,
1126
- )
1127
-
1128
1254
  stream = LogStreamCallbackHandler(
1129
1255
  auto_close=False,
1130
1256
  include_names=include_names,
@@ -1254,6 +1380,7 @@ class Runnable(ABC, Generic[Input, Output]):
1254
1380
  '''Format the docs.'''
1255
1381
  return ", ".join([doc.page_content for doc in docs])
1256
1382
 
1383
+
1257
1384
  format_docs = RunnableLambda(format_docs)
1258
1385
 
1259
1386
  ``some_tool``:
@@ -1270,7 +1397,10 @@ class Runnable(ABC, Generic[Input, Output]):
1270
1397
  .. code-block:: python
1271
1398
 
1272
1399
  template = ChatPromptTemplate.from_messages(
1273
- [("system", "You are Cat Agent 007"), ("human", "{question}")]
1400
+ [
1401
+ ("system", "You are Cat Agent 007"),
1402
+ ("human", "{question}"),
1403
+ ]
1274
1404
  ).with_config({"run_name": "my_template", "tags": ["my_template"]})
1275
1405
 
1276
1406
 
@@ -1280,9 +1410,11 @@ class Runnable(ABC, Generic[Input, Output]):
1280
1410
 
1281
1411
  from langchain_core.runnables import RunnableLambda
1282
1412
 
1413
+
1283
1414
  async def reverse(s: str) -> str:
1284
1415
  return s[::-1]
1285
1416
 
1417
+
1286
1418
  chain = RunnableLambda(func=reverse)
1287
1419
 
1288
1420
  events = [
@@ -1375,11 +1507,6 @@ class Runnable(ABC, Generic[Input, Output]):
1375
1507
  NotImplementedError: If the version is not ``'v1'`` or ``'v2'``.
1376
1508
 
1377
1509
  """ # noqa: E501
1378
- from langchain_core.tracers.event_stream import (
1379
- _astream_events_implementation_v1,
1380
- _astream_events_implementation_v2,
1381
- )
1382
-
1383
1510
  if version == "v2":
1384
1511
  event_stream = _astream_events_implementation_v2(
1385
1512
  self,
@@ -1422,7 +1549,9 @@ class Runnable(ABC, Generic[Input, Output]):
1422
1549
  config: Optional[RunnableConfig] = None,
1423
1550
  **kwargs: Optional[Any],
1424
1551
  ) -> Iterator[Output]:
1425
- """Default implementation of transform, which buffers input and calls ``astream``.
1552
+ """Transform inputs to outputs.
1553
+
1554
+ Default implementation of transform, which buffers input and calls ``astream``.
1426
1555
 
1427
1556
  Subclasses should override this method if they can start producing output while
1428
1557
  input is still being generated.
@@ -1435,7 +1564,7 @@ class Runnable(ABC, Generic[Input, Output]):
1435
1564
  Yields:
1436
1565
  The output of the ``Runnable``.
1437
1566
 
1438
- """ # noqa: E501
1567
+ """
1439
1568
  final: Input
1440
1569
  got_first_val = False
1441
1570
 
@@ -1465,7 +1594,9 @@ class Runnable(ABC, Generic[Input, Output]):
1465
1594
  config: Optional[RunnableConfig] = None,
1466
1595
  **kwargs: Optional[Any],
1467
1596
  ) -> AsyncIterator[Output]:
1468
- """Default implementation of atransform, which buffers input and calls ``astream``.
1597
+ """Transform inputs to outputs.
1598
+
1599
+ Default implementation of atransform, which buffers input and calls ``astream``.
1469
1600
 
1470
1601
  Subclasses should override this method if they can start producing output while
1471
1602
  input is still being generated.
@@ -1478,7 +1609,7 @@ class Runnable(ABC, Generic[Input, Output]):
1478
1609
  Yields:
1479
1610
  The output of the ``Runnable``.
1480
1611
 
1481
- """ # noqa: E501
1612
+ """
1482
1613
  final: Input
1483
1614
  got_first_val = False
1484
1615
 
@@ -1522,22 +1653,16 @@ class Runnable(ABC, Generic[Input, Output]):
1522
1653
  from langchain_ollama import ChatOllama
1523
1654
  from langchain_core.output_parsers import StrOutputParser
1524
1655
 
1525
- llm = ChatOllama(model='llama2')
1656
+ llm = ChatOllama(model="llama2")
1526
1657
 
1527
1658
  # Without bind.
1528
- chain = (
1529
- llm
1530
- | StrOutputParser()
1531
- )
1659
+ chain = llm | StrOutputParser()
1532
1660
 
1533
1661
  chain.invoke("Repeat quoted words exactly: 'One two three four five.'")
1534
1662
  # Output is 'One two three four five.'
1535
1663
 
1536
1664
  # With bind.
1537
- chain = (
1538
- llm.bind(stop=["three"])
1539
- | StrOutputParser()
1540
- )
1665
+ chain = llm.bind(stop=["three"]) | StrOutputParser()
1541
1666
 
1542
1667
  chain.invoke("Repeat quoted words exactly: 'One two three four five.'")
1543
1668
  # Output is 'One two'
@@ -1609,24 +1734,25 @@ class Runnable(ABC, Generic[Input, Output]):
1609
1734
 
1610
1735
  import time
1611
1736
 
1612
- def test_runnable(time_to_sleep : int):
1737
+
1738
+ def test_runnable(time_to_sleep: int):
1613
1739
  time.sleep(time_to_sleep)
1614
1740
 
1741
+
1615
1742
  def fn_start(run_obj: Run):
1616
1743
  print("start_time:", run_obj.start_time)
1617
1744
 
1745
+
1618
1746
  def fn_end(run_obj: Run):
1619
1747
  print("end_time:", run_obj.end_time)
1620
1748
 
1749
+
1621
1750
  chain = RunnableLambda(test_runnable).with_listeners(
1622
- on_start=fn_start,
1623
- on_end=fn_end
1751
+ on_start=fn_start, on_end=fn_end
1624
1752
  )
1625
1753
  chain.invoke(2)
1626
1754
 
1627
1755
  """
1628
- from langchain_core.tracers.root_listeners import RootListenersTracer
1629
-
1630
1756
  return RunnableBinding(
1631
1757
  bound=self,
1632
1758
  config_factories=[
@@ -1650,7 +1776,9 @@ class Runnable(ABC, Generic[Input, Output]):
1650
1776
  on_end: Optional[AsyncListener] = None,
1651
1777
  on_error: Optional[AsyncListener] = None,
1652
1778
  ) -> Runnable[Input, Output]:
1653
- """Bind async lifecycle listeners to a ``Runnable``, returning a new ``Runnable``.
1779
+ """Bind async lifecycle listeners to a ``Runnable``.
1780
+
1781
+ Returns a new ``Runnable``.
1654
1782
 
1655
1783
  The Run object contains information about the run, including its ``id``,
1656
1784
  ``type``, ``input``, ``output``, ``error``, ``start_time``, ``end_time``, and
@@ -1716,9 +1844,7 @@ class Runnable(ABC, Generic[Input, Output]):
1716
1844
  on end callback ends at 2025-03-01T07:05:29.883893+00:00
1717
1845
  on end callback ends at 2025-03-01T07:05:30.884831+00:00
1718
1846
 
1719
- """ # noqa: E501
1720
- from langchain_core.tracers.root_listeners import AsyncRootListenersTracer
1721
-
1847
+ """
1722
1848
  return RunnableBinding(
1723
1849
  bound=self,
1724
1850
  config_factories=[
@@ -1796,7 +1922,7 @@ class Runnable(ABC, Generic[Input, Output]):
1796
1922
  if x == 1:
1797
1923
  raise ValueError("x is 1")
1798
1924
  else:
1799
- pass
1925
+ pass
1800
1926
 
1801
1927
 
1802
1928
  runnable = RunnableLambda(_lambda)
@@ -1808,10 +1934,11 @@ class Runnable(ABC, Generic[Input, Output]):
1808
1934
  except ValueError:
1809
1935
  pass
1810
1936
 
1811
- assert (count == 2)
1937
+ assert count == 2
1812
1938
 
1813
1939
  """
1814
- from langchain_core.runnables.retry import RunnableRetry
1940
+ # Import locally to prevent circular import
1941
+ from langchain_core.runnables.retry import RunnableRetry # noqa: PLC0415
1815
1942
 
1816
1943
  return RunnableRetry(
1817
1944
  bound=self,
@@ -1837,11 +1964,13 @@ class Runnable(ABC, Generic[Input, Output]):
1837
1964
 
1838
1965
  from langchain_core.runnables import RunnableLambda
1839
1966
 
1967
+
1840
1968
  def _lambda(x: int) -> int:
1841
1969
  return x + 1
1842
1970
 
1971
+
1843
1972
  runnable = RunnableLambda(_lambda)
1844
- print(runnable.map().invoke([1, 2, 3])) # [2, 3, 4]
1973
+ print(runnable.map().invoke([1, 2, 3])) # [2, 3, 4]
1845
1974
 
1846
1975
  """
1847
1976
  return RunnableEach(bound=self)
@@ -1859,13 +1988,15 @@ class Runnable(ABC, Generic[Input, Output]):
1859
1988
  in order, upon failures.
1860
1989
 
1861
1990
  Args:
1862
- fallbacks: A sequence of runnables to try if the original ``Runnable`` fails.
1991
+ fallbacks: A sequence of runnables to try if the original ``Runnable``
1992
+ fails.
1863
1993
  exceptions_to_handle: A tuple of exception types to handle.
1864
1994
  Defaults to ``(Exception,)``.
1865
1995
  exception_key: If string is specified then handled exceptions will be passed
1866
- to fallbacks as part of the input under the specified key. If None,
1867
- exceptions will not be passed to fallbacks. If used, the base ``Runnable``
1868
- and its fallbacks must accept a dictionary as input. Defaults to None.
1996
+ to fallbacks as part of the input under the specified key.
1997
+ If None, exceptions will not be passed to fallbacks.
1998
+ If used, the base ``Runnable`` and its fallbacks must accept a
1999
+ dictionary as input. Defaults to None.
1869
2000
 
1870
2001
  Returns:
1871
2002
  A new ``Runnable`` that will try the original ``Runnable``, and then each
@@ -1891,23 +2022,28 @@ class Runnable(ABC, Generic[Input, Output]):
1891
2022
 
1892
2023
  runnable = RunnableGenerator(_generate_immediate_error).with_fallbacks(
1893
2024
  [RunnableGenerator(_generate)]
1894
- )
1895
- print(''.join(runnable.stream({}))) #foo bar
2025
+ )
2026
+ print("".join(runnable.stream({}))) # foo bar
1896
2027
 
1897
2028
  Args:
1898
- fallbacks: A sequence of runnables to try if the original ``Runnable`` fails.
2029
+ fallbacks: A sequence of runnables to try if the original ``Runnable``
2030
+ fails.
1899
2031
  exceptions_to_handle: A tuple of exception types to handle.
1900
2032
  exception_key: If string is specified then handled exceptions will be passed
1901
- to fallbacks as part of the input under the specified key. If None,
1902
- exceptions will not be passed to fallbacks. If used, the base ``Runnable``
1903
- and its fallbacks must accept a dictionary as input.
2033
+ to fallbacks as part of the input under the specified key.
2034
+ If None, exceptions will not be passed to fallbacks.
2035
+ If used, the base ``Runnable`` and its fallbacks must accept a
2036
+ dictionary as input.
1904
2037
 
1905
2038
  Returns:
1906
2039
  A new ``Runnable`` that will try the original ``Runnable``, and then each
1907
2040
  fallback in order, upon failures.
1908
2041
 
1909
- """ # noqa: E501
1910
- from langchain_core.runnables.fallbacks import RunnableWithFallbacks
2042
+ """
2043
+ # Import locally to prevent circular import
2044
+ from langchain_core.runnables.fallbacks import ( # noqa: PLC0415
2045
+ RunnableWithFallbacks,
2046
+ )
1911
2047
 
1912
2048
  return RunnableWithFallbacks(
1913
2049
  runnable=self,
@@ -1931,11 +2067,14 @@ class Runnable(ABC, Generic[Input, Output]):
1931
2067
  serialized: Optional[dict[str, Any]] = None,
1932
2068
  **kwargs: Optional[Any],
1933
2069
  ) -> Output:
1934
- """Helper method to transform an ``Input`` value to an ``Output`` value, with callbacks.
2070
+ """Call with config.
2071
+
2072
+ Helper method to transform an ``Input`` value to an ``Output`` value,
2073
+ with callbacks.
1935
2074
 
1936
2075
  Use this method to implement ``invoke`` in subclasses.
1937
2076
 
1938
- """ # noqa: E501
2077
+ """
1939
2078
  config = ensure_config(config)
1940
2079
  callback_manager = get_callback_manager_for_config(config)
1941
2080
  run_manager = callback_manager.on_chain_start(
@@ -1982,10 +2121,13 @@ class Runnable(ABC, Generic[Input, Output]):
1982
2121
  serialized: Optional[dict[str, Any]] = None,
1983
2122
  **kwargs: Optional[Any],
1984
2123
  ) -> Output:
1985
- """Helper method to transform an ``Input`` value to an ``Output`` value, with callbacks.
2124
+ """Async call with config.
2125
+
2126
+ Helper method to transform an ``Input`` value to an ``Output`` value,
2127
+ with callbacks.
1986
2128
 
1987
2129
  Use this method to implement ``ainvoke`` in subclasses.
1988
- """ # noqa: E501
2130
+ """
1989
2131
  config = ensure_config(config)
1990
2132
  callback_manager = get_async_callback_manager_for_config(config)
1991
2133
  run_manager = await callback_manager.on_chain_start(
@@ -2187,9 +2329,6 @@ class Runnable(ABC, Generic[Input, Output]):
2187
2329
  Use this to implement ``stream`` or ``transform`` in ``Runnable`` subclasses.
2188
2330
 
2189
2331
  """
2190
- # Mixin that is used by both astream log and astream events implementation
2191
- from langchain_core.tracers._streaming import _StreamingCallbackHandler
2192
-
2193
2332
  # tee the input so we can iterate over it twice
2194
2333
  input_for_tracing, input_for_transform = tee(inputs, 2)
2195
2334
  # Start the input iterator to ensure the input Runnable starts before this one
@@ -2293,9 +2432,6 @@ class Runnable(ABC, Generic[Input, Output]):
2293
2432
  Use this to implement ``astream`` or ``atransform`` in ``Runnable`` subclasses.
2294
2433
 
2295
2434
  """
2296
- # Mixin that is used by both astream log and astream events implementation
2297
- from langchain_core.tracers._streaming import _StreamingCallbackHandler
2298
-
2299
2435
  # tee the input so we can iterate over it twice
2300
2436
  input_for_tracing, input_for_transform = atee(inputs, 2)
2301
2437
  # Start the input iterator to ensure the input Runnable starts before this one
@@ -2398,8 +2534,6 @@ class Runnable(ABC, Generic[Input, Output]):
2398
2534
  name: The name of the tool. Defaults to None.
2399
2535
  description: The description of the tool. Defaults to None.
2400
2536
  arg_types: A dictionary of argument names to types. Defaults to None.
2401
- message_version: Version of ``ToolMessage`` to return given
2402
- :class:`~langchain_core.messages.content_blocks.ToolCall` input.
2403
2537
 
2404
2538
  Returns:
2405
2539
  A ``BaseTool`` instance.
@@ -2411,13 +2545,16 @@ class Runnable(ABC, Generic[Input, Output]):
2411
2545
  from typing_extensions import TypedDict
2412
2546
  from langchain_core.runnables import RunnableLambda
2413
2547
 
2548
+
2414
2549
  class Args(TypedDict):
2415
2550
  a: int
2416
2551
  b: list[int]
2417
2552
 
2553
+
2418
2554
  def f(x: Args) -> str:
2419
2555
  return str(x["a"] * max(x["b"]))
2420
2556
 
2557
+
2421
2558
  runnable = RunnableLambda(f)
2422
2559
  as_tool = runnable.as_tool()
2423
2560
  as_tool.invoke({"a": 3, "b": [1, 2]})
@@ -2450,9 +2587,11 @@ class Runnable(ABC, Generic[Input, Output]):
2450
2587
  from typing import Any
2451
2588
  from langchain_core.runnables import RunnableLambda
2452
2589
 
2590
+
2453
2591
  def f(x: dict[str, Any]) -> str:
2454
2592
  return str(x["a"] * max(x["b"]))
2455
2593
 
2594
+
2456
2595
  runnable = RunnableLambda(f)
2457
2596
  as_tool = runnable.as_tool(arg_types={"a": int, "b": list[int]})
2458
2597
  as_tool.invoke({"a": 3, "b": [1, 2]})
@@ -2463,12 +2602,15 @@ class Runnable(ABC, Generic[Input, Output]):
2463
2602
 
2464
2603
  from langchain_core.runnables import RunnableLambda
2465
2604
 
2605
+
2466
2606
  def f(x: str) -> str:
2467
2607
  return x + "a"
2468
2608
 
2609
+
2469
2610
  def g(x: str) -> str:
2470
2611
  return x + "z"
2471
2612
 
2613
+
2472
2614
  runnable = RunnableLambda(f) | g
2473
2615
  as_tool = runnable.as_tool()
2474
2616
  as_tool.invoke("b")
@@ -2477,7 +2619,7 @@ class Runnable(ABC, Generic[Input, Output]):
2477
2619
 
2478
2620
  """
2479
2621
  # Avoid circular import
2480
- from langchain_core.tools import convert_runnable_to_tool
2622
+ from langchain_core.tools import convert_runnable_to_tool # noqa: PLC0415
2481
2623
 
2482
2624
  return convert_runnable_to_tool(
2483
2625
  self,
@@ -2520,6 +2662,9 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2520
2662
  Args:
2521
2663
  **kwargs: A dictionary of ``ConfigurableField`` instances to configure.
2522
2664
 
2665
+ Raises:
2666
+ ValueError: If a configuration key is not found in the ``Runnable``.
2667
+
2523
2668
  Returns:
2524
2669
  A new ``Runnable`` with the fields configured.
2525
2670
 
@@ -2538,18 +2683,22 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2538
2683
 
2539
2684
  # max_tokens = 20
2540
2685
  print(
2541
- "max_tokens_20: ",
2542
- model.invoke("tell me something about chess").content
2686
+ "max_tokens_20: ", model.invoke("tell me something about chess").content
2543
2687
  )
2544
2688
 
2545
2689
  # max_tokens = 200
2546
- print("max_tokens_200: ", model.with_config(
2547
- configurable={"output_token_number": 200}
2548
- ).invoke("tell me something about chess").content
2690
+ print(
2691
+ "max_tokens_200: ",
2692
+ model.with_config(configurable={"output_token_number": 200})
2693
+ .invoke("tell me something about chess")
2694
+ .content,
2549
2695
  )
2550
2696
 
2551
2697
  """
2552
- from langchain_core.runnables.configurable import RunnableConfigurableFields
2698
+ # Import locally to prevent circular import
2699
+ from langchain_core.runnables.configurable import ( # noqa: PLC0415
2700
+ RunnableConfigurableFields,
2701
+ )
2553
2702
 
2554
2703
  model_fields = type(self).model_fields
2555
2704
  for key in kwargs:
@@ -2596,7 +2745,7 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2596
2745
  ).configurable_alternatives(
2597
2746
  ConfigurableField(id="llm"),
2598
2747
  default_key="anthropic",
2599
- openai=ChatOpenAI()
2748
+ openai=ChatOpenAI(),
2600
2749
  )
2601
2750
 
2602
2751
  # uses the default model ChatAnthropic
@@ -2604,13 +2753,14 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2604
2753
 
2605
2754
  # uses ChatOpenAI
2606
2755
  print(
2607
- model.with_config(
2608
- configurable={"llm": "openai"}
2609
- ).invoke("which organization created you?").content
2756
+ model.with_config(configurable={"llm": "openai"})
2757
+ .invoke("which organization created you?")
2758
+ .content
2610
2759
  )
2611
2760
 
2612
2761
  """
2613
- from langchain_core.runnables.configurable import (
2762
+ # Import locally to prevent circular import
2763
+ from langchain_core.runnables.configurable import ( # noqa: PLC0415
2614
2764
  RunnableConfigurableAlternatives,
2615
2765
  )
2616
2766
 
@@ -2626,7 +2776,11 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2626
2776
  def _seq_input_schema(
2627
2777
  steps: list[Runnable[Any, Any]], config: Optional[RunnableConfig]
2628
2778
  ) -> type[BaseModel]:
2629
- from langchain_core.runnables.passthrough import RunnableAssign, RunnablePick
2779
+ # Import locally to prevent circular import
2780
+ from langchain_core.runnables.passthrough import ( # noqa: PLC0415
2781
+ RunnableAssign,
2782
+ RunnablePick,
2783
+ )
2630
2784
 
2631
2785
  first = steps[0]
2632
2786
  if len(steps) == 1:
@@ -2652,7 +2806,11 @@ def _seq_input_schema(
2652
2806
  def _seq_output_schema(
2653
2807
  steps: list[Runnable[Any, Any]], config: Optional[RunnableConfig]
2654
2808
  ) -> type[BaseModel]:
2655
- from langchain_core.runnables.passthrough import RunnableAssign, RunnablePick
2809
+ # Import locally to prevent circular import
2810
+ from langchain_core.runnables.passthrough import ( # noqa: PLC0415
2811
+ RunnableAssign,
2812
+ RunnablePick,
2813
+ )
2656
2814
 
2657
2815
  last = steps[-1]
2658
2816
  if len(steps) == 1:
@@ -2739,12 +2897,15 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
2739
2897
 
2740
2898
  from langchain_core.runnables import RunnableLambda
2741
2899
 
2900
+
2742
2901
  def add_one(x: int) -> int:
2743
2902
  return x + 1
2744
2903
 
2904
+
2745
2905
  def mul_two(x: int) -> int:
2746
2906
  return x * 2
2747
2907
 
2908
+
2748
2909
  runnable_1 = RunnableLambda(add_one)
2749
2910
  runnable_2 = RunnableLambda(mul_two)
2750
2911
  sequence = runnable_1 | runnable_2
@@ -2764,17 +2925,17 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
2764
2925
  from langchain_openai import ChatOpenAI
2765
2926
 
2766
2927
  prompt = PromptTemplate.from_template(
2767
- 'In JSON format, give me a list of {topic} and their '
2768
- 'corresponding names in French, Spanish and in a '
2769
- 'Cat Language.'
2928
+ "In JSON format, give me a list of {topic} and their "
2929
+ "corresponding names in French, Spanish and in a "
2930
+ "Cat Language."
2770
2931
  )
2771
2932
 
2772
2933
  model = ChatOpenAI()
2773
2934
  chain = prompt | model | SimpleJsonOutputParser()
2774
2935
 
2775
- async for chunk in chain.astream({'topic': 'colors'}):
2776
- print('-') # noqa: T201
2777
- print(chunk, sep='', flush=True) # noqa: T201
2936
+ async for chunk in chain.astream({"topic": "colors"}):
2937
+ print("-") # noqa: T201
2938
+ print(chunk, sep="", flush=True) # noqa: T201
2778
2939
 
2779
2940
  """
2780
2941
 
@@ -2829,6 +2990,11 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
2829
2990
  @classmethod
2830
2991
  @override
2831
2992
  def get_lc_namespace(cls) -> list[str]:
2993
+ """Get the namespace of the langchain object.
2994
+
2995
+ Returns:
2996
+ ``["langchain", "schema", "runnable"]``
2997
+ """
2832
2998
  return ["langchain", "schema", "runnable"]
2833
2999
 
2834
3000
  @property
@@ -2843,14 +3009,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
2843
3009
  @classmethod
2844
3010
  @override
2845
3011
  def is_lc_serializable(cls) -> bool:
2846
- """Check if the object is serializable.
2847
-
2848
- Returns:
2849
- True if the object is serializable, False otherwise.
2850
-
2851
- Defaults to True.
2852
-
2853
- """
3012
+ """Return True as this class is serializable."""
2854
3013
  return True
2855
3014
 
2856
3015
  model_config = ConfigDict(
@@ -2908,7 +3067,8 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
2908
3067
  The config specs of the ``Runnable``.
2909
3068
 
2910
3069
  """
2911
- from langchain_core.beta.runnables.context import (
3070
+ # Import locally to prevent circular import
3071
+ from langchain_core.beta.runnables.context import ( # noqa: PLC0415
2912
3072
  CONTEXT_CONFIG_PREFIX,
2913
3073
  _key_from_id,
2914
3074
  )
@@ -2966,7 +3126,8 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
2966
3126
  ValueError: If a ``Runnable`` has no first or last node.
2967
3127
 
2968
3128
  """
2969
- from langchain_core.runnables.graph import Graph
3129
+ # Import locally to prevent circular import
3130
+ from langchain_core.runnables.graph import Graph # noqa: PLC0415
2970
3131
 
2971
3132
  graph = Graph()
2972
3133
  for step in self.steps:
@@ -3054,7 +3215,10 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
3054
3215
  def invoke(
3055
3216
  self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
3056
3217
  ) -> Output:
3057
- from langchain_core.beta.runnables.context import config_with_context
3218
+ # Import locally to prevent circular import
3219
+ from langchain_core.beta.runnables.context import ( # noqa: PLC0415
3220
+ config_with_context,
3221
+ )
3058
3222
 
3059
3223
  # setup callbacks and context
3060
3224
  config = config_with_context(ensure_config(config), self.steps)
@@ -3095,7 +3259,10 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
3095
3259
  config: Optional[RunnableConfig] = None,
3096
3260
  **kwargs: Optional[Any],
3097
3261
  ) -> Output:
3098
- from langchain_core.beta.runnables.context import aconfig_with_context
3262
+ # Import locally to prevent circular import
3263
+ from langchain_core.beta.runnables.context import ( # noqa: PLC0415
3264
+ aconfig_with_context,
3265
+ )
3099
3266
 
3100
3267
  # setup callbacks and context
3101
3268
  config = aconfig_with_context(ensure_config(config), self.steps)
@@ -3139,8 +3306,10 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
3139
3306
  return_exceptions: bool = False,
3140
3307
  **kwargs: Optional[Any],
3141
3308
  ) -> list[Output]:
3142
- from langchain_core.beta.runnables.context import config_with_context
3143
- from langchain_core.callbacks.manager import CallbackManager
3309
+ # Import locally to prevent circular import
3310
+ from langchain_core.beta.runnables.context import ( # noqa: PLC0415
3311
+ config_with_context,
3312
+ )
3144
3313
 
3145
3314
  if not inputs:
3146
3315
  return []
@@ -3269,8 +3438,10 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
3269
3438
  return_exceptions: bool = False,
3270
3439
  **kwargs: Optional[Any],
3271
3440
  ) -> list[Output]:
3272
- from langchain_core.beta.runnables.context import aconfig_with_context
3273
- from langchain_core.callbacks.manager import AsyncCallbackManager
3441
+ # Import locally to prevent circular import
3442
+ from langchain_core.beta.runnables.context import ( # noqa: PLC0415
3443
+ aconfig_with_context,
3444
+ )
3274
3445
 
3275
3446
  if not inputs:
3276
3447
  return []
@@ -3400,7 +3571,10 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
3400
3571
  config: RunnableConfig,
3401
3572
  **kwargs: Any,
3402
3573
  ) -> Iterator[Output]:
3403
- from langchain_core.beta.runnables.context import config_with_context
3574
+ # Import locally to prevent circular import
3575
+ from langchain_core.beta.runnables.context import ( # noqa: PLC0415
3576
+ config_with_context,
3577
+ )
3404
3578
 
3405
3579
  steps = [self.first, *self.middle, self.last]
3406
3580
  config = config_with_context(config, self.steps)
@@ -3427,7 +3601,10 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
3427
3601
  config: RunnableConfig,
3428
3602
  **kwargs: Any,
3429
3603
  ) -> AsyncIterator[Output]:
3430
- from langchain_core.beta.runnables.context import aconfig_with_context
3604
+ # Import locally to prevent circular import
3605
+ from langchain_core.beta.runnables.context import ( # noqa: PLC0415
3606
+ aconfig_with_context,
3607
+ )
3431
3608
 
3432
3609
  steps = [self.first, *self.middle, self.last]
3433
3610
  config = aconfig_with_context(config, self.steps)
@@ -3520,15 +3697,19 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
3520
3697
 
3521
3698
  from langchain_core.runnables import RunnableLambda
3522
3699
 
3700
+
3523
3701
  def add_one(x: int) -> int:
3524
3702
  return x + 1
3525
3703
 
3704
+
3526
3705
  def mul_two(x: int) -> int:
3527
3706
  return x * 2
3528
3707
 
3708
+
3529
3709
  def mul_three(x: int) -> int:
3530
3710
  return x * 3
3531
3711
 
3712
+
3532
3713
  runnable_1 = RunnableLambda(add_one)
3533
3714
  runnable_2 = RunnableLambda(mul_two)
3534
3715
  runnable_3 = RunnableLambda(mul_three)
@@ -3564,8 +3745,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
3564
3745
 
3565
3746
  model = ChatOpenAI()
3566
3747
  joke_chain = (
3567
- ChatPromptTemplate.from_template("tell me a joke about {topic}")
3568
- | model
3748
+ ChatPromptTemplate.from_template("tell me a joke about {topic}") | model
3569
3749
  )
3570
3750
  poem_chain = (
3571
3751
  ChatPromptTemplate.from_template("write a 2-line poem about {topic}")
@@ -3619,11 +3799,17 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
3619
3799
  @classmethod
3620
3800
  @override
3621
3801
  def is_lc_serializable(cls) -> bool:
3802
+ """Return True as this class is serializable."""
3622
3803
  return True
3623
3804
 
3624
3805
  @classmethod
3625
3806
  @override
3626
3807
  def get_lc_namespace(cls) -> list[str]:
3808
+ """Get the namespace of the langchain object.
3809
+
3810
+ Returns:
3811
+ ``["langchain", "schema", "runnable"]``
3812
+ """
3627
3813
  return ["langchain", "schema", "runnable"]
3628
3814
 
3629
3815
  model_config = ConfigDict(
@@ -3731,7 +3917,8 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
3731
3917
  ValueError: If a ``Runnable`` has no first or last node.
3732
3918
 
3733
3919
  """
3734
- from langchain_core.runnables.graph import Graph
3920
+ # Import locally to prevent circular import
3921
+ from langchain_core.runnables.graph import Graph # noqa: PLC0415
3735
3922
 
3736
3923
  graph = Graph()
3737
3924
  input_node = graph.add_node(self.get_input_schema(config))
@@ -3767,8 +3954,6 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
3767
3954
  def invoke(
3768
3955
  self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
3769
3956
  ) -> dict[str, Any]:
3770
- from langchain_core.callbacks.manager import CallbackManager
3771
-
3772
3957
  # setup callbacks
3773
3958
  config = ensure_config(config)
3774
3959
  callback_manager = CallbackManager.configure(
@@ -4068,9 +4253,10 @@ class RunnableGenerator(Runnable[Input, Output]):
4068
4253
  for token in ["Have", " a", " nice", " day"]:
4069
4254
  yield token
4070
4255
 
4256
+
4071
4257
  runnable = RunnableGenerator(agen)
4072
4258
  await runnable.ainvoke(None) # "Have a nice day"
4073
- [p async for p in runnable.astream(None)] # ["Have", " a", " nice", " day"]
4259
+ [p async for p in runnable.astream(None)] # ["Have", " a", " nice", " day"]
4074
4260
 
4075
4261
  ``RunnableGenerator`` makes it easy to implement custom behavior within a streaming
4076
4262
  context. Below we show an example:
@@ -4090,6 +4276,7 @@ class RunnableGenerator(Runnable[Input, Output]):
4090
4276
  | StrOutputParser()
4091
4277
  )
4092
4278
 
4279
+
4093
4280
  def character_generator(input: Iterator[str]) -> Iterator[str]:
4094
4281
  for token in input:
4095
4282
  if "," in token or "." in token:
@@ -4100,7 +4287,10 @@ class RunnableGenerator(Runnable[Input, Output]):
4100
4287
 
4101
4288
  runnable = chant_chain | character_generator
4102
4289
  assert type(runnable.last) is RunnableGenerator
4103
- "".join(runnable.stream({"topic": "waste"})) # Reduce👏, Reuse👏, Recycle👏.
4290
+ "".join(
4291
+ runnable.stream({"topic": "waste"})
4292
+ ) # Reduce👏, Reuse👏, Recycle👏.
4293
+
4104
4294
 
4105
4295
  # Note that RunnableLambda can be used to delay streaming of one step in a
4106
4296
  # sequence until the previous step is finished:
@@ -4109,6 +4299,7 @@ class RunnableGenerator(Runnable[Input, Output]):
4109
4299
  for character in input[::-1]:
4110
4300
  yield character
4111
4301
 
4302
+
4112
4303
  runnable = chant_chain | RunnableLambda(reverse_generator)
4113
4304
  "".join(runnable.stream({"topic": "waste"})) # ".elcycer ,esuer ,ecudeR"
4114
4305
 
@@ -4353,26 +4544,29 @@ class RunnableLambda(Runnable[Input, Output]):
4353
4544
  # This is a RunnableLambda
4354
4545
  from langchain_core.runnables import RunnableLambda
4355
4546
 
4547
+
4356
4548
  def add_one(x: int) -> int:
4357
4549
  return x + 1
4358
4550
 
4551
+
4359
4552
  runnable = RunnableLambda(add_one)
4360
4553
 
4361
- runnable.invoke(1) # returns 2
4362
- runnable.batch([1, 2, 3]) # returns [2, 3, 4]
4554
+ runnable.invoke(1) # returns 2
4555
+ runnable.batch([1, 2, 3]) # returns [2, 3, 4]
4363
4556
 
4364
4557
  # Async is supported by default by delegating to the sync implementation
4365
- await runnable.ainvoke(1) # returns 2
4366
- await runnable.abatch([1, 2, 3]) # returns [2, 3, 4]
4558
+ await runnable.ainvoke(1) # returns 2
4559
+ await runnable.abatch([1, 2, 3]) # returns [2, 3, 4]
4367
4560
 
4368
4561
 
4369
4562
  # Alternatively, can provide both synd and sync implementations
4370
4563
  async def add_one_async(x: int) -> int:
4371
4564
  return x + 1
4372
4565
 
4566
+
4373
4567
  runnable = RunnableLambda(add_one, afunc=add_one_async)
4374
- runnable.invoke(1) # Uses add_one
4375
- await runnable.ainvoke(1) # Uses add_one_async
4568
+ runnable.invoke(1) # Uses add_one
4569
+ await runnable.ainvoke(1) # Uses add_one_async
4376
4570
 
4377
4571
  """
4378
4572
 
@@ -4607,6 +4801,9 @@ class RunnableLambda(Runnable[Input, Output]):
4607
4801
  @override
4608
4802
  def get_graph(self, config: RunnableConfig | None = None) -> Graph:
4609
4803
  if deps := self.deps:
4804
+ # Import locally to prevent circular import
4805
+ from langchain_core.runnables.graph import Graph # noqa: PLC0415
4806
+
4610
4807
  graph = Graph()
4611
4808
  input_node = graph.add_node(self.get_input_schema(config))
4612
4809
  output_node = graph.add_node(self.get_output_schema(config))
@@ -4644,7 +4841,7 @@ class RunnableLambda(Runnable[Input, Output]):
4644
4841
  __hash__ = None # type: ignore[assignment]
4645
4842
 
4646
4843
  def __repr__(self) -> str:
4647
- """A string representation of this ``Runnable``."""
4844
+ """Return a string representation of this ``Runnable``."""
4648
4845
  if self._repr is None:
4649
4846
  if hasattr(self, "func") and isinstance(self.func, itemgetter):
4650
4847
  self._repr = f"RunnableLambda({str(self.func)[len('operator.') :]})"
@@ -5080,13 +5277,16 @@ class RunnableLambda(Runnable[Input, Output]):
5080
5277
 
5081
5278
 
5082
5279
  class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]):
5083
- """``Runnable`` that calls another ``Runnable`` for each element of the input sequence.
5280
+ """RunnableEachBase class.
5281
+
5282
+ ``Runnable`` that calls another ``Runnable`` for each element of the input sequence.
5084
5283
 
5085
- Use only if creating a new ``RunnableEach`` subclass with different ``__init__`` args.
5284
+ Use only if creating a new ``RunnableEach`` subclass with different ``__init__``
5285
+ args.
5086
5286
 
5087
5287
  See documentation for ``RunnableEach`` for more details.
5088
5288
 
5089
- """ # noqa: E501
5289
+ """
5090
5290
 
5091
5291
  bound: Runnable[Input, Output]
5092
5292
 
@@ -5154,11 +5354,17 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]):
5154
5354
  @classmethod
5155
5355
  @override
5156
5356
  def is_lc_serializable(cls) -> bool:
5357
+ """Return True as this class is serializable."""
5157
5358
  return True
5158
5359
 
5159
5360
  @classmethod
5160
5361
  @override
5161
5362
  def get_lc_namespace(cls) -> list[str]:
5363
+ """Get the namespace of the langchain object.
5364
+
5365
+ Returns:
5366
+ ``["langchain", "schema", "runnable"]``
5367
+ """
5162
5368
  return ["langchain", "schema", "runnable"]
5163
5369
 
5164
5370
  def _invoke(
@@ -5204,14 +5410,19 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]):
5204
5410
  config: Optional[RunnableConfig] = None,
5205
5411
  **kwargs: Optional[Any],
5206
5412
  ) -> AsyncIterator[StreamEvent]:
5413
+ def _error_stream_event(message: str) -> StreamEvent:
5414
+ raise NotImplementedError(message)
5415
+
5207
5416
  for _ in range(1):
5208
- msg = "RunnableEach does not support astream_events yet."
5209
- raise NotImplementedError(msg)
5210
- yield
5417
+ yield _error_stream_event(
5418
+ "RunnableEach does not support astream_events yet."
5419
+ )
5211
5420
 
5212
5421
 
5213
5422
  class RunnableEach(RunnableEachBase[Input, Output]):
5214
- """``Runnable`` that calls another ``Runnable`` for each element of the input sequence.
5423
+ """RunnableEach class.
5424
+
5425
+ ``Runnable`` that calls another ``Runnable`` for each element of the input sequence.
5215
5426
 
5216
5427
  It allows you to call multiple inputs with the bounded ``Runnable``.
5217
5428
 
@@ -5236,7 +5447,7 @@ class RunnableEach(RunnableEachBase[Input, Output]):
5236
5447
  {'topic':'Biology'}])
5237
5448
  print(output) # noqa: T201
5238
5449
 
5239
- """ # noqa: E501
5450
+ """
5240
5451
 
5241
5452
  @override
5242
5453
  def get_name(
@@ -5300,7 +5511,9 @@ class RunnableEach(RunnableEachBase[Input, Output]):
5300
5511
  on_end: Optional[AsyncListener] = None,
5301
5512
  on_error: Optional[AsyncListener] = None,
5302
5513
  ) -> RunnableEach[Input, Output]:
5303
- """Bind async lifecycle listeners to a ``Runnable``, returning a new ``Runnable``.
5514
+ """Bind async lifecycle listeners to a ``Runnable``.
5515
+
5516
+ Returns a new ``Runnable``.
5304
5517
 
5305
5518
  The ``Run`` object contains information about the run, including its ``id``,
5306
5519
  ``type``, ``input``, ``output``, ``error``, ``start_time``, ``end_time``, and
@@ -5317,7 +5530,7 @@ class RunnableEach(RunnableEachBase[Input, Output]):
5317
5530
  Returns:
5318
5531
  A new ``Runnable`` with the listeners bound.
5319
5532
 
5320
- """ # noqa: E501
5533
+ """
5321
5534
  return RunnableEach(
5322
5535
  bound=self.bound.with_alisteners(
5323
5536
  on_start=on_start, on_end=on_end, on_error=on_error
@@ -5388,22 +5601,23 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]): # type: ignore[
5388
5601
  """Create a ``RunnableBinding`` from a ``Runnable`` and kwargs.
5389
5602
 
5390
5603
  Args:
5391
- bound: The underlying ``Runnable`` that this ``Runnable`` delegates calls to.
5604
+ bound: The underlying ``Runnable`` that this ``Runnable`` delegates calls
5605
+ to.
5392
5606
  kwargs: optional kwargs to pass to the underlying ``Runnable``, when running
5393
- the underlying ``Runnable`` (e.g., via ``invoke``, ``batch``,
5394
- ``transform``, or ``stream`` or async variants)
5395
- Defaults to None.
5607
+ the underlying ``Runnable`` (e.g., via ``invoke``, ``batch``,
5608
+ ``transform``, or ``stream`` or async variants)
5609
+ Defaults to None.
5396
5610
  config: optional config to bind to the underlying ``Runnable``.
5397
- Defaults to None.
5611
+ Defaults to None.
5398
5612
  config_factories: optional list of config factories to apply to the
5399
- config before binding to the underlying ``Runnable``.
5400
- Defaults to None.
5613
+ config before binding to the underlying ``Runnable``.
5614
+ Defaults to None.
5401
5615
  custom_input_type: Specify to override the input type of the underlying
5402
- ``Runnable`` with a custom type. Defaults to None.
5616
+ ``Runnable`` with a custom type. Defaults to None.
5403
5617
  custom_output_type: Specify to override the output type of the underlying
5404
- ``Runnable`` with a custom type. Defaults to None.
5618
+ ``Runnable`` with a custom type. Defaults to None.
5405
5619
  **other_kwargs: Unpacked into the base class.
5406
- """ # noqa: E501
5620
+ """
5407
5621
  super().__init__(
5408
5622
  bound=bound,
5409
5623
  kwargs=kwargs or {},
@@ -5470,6 +5684,7 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]): # type: ignore[
5470
5684
  @classmethod
5471
5685
  @override
5472
5686
  def is_lc_serializable(cls) -> bool:
5687
+ """Return True as this class is serializable."""
5473
5688
  return True
5474
5689
 
5475
5690
  @classmethod
@@ -5477,7 +5692,8 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]): # type: ignore[
5477
5692
  def get_lc_namespace(cls) -> list[str]:
5478
5693
  """Get the namespace of the langchain object.
5479
5694
 
5480
- Defaults to ``["langchain", "schema", "runnable"]``.
5695
+ Returns:
5696
+ ``["langchain", "schema", "runnable"]``
5481
5697
  """
5482
5698
  return ["langchain", "schema", "runnable"]
5483
5699
 
@@ -5744,9 +5960,11 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): # type: ignore[no-re
5744
5960
  These methods include:
5745
5961
 
5746
5962
  - ``bind``: Bind kwargs to pass to the underlying ``Runnable`` when running it.
5747
- - ``with_config``: Bind config to pass to the underlying ``Runnable`` when running it.
5963
+ - ``with_config``: Bind config to pass to the underlying ``Runnable`` when running
5964
+ it.
5748
5965
  - ``with_listeners``: Bind lifecycle listeners to the underlying ``Runnable``.
5749
- - ``with_types``: Override the input and output types of the underlying ``Runnable``.
5966
+ - ``with_types``: Override the input and output types of the underlying
5967
+ ``Runnable``.
5750
5968
  - ``with_retry``: Bind a retry policy to the underlying ``Runnable``.
5751
5969
  - ``with_fallbacks``: Bind a fallback policy to the underlying ``Runnable``.
5752
5970
 
@@ -5758,12 +5976,13 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): # type: ignore[no-re
5758
5976
  # Create a Runnable binding that invokes the ChatModel with the
5759
5977
  # additional kwarg `stop=['-']` when running it.
5760
5978
  from langchain_community.chat_models import ChatOpenAI
5979
+
5761
5980
  model = ChatOpenAI()
5762
- model.invoke('Say "Parrot-MAGIC"', stop=['-']) # Should return `Parrot`
5981
+ model.invoke('Say "Parrot-MAGIC"', stop=["-"]) # Should return `Parrot`
5763
5982
  # Using it the easy way via `bind` method which returns a new
5764
5983
  # RunnableBinding
5765
- runnable_binding = model.bind(stop=['-'])
5766
- runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot`
5984
+ runnable_binding = model.bind(stop=["-"])
5985
+ runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot`
5767
5986
 
5768
5987
  Can also be done by instantiating a ``RunnableBinding`` directly (not
5769
5988
  recommended):
@@ -5771,13 +5990,14 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): # type: ignore[no-re
5771
5990
  .. code-block:: python
5772
5991
 
5773
5992
  from langchain_core.runnables import RunnableBinding
5993
+
5774
5994
  runnable_binding = RunnableBinding(
5775
5995
  bound=model,
5776
- kwargs={'stop': ['-']} # <-- Note the additional kwargs
5996
+ kwargs={"stop": ["-"]}, # <-- Note the additional kwargs
5777
5997
  )
5778
- runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot`
5998
+ runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot`
5779
5999
 
5780
- """ # noqa: E501
6000
+ """
5781
6001
 
5782
6002
  @override
5783
6003
  def bind(self, **kwargs: Any) -> Runnable[Input, Output]:
@@ -5847,7 +6067,6 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): # type: ignore[no-re
5847
6067
  Returns:
5848
6068
  A new ``Runnable`` with the listeners bound.
5849
6069
  """
5850
- from langchain_core.tracers.root_listeners import RootListenersTracer
5851
6070
 
5852
6071
  def listener_config_factory(config: RunnableConfig) -> RunnableConfig:
5853
6072
  return {
@@ -6050,6 +6269,7 @@ def chain(
6050
6269
  from langchain_core.prompts import PromptTemplate
6051
6270
  from langchain_openai import OpenAI
6052
6271
 
6272
+
6053
6273
  @chain
6054
6274
  def my_func(fields):
6055
6275
  prompt = PromptTemplate("Hello, {name}!")