langchain-core 0.3.75__py3-none-any.whl → 0.3.76__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic; see the registry's advisory page for more details.

Files changed (116)
  1. langchain_core/_api/beta_decorator.py +17 -40
  2. langchain_core/_api/deprecation.py +19 -6
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/beta/runnables/context.py +1 -2
  7. langchain_core/callbacks/base.py +11 -4
  8. langchain_core/callbacks/manager.py +81 -69
  9. langchain_core/callbacks/usage.py +4 -2
  10. langchain_core/chat_history.py +4 -6
  11. langchain_core/document_loaders/base.py +34 -9
  12. langchain_core/document_loaders/langsmith.py +3 -0
  13. langchain_core/documents/base.py +35 -10
  14. langchain_core/documents/transformers.py +4 -2
  15. langchain_core/embeddings/fake.py +8 -5
  16. langchain_core/env.py +2 -3
  17. langchain_core/example_selectors/base.py +12 -0
  18. langchain_core/exceptions.py +7 -0
  19. langchain_core/globals.py +17 -28
  20. langchain_core/indexing/api.py +56 -44
  21. langchain_core/indexing/base.py +5 -8
  22. langchain_core/indexing/in_memory.py +23 -3
  23. langchain_core/language_models/__init__.py +3 -2
  24. langchain_core/language_models/base.py +31 -20
  25. langchain_core/language_models/chat_models.py +94 -25
  26. langchain_core/language_models/fake_chat_models.py +5 -7
  27. langchain_core/language_models/llms.py +49 -17
  28. langchain_core/load/dump.py +2 -3
  29. langchain_core/load/load.py +15 -1
  30. langchain_core/load/serializable.py +38 -43
  31. langchain_core/memory.py +7 -3
  32. langchain_core/messages/ai.py +36 -19
  33. langchain_core/messages/base.py +13 -6
  34. langchain_core/messages/content_blocks.py +23 -2
  35. langchain_core/messages/human.py +2 -6
  36. langchain_core/messages/system.py +2 -6
  37. langchain_core/messages/tool.py +33 -13
  38. langchain_core/messages/utils.py +182 -72
  39. langchain_core/output_parsers/base.py +5 -2
  40. langchain_core/output_parsers/json.py +4 -4
  41. langchain_core/output_parsers/list.py +7 -22
  42. langchain_core/output_parsers/openai_functions.py +3 -0
  43. langchain_core/output_parsers/openai_tools.py +6 -1
  44. langchain_core/output_parsers/pydantic.py +4 -0
  45. langchain_core/output_parsers/string.py +5 -1
  46. langchain_core/output_parsers/xml.py +19 -19
  47. langchain_core/outputs/chat_generation.py +18 -7
  48. langchain_core/outputs/generation.py +14 -3
  49. langchain_core/outputs/llm_result.py +8 -1
  50. langchain_core/prompt_values.py +10 -4
  51. langchain_core/prompts/base.py +4 -9
  52. langchain_core/prompts/chat.py +87 -58
  53. langchain_core/prompts/dict.py +16 -8
  54. langchain_core/prompts/few_shot.py +9 -11
  55. langchain_core/prompts/few_shot_with_templates.py +5 -1
  56. langchain_core/prompts/image.py +12 -5
  57. langchain_core/prompts/message.py +5 -6
  58. langchain_core/prompts/pipeline.py +13 -8
  59. langchain_core/prompts/prompt.py +22 -8
  60. langchain_core/prompts/string.py +18 -10
  61. langchain_core/prompts/structured.py +7 -2
  62. langchain_core/rate_limiters.py +2 -2
  63. langchain_core/retrievers.py +7 -6
  64. langchain_core/runnables/base.py +402 -183
  65. langchain_core/runnables/branch.py +14 -19
  66. langchain_core/runnables/config.py +9 -15
  67. langchain_core/runnables/configurable.py +34 -19
  68. langchain_core/runnables/fallbacks.py +20 -13
  69. langchain_core/runnables/graph.py +44 -37
  70. langchain_core/runnables/graph_ascii.py +40 -17
  71. langchain_core/runnables/graph_mermaid.py +27 -15
  72. langchain_core/runnables/graph_png.py +27 -31
  73. langchain_core/runnables/history.py +55 -58
  74. langchain_core/runnables/passthrough.py +44 -21
  75. langchain_core/runnables/retry.py +9 -5
  76. langchain_core/runnables/router.py +9 -8
  77. langchain_core/runnables/schema.py +2 -0
  78. langchain_core/runnables/utils.py +51 -89
  79. langchain_core/stores.py +13 -25
  80. langchain_core/sys_info.py +9 -8
  81. langchain_core/tools/base.py +30 -23
  82. langchain_core/tools/convert.py +24 -13
  83. langchain_core/tools/simple.py +35 -3
  84. langchain_core/tools/structured.py +25 -2
  85. langchain_core/tracers/base.py +2 -2
  86. langchain_core/tracers/context.py +5 -1
  87. langchain_core/tracers/core.py +109 -39
  88. langchain_core/tracers/evaluation.py +22 -26
  89. langchain_core/tracers/event_stream.py +40 -27
  90. langchain_core/tracers/langchain.py +12 -3
  91. langchain_core/tracers/langchain_v1.py +10 -2
  92. langchain_core/tracers/log_stream.py +56 -17
  93. langchain_core/tracers/root_listeners.py +4 -20
  94. langchain_core/tracers/run_collector.py +6 -16
  95. langchain_core/tracers/schemas.py +5 -1
  96. langchain_core/utils/aiter.py +14 -6
  97. langchain_core/utils/env.py +3 -0
  98. langchain_core/utils/function_calling.py +37 -20
  99. langchain_core/utils/interactive_env.py +6 -2
  100. langchain_core/utils/iter.py +11 -3
  101. langchain_core/utils/json.py +5 -2
  102. langchain_core/utils/json_schema.py +15 -5
  103. langchain_core/utils/loading.py +5 -1
  104. langchain_core/utils/mustache.py +24 -15
  105. langchain_core/utils/pydantic.py +32 -4
  106. langchain_core/utils/utils.py +24 -8
  107. langchain_core/vectorstores/base.py +7 -20
  108. langchain_core/vectorstores/in_memory.py +18 -12
  109. langchain_core/vectorstores/utils.py +18 -12
  110. langchain_core/version.py +1 -1
  111. langchain_core-0.3.76.dist-info/METADATA +77 -0
  112. langchain_core-0.3.76.dist-info/RECORD +174 -0
  113. langchain_core-0.3.75.dist-info/METADATA +0 -106
  114. langchain_core-0.3.75.dist-info/RECORD +0 -174
  115. {langchain_core-0.3.75.dist-info → langchain_core-0.3.76.dist-info}/WHEEL +0 -0
  116. {langchain_core-0.3.75.dist-info → langchain_core-0.3.76.dist-info}/entry_points.txt +0 -0
@@ -41,6 +41,7 @@ from pydantic import BaseModel, ConfigDict, Field, RootModel
41
41
  from typing_extensions import Literal, get_args, override
42
42
 
43
43
  from langchain_core._api import beta_decorator
44
+ from langchain_core.callbacks.manager import AsyncCallbackManager, CallbackManager
44
45
  from langchain_core.load.serializable import (
45
46
  Serializable,
46
47
  SerializedConstructor,
@@ -60,7 +61,6 @@ from langchain_core.runnables.config import (
60
61
  run_in_executor,
61
62
  set_config_context,
62
63
  )
63
- from langchain_core.runnables.graph import Graph
64
64
  from langchain_core.runnables.utils import (
65
65
  AddableDict,
66
66
  AnyConfigurableField,
@@ -81,6 +81,19 @@ from langchain_core.runnables.utils import (
81
81
  is_async_callable,
82
82
  is_async_generator,
83
83
  )
84
+ from langchain_core.tracers._streaming import _StreamingCallbackHandler
85
+ from langchain_core.tracers.event_stream import (
86
+ _astream_events_implementation_v1,
87
+ _astream_events_implementation_v2,
88
+ )
89
+ from langchain_core.tracers.log_stream import (
90
+ LogStreamCallbackHandler,
91
+ _astream_log_implementation,
92
+ )
93
+ from langchain_core.tracers.root_listeners import (
94
+ AsyncRootListenersTracer,
95
+ RootListenersTracer,
96
+ )
84
97
  from langchain_core.utils.aiter import aclosing, atee, py_anext
85
98
  from langchain_core.utils.iter import safetee
86
99
  from langchain_core.utils.pydantic import create_model_v2
@@ -94,6 +107,7 @@ if TYPE_CHECKING:
94
107
  from langchain_core.runnables.fallbacks import (
95
108
  RunnableWithFallbacks as RunnableWithFallbacksT,
96
109
  )
110
+ from langchain_core.runnables.graph import Graph
97
111
  from langchain_core.runnables.retry import ExponentialJitterParams
98
112
  from langchain_core.runnables.schema import StreamEvent
99
113
  from langchain_core.tools import BaseTool
@@ -114,12 +128,13 @@ class Runnable(ABC, Generic[Input, Output]):
114
128
  - **``invoke``/``ainvoke``**: Transforms a single input into an output.
115
129
  - **``batch``/``abatch``**: Efficiently transforms multiple inputs into outputs.
116
130
  - **``stream``/``astream``**: Streams output from a single input as it's produced.
117
- - **``astream_log``**: Streams output and selected intermediate results from an input.
131
+ - **``astream_log``**: Streams output and selected intermediate results from an
132
+ input.
118
133
 
119
134
  Built-in optimizations:
120
135
 
121
- - **Batch**: By default, batch runs invoke() in parallel using a thread pool executor.
122
- Override to optimize batching.
136
+ - **Batch**: By default, batch runs invoke() in parallel using a thread pool
137
+ executor. Override to optimize batching.
123
138
 
124
139
  - **Async**: Methods with ``'a'`` suffix are asynchronous. By default, they execute
125
140
  the sync counterpart using asyncio's thread pool.
@@ -129,14 +144,16 @@ class Runnable(ABC, Generic[Input, Output]):
129
144
  execution, add tags and metadata for tracing and debugging etc.
130
145
 
131
146
  Runnables expose schematic information about their input, output and config via
132
- the ``input_schema`` property, the ``output_schema`` property and ``config_schema`` method.
147
+ the ``input_schema`` property, the ``output_schema`` property and ``config_schema``
148
+ method.
133
149
 
134
150
  LCEL and Composition
135
151
  ====================
136
152
 
137
- The LangChain Expression Language (LCEL) is a declarative way to compose ``Runnables``
138
- into chains. Any chain constructed this way will automatically have sync, async,
139
- batch, and streaming support.
153
+ The LangChain Expression Language (LCEL) is a declarative way to compose
154
+ ``Runnables``into chains.
155
+ Any chain constructed this way will automatically have sync, async, batch, and
156
+ streaming support.
140
157
 
141
158
  The main composition primitives are ``RunnableSequence`` and ``RunnableParallel``.
142
159
 
@@ -157,25 +174,27 @@ class Runnable(ABC, Generic[Input, Output]):
157
174
 
158
175
  # A RunnableSequence constructed using the `|` operator
159
176
  sequence = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)
160
- sequence.invoke(1) # 4
161
- sequence.batch([1, 2, 3]) # [4, 6, 8]
177
+ sequence.invoke(1) # 4
178
+ sequence.batch([1, 2, 3]) # [4, 6, 8]
162
179
 
163
180
 
164
181
  # A sequence that contains a RunnableParallel constructed using a dict literal
165
182
  sequence = RunnableLambda(lambda x: x + 1) | {
166
- 'mul_2': RunnableLambda(lambda x: x * 2),
167
- 'mul_5': RunnableLambda(lambda x: x * 5)
183
+ "mul_2": RunnableLambda(lambda x: x * 2),
184
+ "mul_5": RunnableLambda(lambda x: x * 5),
168
185
  }
169
- sequence.invoke(1) # {'mul_2': 4, 'mul_5': 10}
186
+ sequence.invoke(1) # {'mul_2': 4, 'mul_5': 10}
170
187
 
171
188
  Standard Methods
172
189
  ================
173
190
 
174
- All ``Runnable``s expose additional methods that can be used to modify their behavior
175
- (e.g., add a retry policy, add lifecycle listeners, make them configurable, etc.).
191
+ All ``Runnable``s expose additional methods that can be used to modify their
192
+ behavior (e.g., add a retry policy, add lifecycle listeners, make them
193
+ configurable, etc.).
176
194
 
177
- These methods will work on any ``Runnable``, including ``Runnable`` chains constructed
178
- by composing other ``Runnable``s. See the individual methods for details.
195
+ These methods will work on any ``Runnable``, including ``Runnable`` chains
196
+ constructed by composing other ``Runnable``s.
197
+ See the individual methods for details.
179
198
 
180
199
  For example,
181
200
 
@@ -219,6 +238,7 @@ class Runnable(ABC, Generic[Input, Output]):
219
238
  .. code-block:: python
220
239
 
221
240
  from langchain_core.globals import set_debug
241
+
222
242
  set_debug(True)
223
243
 
224
244
  Alternatively, you can pass existing or custom callbacks to any given chain:
@@ -227,14 +247,11 @@ class Runnable(ABC, Generic[Input, Output]):
227
247
 
228
248
  from langchain_core.tracers import ConsoleCallbackHandler
229
249
 
230
- chain.invoke(
231
- ...,
232
- config={'callbacks': [ConsoleCallbackHandler()]}
233
- )
250
+ chain.invoke(..., config={"callbacks": [ConsoleCallbackHandler()]})
234
251
 
235
252
  For a UI (and much more) checkout `LangSmith <https://docs.smith.langchain.com/>`__.
236
253
 
237
- """ # noqa: E501
254
+ """
238
255
 
239
256
  name: Optional[str]
240
257
  """The name of the ``Runnable``. Used for debugging and tracing."""
@@ -242,7 +259,15 @@ class Runnable(ABC, Generic[Input, Output]):
242
259
  def get_name(
243
260
  self, suffix: Optional[str] = None, *, name: Optional[str] = None
244
261
  ) -> str:
245
- """Get the name of the ``Runnable``."""
262
+ """Get the name of the ``Runnable``.
263
+
264
+ Args:
265
+ suffix: An optional suffix to append to the name.
266
+ name: An optional name to use instead of the ``Runnable``'s name.
267
+
268
+ Returns:
269
+ The name of the ``Runnable``.
270
+ """
246
271
  if name:
247
272
  name_ = name
248
273
  elif hasattr(self, "name") and self.name:
@@ -273,7 +298,13 @@ class Runnable(ABC, Generic[Input, Output]):
273
298
 
274
299
  @property
275
300
  def InputType(self) -> type[Input]: # noqa: N802
276
- """The type of input this ``Runnable`` accepts specified as a type annotation.""" # noqa: E501
301
+ """Input type.
302
+
303
+ The type of input this ``Runnable`` accepts specified as a type annotation.
304
+
305
+ Raises:
306
+ TypeError: If the input type cannot be inferred.
307
+ """
277
308
  # First loop through all parent classes and if any of them is
278
309
  # a pydantic model, we will pick up the generic parameterization
279
310
  # from that model via the __pydantic_generic_metadata__ attribute.
@@ -299,7 +330,13 @@ class Runnable(ABC, Generic[Input, Output]):
299
330
 
300
331
  @property
301
332
  def OutputType(self) -> type[Output]: # noqa: N802
302
- """The type of output this ``Runnable`` produces specified as a type annotation.""" # noqa: E501
333
+ """Output Type.
334
+
335
+ The type of output this ``Runnable`` produces specified as a type annotation.
336
+
337
+ Raises:
338
+ TypeError: If the output type cannot be inferred.
339
+ """
303
340
  # First loop through bases -- this will help generic
304
341
  # any pydantic models.
305
342
  for base in self.__class__.mro():
@@ -381,9 +418,11 @@ class Runnable(ABC, Generic[Input, Output]):
381
418
 
382
419
  from langchain_core.runnables import RunnableLambda
383
420
 
421
+
384
422
  def add_one(x: int) -> int:
385
423
  return x + 1
386
424
 
425
+
387
426
  runnable = RunnableLambda(add_one)
388
427
 
389
428
  print(runnable.get_input_jsonschema())
@@ -395,7 +434,10 @@ class Runnable(ABC, Generic[Input, Output]):
395
434
 
396
435
  @property
397
436
  def output_schema(self) -> type[BaseModel]:
398
- """The type of output this ``Runnable`` produces specified as a pydantic model.""" # noqa: E501
437
+ """Output schema.
438
+
439
+ The type of output this ``Runnable`` produces specified as a pydantic model.
440
+ """
399
441
  return self.get_output_schema()
400
442
 
401
443
  def get_output_schema(
@@ -455,9 +497,11 @@ class Runnable(ABC, Generic[Input, Output]):
455
497
 
456
498
  from langchain_core.runnables import RunnableLambda
457
499
 
500
+
458
501
  def add_one(x: int) -> int:
459
502
  return x + 1
460
503
 
504
+
461
505
  runnable = RunnableLambda(add_one)
462
506
 
463
507
  print(runnable.get_output_jsonschema())
@@ -535,6 +579,9 @@ class Runnable(ABC, Generic[Input, Output]):
535
579
 
536
580
  def get_graph(self, config: Optional[RunnableConfig] = None) -> Graph:
537
581
  """Return a graph representation of this ``Runnable``."""
582
+ # Import locally to prevent circular import
583
+ from langchain_core.runnables.graph import Graph # noqa: PLC0415
584
+
538
585
  graph = Graph()
539
586
  try:
540
587
  input_node = graph.add_node(self.get_input_schema(config))
@@ -555,7 +602,8 @@ class Runnable(ABC, Generic[Input, Output]):
555
602
  self, config: Optional[RunnableConfig] = None
556
603
  ) -> list[BasePromptTemplate]:
557
604
  """Return a list of prompts used by this ``Runnable``."""
558
- from langchain_core.prompts.base import BasePromptTemplate
605
+ # Import locally to prevent circular import
606
+ from langchain_core.prompts.base import BasePromptTemplate # noqa: PLC0415
559
607
 
560
608
  return [
561
609
  node.data
@@ -573,7 +621,17 @@ class Runnable(ABC, Generic[Input, Output]):
573
621
  Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other], Any]],
574
622
  ],
575
623
  ) -> RunnableSerializable[Input, Other]:
576
- """Compose this ``Runnable`` with another object to create a ``RunnableSequence``.""" # noqa: E501
624
+ """Runnable "or" operator.
625
+
626
+ Compose this ``Runnable`` with another object to create a
627
+ ``RunnableSequence``.
628
+
629
+ Args:
630
+ other: Another ``Runnable`` or a ``Runnable``-like object.
631
+
632
+ Returns:
633
+ A new ``Runnable``.
634
+ """
577
635
  return RunnableSequence(self, coerce_to_runnable(other))
578
636
 
579
637
  def __ror__(
@@ -586,7 +644,17 @@ class Runnable(ABC, Generic[Input, Output]):
586
644
  Mapping[str, Union[Runnable[Other, Any], Callable[[Other], Any], Any]],
587
645
  ],
588
646
  ) -> RunnableSerializable[Other, Output]:
589
- """Compose this ``Runnable`` with another object to create a ``RunnableSequence``.""" # noqa: E501
647
+ """Runnable "reverse-or" operator.
648
+
649
+ Compose this ``Runnable`` with another object to create a
650
+ ``RunnableSequence``.
651
+
652
+ Args:
653
+ other: Another ``Runnable`` or a ``Runnable``-like object.
654
+
655
+ Returns:
656
+ A new ``Runnable``.
657
+ """
590
658
  return RunnableSequence(coerce_to_runnable(other), self)
591
659
 
592
660
  def pipe(
@@ -594,21 +662,28 @@ class Runnable(ABC, Generic[Input, Output]):
594
662
  *others: Union[Runnable[Any, Other], Callable[[Any], Other]],
595
663
  name: Optional[str] = None,
596
664
  ) -> RunnableSerializable[Input, Other]:
597
- """Compose this ``Runnable`` with ``Runnable``-like objects to make a ``RunnableSequence``.
665
+ """Pipe runnables.
666
+
667
+ Compose this ``Runnable`` with ``Runnable``-like objects to make a
668
+ ``RunnableSequence``.
598
669
 
599
670
  Equivalent to ``RunnableSequence(self, *others)`` or ``self | others[0] | ...``
600
671
 
601
672
  Example:
673
+
602
674
  .. code-block:: python
603
675
 
604
676
  from langchain_core.runnables import RunnableLambda
605
677
 
678
+
606
679
  def add_one(x: int) -> int:
607
680
  return x + 1
608
681
 
682
+
609
683
  def mul_two(x: int) -> int:
610
684
  return x * 2
611
685
 
686
+
612
687
  runnable_1 = RunnableLambda(add_one)
613
688
  runnable_2 = RunnableLambda(mul_two)
614
689
  sequence = runnable_1.pipe(runnable_2)
@@ -623,13 +698,20 @@ class Runnable(ABC, Generic[Input, Output]):
623
698
  await sequence.abatch([1, 2, 3])
624
699
  # -> [4, 6, 8]
625
700
 
626
- """ # noqa: E501
701
+ Args:
702
+ *others: Other ``Runnable`` or ``Runnable``-like objects to compose
703
+ name: An optional name for the resulting ``RunnableSequence``.
704
+
705
+ Returns:
706
+ A new ``Runnable``.
707
+ """
627
708
  return RunnableSequence(self, *others, name=name)
628
709
 
629
710
  def pick(self, keys: Union[str, list[str]]) -> RunnableSerializable[Any, Any]:
630
711
  """Pick keys from the output dict of this ``Runnable``.
631
712
 
632
713
  Pick single key:
714
+
633
715
  .. code-block:: python
634
716
 
635
717
  import json
@@ -648,6 +730,7 @@ class Runnable(ABC, Generic[Input, Output]):
648
730
  # -> [1, 2, 3]
649
731
 
650
732
  Pick list of keys:
733
+
651
734
  .. code-block:: python
652
735
 
653
736
  from typing import Any
@@ -658,13 +741,14 @@ class Runnable(ABC, Generic[Input, Output]):
658
741
 
659
742
  as_str = RunnableLambda(str)
660
743
  as_json = RunnableLambda(json.loads)
744
+
745
+
661
746
  def as_bytes(x: Any) -> bytes:
662
747
  return bytes(x, "utf-8")
663
748
 
749
+
664
750
  chain = RunnableMap(
665
- str=as_str,
666
- json=as_json,
667
- bytes=RunnableLambda(as_bytes)
751
+ str=as_str, json=as_json, bytes=RunnableLambda(as_bytes)
668
752
  )
669
753
 
670
754
  chain.invoke("[1, 2, 3]")
@@ -674,8 +758,15 @@ class Runnable(ABC, Generic[Input, Output]):
674
758
  json_and_bytes_chain.invoke("[1, 2, 3]")
675
759
  # -> {"json": [1, 2, 3], "bytes": b"[1, 2, 3]"}
676
760
 
761
+ Args:
762
+ keys: A key or list of keys to pick from the output dict.
763
+
764
+ Returns:
765
+ a new ``Runnable``.
766
+
677
767
  """
678
- from langchain_core.runnables.passthrough import RunnablePick
768
+ # Import locally to prevent circular import
769
+ from langchain_core.runnables.passthrough import RunnablePick # noqa: PLC0415
679
770
 
680
771
  return self | RunnablePick(keys)
681
772
 
@@ -692,8 +783,6 @@ class Runnable(ABC, Generic[Input, Output]):
692
783
  ) -> RunnableSerializable[Any, Any]:
693
784
  """Assigns new fields to the dict output of this ``Runnable``.
694
785
 
695
- Returns a new ``Runnable``.
696
-
697
786
  .. code-block:: python
698
787
 
699
788
  from langchain_community.llms.fake import FakeStreamingListLLM
@@ -720,8 +809,16 @@ class Runnable(ABC, Generic[Input, Output]):
720
809
  {'str': {'title': 'Str',
721
810
  'type': 'string'}, 'hello': {'title': 'Hello', 'type': 'string'}}}
722
811
 
812
+ Args:
813
+ **kwargs: A mapping of keys to ``Runnable`` or ``Runnable``-like objects
814
+ that will be invoked with the entire output dict of this ``Runnable``.
815
+
816
+ Returns:
817
+ A new ``Runnable``.
818
+
723
819
  """
724
- from langchain_core.runnables.passthrough import RunnableAssign
820
+ # Import locally to prevent circular import
821
+ from langchain_core.runnables.passthrough import RunnableAssign # noqa: PLC0415
725
822
 
726
823
  return self | RunnableAssign(RunnableParallel[dict[str, Any]](kwargs))
727
824
 
@@ -755,12 +852,18 @@ class Runnable(ABC, Generic[Input, Output]):
755
852
  config: Optional[RunnableConfig] = None,
756
853
  **kwargs: Any,
757
854
  ) -> Output:
758
- """Default implementation of ``ainvoke``, calls ``invoke`` from a thread.
855
+ """Transform a single input into an output.
759
856
 
760
- The default implementation allows usage of async code even if
761
- the ``Runnable`` did not implement a native async version of ``invoke``.
857
+ Args:
858
+ input: The input to the ``Runnable``.
859
+ config: A config to use when invoking the ``Runnable``.
860
+ The config supports standard keys like ``'tags'``, ``'metadata'`` for
861
+ tracing purposes, ``'max_concurrency'`` for controlling how much work to
862
+ do in parallel, and other keys. Please refer to the ``RunnableConfig``
863
+ for more details. Defaults to None.
762
864
 
763
- Subclasses should override this method if they can run asynchronously.
865
+ Returns:
866
+ The output of the ``Runnable``.
764
867
 
765
868
  """
766
869
  return await run_in_executor(config, self.invoke, input, config, **kwargs)
@@ -780,6 +883,20 @@ class Runnable(ABC, Generic[Input, Output]):
780
883
  Subclasses should override this method if they can batch more efficiently;
781
884
  e.g., if the underlying ``Runnable`` uses an API which supports a batch mode.
782
885
 
886
+ Args:
887
+ inputs: A list of inputs to the ``Runnable``.
888
+ config: A config to use when invoking the ``Runnable``.
889
+ The config supports standard keys like ``'tags'``, ``'metadata'`` for
890
+ tracing purposes, ``'max_concurrency'`` for controlling how much work
891
+ to do in parallel, and other keys. Please refer to the
892
+ ``RunnableConfig`` for more details. Defaults to None.
893
+ return_exceptions: Whether to return exceptions instead of raising them.
894
+ Defaults to False.
895
+ **kwargs: Additional keyword arguments to pass to the ``Runnable``.
896
+
897
+ Returns:
898
+ A list of outputs from the ``Runnable``.
899
+
783
900
  """
784
901
  if not inputs:
785
902
  return []
@@ -834,6 +951,20 @@ class Runnable(ABC, Generic[Input, Output]):
834
951
 
835
952
  Yields results as they complete.
836
953
 
954
+ Args:
955
+ inputs: A list of inputs to the ``Runnable``.
956
+ config: A config to use when invoking the ``Runnable``.
957
+ The config supports standard keys like ``'tags'``, ``'metadata'`` for
958
+ tracing purposes, ``'max_concurrency'`` for controlling how much work to
959
+ do in parallel, and other keys. Please refer to the ``RunnableConfig``
960
+ for more details. Defaults to None.
961
+ return_exceptions: Whether to return exceptions instead of raising them.
962
+ Defaults to False.
963
+ **kwargs: Additional keyword arguments to pass to the ``Runnable``.
964
+
965
+ Yields:
966
+ Tuples of the index of the input and the output from the ``Runnable``.
967
+
837
968
  """
838
969
  if not inputs:
839
970
  return
@@ -898,7 +1029,7 @@ class Runnable(ABC, Generic[Input, Output]):
898
1029
  for more details. Defaults to None.
899
1030
  return_exceptions: Whether to return exceptions instead of raising them.
900
1031
  Defaults to False.
901
- kwargs: Additional keyword arguments to pass to the ``Runnable``.
1032
+ **kwargs: Additional keyword arguments to pass to the ``Runnable``.
902
1033
 
903
1034
  Returns:
904
1035
  A list of outputs from the ``Runnable``.
@@ -1120,11 +1251,6 @@ class Runnable(ABC, Generic[Input, Output]):
1120
1251
  A ``RunLogPatch`` or ``RunLog`` object.
1121
1252
 
1122
1253
  """
1123
- from langchain_core.tracers.log_stream import (
1124
- LogStreamCallbackHandler,
1125
- _astream_log_implementation,
1126
- )
1127
-
1128
1254
  stream = LogStreamCallbackHandler(
1129
1255
  auto_close=False,
1130
1256
  include_names=include_names,
@@ -1254,6 +1380,7 @@ class Runnable(ABC, Generic[Input, Output]):
1254
1380
  '''Format the docs.'''
1255
1381
  return ", ".join([doc.page_content for doc in docs])
1256
1382
 
1383
+
1257
1384
  format_docs = RunnableLambda(format_docs)
1258
1385
 
1259
1386
  ``some_tool``:
@@ -1280,9 +1407,11 @@ class Runnable(ABC, Generic[Input, Output]):
1280
1407
 
1281
1408
  from langchain_core.runnables import RunnableLambda
1282
1409
 
1410
+
1283
1411
  async def reverse(s: str) -> str:
1284
1412
  return s[::-1]
1285
1413
 
1414
+
1286
1415
  chain = RunnableLambda(func=reverse)
1287
1416
 
1288
1417
  events = [
@@ -1375,11 +1504,6 @@ class Runnable(ABC, Generic[Input, Output]):
1375
1504
  NotImplementedError: If the version is not ``'v1'`` or ``'v2'``.
1376
1505
 
1377
1506
  """ # noqa: E501
1378
- from langchain_core.tracers.event_stream import (
1379
- _astream_events_implementation_v1,
1380
- _astream_events_implementation_v2,
1381
- )
1382
-
1383
1507
  if version == "v2":
1384
1508
  event_stream = _astream_events_implementation_v2(
1385
1509
  self,
@@ -1422,7 +1546,9 @@ class Runnable(ABC, Generic[Input, Output]):
1422
1546
  config: Optional[RunnableConfig] = None,
1423
1547
  **kwargs: Optional[Any],
1424
1548
  ) -> Iterator[Output]:
1425
- """Default implementation of transform, which buffers input and calls ``astream``.
1549
+ """Transform inputs to outputs.
1550
+
1551
+ Default implementation of transform, which buffers input and calls ``astream``.
1426
1552
 
1427
1553
  Subclasses should override this method if they can start producing output while
1428
1554
  input is still being generated.
@@ -1435,7 +1561,7 @@ class Runnable(ABC, Generic[Input, Output]):
1435
1561
  Yields:
1436
1562
  The output of the ``Runnable``.
1437
1563
 
1438
- """ # noqa: E501
1564
+ """
1439
1565
  final: Input
1440
1566
  got_first_val = False
1441
1567
 
@@ -1465,7 +1591,9 @@ class Runnable(ABC, Generic[Input, Output]):
1465
1591
  config: Optional[RunnableConfig] = None,
1466
1592
  **kwargs: Optional[Any],
1467
1593
  ) -> AsyncIterator[Output]:
1468
- """Default implementation of atransform, which buffers input and calls ``astream``.
1594
+ """Transform inputs to outputs.
1595
+
1596
+ Default implementation of atransform, which buffers input and calls ``astream``.
1469
1597
 
1470
1598
  Subclasses should override this method if they can start producing output while
1471
1599
  input is still being generated.
@@ -1478,7 +1606,7 @@ class Runnable(ABC, Generic[Input, Output]):
1478
1606
  Yields:
1479
1607
  The output of the ``Runnable``.
1480
1608
 
1481
- """ # noqa: E501
1609
+ """
1482
1610
  final: Input
1483
1611
  got_first_val = False
1484
1612
 
@@ -1522,22 +1650,16 @@ class Runnable(ABC, Generic[Input, Output]):
1522
1650
  from langchain_ollama import ChatOllama
1523
1651
  from langchain_core.output_parsers import StrOutputParser
1524
1652
 
1525
- llm = ChatOllama(model='llama2')
1653
+ llm = ChatOllama(model="llama2")
1526
1654
 
1527
1655
  # Without bind.
1528
- chain = (
1529
- llm
1530
- | StrOutputParser()
1531
- )
1656
+ chain = llm | StrOutputParser()
1532
1657
 
1533
1658
  chain.invoke("Repeat quoted words exactly: 'One two three four five.'")
1534
1659
  # Output is 'One two three four five.'
1535
1660
 
1536
1661
  # With bind.
1537
- chain = (
1538
- llm.bind(stop=["three"])
1539
- | StrOutputParser()
1540
- )
1662
+ chain = llm.bind(stop=["three"]) | StrOutputParser()
1541
1663
 
1542
1664
  chain.invoke("Repeat quoted words exactly: 'One two three four five.'")
1543
1665
  # Output is 'One two'
@@ -1609,24 +1731,25 @@ class Runnable(ABC, Generic[Input, Output]):
1609
1731
 
1610
1732
  import time
1611
1733
 
1612
- def test_runnable(time_to_sleep : int):
1734
+
1735
+ def test_runnable(time_to_sleep: int):
1613
1736
  time.sleep(time_to_sleep)
1614
1737
 
1738
+
1615
1739
  def fn_start(run_obj: Run):
1616
1740
  print("start_time:", run_obj.start_time)
1617
1741
 
1742
+
1618
1743
  def fn_end(run_obj: Run):
1619
1744
  print("end_time:", run_obj.end_time)
1620
1745
 
1746
+
1621
1747
  chain = RunnableLambda(test_runnable).with_listeners(
1622
- on_start=fn_start,
1623
- on_end=fn_end
1748
+ on_start=fn_start, on_end=fn_end
1624
1749
  )
1625
1750
  chain.invoke(2)
1626
1751
 
1627
1752
  """
1628
- from langchain_core.tracers.root_listeners import RootListenersTracer
1629
-
1630
1753
  return RunnableBinding(
1631
1754
  bound=self,
1632
1755
  config_factories=[
@@ -1650,7 +1773,9 @@ class Runnable(ABC, Generic[Input, Output]):
1650
1773
  on_end: Optional[AsyncListener] = None,
1651
1774
  on_error: Optional[AsyncListener] = None,
1652
1775
  ) -> Runnable[Input, Output]:
1653
- """Bind async lifecycle listeners to a ``Runnable``, returning a new ``Runnable``.
1776
+ """Bind async lifecycle listeners to a ``Runnable``.
1777
+
1778
+ Returns a new ``Runnable``.
1654
1779
 
1655
1780
  The Run object contains information about the run, including its ``id``,
1656
1781
  ``type``, ``input``, ``output``, ``error``, ``start_time``, ``end_time``, and
@@ -1716,9 +1841,7 @@ class Runnable(ABC, Generic[Input, Output]):
1716
1841
  on end callback ends at 2025-03-01T07:05:29.883893+00:00
1717
1842
  on end callback ends at 2025-03-01T07:05:30.884831+00:00
1718
1843
 
1719
- """ # noqa: E501
1720
- from langchain_core.tracers.root_listeners import AsyncRootListenersTracer
1721
-
1844
+ """
1722
1845
  return RunnableBinding(
1723
1846
  bound=self,
1724
1847
  config_factories=[
@@ -1796,7 +1919,7 @@ class Runnable(ABC, Generic[Input, Output]):
1796
1919
  if x == 1:
1797
1920
  raise ValueError("x is 1")
1798
1921
  else:
1799
- pass
1922
+ pass
1800
1923
 
1801
1924
 
1802
1925
  runnable = RunnableLambda(_lambda)
@@ -1808,10 +1931,11 @@ class Runnable(ABC, Generic[Input, Output]):
1808
1931
  except ValueError:
1809
1932
  pass
1810
1933
 
1811
- assert (count == 2)
1934
+ assert count == 2
1812
1935
 
1813
1936
  """
1814
- from langchain_core.runnables.retry import RunnableRetry
1937
+ # Import locally to prevent circular import
1938
+ from langchain_core.runnables.retry import RunnableRetry # noqa: PLC0415
1815
1939
 
1816
1940
  return RunnableRetry(
1817
1941
  bound=self,
@@ -1837,11 +1961,13 @@ class Runnable(ABC, Generic[Input, Output]):
1837
1961
 
1838
1962
  from langchain_core.runnables import RunnableLambda
1839
1963
 
1964
+
1840
1965
  def _lambda(x: int) -> int:
1841
1966
  return x + 1
1842
1967
 
1968
+
1843
1969
  runnable = RunnableLambda(_lambda)
1844
- print(runnable.map().invoke([1, 2, 3])) # [2, 3, 4]
1970
+ print(runnable.map().invoke([1, 2, 3])) # [2, 3, 4]
1845
1971
 
1846
1972
  """
1847
1973
  return RunnableEach(bound=self)
@@ -1859,13 +1985,15 @@ class Runnable(ABC, Generic[Input, Output]):
1859
1985
  in order, upon failures.
1860
1986
 
1861
1987
  Args:
1862
- fallbacks: A sequence of runnables to try if the original ``Runnable`` fails.
1988
+ fallbacks: A sequence of runnables to try if the original ``Runnable``
1989
+ fails.
1863
1990
  exceptions_to_handle: A tuple of exception types to handle.
1864
1991
  Defaults to ``(Exception,)``.
1865
1992
  exception_key: If string is specified then handled exceptions will be passed
1866
- to fallbacks as part of the input under the specified key. If None,
1867
- exceptions will not be passed to fallbacks. If used, the base ``Runnable``
1868
- and its fallbacks must accept a dictionary as input. Defaults to None.
1993
+ to fallbacks as part of the input under the specified key.
1994
+ If None, exceptions will not be passed to fallbacks.
1995
+ If used, the base ``Runnable`` and its fallbacks must accept a
1996
+ dictionary as input. Defaults to None.
1869
1997
 
1870
1998
  Returns:
1871
1999
  A new ``Runnable`` that will try the original ``Runnable``, and then each
@@ -1891,23 +2019,28 @@ class Runnable(ABC, Generic[Input, Output]):
1891
2019
 
1892
2020
  runnable = RunnableGenerator(_generate_immediate_error).with_fallbacks(
1893
2021
  [RunnableGenerator(_generate)]
1894
- )
1895
- print(''.join(runnable.stream({}))) #foo bar
2022
+ )
2023
+ print("".join(runnable.stream({}))) # foo bar
1896
2024
 
1897
2025
  Args:
1898
- fallbacks: A sequence of runnables to try if the original ``Runnable`` fails.
2026
+ fallbacks: A sequence of runnables to try if the original ``Runnable``
2027
+ fails.
1899
2028
  exceptions_to_handle: A tuple of exception types to handle.
1900
2029
  exception_key: If string is specified then handled exceptions will be passed
1901
- to fallbacks as part of the input under the specified key. If None,
1902
- exceptions will not be passed to fallbacks. If used, the base ``Runnable``
1903
- and its fallbacks must accept a dictionary as input.
2030
+ to fallbacks as part of the input under the specified key.
2031
+ If None, exceptions will not be passed to fallbacks.
2032
+ If used, the base ``Runnable`` and its fallbacks must accept a
2033
+ dictionary as input.
1904
2034
 
1905
2035
  Returns:
1906
2036
  A new ``Runnable`` that will try the original ``Runnable``, and then each
1907
2037
  fallback in order, upon failures.
1908
2038
 
1909
- """ # noqa: E501
1910
- from langchain_core.runnables.fallbacks import RunnableWithFallbacks
2039
+ """
2040
+ # Import locally to prevent circular import
2041
+ from langchain_core.runnables.fallbacks import ( # noqa: PLC0415
2042
+ RunnableWithFallbacks,
2043
+ )
1911
2044
 
1912
2045
  return RunnableWithFallbacks(
1913
2046
  runnable=self,
@@ -1931,11 +2064,14 @@ class Runnable(ABC, Generic[Input, Output]):
1931
2064
  serialized: Optional[dict[str, Any]] = None,
1932
2065
  **kwargs: Optional[Any],
1933
2066
  ) -> Output:
1934
- """Helper method to transform an ``Input`` value to an ``Output`` value, with callbacks.
2067
+ """Call with config.
2068
+
2069
+ Helper method to transform an ``Input`` value to an ``Output`` value,
2070
+ with callbacks.
1935
2071
 
1936
2072
  Use this method to implement ``invoke`` in subclasses.
1937
2073
 
1938
- """ # noqa: E501
2074
+ """
1939
2075
  config = ensure_config(config)
1940
2076
  callback_manager = get_callback_manager_for_config(config)
1941
2077
  run_manager = callback_manager.on_chain_start(
@@ -1982,10 +2118,13 @@ class Runnable(ABC, Generic[Input, Output]):
1982
2118
  serialized: Optional[dict[str, Any]] = None,
1983
2119
  **kwargs: Optional[Any],
1984
2120
  ) -> Output:
1985
- """Helper method to transform an ``Input`` value to an ``Output`` value, with callbacks.
2121
+ """Async call with config.
2122
+
2123
+ Helper method to transform an ``Input`` value to an ``Output`` value,
2124
+ with callbacks.
1986
2125
 
1987
2126
  Use this method to implement ``ainvoke`` in subclasses.
1988
- """ # noqa: E501
2127
+ """
1989
2128
  config = ensure_config(config)
1990
2129
  callback_manager = get_async_callback_manager_for_config(config)
1991
2130
  run_manager = await callback_manager.on_chain_start(
@@ -2187,9 +2326,6 @@ class Runnable(ABC, Generic[Input, Output]):
2187
2326
  Use this to implement ``stream`` or ``transform`` in ``Runnable`` subclasses.
2188
2327
 
2189
2328
  """
2190
- # Mixin that is used by both astream log and astream events implementation
2191
- from langchain_core.tracers._streaming import _StreamingCallbackHandler
2192
-
2193
2329
  # tee the input so we can iterate over it twice
2194
2330
  input_for_tracing, input_for_transform = tee(inputs, 2)
2195
2331
  # Start the input iterator to ensure the input Runnable starts before this one
@@ -2293,9 +2429,6 @@ class Runnable(ABC, Generic[Input, Output]):
2293
2429
  Use this to implement ``astream`` or ``atransform`` in ``Runnable`` subclasses.
2294
2430
 
2295
2431
  """
2296
- # Mixin that is used by both astream log and astream events implementation
2297
- from langchain_core.tracers._streaming import _StreamingCallbackHandler
2298
-
2299
2432
  # tee the input so we can iterate over it twice
2300
2433
  input_for_tracing, input_for_transform = atee(inputs, 2)
2301
2434
  # Start the input iterator to ensure the input Runnable starts before this one
@@ -2411,13 +2544,16 @@ class Runnable(ABC, Generic[Input, Output]):
2411
2544
  from typing_extensions import TypedDict
2412
2545
  from langchain_core.runnables import RunnableLambda
2413
2546
 
2547
+
2414
2548
  class Args(TypedDict):
2415
2549
  a: int
2416
2550
  b: list[int]
2417
2551
 
2552
+
2418
2553
  def f(x: Args) -> str:
2419
2554
  return str(x["a"] * max(x["b"]))
2420
2555
 
2556
+
2421
2557
  runnable = RunnableLambda(f)
2422
2558
  as_tool = runnable.as_tool()
2423
2559
  as_tool.invoke({"a": 3, "b": [1, 2]})
@@ -2450,9 +2586,11 @@ class Runnable(ABC, Generic[Input, Output]):
2450
2586
  from typing import Any
2451
2587
  from langchain_core.runnables import RunnableLambda
2452
2588
 
2589
+
2453
2590
  def f(x: dict[str, Any]) -> str:
2454
2591
  return str(x["a"] * max(x["b"]))
2455
2592
 
2593
+
2456
2594
  runnable = RunnableLambda(f)
2457
2595
  as_tool = runnable.as_tool(arg_types={"a": int, "b": list[int]})
2458
2596
  as_tool.invoke({"a": 3, "b": [1, 2]})
@@ -2463,12 +2601,15 @@ class Runnable(ABC, Generic[Input, Output]):
2463
2601
 
2464
2602
  from langchain_core.runnables import RunnableLambda
2465
2603
 
2604
+
2466
2605
  def f(x: str) -> str:
2467
2606
  return x + "a"
2468
2607
 
2608
+
2469
2609
  def g(x: str) -> str:
2470
2610
  return x + "z"
2471
2611
 
2612
+
2472
2613
  runnable = RunnableLambda(f) | g
2473
2614
  as_tool = runnable.as_tool()
2474
2615
  as_tool.invoke("b")
@@ -2477,7 +2618,7 @@ class Runnable(ABC, Generic[Input, Output]):
2477
2618
 
2478
2619
  """
2479
2620
  # Avoid circular import
2480
- from langchain_core.tools import convert_runnable_to_tool
2621
+ from langchain_core.tools import convert_runnable_to_tool # noqa: PLC0415
2481
2622
 
2482
2623
  return convert_runnable_to_tool(
2483
2624
  self,
@@ -2520,6 +2661,9 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2520
2661
  Args:
2521
2662
  **kwargs: A dictionary of ``ConfigurableField`` instances to configure.
2522
2663
 
2664
+ Raises:
2665
+ ValueError: If a configuration key is not found in the ``Runnable``.
2666
+
2523
2667
  Returns:
2524
2668
  A new ``Runnable`` with the fields configured.
2525
2669
 
@@ -2538,18 +2682,22 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2538
2682
 
2539
2683
  # max_tokens = 20
2540
2684
  print(
2541
- "max_tokens_20: ",
2542
- model.invoke("tell me something about chess").content
2685
+ "max_tokens_20: ", model.invoke("tell me something about chess").content
2543
2686
  )
2544
2687
 
2545
2688
  # max_tokens = 200
2546
- print("max_tokens_200: ", model.with_config(
2547
- configurable={"output_token_number": 200}
2548
- ).invoke("tell me something about chess").content
2689
+ print(
2690
+ "max_tokens_200: ",
2691
+ model.with_config(configurable={"output_token_number": 200})
2692
+ .invoke("tell me something about chess")
2693
+ .content,
2549
2694
  )
2550
2695
 
2551
2696
  """
2552
- from langchain_core.runnables.configurable import RunnableConfigurableFields
2697
+ # Import locally to prevent circular import
2698
+ from langchain_core.runnables.configurable import ( # noqa: PLC0415
2699
+ RunnableConfigurableFields,
2700
+ )
2553
2701
 
2554
2702
  model_fields = type(self).model_fields
2555
2703
  for key in kwargs:
@@ -2596,7 +2744,7 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2596
2744
  ).configurable_alternatives(
2597
2745
  ConfigurableField(id="llm"),
2598
2746
  default_key="anthropic",
2599
- openai=ChatOpenAI()
2747
+ openai=ChatOpenAI(),
2600
2748
  )
2601
2749
 
2602
2750
  # uses the default model ChatAnthropic
@@ -2604,13 +2752,14 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2604
2752
 
2605
2753
  # uses ChatOpenAI
2606
2754
  print(
2607
- model.with_config(
2608
- configurable={"llm": "openai"}
2609
- ).invoke("which organization created you?").content
2755
+ model.with_config(configurable={"llm": "openai"})
2756
+ .invoke("which organization created you?")
2757
+ .content
2610
2758
  )
2611
2759
 
2612
2760
  """
2613
- from langchain_core.runnables.configurable import (
2761
+ # Import locally to prevent circular import
2762
+ from langchain_core.runnables.configurable import ( # noqa: PLC0415
2614
2763
  RunnableConfigurableAlternatives,
2615
2764
  )
2616
2765
 
@@ -2626,7 +2775,11 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2626
2775
  def _seq_input_schema(
2627
2776
  steps: list[Runnable[Any, Any]], config: Optional[RunnableConfig]
2628
2777
  ) -> type[BaseModel]:
2629
- from langchain_core.runnables.passthrough import RunnableAssign, RunnablePick
2778
+ # Import locally to prevent circular import
2779
+ from langchain_core.runnables.passthrough import ( # noqa: PLC0415
2780
+ RunnableAssign,
2781
+ RunnablePick,
2782
+ )
2630
2783
 
2631
2784
  first = steps[0]
2632
2785
  if len(steps) == 1:
@@ -2652,7 +2805,11 @@ def _seq_input_schema(
2652
2805
  def _seq_output_schema(
2653
2806
  steps: list[Runnable[Any, Any]], config: Optional[RunnableConfig]
2654
2807
  ) -> type[BaseModel]:
2655
- from langchain_core.runnables.passthrough import RunnableAssign, RunnablePick
2808
+ # Import locally to prevent circular import
2809
+ from langchain_core.runnables.passthrough import ( # noqa: PLC0415
2810
+ RunnableAssign,
2811
+ RunnablePick,
2812
+ )
2656
2813
 
2657
2814
  last = steps[-1]
2658
2815
  if len(steps) == 1:
@@ -2739,12 +2896,15 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
2739
2896
 
2740
2897
  from langchain_core.runnables import RunnableLambda
2741
2898
 
2899
+
2742
2900
  def add_one(x: int) -> int:
2743
2901
  return x + 1
2744
2902
 
2903
+
2745
2904
  def mul_two(x: int) -> int:
2746
2905
  return x * 2
2747
2906
 
2907
+
2748
2908
  runnable_1 = RunnableLambda(add_one)
2749
2909
  runnable_2 = RunnableLambda(mul_two)
2750
2910
  sequence = runnable_1 | runnable_2
@@ -2764,17 +2924,17 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
2764
2924
  from langchain_openai import ChatOpenAI
2765
2925
 
2766
2926
  prompt = PromptTemplate.from_template(
2767
- 'In JSON format, give me a list of {topic} and their '
2768
- 'corresponding names in French, Spanish and in a '
2769
- 'Cat Language.'
2927
+ "In JSON format, give me a list of {topic} and their "
2928
+ "corresponding names in French, Spanish and in a "
2929
+ "Cat Language."
2770
2930
  )
2771
2931
 
2772
2932
  model = ChatOpenAI()
2773
2933
  chain = prompt | model | SimpleJsonOutputParser()
2774
2934
 
2775
- async for chunk in chain.astream({'topic': 'colors'}):
2776
- print('-') # noqa: T201
2777
- print(chunk, sep='', flush=True) # noqa: T201
2935
+ async for chunk in chain.astream({"topic": "colors"}):
2936
+ print("-") # noqa: T201
2937
+ print(chunk, sep="", flush=True) # noqa: T201
2778
2938
 
2779
2939
  """
2780
2940
 
@@ -2829,6 +2989,11 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
2829
2989
  @classmethod
2830
2990
  @override
2831
2991
  def get_lc_namespace(cls) -> list[str]:
2992
+ """Get the namespace of the langchain object.
2993
+
2994
+ Returns:
2995
+ ``["langchain", "schema", "runnable"]``
2996
+ """
2832
2997
  return ["langchain", "schema", "runnable"]
2833
2998
 
2834
2999
  @property
@@ -2843,14 +3008,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
2843
3008
  @classmethod
2844
3009
  @override
2845
3010
  def is_lc_serializable(cls) -> bool:
2846
- """Check if the object is serializable.
2847
-
2848
- Returns:
2849
- True if the object is serializable, False otherwise.
2850
-
2851
- Defaults to True.
2852
-
2853
- """
3011
+ """Return True as this class is serializable."""
2854
3012
  return True
2855
3013
 
2856
3014
  model_config = ConfigDict(
@@ -2908,7 +3066,8 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
2908
3066
  The config specs of the ``Runnable``.
2909
3067
 
2910
3068
  """
2911
- from langchain_core.beta.runnables.context import (
3069
+ # Import locally to prevent circular import
3070
+ from langchain_core.beta.runnables.context import ( # noqa: PLC0415
2912
3071
  CONTEXT_CONFIG_PREFIX,
2913
3072
  _key_from_id,
2914
3073
  )
@@ -2966,7 +3125,8 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
2966
3125
  ValueError: If a ``Runnable`` has no first or last node.
2967
3126
 
2968
3127
  """
2969
- from langchain_core.runnables.graph import Graph
3128
+ # Import locally to prevent circular import
3129
+ from langchain_core.runnables.graph import Graph # noqa: PLC0415
2970
3130
 
2971
3131
  graph = Graph()
2972
3132
  for step in self.steps:
@@ -3054,7 +3214,10 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
3054
3214
  def invoke(
3055
3215
  self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
3056
3216
  ) -> Output:
3057
- from langchain_core.beta.runnables.context import config_with_context
3217
+ # Import locally to prevent circular import
3218
+ from langchain_core.beta.runnables.context import ( # noqa: PLC0415
3219
+ config_with_context,
3220
+ )
3058
3221
 
3059
3222
  # setup callbacks and context
3060
3223
  config = config_with_context(ensure_config(config), self.steps)
@@ -3095,7 +3258,10 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
3095
3258
  config: Optional[RunnableConfig] = None,
3096
3259
  **kwargs: Optional[Any],
3097
3260
  ) -> Output:
3098
- from langchain_core.beta.runnables.context import aconfig_with_context
3261
+ # Import locally to prevent circular import
3262
+ from langchain_core.beta.runnables.context import ( # noqa: PLC0415
3263
+ aconfig_with_context,
3264
+ )
3099
3265
 
3100
3266
  # setup callbacks and context
3101
3267
  config = aconfig_with_context(ensure_config(config), self.steps)
@@ -3139,8 +3305,10 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
3139
3305
  return_exceptions: bool = False,
3140
3306
  **kwargs: Optional[Any],
3141
3307
  ) -> list[Output]:
3142
- from langchain_core.beta.runnables.context import config_with_context
3143
- from langchain_core.callbacks.manager import CallbackManager
3308
+ # Import locally to prevent circular import
3309
+ from langchain_core.beta.runnables.context import ( # noqa: PLC0415
3310
+ config_with_context,
3311
+ )
3144
3312
 
3145
3313
  if not inputs:
3146
3314
  return []
@@ -3269,8 +3437,10 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
3269
3437
  return_exceptions: bool = False,
3270
3438
  **kwargs: Optional[Any],
3271
3439
  ) -> list[Output]:
3272
- from langchain_core.beta.runnables.context import aconfig_with_context
3273
- from langchain_core.callbacks.manager import AsyncCallbackManager
3440
+ # Import locally to prevent circular import
3441
+ from langchain_core.beta.runnables.context import ( # noqa: PLC0415
3442
+ aconfig_with_context,
3443
+ )
3274
3444
 
3275
3445
  if not inputs:
3276
3446
  return []
@@ -3400,7 +3570,10 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
3400
3570
  config: RunnableConfig,
3401
3571
  **kwargs: Any,
3402
3572
  ) -> Iterator[Output]:
3403
- from langchain_core.beta.runnables.context import config_with_context
3573
+ # Import locally to prevent circular import
3574
+ from langchain_core.beta.runnables.context import ( # noqa: PLC0415
3575
+ config_with_context,
3576
+ )
3404
3577
 
3405
3578
  steps = [self.first, *self.middle, self.last]
3406
3579
  config = config_with_context(config, self.steps)
@@ -3427,7 +3600,10 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
3427
3600
  config: RunnableConfig,
3428
3601
  **kwargs: Any,
3429
3602
  ) -> AsyncIterator[Output]:
3430
- from langchain_core.beta.runnables.context import aconfig_with_context
3603
+ # Import locally to prevent circular import
3604
+ from langchain_core.beta.runnables.context import ( # noqa: PLC0415
3605
+ aconfig_with_context,
3606
+ )
3431
3607
 
3432
3608
  steps = [self.first, *self.middle, self.last]
3433
3609
  config = aconfig_with_context(config, self.steps)
@@ -3520,15 +3696,19 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
3520
3696
 
3521
3697
  from langchain_core.runnables import RunnableLambda
3522
3698
 
3699
+
3523
3700
  def add_one(x: int) -> int:
3524
3701
  return x + 1
3525
3702
 
3703
+
3526
3704
  def mul_two(x: int) -> int:
3527
3705
  return x * 2
3528
3706
 
3707
+
3529
3708
  def mul_three(x: int) -> int:
3530
3709
  return x * 3
3531
3710
 
3711
+
3532
3712
  runnable_1 = RunnableLambda(add_one)
3533
3713
  runnable_2 = RunnableLambda(mul_two)
3534
3714
  runnable_3 = RunnableLambda(mul_three)
@@ -3564,8 +3744,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
3564
3744
 
3565
3745
  model = ChatOpenAI()
3566
3746
  joke_chain = (
3567
- ChatPromptTemplate.from_template("tell me a joke about {topic}")
3568
- | model
3747
+ ChatPromptTemplate.from_template("tell me a joke about {topic}") | model
3569
3748
  )
3570
3749
  poem_chain = (
3571
3750
  ChatPromptTemplate.from_template("write a 2-line poem about {topic}")
@@ -3619,11 +3798,17 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
3619
3798
  @classmethod
3620
3799
  @override
3621
3800
  def is_lc_serializable(cls) -> bool:
3801
+ """Return True as this class is serializable."""
3622
3802
  return True
3623
3803
 
3624
3804
  @classmethod
3625
3805
  @override
3626
3806
  def get_lc_namespace(cls) -> list[str]:
3807
+ """Get the namespace of the langchain object.
3808
+
3809
+ Returns:
3810
+ ``["langchain", "schema", "runnable"]``
3811
+ """
3627
3812
  return ["langchain", "schema", "runnable"]
3628
3813
 
3629
3814
  model_config = ConfigDict(
@@ -3731,7 +3916,8 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
3731
3916
  ValueError: If a ``Runnable`` has no first or last node.
3732
3917
 
3733
3918
  """
3734
- from langchain_core.runnables.graph import Graph
3919
+ # Import locally to prevent circular import
3920
+ from langchain_core.runnables.graph import Graph # noqa: PLC0415
3735
3921
 
3736
3922
  graph = Graph()
3737
3923
  input_node = graph.add_node(self.get_input_schema(config))
@@ -3767,8 +3953,6 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
3767
3953
  def invoke(
3768
3954
  self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
3769
3955
  ) -> dict[str, Any]:
3770
- from langchain_core.callbacks.manager import CallbackManager
3771
-
3772
3956
  # setup callbacks
3773
3957
  config = ensure_config(config)
3774
3958
  callback_manager = CallbackManager.configure(
@@ -4068,9 +4252,10 @@ class RunnableGenerator(Runnable[Input, Output]):
4068
4252
  for token in ["Have", " a", " nice", " day"]:
4069
4253
  yield token
4070
4254
 
4255
+
4071
4256
  runnable = RunnableGenerator(agen)
4072
4257
  await runnable.ainvoke(None) # "Have a nice day"
4073
- [p async for p in runnable.astream(None)] # ["Have", " a", " nice", " day"]
4258
+ [p async for p in runnable.astream(None)] # ["Have", " a", " nice", " day"]
4074
4259
 
4075
4260
  ``RunnableGenerator`` makes it easy to implement custom behavior within a streaming
4076
4261
  context. Below we show an example:
@@ -4090,6 +4275,7 @@ class RunnableGenerator(Runnable[Input, Output]):
4090
4275
  | StrOutputParser()
4091
4276
  )
4092
4277
 
4278
+
4093
4279
  def character_generator(input: Iterator[str]) -> Iterator[str]:
4094
4280
  for token in input:
4095
4281
  if "," in token or "." in token:
@@ -4100,7 +4286,10 @@ class RunnableGenerator(Runnable[Input, Output]):
4100
4286
 
4101
4287
  runnable = chant_chain | character_generator
4102
4288
  assert type(runnable.last) is RunnableGenerator
4103
- "".join(runnable.stream({"topic": "waste"})) # Reduce👏, Reuse👏, Recycle👏.
4289
+ "".join(
4290
+ runnable.stream({"topic": "waste"})
4291
+ ) # Reduce👏, Reuse👏, Recycle👏.
4292
+
4104
4293
 
4105
4294
  # Note that RunnableLambda can be used to delay streaming of one step in a
4106
4295
  # sequence until the previous step is finished:
@@ -4109,6 +4298,7 @@ class RunnableGenerator(Runnable[Input, Output]):
4109
4298
  for character in input[::-1]:
4110
4299
  yield character
4111
4300
 
4301
+
4112
4302
  runnable = chant_chain | RunnableLambda(reverse_generator)
4113
4303
  "".join(runnable.stream({"topic": "waste"})) # ".elcycer ,esuer ,ecudeR"
4114
4304
 
@@ -4353,26 +4543,29 @@ class RunnableLambda(Runnable[Input, Output]):
4353
4543
  # This is a RunnableLambda
4354
4544
  from langchain_core.runnables import RunnableLambda
4355
4545
 
4546
+
4356
4547
  def add_one(x: int) -> int:
4357
4548
  return x + 1
4358
4549
 
4550
+
4359
4551
  runnable = RunnableLambda(add_one)
4360
4552
 
4361
- runnable.invoke(1) # returns 2
4362
- runnable.batch([1, 2, 3]) # returns [2, 3, 4]
4553
+ runnable.invoke(1) # returns 2
4554
+ runnable.batch([1, 2, 3]) # returns [2, 3, 4]
4363
4555
 
4364
4556
  # Async is supported by default by delegating to the sync implementation
4365
- await runnable.ainvoke(1) # returns 2
4366
- await runnable.abatch([1, 2, 3]) # returns [2, 3, 4]
4557
+ await runnable.ainvoke(1) # returns 2
4558
+ await runnable.abatch([1, 2, 3]) # returns [2, 3, 4]
4367
4559
 
4368
4560
 
4369
4561
  # Alternatively, can provide both synd and sync implementations
4370
4562
  async def add_one_async(x: int) -> int:
4371
4563
  return x + 1
4372
4564
 
4565
+
4373
4566
  runnable = RunnableLambda(add_one, afunc=add_one_async)
4374
- runnable.invoke(1) # Uses add_one
4375
- await runnable.ainvoke(1) # Uses add_one_async
4567
+ runnable.invoke(1) # Uses add_one
4568
+ await runnable.ainvoke(1) # Uses add_one_async
4376
4569
 
4377
4570
  """
4378
4571
 
@@ -4607,6 +4800,9 @@ class RunnableLambda(Runnable[Input, Output]):
4607
4800
  @override
4608
4801
  def get_graph(self, config: RunnableConfig | None = None) -> Graph:
4609
4802
  if deps := self.deps:
4803
+ # Import locally to prevent circular import
4804
+ from langchain_core.runnables.graph import Graph # noqa: PLC0415
4805
+
4610
4806
  graph = Graph()
4611
4807
  input_node = graph.add_node(self.get_input_schema(config))
4612
4808
  output_node = graph.add_node(self.get_output_schema(config))
@@ -4644,7 +4840,7 @@ class RunnableLambda(Runnable[Input, Output]):
4644
4840
  __hash__ = None # type: ignore[assignment]
4645
4841
 
4646
4842
  def __repr__(self) -> str:
4647
- """A string representation of this ``Runnable``."""
4843
+ """Return a string representation of this ``Runnable``."""
4648
4844
  if self._repr is None:
4649
4845
  if hasattr(self, "func") and isinstance(self.func, itemgetter):
4650
4846
  self._repr = f"RunnableLambda({str(self.func)[len('operator.') :]})"
@@ -5080,13 +5276,16 @@ class RunnableLambda(Runnable[Input, Output]):
5080
5276
 
5081
5277
 
5082
5278
  class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]):
5083
- """``Runnable`` that calls another ``Runnable`` for each element of the input sequence.
5279
+ """RunnableEachBase class.
5280
+
5281
+ ``Runnable`` that calls another ``Runnable`` for each element of the input sequence.
5084
5282
 
5085
- Use only if creating a new ``RunnableEach`` subclass with different ``__init__`` args.
5283
+ Use only if creating a new ``RunnableEach`` subclass with different ``__init__``
5284
+ args.
5086
5285
 
5087
5286
  See documentation for ``RunnableEach`` for more details.
5088
5287
 
5089
- """ # noqa: E501
5288
+ """
5090
5289
 
5091
5290
  bound: Runnable[Input, Output]
5092
5291
 
@@ -5154,11 +5353,17 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]):
5154
5353
  @classmethod
5155
5354
  @override
5156
5355
  def is_lc_serializable(cls) -> bool:
5356
+ """Return True as this class is serializable."""
5157
5357
  return True
5158
5358
 
5159
5359
  @classmethod
5160
5360
  @override
5161
5361
  def get_lc_namespace(cls) -> list[str]:
5362
+ """Get the namespace of the langchain object.
5363
+
5364
+ Returns:
5365
+ ``["langchain", "schema", "runnable"]``
5366
+ """
5162
5367
  return ["langchain", "schema", "runnable"]
5163
5368
 
5164
5369
  def _invoke(
@@ -5204,14 +5409,19 @@ class RunnableEachBase(RunnableSerializable[list[Input], list[Output]]):
5204
5409
  config: Optional[RunnableConfig] = None,
5205
5410
  **kwargs: Optional[Any],
5206
5411
  ) -> AsyncIterator[StreamEvent]:
5412
+ def _error_stream_event(message: str) -> StreamEvent:
5413
+ raise NotImplementedError(message)
5414
+
5207
5415
  for _ in range(1):
5208
- msg = "RunnableEach does not support astream_events yet."
5209
- raise NotImplementedError(msg)
5210
- yield
5416
+ yield _error_stream_event(
5417
+ "RunnableEach does not support astream_events yet."
5418
+ )
5211
5419
 
5212
5420
 
5213
5421
  class RunnableEach(RunnableEachBase[Input, Output]):
5214
- """``Runnable`` that calls another ``Runnable`` for each element of the input sequence.
5422
+ """RunnableEach class.
5423
+
5424
+ ``Runnable`` that calls another ``Runnable`` for each element of the input sequence.
5215
5425
 
5216
5426
  It allows you to call multiple inputs with the bounded ``Runnable``.
5217
5427
 
@@ -5236,7 +5446,7 @@ class RunnableEach(RunnableEachBase[Input, Output]):
5236
5446
  {'topic':'Biology'}])
5237
5447
  print(output) # noqa: T201
5238
5448
 
5239
- """ # noqa: E501
5449
+ """
5240
5450
 
5241
5451
  @override
5242
5452
  def get_name(
@@ -5300,7 +5510,9 @@ class RunnableEach(RunnableEachBase[Input, Output]):
5300
5510
  on_end: Optional[AsyncListener] = None,
5301
5511
  on_error: Optional[AsyncListener] = None,
5302
5512
  ) -> RunnableEach[Input, Output]:
5303
- """Bind async lifecycle listeners to a ``Runnable``, returning a new ``Runnable``.
5513
+ """Bind async lifecycle listeners to a ``Runnable``.
5514
+
5515
+ Returns a new ``Runnable``.
5304
5516
 
5305
5517
  The ``Run`` object contains information about the run, including its ``id``,
5306
5518
  ``type``, ``input``, ``output``, ``error``, ``start_time``, ``end_time``, and
@@ -5317,7 +5529,7 @@ class RunnableEach(RunnableEachBase[Input, Output]):
5317
5529
  Returns:
5318
5530
  A new ``Runnable`` with the listeners bound.
5319
5531
 
5320
- """ # noqa: E501
5532
+ """
5321
5533
  return RunnableEach(
5322
5534
  bound=self.bound.with_alisteners(
5323
5535
  on_start=on_start, on_end=on_end, on_error=on_error
@@ -5388,22 +5600,23 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]): # type: ignore[
5388
5600
  """Create a ``RunnableBinding`` from a ``Runnable`` and kwargs.
5389
5601
 
5390
5602
  Args:
5391
- bound: The underlying ``Runnable`` that this ``Runnable`` delegates calls to.
5603
+ bound: The underlying ``Runnable`` that this ``Runnable`` delegates calls
5604
+ to.
5392
5605
  kwargs: optional kwargs to pass to the underlying ``Runnable``, when running
5393
- the underlying ``Runnable`` (e.g., via ``invoke``, ``batch``,
5394
- ``transform``, or ``stream`` or async variants)
5395
- Defaults to None.
5606
+ the underlying ``Runnable`` (e.g., via ``invoke``, ``batch``,
5607
+ ``transform``, or ``stream`` or async variants)
5608
+ Defaults to None.
5396
5609
  config: optional config to bind to the underlying ``Runnable``.
5397
- Defaults to None.
5610
+ Defaults to None.
5398
5611
  config_factories: optional list of config factories to apply to the
5399
- config before binding to the underlying ``Runnable``.
5400
- Defaults to None.
5612
+ config before binding to the underlying ``Runnable``.
5613
+ Defaults to None.
5401
5614
  custom_input_type: Specify to override the input type of the underlying
5402
- ``Runnable`` with a custom type. Defaults to None.
5615
+ ``Runnable`` with a custom type. Defaults to None.
5403
5616
  custom_output_type: Specify to override the output type of the underlying
5404
- ``Runnable`` with a custom type. Defaults to None.
5617
+ ``Runnable`` with a custom type. Defaults to None.
5405
5618
  **other_kwargs: Unpacked into the base class.
5406
- """ # noqa: E501
5619
+ """
5407
5620
  super().__init__(
5408
5621
  bound=bound,
5409
5622
  kwargs=kwargs or {},
@@ -5470,6 +5683,7 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]): # type: ignore[
5470
5683
  @classmethod
5471
5684
  @override
5472
5685
  def is_lc_serializable(cls) -> bool:
5686
+ """Return True as this class is serializable."""
5473
5687
  return True
5474
5688
 
5475
5689
  @classmethod
@@ -5477,7 +5691,8 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]): # type: ignore[
5477
5691
  def get_lc_namespace(cls) -> list[str]:
5478
5692
  """Get the namespace of the langchain object.
5479
5693
 
5480
- Defaults to ``["langchain", "schema", "runnable"]``.
5694
+ Returns:
5695
+ ``["langchain", "schema", "runnable"]``
5481
5696
  """
5482
5697
  return ["langchain", "schema", "runnable"]
5483
5698
 
@@ -5744,9 +5959,11 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): # type: ignore[no-re
5744
5959
  These methods include:
5745
5960
 
5746
5961
  - ``bind``: Bind kwargs to pass to the underlying ``Runnable`` when running it.
5747
- - ``with_config``: Bind config to pass to the underlying ``Runnable`` when running it.
5962
+ - ``with_config``: Bind config to pass to the underlying ``Runnable`` when running
5963
+ it.
5748
5964
  - ``with_listeners``: Bind lifecycle listeners to the underlying ``Runnable``.
5749
- - ``with_types``: Override the input and output types of the underlying ``Runnable``.
5965
+ - ``with_types``: Override the input and output types of the underlying
5966
+ ``Runnable``.
5750
5967
  - ``with_retry``: Bind a retry policy to the underlying ``Runnable``.
5751
5968
  - ``with_fallbacks``: Bind a fallback policy to the underlying ``Runnable``.
5752
5969
 
@@ -5758,12 +5975,13 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): # type: ignore[no-re
5758
5975
  # Create a Runnable binding that invokes the ChatModel with the
5759
5976
  # additional kwarg `stop=['-']` when running it.
5760
5977
  from langchain_community.chat_models import ChatOpenAI
5978
+
5761
5979
  model = ChatOpenAI()
5762
- model.invoke('Say "Parrot-MAGIC"', stop=['-']) # Should return `Parrot`
5980
+ model.invoke('Say "Parrot-MAGIC"', stop=["-"]) # Should return `Parrot`
5763
5981
  # Using it the easy way via `bind` method which returns a new
5764
5982
  # RunnableBinding
5765
- runnable_binding = model.bind(stop=['-'])
5766
- runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot`
5983
+ runnable_binding = model.bind(stop=["-"])
5984
+ runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot`
5767
5985
 
5768
5986
  Can also be done by instantiating a ``RunnableBinding`` directly (not
5769
5987
  recommended):
@@ -5771,13 +5989,14 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): # type: ignore[no-re
5771
5989
  .. code-block:: python
5772
5990
 
5773
5991
  from langchain_core.runnables import RunnableBinding
5992
+
5774
5993
  runnable_binding = RunnableBinding(
5775
5994
  bound=model,
5776
- kwargs={'stop': ['-']} # <-- Note the additional kwargs
5995
+ kwargs={"stop": ["-"]}, # <-- Note the additional kwargs
5777
5996
  )
5778
- runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot`
5997
+ runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot`
5779
5998
 
5780
- """ # noqa: E501
5999
+ """
5781
6000
 
5782
6001
  @override
5783
6002
  def bind(self, **kwargs: Any) -> Runnable[Input, Output]:
@@ -5847,7 +6066,6 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): # type: ignore[no-re
5847
6066
  Returns:
5848
6067
  A new ``Runnable`` with the listeners bound.
5849
6068
  """
5850
- from langchain_core.tracers.root_listeners import RootListenersTracer
5851
6069
 
5852
6070
  def listener_config_factory(config: RunnableConfig) -> RunnableConfig:
5853
6071
  return {
@@ -6050,6 +6268,7 @@ def chain(
6050
6268
  from langchain_core.prompts import PromptTemplate
6051
6269
  from langchain_openai import OpenAI
6052
6270
 
6271
+
6053
6272
  @chain
6054
6273
  def my_func(fields):
6055
6274
  prompt = PromptTemplate("Hello, {name}!")