langchain-core 0.3.72__py3-none-any.whl → 0.3.74__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic. Click here for more details.

Files changed (60) hide show
  1. langchain_core/_api/beta_decorator.py +1 -0
  2. langchain_core/_api/deprecation.py +2 -0
  3. langchain_core/beta/runnables/context.py +1 -0
  4. langchain_core/callbacks/file.py +1 -0
  5. langchain_core/callbacks/manager.py +4 -0
  6. langchain_core/callbacks/usage.py +3 -1
  7. langchain_core/chat_history.py +1 -0
  8. langchain_core/document_loaders/langsmith.py +2 -1
  9. langchain_core/documents/base.py +2 -0
  10. langchain_core/embeddings/fake.py +2 -0
  11. langchain_core/indexing/api.py +10 -0
  12. langchain_core/language_models/_utils.py +1 -0
  13. langchain_core/language_models/chat_models.py +58 -27
  14. langchain_core/language_models/llms.py +1 -0
  15. langchain_core/memory.py +1 -0
  16. langchain_core/messages/ai.py +3 -0
  17. langchain_core/messages/human.py +1 -0
  18. langchain_core/messages/tool.py +3 -0
  19. langchain_core/messages/utils.py +7 -2
  20. langchain_core/output_parsers/base.py +1 -0
  21. langchain_core/output_parsers/openai_functions.py +1 -0
  22. langchain_core/outputs/llm_result.py +4 -1
  23. langchain_core/prompts/base.py +4 -0
  24. langchain_core/prompts/chat.py +3 -0
  25. langchain_core/prompts/few_shot.py +1 -0
  26. langchain_core/prompts/few_shot_with_templates.py +1 -0
  27. langchain_core/prompts/image.py +1 -0
  28. langchain_core/prompts/pipeline.py +1 -0
  29. langchain_core/prompts/prompt.py +1 -0
  30. langchain_core/prompts/structured.py +1 -0
  31. langchain_core/rate_limiters.py +1 -0
  32. langchain_core/retrievers.py +3 -0
  33. langchain_core/runnables/base.py +59 -48
  34. langchain_core/runnables/branch.py +1 -0
  35. langchain_core/runnables/configurable.py +2 -1
  36. langchain_core/runnables/fallbacks.py +3 -7
  37. langchain_core/runnables/graph.py +2 -0
  38. langchain_core/runnables/graph_ascii.py +1 -0
  39. langchain_core/runnables/graph_mermaid.py +1 -0
  40. langchain_core/runnables/history.py +1 -0
  41. langchain_core/runnables/passthrough.py +3 -0
  42. langchain_core/runnables/retry.py +1 -0
  43. langchain_core/runnables/router.py +1 -0
  44. langchain_core/runnables/schema.py +1 -0
  45. langchain_core/stores.py +3 -0
  46. langchain_core/tools/base.py +11 -6
  47. langchain_core/tools/convert.py +3 -1
  48. langchain_core/tools/structured.py +1 -0
  49. langchain_core/tracers/context.py +1 -1
  50. langchain_core/tracers/log_stream.py +2 -0
  51. langchain_core/utils/_merge.py +1 -1
  52. langchain_core/utils/aiter.py +5 -0
  53. langchain_core/utils/function_calling.py +12 -0
  54. langchain_core/utils/iter.py +1 -0
  55. langchain_core/vectorstores/base.py +1 -0
  56. langchain_core/version.py +1 -1
  57. {langchain_core-0.3.72.dist-info → langchain_core-0.3.74.dist-info}/METADATA +1 -1
  58. {langchain_core-0.3.72.dist-info → langchain_core-0.3.74.dist-info}/RECORD +60 -60
  59. {langchain_core-0.3.72.dist-info → langchain_core-0.3.74.dist-info}/WHEEL +0 -0
  60. {langchain_core-0.3.72.dist-info → langchain_core-0.3.74.dist-info}/entry_points.txt +0 -0
@@ -97,10 +97,7 @@ if TYPE_CHECKING:
97
97
  from langchain_core.runnables.retry import ExponentialJitterParams
98
98
  from langchain_core.runnables.schema import StreamEvent
99
99
  from langchain_core.tools import BaseTool
100
- from langchain_core.tracers.log_stream import (
101
- RunLog,
102
- RunLogPatch,
103
- )
100
+ from langchain_core.tracers.log_stream import RunLog, RunLogPatch
104
101
  from langchain_core.tracers.root_listeners import AsyncListener
105
102
  from langchain_core.tracers.schemas import Run
106
103
 
@@ -236,6 +233,7 @@ class Runnable(ABC, Generic[Input, Output]):
236
233
  )
237
234
 
238
235
  For a UI (and much more) check out LangSmith: https://docs.smith.langchain.com/
236
+
239
237
  """ # noqa: E501
240
238
 
241
239
  name: Optional[str]
@@ -391,6 +389,7 @@ class Runnable(ABC, Generic[Input, Output]):
391
389
  print(runnable.get_input_jsonschema())
392
390
 
393
391
  .. versionadded:: 0.3.0
392
+
394
393
  """
395
394
  return self.get_input_schema(config).model_json_schema()
396
395
 
@@ -464,6 +463,7 @@ class Runnable(ABC, Generic[Input, Output]):
464
463
  print(runnable.get_output_jsonschema())
465
464
 
466
465
  .. versionadded:: 0.3.0
466
+
467
467
  """
468
468
  return self.get_output_schema(config).model_json_schema()
469
469
 
@@ -620,6 +620,7 @@ class Runnable(ABC, Generic[Input, Output]):
620
620
  sequence.batch([1, 2, 3])
621
621
  await sequence.abatch([1, 2, 3])
622
622
  # -> [4, 6, 8]
623
+
623
624
  """
624
625
  return RunnableSequence(self, *others, name=name)
625
626
 
@@ -736,10 +737,10 @@ class Runnable(ABC, Generic[Input, Output]):
736
737
  Args:
737
738
  input: The input to the Runnable.
738
739
  config: A config to use when invoking the Runnable.
739
- The config supports standard keys like 'tags', 'metadata' for tracing
740
- purposes, 'max_concurrency' for controlling how much work to do
741
- in parallel, and other keys. Please refer to the RunnableConfig
742
- for more details.
740
+ The config supports standard keys like ``'tags'``, ``'metadata'`` for
741
+ tracing purposes, ``'max_concurrency'`` for controlling how much work to
742
+ do in parallel, and other keys. Please refer to the RunnableConfig
743
+ for more details. Defaults to None.
743
744
 
744
745
  Returns:
745
746
  The output of the Runnable.
@@ -885,9 +886,9 @@ class Runnable(ABC, Generic[Input, Output]):
885
886
  Args:
886
887
  inputs: A list of inputs to the Runnable.
887
888
  config: A config to use when invoking the Runnable.
888
- The config supports standard keys like 'tags', 'metadata' for tracing
889
- purposes, 'max_concurrency' for controlling how much work to do
890
- in parallel, and other keys. Please refer to the RunnableConfig
889
+ The config supports standard keys like ``'tags'``, ``'metadata'`` for
890
+ tracing purposes, ``'max_concurrency'`` for controlling how much work to
891
+ do in parallel, and other keys. Please refer to the RunnableConfig
891
892
  for more details. Defaults to None.
892
893
  return_exceptions: Whether to return exceptions instead of raising them.
893
894
  Defaults to False.
@@ -950,10 +951,10 @@ class Runnable(ABC, Generic[Input, Output]):
950
951
  Args:
951
952
  inputs: A list of inputs to the Runnable.
952
953
  config: A config to use when invoking the Runnable.
953
- The config supports standard keys like 'tags', 'metadata' for tracing
954
- purposes, 'max_concurrency' for controlling how much work to do
955
- in parallel, and other keys. Please refer to the RunnableConfig
956
- for more details. Defaults to None. Defaults to None.
954
+ The config supports standard keys like ``'tags'``, ``'metadata'`` for
955
+ tracing purposes, ``'max_concurrency'`` for controlling how much work to
956
+ do in parallel, and other keys. Please refer to the RunnableConfig
957
+ for more details. Defaults to None.
957
958
  return_exceptions: Whether to return exceptions instead of raising them.
958
959
  Defaults to False.
959
960
  kwargs: Additional keyword arguments to pass to the Runnable.
@@ -1361,6 +1362,7 @@ class Runnable(ABC, Generic[Input, Output]):
1361
1362
 
1362
1363
  Raises:
1363
1364
  NotImplementedError: If the version is not `v1` or `v2`.
1365
+
1364
1366
  """ # noqa: E501
1365
1367
  from langchain_core.tracers.event_stream import (
1366
1368
  _astream_events_implementation_v1,
@@ -1569,18 +1571,17 @@ class Runnable(ABC, Generic[Input, Output]):
1569
1571
  ) -> Runnable[Input, Output]:
1570
1572
  """Bind lifecycle listeners to a Runnable, returning a new Runnable.
1571
1573
 
1572
- on_start: Called before the Runnable starts running, with the Run object.
1573
- on_end: Called after the Runnable finishes running, with the Run object.
1574
- on_error: Called if the Runnable throws an error, with the Run object.
1575
-
1576
1574
  The Run object contains information about the run, including its id,
1577
1575
  type, input, output, error, start_time, end_time, and any tags or metadata
1578
1576
  added to the run.
1579
1577
 
1580
1578
  Args:
1581
- on_start: Called before the Runnable starts running. Defaults to None.
1582
- on_end: Called after the Runnable finishes running. Defaults to None.
1583
- on_error: Called if the Runnable throws an error. Defaults to None.
1579
+ on_start: Called before the Runnable starts running, with the Run object.
1580
+ Defaults to None.
1581
+ on_end: Called after the Runnable finishes running, with the Run object.
1582
+ Defaults to None.
1583
+ on_error: Called if the Runnable throws an error, with the Run object.
1584
+ Defaults to None.
1584
1585
 
1585
1586
  Returns:
1586
1587
  A new Runnable with the listeners bound.
@@ -1608,6 +1609,7 @@ class Runnable(ABC, Generic[Input, Output]):
1608
1609
  on_end=fn_end
1609
1610
  )
1610
1611
  chain.invoke(2)
1612
+
1611
1613
  """
1612
1614
  from langchain_core.tracers.root_listeners import RootListenersTracer
1613
1615
 
@@ -1636,21 +1638,17 @@ class Runnable(ABC, Generic[Input, Output]):
1636
1638
  ) -> Runnable[Input, Output]:
1637
1639
  """Bind async lifecycle listeners to a Runnable, returning a new Runnable.
1638
1640
 
1639
- on_start: Asynchronously called before the Runnable starts running.
1640
- on_end: Asynchronously called after the Runnable finishes running.
1641
- on_error: Asynchronously called if the Runnable throws an error.
1642
-
1643
1641
  The Run object contains information about the run, including its id,
1644
1642
  type, input, output, error, start_time, end_time, and any tags or metadata
1645
1643
  added to the run.
1646
1644
 
1647
1645
  Args:
1648
- on_start: Asynchronously called before the Runnable starts running.
1649
- Defaults to None.
1650
- on_end: Asynchronously called after the Runnable finishes running.
1651
- Defaults to None.
1652
- on_error: Asynchronously called if the Runnable throws an error.
1653
- Defaults to None.
1646
+ on_start: Called asynchronously before the Runnable starts running,
1647
+ with the Run object. Defaults to None.
1648
+ on_end: Called asynchronously after the Runnable finishes running,
1649
+ with the Run object. Defaults to None.
1650
+ on_error: Called asynchronously if the Runnable throws an error,
1651
+ with the Run object. Defaults to None.
1654
1652
 
1655
1653
  Returns:
1656
1654
  A new Runnable with the listeners bound.
@@ -1830,6 +1828,7 @@ class Runnable(ABC, Generic[Input, Output]):
1830
1828
 
1831
1829
  runnable = RunnableLambda(_lambda)
1832
1830
  print(runnable.map().invoke([1, 2, 3])) # [2, 3, 4]
1831
+
1833
1832
  """
1834
1833
  return RunnableEach(bound=self)
1835
1834
 
@@ -2451,6 +2450,7 @@ class Runnable(ABC, Generic[Input, Output]):
2451
2450
  as_tool.invoke("b")
2452
2451
 
2453
2452
  .. versionadded:: 0.2.14
2453
+
2454
2454
  """
2455
2455
  # Avoid circular import
2456
2456
  from langchain_core.tools import convert_runnable_to_tool
@@ -2522,6 +2522,7 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2522
2522
  configurable={"output_token_number": 200}
2523
2523
  ).invoke("tell me something about chess").content
2524
2524
  )
2525
+
2525
2526
  """
2526
2527
  from langchain_core.runnables.configurable import RunnableConfigurableFields
2527
2528
 
@@ -2550,7 +2551,7 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2550
2551
  which: The ConfigurableField instance that will be used to select the
2551
2552
  alternative.
2552
2553
  default_key: The default key to use if no alternative is selected.
2553
- Defaults to "default".
2554
+ Defaults to ``'default'``.
2554
2555
  prefix_keys: Whether to prefix the keys with the ConfigurableField id.
2555
2556
  Defaults to False.
2556
2557
  **kwargs: A dictionary of keys to Runnable instances or callables that
@@ -2566,7 +2567,7 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2566
2567
  from langchain_openai import ChatOpenAI
2567
2568
 
2568
2569
  model = ChatAnthropic(
2569
- model_name="claude-3-sonnet-20240229"
2570
+ model_name="claude-3-7-sonnet-20250219"
2570
2571
  ).configurable_alternatives(
2571
2572
  ConfigurableField(id="llm"),
2572
2573
  default_key="anthropic",
@@ -2582,6 +2583,7 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2582
2583
  configurable={"llm": "openai"}
2583
2584
  ).invoke("which organization created you?").content
2584
2585
  )
2586
+
2585
2587
  """
2586
2588
  from langchain_core.runnables.configurable import (
2587
2589
  RunnableConfigurableAlternatives,
@@ -2746,6 +2748,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
2746
2748
  async for chunk in chain.astream({'topic': 'colors'}):
2747
2749
  print('-') # noqa: T201
2748
2750
  print(chunk, sep='', flush=True) # noqa: T201
2751
+
2749
2752
  """
2750
2753
 
2751
2754
  # The steps are broken into first, middle and last, solely for type checking
@@ -3544,6 +3547,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
3544
3547
  for key in chunk:
3545
3548
  output[key] = output[key] + chunk[key].content
3546
3549
  print(output) # noqa: T201
3550
+
3547
3551
  """
3548
3552
 
3549
3553
  steps__: Mapping[str, Runnable[Input, Any]]
@@ -4066,6 +4070,7 @@ class RunnableGenerator(Runnable[Input, Output]):
4066
4070
 
4067
4071
  runnable = chant_chain | RunnableLambda(reverse_generator)
4068
4072
  "".join(runnable.stream({"topic": "waste"})) # ".elcycer ,esuer ,ecudeR"
4073
+
4069
4074
  """
4070
4075
 
4071
4076
  def __init__(
@@ -4326,6 +4331,7 @@ class RunnableLambda(Runnable[Input, Output]):
4326
4331
  runnable = RunnableLambda(add_one, afunc=add_one_async)
4327
4332
  runnable.invoke(1) # Uses add_one
4328
4333
  await runnable.ainvoke(1) # Uses add_one_async
4334
+
4329
4335
  """
4330
4336
 
4331
4337
  def __init__(
@@ -5180,6 +5186,7 @@ class RunnableEach(RunnableEachBase[Input, Output]):
5180
5186
  {'topic':'Art'},
5181
5187
  {'topic':'Biology'}])
5182
5188
  print(output) # noqa: T201
5189
+
5183
5190
  """
5184
5191
 
5185
5192
  @override
@@ -5215,6 +5222,10 @@ class RunnableEach(RunnableEachBase[Input, Output]):
5215
5222
  ) -> RunnableEach[Input, Output]:
5216
5223
  """Bind lifecycle listeners to a Runnable, returning a new Runnable.
5217
5224
 
5225
+ The Run object contains information about the run, including its id,
5226
+ type, input, output, error, start_time, end_time, and any tags or metadata
5227
+ added to the run.
5228
+
5218
5229
  Args:
5219
5230
  on_start: Called before the Runnable starts running, with the Run object.
5220
5231
  Defaults to None.
@@ -5225,10 +5236,6 @@ class RunnableEach(RunnableEachBase[Input, Output]):
5225
5236
 
5226
5237
  Returns:
5227
5238
  A new Runnable with the listeners bound.
5228
-
5229
- The Run object contains information about the run, including its id,
5230
- type, input, output, error, start_time, end_time, and any tags or metadata
5231
- added to the run.
5232
5239
  """
5233
5240
  return RunnableEach(
5234
5241
  bound=self.bound.with_listeners(
@@ -5245,20 +5252,20 @@ class RunnableEach(RunnableEachBase[Input, Output]):
5245
5252
  ) -> RunnableEach[Input, Output]:
5246
5253
  """Bind async lifecycle listeners to a Runnable, returning a new Runnable.
5247
5254
 
5255
+ The Run object contains information about the run, including its id,
5256
+ type, input, output, error, start_time, end_time, and any tags or metadata
5257
+ added to the run.
5258
+
5248
5259
  Args:
5249
5260
  on_start: Called asynchronously before the Runnable starts running,
5250
- with the Run object. Defaults to None.
5261
+ with the Run object. Defaults to None.
5251
5262
  on_end: Called asynchronously after the Runnable finishes running,
5252
- with the Run object. Defaults to None.
5263
+ with the Run object. Defaults to None.
5253
5264
  on_error: Called asynchronously if the Runnable throws an error,
5254
- with the Run object. Defaults to None.
5265
+ with the Run object. Defaults to None.
5255
5266
 
5256
5267
  Returns:
5257
5268
  A new Runnable with the listeners bound.
5258
-
5259
- The Run object contains information about the run, including its id,
5260
- type, input, output, error, start_time, end_time, and any tags or metadata
5261
- added to the run.
5262
5269
  """
5263
5270
  return RunnableEach(
5264
5271
  bound=self.bound.with_alisteners(
@@ -5714,6 +5721,7 @@ class RunnableBinding(RunnableBindingBase[Input, Output]):
5714
5721
  kwargs={'stop': ['-']} # <-- Note the additional kwargs
5715
5722
  )
5716
5723
  runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot`
5724
+
5717
5725
  """
5718
5726
 
5719
5727
  @override
@@ -5768,6 +5776,10 @@ class RunnableBinding(RunnableBindingBase[Input, Output]):
5768
5776
  ) -> Runnable[Input, Output]:
5769
5777
  """Bind lifecycle listeners to a Runnable, returning a new Runnable.
5770
5778
 
5779
+ The Run object contains information about the run, including its id,
5780
+ type, input, output, error, start_time, end_time, and any tags or metadata
5781
+ added to the run.
5782
+
5771
5783
  Args:
5772
5784
  on_start: Called before the Runnable starts running, with the Run object.
5773
5785
  Defaults to None.
@@ -5777,9 +5789,7 @@ class RunnableBinding(RunnableBindingBase[Input, Output]):
5777
5789
  Defaults to None.
5778
5790
 
5779
5791
  Returns:
5780
- The Runnable object contains information about the run, including its id,
5781
- type, input, output, error, start_time, end_time, and any tags or metadata
5782
- added to the run.
5792
+ A new Runnable with the listeners bound.
5783
5793
  """
5784
5794
  from langchain_core.tracers.root_listeners import RootListenersTracer
5785
5795
 
@@ -5992,5 +6002,6 @@ def chain(
5992
6002
 
5993
6003
  for chunk in llm.stream(formatted):
5994
6004
  yield chunk
6005
+
5995
6006
  """
5996
6007
  return RunnableLambda(func)
@@ -63,6 +63,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
63
63
 
64
64
  branch.invoke("hello") # "HELLO"
65
65
  branch.invoke(None) # "goodbye"
66
+
66
67
  """
67
68
 
68
69
  branches: Sequence[tuple[Runnable[Input, bool], Runnable[Input, Output]]]
@@ -378,6 +378,7 @@ class RunnableConfigurableFields(DynamicRunnable[Input, Output]):
378
378
  {"question": "foo", "context": "bar"},
379
379
  config={"configurable": {"hub_commit": "rlm/rag-prompt-llama"}},
380
380
  )
381
+
381
382
  """
382
383
 
383
384
  fields: dict[str, AnyConfigurableField]
@@ -544,7 +545,7 @@ class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]):
544
545
  """The alternatives to choose from."""
545
546
 
546
547
  default_key: str = "default"
547
- """The enum value to use for the default option. Defaults to "default"."""
548
+ """The enum value to use for the default option. Defaults to ``'default'``."""
548
549
 
549
550
  prefix_keys: bool
550
551
  """Whether to prefix configurable fields of each alternative with a namespace
@@ -5,12 +5,7 @@ import inspect
5
5
  import typing
6
6
  from collections.abc import AsyncIterator, Iterator, Sequence
7
7
  from functools import wraps
8
- from typing import (
9
- TYPE_CHECKING,
10
- Any,
11
- Optional,
12
- Union,
13
- )
8
+ from typing import TYPE_CHECKING, Any, Optional, Union
14
9
 
15
10
  from pydantic import BaseModel, ConfigDict
16
11
  from typing_extensions import override
@@ -85,6 +80,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
85
80
  | model
86
81
  | StrOutputParser()
87
82
  ).with_fallbacks([RunnableLambda(when_all_is_lost)])
83
+
88
84
  """
89
85
 
90
86
  runnable: Runnable[Input, Output]
@@ -598,7 +594,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
598
594
  from langchain_anthropic import ChatAnthropic
599
595
 
600
596
  gpt_4o = ChatOpenAI(model="gpt-4o")
601
- claude_3_sonnet = ChatAnthropic(model="claude-3-sonnet-20240229")
597
+ claude_3_sonnet = ChatAnthropic(model="claude-3-7-sonnet-20250219")
602
598
  llm = gpt_4o.with_fallbacks([claude_3_sonnet])
603
599
 
604
600
  llm.model_name
@@ -611,6 +611,7 @@ class Graph:
611
611
 
612
612
  Returns:
613
613
  The Mermaid syntax string.
614
+
614
615
  """
615
616
  from langchain_core.runnables.graph_mermaid import draw_mermaid
616
617
 
@@ -681,6 +682,7 @@ class Graph:
681
682
 
682
683
  Returns:
683
684
  The PNG image as bytes.
685
+
684
686
  """
685
687
  from langchain_core.runnables.graph_mermaid import draw_mermaid_png
686
688
 
@@ -263,6 +263,7 @@ def draw_ascii(vertices: Mapping[str, str], edges: Sequence[LangEdge]) -> str:
263
263
  +---+ +---+
264
264
  | 3 | | 4 |
265
265
  +---+ +---+
266
+
266
267
  """
267
268
  # NOTE: coordinates might be negative, so we need to shift
268
269
  # everything to the positive plane before we actually draw it.
@@ -70,6 +70,7 @@ def draw_mermaid(
70
70
 
71
71
  Returns:
72
72
  str: Mermaid graph syntax.
73
+
73
74
  """
74
75
  # Initialize Mermaid graph configuration
75
76
  original_frontmatter_config = frontmatter_config or {}
@@ -311,6 +311,7 @@ class RunnableWithMessageHistory(RunnableBindingBase):
311
311
  into the get_session_history factory.
312
312
  **kwargs: Arbitrary additional kwargs to pass to parent class
313
313
  ``RunnableBindingBase`` init.
314
+
314
315
  """
315
316
  history_chain: Runnable = RunnableLambda(
316
317
  self._enter_history, self._aenter_history
@@ -132,6 +132,7 @@ class RunnablePassthrough(RunnableSerializable[Other, Other]):
132
132
 
133
133
  runnable.invoke('hello')
134
134
  # {'llm1': 'completion', 'llm2': 'completion', 'total_chars': 20}
135
+
135
136
  """
136
137
 
137
138
  input_type: Optional[type[Other]] = None
@@ -393,6 +394,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
393
394
  # Asynchronous example
394
395
  await runnable_assign.ainvoke({"input": 5})
395
396
  # returns {'input': 5, 'add_step': {'added': 15}}
397
+
396
398
  """
397
399
 
398
400
  mapper: RunnableParallel
@@ -697,6 +699,7 @@ class RunnablePick(RunnableSerializable[dict[str, Any], dict[str, Any]]):
697
699
  output_data = runnable.invoke(input_data)
698
700
 
699
701
  print(output_data) # Output: {'name': 'John', 'age': 30}
702
+
700
703
  """
701
704
 
702
705
  keys: Union[str, list[str]]
@@ -110,6 +110,7 @@ class RunnableRetry(RunnableBindingBase[Input, Output]):
110
110
  # Bad
111
111
  chain = template | model
112
112
  retryable_chain = chain.with_retry()
113
+
113
114
  """ # noqa: E501
114
115
 
115
116
  retry_exception_types: tuple[type[BaseException], ...] = (Exception,)
@@ -66,6 +66,7 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
66
66
 
67
67
  router = RouterRunnable(runnables={"add": add, "square": square})
68
68
  router.invoke({"key": "square", "input": 3})
69
+
69
70
  """
70
71
 
71
72
  runnables: Mapping[str, Runnable[Any, Output]]
@@ -83,6 +83,7 @@ class BaseStreamEvent(TypedDict):
83
83
  "tags": [],
84
84
  },
85
85
  ]
86
+
86
87
  """
87
88
 
88
89
  event: str
langchain_core/stores.py CHANGED
@@ -76,6 +76,7 @@ class BaseStore(ABC, Generic[K, V]):
76
76
  for key in self.store.keys():
77
77
  if key.startswith(prefix):
78
78
  yield key
79
+
79
80
  """
80
81
 
81
82
  @abstractmethod
@@ -302,6 +303,7 @@ class InMemoryStore(InMemoryBaseStore[Any]):
302
303
  # ['key2']
303
304
  list(store.yield_keys(prefix='k'))
304
305
  # ['key2']
306
+
305
307
  """
306
308
 
307
309
 
@@ -327,6 +329,7 @@ class InMemoryByteStore(InMemoryBaseStore[bytes]):
327
329
  # ['key2']
328
330
  list(store.yield_keys(prefix='k'))
329
331
  # ['key2']
332
+
330
333
  """
331
334
 
332
335
 
@@ -74,7 +74,14 @@ if TYPE_CHECKING:
74
74
  from collections.abc import Sequence
75
75
 
76
76
  FILTERED_ARGS = ("run_manager", "callbacks")
77
- TOOL_MESSAGE_BLOCK_TYPES = ("text", "image_url", "image", "json", "search_result")
77
+ TOOL_MESSAGE_BLOCK_TYPES = (
78
+ "text",
79
+ "image_url",
80
+ "image",
81
+ "json",
82
+ "search_result",
83
+ "custom_tool_call_output",
84
+ )
78
85
 
79
86
 
80
87
  class SchemaAnnotationError(TypeError):
@@ -443,9 +450,7 @@ class ChildTool(BaseTool):
443
450
  Args schema should be either:
444
451
 
445
452
  - A subclass of pydantic.BaseModel.
446
- or
447
453
  - A subclass of pydantic.v1.BaseModel if accessing v1 namespace in pydantic 2
448
- or
449
454
  - a JSON schema dict
450
455
  """
451
456
  return_direct: bool = False
@@ -1258,8 +1263,8 @@ class InjectedToolCallId(InjectedToolArg):
1258
1263
  This annotation is used to mark a tool parameter that should receive
1259
1264
  the tool call ID at runtime.
1260
1265
 
1261
- Example:
1262
- ```python
1266
+ .. code-block:: python
1267
+
1263
1268
  from typing_extensions import Annotated
1264
1269
  from langchain_core.messages import ToolMessage
1265
1270
  from langchain_core.tools import tool, InjectedToolCallId
@@ -1275,7 +1280,7 @@ class InjectedToolCallId(InjectedToolArg):
1275
1280
  name="foo",
1276
1281
  tool_call_id=tool_call_id
1277
1282
  )
1278
- ```
1283
+
1279
1284
  """
1280
1285
 
1281
1286
 
@@ -144,7 +144,8 @@ def tool(
144
144
  return "partial json of results", {"full": "object of results"}
145
145
 
146
146
  .. versionadded:: 0.2.14
147
- Parse Google-style docstrings:
147
+
148
+ Parse Google-style docstrings:
148
149
 
149
150
  .. code-block:: python
150
151
 
@@ -214,6 +215,7 @@ def tool(
214
215
  monkey: The baz.
215
216
  \"\"\"
216
217
  return bar
218
+
217
219
  """ # noqa: D214, D410, D411
218
220
 
219
221
  def _create_tool_factory(
@@ -174,6 +174,7 @@ class StructuredTool(BaseTool):
174
174
  return a + b
175
175
  tool = StructuredTool.from_function(add)
176
176
  tool.run(1, 2) # 3
177
+
177
178
  """
178
179
  if func is not None:
179
180
  source_function = func
@@ -62,7 +62,7 @@ def tracing_v2_enabled(
62
62
 
63
63
  Args:
64
64
  project_name (str, optional): The name of the project.
65
- Defaults to "default".
65
+ Defaults to ``'default'``.
66
66
  example_id (str or UUID, optional): The ID of the example.
67
67
  Defaults to None.
68
68
  tags (list[str], optional): The tags to add to the run.
@@ -210,7 +210,9 @@ class LogStreamCallbackHandler(BaseTracer, _StreamingCallbackHandler):
210
210
  exclude_tags: Exclude runs from Runnables with matching tags.
211
211
  _schema_format: Primarily changes how the inputs and outputs are
212
212
  handled.
213
+
213
214
  **For internal use only. This API will change.**
215
+
214
216
  - 'original' is the format used by all current tracers.
215
217
  This format is slightly inconsistent with respect to inputs
216
218
  and outputs.
@@ -97,7 +97,7 @@ def merge_lists(left: Optional[list], *others: Optional[list]) -> Optional[list]
97
97
  to_merge = [
98
98
  i
99
99
  for i, e_left in enumerate(merged)
100
- if e_left["index"] == e["index"]
100
+ if "index" in e_left and e_left["index"] == e["index"]
101
101
  ]
102
102
  if to_merge:
103
103
  # TODO: Remove this once merge_dict is updated with special
@@ -189,6 +189,7 @@ class Tee(Generic[T]):
189
189
  To enforce sequential use of ``anext``, provide a ``lock``
190
190
  - e.g. an :py:class:`asyncio.Lock` instance in an :py:mod:`asyncio` application -
191
191
  and access is automatically synchronised.
192
+
192
193
  """
193
194
 
194
195
  def __init__(
@@ -266,11 +267,15 @@ class aclosing(AbstractAsyncContextManager): # noqa: N801
266
267
 
267
268
  Code like this:
268
269
 
270
+ .. code-block:: python
271
+
269
272
  async with aclosing(<module>.fetch(<arguments>)) as agen:
270
273
  <block>
271
274
 
272
275
  is equivalent to this:
273
276
 
277
+ .. code-block:: python
278
+
274
279
  agen = <module>.fetch(<arguments>)
275
280
  try:
276
281
  <block>
@@ -575,12 +575,23 @@ def convert_to_openai_tool(
575
575
 
576
576
  Added support for OpenAI's image generation built-in tool.
577
577
  """
578
+ from langchain_core.tools import Tool
579
+
578
580
  if isinstance(tool, dict):
579
581
  if tool.get("type") in _WellKnownOpenAITools:
580
582
  return tool
581
583
  # As of 03.12.25 can be "web_search_preview" or "web_search_preview_2025_03_11"
582
584
  if (tool.get("type") or "").startswith("web_search_preview"):
583
585
  return tool
586
+ if isinstance(tool, Tool) and (tool.metadata or {}).get("type") == "custom_tool":
587
+ oai_tool = {
588
+ "type": "custom",
589
+ "name": tool.name,
590
+ "description": tool.description,
591
+ }
592
+ if tool.metadata is not None and "format" in tool.metadata:
593
+ oai_tool["format"] = tool.metadata["format"]
594
+ return oai_tool
584
595
  oai_function = convert_to_openai_function(tool, strict=strict)
585
596
  return {"type": "function", "function": oai_function}
586
597
 
@@ -687,6 +698,7 @@ def tool_example_to_messages(
687
698
  messages.extend(
688
699
  tool_example_to_messages(txt, [tool_call])
689
700
  )
701
+
690
702
  """
691
703
  messages: list[BaseMessage] = [HumanMessage(content=input)]
692
704
  openai_tool_calls = [