langchain-core 1.0.7__py3-none-any.whl → 1.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. langchain_core/callbacks/manager.py +14 -14
  2. langchain_core/callbacks/usage.py +1 -1
  3. langchain_core/indexing/api.py +2 -0
  4. langchain_core/language_models/__init__.py +15 -5
  5. langchain_core/language_models/_utils.py +1 -0
  6. langchain_core/language_models/chat_models.py +74 -94
  7. langchain_core/language_models/llms.py +5 -3
  8. langchain_core/language_models/model_profile.py +84 -0
  9. langchain_core/load/load.py +14 -1
  10. langchain_core/messages/ai.py +12 -4
  11. langchain_core/messages/base.py +6 -6
  12. langchain_core/messages/block_translators/anthropic.py +27 -8
  13. langchain_core/messages/block_translators/bedrock_converse.py +18 -8
  14. langchain_core/messages/block_translators/google_genai.py +25 -10
  15. langchain_core/messages/content.py +1 -1
  16. langchain_core/messages/tool.py +28 -27
  17. langchain_core/messages/utils.py +45 -18
  18. langchain_core/output_parsers/openai_tools.py +9 -7
  19. langchain_core/output_parsers/pydantic.py +1 -1
  20. langchain_core/output_parsers/string.py +27 -1
  21. langchain_core/prompts/chat.py +22 -17
  22. langchain_core/prompts/string.py +2 -59
  23. langchain_core/prompts/structured.py +7 -1
  24. langchain_core/runnables/base.py +174 -160
  25. langchain_core/runnables/branch.py +1 -1
  26. langchain_core/runnables/config.py +25 -20
  27. langchain_core/runnables/fallbacks.py +1 -2
  28. langchain_core/runnables/passthrough.py +2 -2
  29. langchain_core/tools/base.py +23 -4
  30. langchain_core/tools/convert.py +16 -0
  31. langchain_core/tools/retriever.py +29 -58
  32. langchain_core/tracers/event_stream.py +9 -4
  33. langchain_core/utils/aiter.py +3 -1
  34. langchain_core/utils/function_calling.py +7 -2
  35. langchain_core/utils/json_schema.py +29 -21
  36. langchain_core/utils/pydantic.py +7 -7
  37. langchain_core/utils/uuid.py +54 -0
  38. langchain_core/vectorstores/base.py +26 -18
  39. langchain_core/version.py +1 -1
  40. {langchain_core-1.0.7.dist-info → langchain_core-1.2.1.dist-info}/METADATA +2 -1
  41. {langchain_core-1.0.7.dist-info → langchain_core-1.2.1.dist-info}/RECORD +42 -40
  42. {langchain_core-1.0.7.dist-info → langchain_core-1.2.1.dist-info}/WHEEL +1 -1
@@ -94,7 +94,7 @@ from langchain_core.tracers.root_listeners import (
94
94
  AsyncRootListenersTracer,
95
95
  RootListenersTracer,
96
96
  )
97
- from langchain_core.utils.aiter import aclosing, atee, py_anext
97
+ from langchain_core.utils.aiter import aclosing, atee
98
98
  from langchain_core.utils.iter import safetee
99
99
  from langchain_core.utils.pydantic import create_model_v2
100
100
 
@@ -127,10 +127,10 @@ class Runnable(ABC, Generic[Input, Output]):
127
127
  Key Methods
128
128
  ===========
129
129
 
130
- - **`invoke`/`ainvoke`**: Transforms a single input into an output.
131
- - **`batch`/`abatch`**: Efficiently transforms multiple inputs into outputs.
132
- - **`stream`/`astream`**: Streams output from a single input as it's produced.
133
- - **`astream_log`**: Streams output and selected intermediate results from an
130
+ - `invoke`/`ainvoke`: Transforms a single input into an output.
131
+ - `batch`/`abatch`: Efficiently transforms multiple inputs into outputs.
132
+ - `stream`/`astream`: Streams output from a single input as it's produced.
133
+ - `astream_log`: Streams output and selected intermediate results from an
134
134
  input.
135
135
 
136
136
  Built-in optimizations:
@@ -707,51 +707,53 @@ class Runnable(ABC, Generic[Input, Output]):
707
707
  def pick(self, keys: str | list[str]) -> RunnableSerializable[Any, Any]:
708
708
  """Pick keys from the output `dict` of this `Runnable`.
709
709
 
710
- Pick a single key:
710
+ !!! example "Pick a single key"
711
711
 
712
- ```python
713
- import json
712
+ ```python
713
+ import json
714
714
 
715
- from langchain_core.runnables import RunnableLambda, RunnableMap
715
+ from langchain_core.runnables import RunnableLambda, RunnableMap
716
716
 
717
- as_str = RunnableLambda(str)
718
- as_json = RunnableLambda(json.loads)
719
- chain = RunnableMap(str=as_str, json=as_json)
717
+ as_str = RunnableLambda(str)
718
+ as_json = RunnableLambda(json.loads)
719
+ chain = RunnableMap(str=as_str, json=as_json)
720
720
 
721
- chain.invoke("[1, 2, 3]")
722
- # -> {"str": "[1, 2, 3]", "json": [1, 2, 3]}
721
+ chain.invoke("[1, 2, 3]")
722
+ # -> {"str": "[1, 2, 3]", "json": [1, 2, 3]}
723
723
 
724
- json_only_chain = chain.pick("json")
725
- json_only_chain.invoke("[1, 2, 3]")
726
- # -> [1, 2, 3]
727
- ```
724
+ json_only_chain = chain.pick("json")
725
+ json_only_chain.invoke("[1, 2, 3]")
726
+ # -> [1, 2, 3]
727
+ ```
728
728
 
729
- Pick a list of keys:
729
+ !!! example "Pick a list of keys"
730
730
 
731
- ```python
732
- from typing import Any
731
+ ```python
732
+ from typing import Any
733
733
 
734
- import json
734
+ import json
735
735
 
736
- from langchain_core.runnables import RunnableLambda, RunnableMap
736
+ from langchain_core.runnables import RunnableLambda, RunnableMap
737
737
 
738
- as_str = RunnableLambda(str)
739
- as_json = RunnableLambda(json.loads)
738
+ as_str = RunnableLambda(str)
739
+ as_json = RunnableLambda(json.loads)
740
740
 
741
741
 
742
- def as_bytes(x: Any) -> bytes:
743
- return bytes(x, "utf-8")
742
+ def as_bytes(x: Any) -> bytes:
743
+ return bytes(x, "utf-8")
744
744
 
745
745
 
746
- chain = RunnableMap(str=as_str, json=as_json, bytes=RunnableLambda(as_bytes))
746
+ chain = RunnableMap(
747
+ str=as_str, json=as_json, bytes=RunnableLambda(as_bytes)
748
+ )
747
749
 
748
- chain.invoke("[1, 2, 3]")
749
- # -> {"str": "[1, 2, 3]", "json": [1, 2, 3], "bytes": b"[1, 2, 3]"}
750
+ chain.invoke("[1, 2, 3]")
751
+ # -> {"str": "[1, 2, 3]", "json": [1, 2, 3], "bytes": b"[1, 2, 3]"}
750
752
 
751
- json_and_bytes_chain = chain.pick(["json", "bytes"])
752
- json_and_bytes_chain.invoke("[1, 2, 3]")
753
- # -> {"json": [1, 2, 3], "bytes": b"[1, 2, 3]"}
754
- ```
753
+ json_and_bytes_chain = chain.pick(["json", "bytes"])
754
+ json_and_bytes_chain.invoke("[1, 2, 3]")
755
+ # -> {"json": [1, 2, 3], "bytes": b"[1, 2, 3]"}
756
+ ```
755
757
 
756
758
  Args:
757
759
  keys: A key or list of keys to pick from the output dict.
@@ -1372,48 +1374,50 @@ class Runnable(ABC, Generic[Input, Output]):
1372
1374
  ).with_config({"run_name": "my_template", "tags": ["my_template"]})
1373
1375
  ```
1374
1376
 
1375
- For instance:
1377
+ !!! example
1376
1378
 
1377
- ```python
1378
- from langchain_core.runnables import RunnableLambda
1379
+ ```python
1380
+ from langchain_core.runnables import RunnableLambda
1379
1381
 
1380
1382
 
1381
- async def reverse(s: str) -> str:
1382
- return s[::-1]
1383
+ async def reverse(s: str) -> str:
1384
+ return s[::-1]
1383
1385
 
1384
1386
 
1385
- chain = RunnableLambda(func=reverse)
1387
+ chain = RunnableLambda(func=reverse)
1386
1388
 
1387
- events = [event async for event in chain.astream_events("hello", version="v2")]
1389
+ events = [
1390
+ event async for event in chain.astream_events("hello", version="v2")
1391
+ ]
1388
1392
 
1389
- # Will produce the following events
1390
- # (run_id, and parent_ids has been omitted for brevity):
1391
- [
1392
- {
1393
- "data": {"input": "hello"},
1394
- "event": "on_chain_start",
1395
- "metadata": {},
1396
- "name": "reverse",
1397
- "tags": [],
1398
- },
1399
- {
1400
- "data": {"chunk": "olleh"},
1401
- "event": "on_chain_stream",
1402
- "metadata": {},
1403
- "name": "reverse",
1404
- "tags": [],
1405
- },
1406
- {
1407
- "data": {"output": "olleh"},
1408
- "event": "on_chain_end",
1409
- "metadata": {},
1410
- "name": "reverse",
1411
- "tags": [],
1412
- },
1413
- ]
1414
- ```
1393
+ # Will produce the following events
1394
+ # (run_id, and parent_ids has been omitted for brevity):
1395
+ [
1396
+ {
1397
+ "data": {"input": "hello"},
1398
+ "event": "on_chain_start",
1399
+ "metadata": {},
1400
+ "name": "reverse",
1401
+ "tags": [],
1402
+ },
1403
+ {
1404
+ "data": {"chunk": "olleh"},
1405
+ "event": "on_chain_stream",
1406
+ "metadata": {},
1407
+ "name": "reverse",
1408
+ "tags": [],
1409
+ },
1410
+ {
1411
+ "data": {"output": "olleh"},
1412
+ "event": "on_chain_end",
1413
+ "metadata": {},
1414
+ "name": "reverse",
1415
+ "tags": [],
1416
+ },
1417
+ ]
1418
+ ```
1415
1419
 
1416
- ```python title="Example: Dispatch Custom Event"
1420
+ ```python title="Dispatch custom event"
1417
1421
  from langchain_core.callbacks.manager import (
1418
1422
  adispatch_custom_event,
1419
1423
  )
@@ -1447,10 +1451,13 @@ class Runnable(ABC, Generic[Input, Output]):
1447
1451
  Args:
1448
1452
  input: The input to the `Runnable`.
1449
1453
  config: The config to use for the `Runnable`.
1450
- version: The version of the schema to use either `'v2'` or `'v1'`.
1454
+ version: The version of the schema to use, either `'v2'` or `'v1'`.
1455
+
1451
1456
  Users should use `'v2'`.
1457
+
1452
1458
  `'v1'` is for backwards compatibility and will be deprecated
1453
1459
  in `0.4.0`.
1460
+
1454
1461
  No default will be assigned until the API is stabilized.
1455
1462
  custom events will only be surfaced in `'v2'`.
1456
1463
  include_names: Only include events from `Runnable` objects with matching names.
@@ -1460,6 +1467,7 @@ class Runnable(ABC, Generic[Input, Output]):
1460
1467
  exclude_types: Exclude events from `Runnable` objects with matching types.
1461
1468
  exclude_tags: Exclude events from `Runnable` objects with matching tags.
1462
1469
  **kwargs: Additional keyword arguments to pass to the `Runnable`.
1470
+
1463
1471
  These will be passed to `astream_log` as this implementation
1464
1472
  of `astream_events` is built on top of `astream_log`.
1465
1473
 
@@ -2369,7 +2377,7 @@ class Runnable(ABC, Generic[Input, Output]):
2369
2377
  # tee the input so we can iterate over it twice
2370
2378
  input_for_tracing, input_for_transform = atee(inputs, 2)
2371
2379
  # Start the input iterator to ensure the input Runnable starts before this one
2372
- final_input: Input | None = await py_anext(input_for_tracing, None)
2380
+ final_input: Input | None = await anext(input_for_tracing, None)
2373
2381
  final_input_supported = True
2374
2382
  final_output: Output | None = None
2375
2383
  final_output_supported = True
@@ -2409,7 +2417,7 @@ class Runnable(ABC, Generic[Input, Output]):
2409
2417
  iterator = iterator_
2410
2418
  try:
2411
2419
  while True:
2412
- chunk = await coro_with_context(py_anext(iterator), context)
2420
+ chunk = await coro_with_context(anext(iterator), context)
2413
2421
  yield chunk
2414
2422
  if final_output_supported:
2415
2423
  if final_output is None:
@@ -2476,82 +2484,82 @@ class Runnable(ABC, Generic[Input, Output]):
2476
2484
  Returns:
2477
2485
  A `BaseTool` instance.
2478
2486
 
2479
- Typed dict input:
2487
+ !!! example "`TypedDict` input"
2480
2488
 
2481
- ```python
2482
- from typing_extensions import TypedDict
2483
- from langchain_core.runnables import RunnableLambda
2489
+ ```python
2490
+ from typing_extensions import TypedDict
2491
+ from langchain_core.runnables import RunnableLambda
2484
2492
 
2485
2493
 
2486
- class Args(TypedDict):
2487
- a: int
2488
- b: list[int]
2494
+ class Args(TypedDict):
2495
+ a: int
2496
+ b: list[int]
2489
2497
 
2490
2498
 
2491
- def f(x: Args) -> str:
2492
- return str(x["a"] * max(x["b"]))
2499
+ def f(x: Args) -> str:
2500
+ return str(x["a"] * max(x["b"]))
2493
2501
 
2494
2502
 
2495
- runnable = RunnableLambda(f)
2496
- as_tool = runnable.as_tool()
2497
- as_tool.invoke({"a": 3, "b": [1, 2]})
2498
- ```
2503
+ runnable = RunnableLambda(f)
2504
+ as_tool = runnable.as_tool()
2505
+ as_tool.invoke({"a": 3, "b": [1, 2]})
2506
+ ```
2499
2507
 
2500
- `dict` input, specifying schema via `args_schema`:
2508
+ !!! example "`dict` input, specifying schema via `args_schema`"
2501
2509
 
2502
- ```python
2503
- from typing import Any
2504
- from pydantic import BaseModel, Field
2505
- from langchain_core.runnables import RunnableLambda
2510
+ ```python
2511
+ from typing import Any
2512
+ from pydantic import BaseModel, Field
2513
+ from langchain_core.runnables import RunnableLambda
2506
2514
 
2507
- def f(x: dict[str, Any]) -> str:
2508
- return str(x["a"] * max(x["b"]))
2515
+ def f(x: dict[str, Any]) -> str:
2516
+ return str(x["a"] * max(x["b"]))
2509
2517
 
2510
- class FSchema(BaseModel):
2511
- \"\"\"Apply a function to an integer and list of integers.\"\"\"
2518
+ class FSchema(BaseModel):
2519
+ \"\"\"Apply a function to an integer and list of integers.\"\"\"
2512
2520
 
2513
- a: int = Field(..., description="Integer")
2514
- b: list[int] = Field(..., description="List of ints")
2521
+ a: int = Field(..., description="Integer")
2522
+ b: list[int] = Field(..., description="List of ints")
2515
2523
 
2516
- runnable = RunnableLambda(f)
2517
- as_tool = runnable.as_tool(FSchema)
2518
- as_tool.invoke({"a": 3, "b": [1, 2]})
2519
- ```
2524
+ runnable = RunnableLambda(f)
2525
+ as_tool = runnable.as_tool(FSchema)
2526
+ as_tool.invoke({"a": 3, "b": [1, 2]})
2527
+ ```
2520
2528
 
2521
- `dict` input, specifying schema via `arg_types`:
2529
+ !!! example "`dict` input, specifying schema via `arg_types`"
2522
2530
 
2523
- ```python
2524
- from typing import Any
2525
- from langchain_core.runnables import RunnableLambda
2531
+ ```python
2532
+ from typing import Any
2533
+ from langchain_core.runnables import RunnableLambda
2526
2534
 
2527
2535
 
2528
- def f(x: dict[str, Any]) -> str:
2529
- return str(x["a"] * max(x["b"]))
2536
+ def f(x: dict[str, Any]) -> str:
2537
+ return str(x["a"] * max(x["b"]))
2530
2538
 
2531
2539
 
2532
- runnable = RunnableLambda(f)
2533
- as_tool = runnable.as_tool(arg_types={"a": int, "b": list[int]})
2534
- as_tool.invoke({"a": 3, "b": [1, 2]})
2535
- ```
2540
+ runnable = RunnableLambda(f)
2541
+ as_tool = runnable.as_tool(arg_types={"a": int, "b": list[int]})
2542
+ as_tool.invoke({"a": 3, "b": [1, 2]})
2543
+ ```
2536
2544
 
2537
- `str` input:
2545
+ !!! example "`str` input"
2538
2546
 
2539
- ```python
2540
- from langchain_core.runnables import RunnableLambda
2547
+ ```python
2548
+ from langchain_core.runnables import RunnableLambda
2541
2549
 
2542
2550
 
2543
- def f(x: str) -> str:
2544
- return x + "a"
2551
+ def f(x: str) -> str:
2552
+ return x + "a"
2545
2553
 
2546
2554
 
2547
- def g(x: str) -> str:
2548
- return x + "z"
2555
+ def g(x: str) -> str:
2556
+ return x + "z"
2549
2557
 
2550
2558
 
2551
- runnable = RunnableLambda(f) | g
2552
- as_tool = runnable.as_tool()
2553
- as_tool.invoke("b")
2554
- ```
2559
+ runnable = RunnableLambda(f) | g
2560
+ as_tool = runnable.as_tool()
2561
+ as_tool.invoke("b")
2562
+ ```
2555
2563
  """
2556
2564
  # Avoid circular import
2557
2565
  from langchain_core.tools import convert_runnable_to_tool # noqa: PLC0415
@@ -2603,29 +2611,33 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2603
2611
  Returns:
2604
2612
  A new `Runnable` with the fields configured.
2605
2613
 
2606
- ```python
2607
- from langchain_core.runnables import ConfigurableField
2608
- from langchain_openai import ChatOpenAI
2614
+ !!! example
2609
2615
 
2610
- model = ChatOpenAI(max_tokens=20).configurable_fields(
2611
- max_tokens=ConfigurableField(
2612
- id="output_token_number",
2613
- name="Max tokens in the output",
2614
- description="The maximum number of tokens in the output",
2616
+ ```python
2617
+ from langchain_core.runnables import ConfigurableField
2618
+ from langchain_openai import ChatOpenAI
2619
+
2620
+ model = ChatOpenAI(max_tokens=20).configurable_fields(
2621
+ max_tokens=ConfigurableField(
2622
+ id="output_token_number",
2623
+ name="Max tokens in the output",
2624
+ description="The maximum number of tokens in the output",
2625
+ )
2615
2626
  )
2616
- )
2617
2627
 
2618
- # max_tokens = 20
2619
- print("max_tokens_20: ", model.invoke("tell me something about chess").content)
2628
+ # max_tokens = 20
2629
+ print(
2630
+ "max_tokens_20: ", model.invoke("tell me something about chess").content
2631
+ )
2620
2632
 
2621
- # max_tokens = 200
2622
- print(
2623
- "max_tokens_200: ",
2624
- model.with_config(configurable={"output_token_number": 200})
2625
- .invoke("tell me something about chess")
2626
- .content,
2627
- )
2628
- ```
2633
+ # max_tokens = 200
2634
+ print(
2635
+ "max_tokens_200: ",
2636
+ model.with_config(configurable={"output_token_number": 200})
2637
+ .invoke("tell me something about chess")
2638
+ .content,
2639
+ )
2640
+ ```
2629
2641
  """
2630
2642
  # Import locally to prevent circular import
2631
2643
  from langchain_core.runnables.configurable import ( # noqa: PLC0415
@@ -2664,29 +2676,31 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
2664
2676
  Returns:
2665
2677
  A new `Runnable` with the alternatives configured.
2666
2678
 
2667
- ```python
2668
- from langchain_anthropic import ChatAnthropic
2669
- from langchain_core.runnables.utils import ConfigurableField
2670
- from langchain_openai import ChatOpenAI
2679
+ !!! example
2671
2680
 
2672
- model = ChatAnthropic(
2673
- model_name="claude-sonnet-4-5-20250929"
2674
- ).configurable_alternatives(
2675
- ConfigurableField(id="llm"),
2676
- default_key="anthropic",
2677
- openai=ChatOpenAI(),
2678
- )
2681
+ ```python
2682
+ from langchain_anthropic import ChatAnthropic
2683
+ from langchain_core.runnables.utils import ConfigurableField
2684
+ from langchain_openai import ChatOpenAI
2685
+
2686
+ model = ChatAnthropic(
2687
+ model_name="claude-sonnet-4-5-20250929"
2688
+ ).configurable_alternatives(
2689
+ ConfigurableField(id="llm"),
2690
+ default_key="anthropic",
2691
+ openai=ChatOpenAI(),
2692
+ )
2679
2693
 
2680
- # uses the default model ChatAnthropic
2681
- print(model.invoke("which organization created you?").content)
2694
+ # uses the default model ChatAnthropic
2695
+ print(model.invoke("which organization created you?").content)
2682
2696
 
2683
- # uses ChatOpenAI
2684
- print(
2685
- model.with_config(configurable={"llm": "openai"})
2686
- .invoke("which organization created you?")
2687
- .content
2688
- )
2689
- ```
2697
+ # uses ChatOpenAI
2698
+ print(
2699
+ model.with_config(configurable={"llm": "openai"})
2700
+ .invoke("which organization created you?")
2701
+ .content
2702
+ )
2703
+ ```
2690
2704
  """
2691
2705
  # Import locally to prevent circular import
2692
2706
  from langchain_core.runnables.configurable import ( # noqa: PLC0415
@@ -4011,7 +4025,7 @@ class RunnableParallel(RunnableSerializable[Input, dict[str, Any]]):
4011
4025
 
4012
4026
  # Wrap in a coroutine to satisfy linter
4013
4027
  async def get_next_chunk(generator: AsyncIterator) -> Output | None:
4014
- return await py_anext(generator)
4028
+ return await anext(generator)
4015
4029
 
4016
4030
  # Start the first iteration of each generator
4017
4031
  tasks = {
@@ -303,7 +303,7 @@ class RunnableBranch(RunnableSerializable[Input, Output]):
303
303
 
304
304
  Args:
305
305
  input: The input to the `Runnable`.
306
- config: The configuration for the Runna`ble.
306
+ config: The configuration for the `Runnable`.
307
307
  **kwargs: Additional keyword arguments to pass to the `Runnable`.
308
308
 
309
309
  Yields:
@@ -47,54 +47,59 @@ class EmptyDict(TypedDict, total=False):
47
47
 
48
48
 
49
49
  class RunnableConfig(TypedDict, total=False):
50
- """Configuration for a Runnable."""
50
+ """Configuration for a `Runnable`.
51
51
 
52
- tags: list[str]
52
+ See the [reference docs](https://reference.langchain.com/python/langchain_core/runnables/#langchain_core.runnables.RunnableConfig)
53
+ for more details.
53
54
  """
54
- Tags for this call and any sub-calls (eg. a Chain calling an LLM).
55
+
56
+ tags: list[str]
57
+ """Tags for this call and any sub-calls (e.g. a Chain calling an LLM).
58
+
55
59
  You can use these to filter calls.
56
60
  """
57
61
 
58
62
  metadata: dict[str, Any]
59
- """
60
- Metadata for this call and any sub-calls (eg. a Chain calling an LLM).
63
+ """Metadata for this call and any sub-calls (e.g. a Chain calling an LLM).
64
+
61
65
  Keys should be strings, values should be JSON-serializable.
62
66
  """
63
67
 
64
68
  callbacks: Callbacks
65
- """
66
- Callbacks for this call and any sub-calls (eg. a Chain calling an LLM).
69
+ """Callbacks for this call and any sub-calls (e.g. a Chain calling an LLM).
70
+
67
71
  Tags are passed to all callbacks, metadata is passed to handle*Start callbacks.
68
72
  """
69
73
 
70
74
  run_name: str
71
- """
72
- Name for the tracer run for this call. Defaults to the name of the class.
73
- """
75
+ """Name for the tracer run for this call.
76
+
77
+ Defaults to the name of the class."""
74
78
 
75
79
  max_concurrency: int | None
76
- """
77
- Maximum number of parallel calls to make. If not provided, defaults to
78
- `ThreadPoolExecutor`'s default.
80
+ """Maximum number of parallel calls to make.
81
+
82
+ If not provided, defaults to `ThreadPoolExecutor`'s default.
79
83
  """
80
84
 
81
85
  recursion_limit: int
82
- """
83
- Maximum number of times a call can recurse. If not provided, defaults to `25`.
86
+ """Maximum number of times a call can recurse.
87
+
88
+ If not provided, defaults to `25`.
84
89
  """
85
90
 
86
91
  configurable: dict[str, Any]
87
- """
88
- Runtime values for attributes previously made configurable on this `Runnable`,
92
+ """Runtime values for attributes previously made configurable on this `Runnable`,
89
93
  or sub-Runnables, through `configurable_fields` or `configurable_alternatives`.
94
+
90
95
  Check `output_schema` for a description of the attributes that have been made
91
96
  configurable.
92
97
  """
93
98
 
94
99
  run_id: uuid.UUID | None
95
- """
96
- Unique identifier for the tracer run for this call. If not provided, a new UUID
97
- will be generated.
100
+ """Unique identifier for the tracer run for this call.
101
+
102
+ If not provided, a new UUID will be generated.
98
103
  """
99
104
 
100
105
 
@@ -28,7 +28,6 @@ from langchain_core.runnables.utils import (
28
28
  coro_with_context,
29
29
  get_unique_config_specs,
30
30
  )
31
- from langchain_core.utils.aiter import py_anext
32
31
 
33
32
  if TYPE_CHECKING:
34
33
  from langchain_core.callbacks.manager import AsyncCallbackManagerForChainRun
@@ -563,7 +562,7 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]):
563
562
  child_config,
564
563
  **kwargs,
565
564
  )
566
- chunk = await coro_with_context(py_anext(stream), context)
565
+ chunk = await coro_with_context(anext(stream), context)
567
566
  except self.exceptions_to_handle as e:
568
567
  first_error = e if first_error is None else first_error
569
568
  last_error = e
@@ -33,7 +33,7 @@ from langchain_core.runnables.utils import (
33
33
  AddableDict,
34
34
  ConfigurableFieldSpec,
35
35
  )
36
- from langchain_core.utils.aiter import atee, py_anext
36
+ from langchain_core.utils.aiter import atee
37
37
  from langchain_core.utils.iter import safetee
38
38
  from langchain_core.utils.pydantic import create_model_v2
39
39
 
@@ -614,7 +614,7 @@ class RunnableAssign(RunnableSerializable[dict[str, Any], dict[str, Any]]):
614
614
  )
615
615
  # start map output stream
616
616
  first_map_chunk_task: asyncio.Task = asyncio.create_task(
617
- py_anext(map_output, None), # type: ignore[arg-type]
617
+ anext(map_output, None),
618
618
  )
619
619
  # consume passthrough stream
620
620
  async for chunk in for_passthrough:
@@ -496,6 +496,24 @@ class ChildTool(BaseTool):
496
496
  two-tuple corresponding to the `(content, artifact)` of a `ToolMessage`.
497
497
  """
498
498
 
499
+ extras: dict[str, Any] | None = None
500
+ """Optional provider-specific extra fields for the tool.
501
+
502
+ This is used to pass provider-specific configuration that doesn't fit into
503
+ standard tool fields.
504
+
505
+ Example:
506
+ Anthropic-specific fields like [`cache_control`](https://docs.langchain.com/oss/python/integrations/chat/anthropic#prompt-caching),
507
+ [`defer_loading`](https://docs.langchain.com/oss/python/integrations/chat/anthropic#tool-search),
508
+ or `input_examples`.
509
+
510
+ ```python
511
+ @tool(extras={"defer_loading": True, "cache_control": {"type": "ephemeral"}})
512
+ def my_tool(x: str) -> str:
513
+ return x
514
+ ```
515
+ """
516
+
499
517
  def __init__(self, **kwargs: Any) -> None:
500
518
  """Initialize the tool.
501
519
 
@@ -696,7 +714,9 @@ class ChildTool(BaseTool):
696
714
  k: getattr(result, k) for k in result_dict if k in tool_input
697
715
  }
698
716
  for k in self._injected_args_keys:
699
- if k == "tool_call_id":
717
+ if k in tool_input:
718
+ validated_input[k] = tool_input[k]
719
+ elif k == "tool_call_id":
700
720
  if tool_call_id is None:
701
721
  msg = (
702
722
  "When tool includes an InjectedToolCallId "
@@ -707,9 +727,6 @@ class ChildTool(BaseTool):
707
727
  )
708
728
  raise ValueError(msg)
709
729
  validated_input[k] = tool_call_id
710
- if k in tool_input:
711
- injected_val = tool_input[k]
712
- validated_input[k] = injected_val
713
730
  return validated_input
714
731
  return tool_input
715
732
 
@@ -878,6 +895,7 @@ class ChildTool(BaseTool):
878
895
  name=run_name,
879
896
  run_id=run_id,
880
897
  inputs=filtered_tool_input,
898
+ tool_call_id=tool_call_id,
881
899
  **kwargs,
882
900
  )
883
901
 
@@ -1005,6 +1023,7 @@ class ChildTool(BaseTool):
1005
1023
  name=run_name,
1006
1024
  run_id=run_id,
1007
1025
  inputs=filtered_tool_input,
1026
+ tool_call_id=tool_call_id,
1008
1027
  **kwargs,
1009
1028
  )
1010
1029
  content = None