langchain-core 1.0.0rc3__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic; see the registry's advisory page for more details.

Files changed (37):
  1. langchain_core/caches.py +3 -1
  2. langchain_core/callbacks/base.py +0 -4
  3. langchain_core/callbacks/manager.py +0 -11
  4. langchain_core/document_loaders/langsmith.py +0 -3
  5. langchain_core/documents/base.py +0 -2
  6. langchain_core/documents/transformers.py +4 -4
  7. langchain_core/indexing/base.py +4 -6
  8. langchain_core/indexing/in_memory.py +0 -2
  9. langchain_core/language_models/base.py +5 -6
  10. langchain_core/language_models/chat_models.py +84 -78
  11. langchain_core/language_models/llms.py +16 -12
  12. langchain_core/load/dump.py +5 -7
  13. langchain_core/messages/block_translators/google_genai.py +1 -1
  14. langchain_core/messages/utils.py +4 -4
  15. langchain_core/prompts/string.py +5 -2
  16. langchain_core/rate_limiters.py +1 -3
  17. langchain_core/runnables/base.py +11 -15
  18. langchain_core/runnables/branch.py +9 -9
  19. langchain_core/runnables/config.py +2 -4
  20. langchain_core/runnables/fallbacks.py +1 -1
  21. langchain_core/runnables/retry.py +1 -1
  22. langchain_core/runnables/schema.py +2 -5
  23. langchain_core/runnables/utils.py +3 -3
  24. langchain_core/stores.py +4 -6
  25. langchain_core/tools/base.py +2 -2
  26. langchain_core/tools/convert.py +0 -2
  27. langchain_core/tracers/event_stream.py +4 -1
  28. langchain_core/tracers/log_stream.py +4 -1
  29. langchain_core/utils/function_calling.py +8 -0
  30. langchain_core/utils/json_schema.py +1 -1
  31. langchain_core/vectorstores/base.py +59 -63
  32. langchain_core/vectorstores/in_memory.py +2 -2
  33. langchain_core/vectorstores/utils.py +1 -1
  34. langchain_core/version.py +1 -1
  35. {langchain_core-1.0.0rc3.dist-info → langchain_core-1.0.1.dist-info}/METADATA +8 -7
  36. {langchain_core-1.0.0rc3.dist-info → langchain_core-1.0.1.dist-info}/RECORD +37 -37
  37. {langchain_core-1.0.0rc3.dist-info → langchain_core-1.0.1.dist-info}/WHEEL +0 -0
langchain_core/caches.py CHANGED
@@ -1,4 +1,6 @@
1
- """`caches` provides an optional caching layer for language models.
1
+ """Optional caching layer for language models.
2
+
3
+ Distinct from provider-based [prompt caching](https://docs.langchain.com/oss/python/langchain/models#prompt-caching).
2
4
 
3
5
  !!! warning
4
6
  This is a beta feature! Please be wary of deploying experimental code to production
@@ -420,8 +420,6 @@ class RunManagerMixin:
420
420
  (includes inherited tags).
421
421
  metadata: The metadata associated with the custom event
422
422
  (includes inherited metadata).
423
-
424
- !!! version-added "Added in version 0.2.15"
425
423
  """
426
424
 
427
425
 
@@ -882,8 +880,6 @@ class AsyncCallbackHandler(BaseCallbackHandler):
882
880
  (includes inherited tags).
883
881
  metadata: The metadata associated with the custom event
884
882
  (includes inherited metadata).
885
-
886
- !!! version-added "Added in version 0.2.15"
887
883
  """
888
884
 
889
885
 
@@ -1566,9 +1566,6 @@ class CallbackManager(BaseCallbackManager):
1566
1566
 
1567
1567
  Raises:
1568
1568
  ValueError: If additional keyword arguments are passed.
1569
-
1570
- !!! version-added "Added in version 0.2.14"
1571
-
1572
1569
  """
1573
1570
  if not self.handlers:
1574
1571
  return
@@ -2042,8 +2039,6 @@ class AsyncCallbackManager(BaseCallbackManager):
2042
2039
 
2043
2040
  Raises:
2044
2041
  ValueError: If additional keyword arguments are passed.
2045
-
2046
- !!! version-added "Added in version 0.2.14"
2047
2042
  """
2048
2043
  if not self.handlers:
2049
2044
  return
@@ -2555,9 +2550,6 @@ async def adispatch_custom_event(
2555
2550
  This is due to a limitation in asyncio for python <= 3.10 that prevents
2556
2551
  LangChain from automatically propagating the config object on the user's
2557
2552
  behalf.
2558
-
2559
- !!! version-added "Added in version 0.2.15"
2560
-
2561
2553
  """
2562
2554
  # Import locally to prevent circular imports.
2563
2555
  from langchain_core.runnables.config import ( # noqa: PLC0415
@@ -2630,9 +2622,6 @@ def dispatch_custom_event(
2630
2622
  foo_ = RunnableLambda(foo)
2631
2623
  foo_.invoke({"a": "1"}, {"callbacks": [CustomCallbackManager()]})
2632
2624
  ```
2633
-
2634
- !!! version-added "Added in version 0.2.15"
2635
-
2636
2625
  """
2637
2626
  # Import locally to prevent circular imports.
2638
2627
  from langchain_core.runnables.config import ( # noqa: PLC0415
@@ -34,9 +34,6 @@ class LangSmithLoader(BaseLoader):
34
34
  ```python
35
35
  # -> [Document("...", metadata={"inputs": {...}, "outputs": {...}, ...}), ...]
36
36
  ```
37
-
38
- !!! version-added "Added in version 0.2.34"
39
-
40
37
  """
41
38
 
42
39
  def __init__(
@@ -38,8 +38,6 @@ class BaseMedia(Serializable):
38
38
 
39
39
  Ideally this should be unique across the document collection and formatted
40
40
  as a UUID, but this will not be enforced.
41
-
42
- !!! version-added "Added in version 0.2.11"
43
41
  """
44
42
 
45
43
  metadata: dict = Field(default_factory=dict)
@@ -57,10 +57,10 @@ class BaseDocumentTransformer(ABC):
57
57
  """Transform a list of documents.
58
58
 
59
59
  Args:
60
- documents: A sequence of Documents to be transformed.
60
+ documents: A sequence of `Document` objects to be transformed.
61
61
 
62
62
  Returns:
63
- A sequence of transformed Documents.
63
+ A sequence of transformed `Document` objects.
64
64
  """
65
65
 
66
66
  async def atransform_documents(
@@ -69,10 +69,10 @@ class BaseDocumentTransformer(ABC):
69
69
  """Asynchronously transform a list of documents.
70
70
 
71
71
  Args:
72
- documents: A sequence of Documents to be transformed.
72
+ documents: A sequence of `Document` objects to be transformed.
73
73
 
74
74
  Returns:
75
- A sequence of transformed Documents.
75
+ A sequence of transformed `Document` objects.
76
76
  """
77
77
  return await run_in_executor(
78
78
  None, self.transform_documents, documents, **kwargs
@@ -508,8 +508,6 @@ class DocumentIndex(BaseRetriever):
508
508
  1. Storing document in the index.
509
509
  2. Fetching document by ID.
510
510
  3. Searching for document using a query.
511
-
512
- !!! version-added "Added in version 0.2.29"
513
511
  """
514
512
 
515
513
  @abc.abstractmethod
@@ -522,10 +520,10 @@ class DocumentIndex(BaseRetriever):
522
520
 
523
521
  When an ID is specified and the content already exists in the vectorstore,
524
522
  the upsert method should update the content with the new data. If the content
525
- does not exist, the upsert method should add the item to the vectorstore.
523
+ does not exist, the upsert method should add the item to the `VectorStore`.
526
524
 
527
525
  Args:
528
- items: Sequence of documents to add to the vectorstore.
526
+ items: Sequence of documents to add to the `VectorStore`.
529
527
  **kwargs: Additional keyword arguments.
530
528
 
531
529
  Returns:
@@ -545,10 +543,10 @@ class DocumentIndex(BaseRetriever):
545
543
 
546
544
  When an ID is specified and the item already exists in the vectorstore,
547
545
  the upsert method should update the item with the new data. If the item
548
- does not exist, the upsert method should add the item to the vectorstore.
546
+ does not exist, the upsert method should add the item to the `VectorStore`.
549
547
 
550
548
  Args:
551
- items: Sequence of documents to add to the vectorstore.
549
+ items: Sequence of documents to add to the `VectorStore`.
552
550
  **kwargs: Additional keyword arguments.
553
551
 
554
552
  Returns:
@@ -23,8 +23,6 @@ class InMemoryDocumentIndex(DocumentIndex):
23
23
 
24
24
  It provides a simple search API that returns documents by the number of
25
25
  counts the given query appears in the document.
26
-
27
- !!! version-added "Added in version 0.2.29"
28
26
  """
29
27
 
30
28
  store: dict[str, Document] = Field(default_factory=dict)
@@ -200,14 +200,14 @@ class BaseLanguageModel(
200
200
  pure text generation models and `BaseMessage` objects for chat models).
201
201
  stop: Stop words to use when generating. Model output is cut off at the
202
202
  first occurrence of any of these substrings.
203
- callbacks: Callbacks to pass through. Used for executing additional
203
+ callbacks: `Callbacks` to pass through. Used for executing additional
204
204
  functionality, such as logging or streaming, throughout generation.
205
205
  **kwargs: Arbitrary additional keyword arguments. These are usually passed
206
206
  to the model provider API call.
207
207
 
208
208
  Returns:
209
209
  An `LLMResult`, which contains a list of candidate `Generation` objects for
210
- each input prompt and additional model provider-specific output.
210
+ each input prompt and additional model provider-specific output.
211
211
 
212
212
  """
213
213
 
@@ -237,14 +237,14 @@ class BaseLanguageModel(
237
237
  pure text generation models and `BaseMessage` objects for chat models).
238
238
  stop: Stop words to use when generating. Model output is cut off at the
239
239
  first occurrence of any of these substrings.
240
- callbacks: Callbacks to pass through. Used for executing additional
240
+ callbacks: `Callbacks` to pass through. Used for executing additional
241
241
  functionality, such as logging or streaming, throughout generation.
242
242
  **kwargs: Arbitrary additional keyword arguments. These are usually passed
243
243
  to the model provider API call.
244
244
 
245
245
  Returns:
246
246
  An `LLMResult`, which contains a list of candidate `Generation` objects for
247
- each input prompt and additional model provider-specific output.
247
+ each input prompt and additional model provider-specific output.
248
248
 
249
249
  """
250
250
 
@@ -269,8 +269,7 @@ class BaseLanguageModel(
269
269
 
270
270
  Returns:
271
271
  A list of ids corresponding to the tokens in the text, in order they occur
272
- in the text.
273
-
272
+ in the text.
274
273
  """
275
274
  if self.custom_get_token_ids is not None:
276
275
  return self.custom_get_token_ids(text)
@@ -838,13 +838,13 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
838
838
  1. Take advantage of batched calls,
839
839
  2. Need more output from the model than just the top generated value,
840
840
  3. Are building chains that are agnostic to the underlying language model
841
- type (e.g., pure text completion models vs chat models).
841
+ type (e.g., pure text completion models vs chat models).
842
842
 
843
843
  Args:
844
844
  messages: List of list of messages.
845
845
  stop: Stop words to use when generating. Model output is cut off at the
846
846
  first occurrence of any of these substrings.
847
- callbacks: Callbacks to pass through. Used for executing additional
847
+ callbacks: `Callbacks` to pass through. Used for executing additional
848
848
  functionality, such as logging or streaming, throughout generation.
849
849
  tags: The tags to apply.
850
850
  metadata: The metadata to apply.
@@ -854,8 +854,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
854
854
  to the model provider API call.
855
855
 
856
856
  Returns:
857
- An LLMResult, which contains a list of candidate Generations for each input
858
- prompt and additional model provider-specific output.
857
+ An `LLMResult`, which contains a list of candidate `Generations` for each
858
+ input prompt and additional model provider-specific output.
859
859
 
860
860
  """
861
861
  ls_structured_output_format = kwargs.pop(
@@ -956,13 +956,13 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
956
956
  1. Take advantage of batched calls,
957
957
  2. Need more output from the model than just the top generated value,
958
958
  3. Are building chains that are agnostic to the underlying language model
959
- type (e.g., pure text completion models vs chat models).
959
+ type (e.g., pure text completion models vs chat models).
960
960
 
961
961
  Args:
962
962
  messages: List of list of messages.
963
963
  stop: Stop words to use when generating. Model output is cut off at the
964
964
  first occurrence of any of these substrings.
965
- callbacks: Callbacks to pass through. Used for executing additional
965
+ callbacks: `Callbacks` to pass through. Used for executing additional
966
966
  functionality, such as logging or streaming, throughout generation.
967
967
  tags: The tags to apply.
968
968
  metadata: The metadata to apply.
@@ -972,8 +972,8 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
972
972
  to the model provider API call.
973
973
 
974
974
  Returns:
975
- An LLMResult, which contains a list of candidate Generations for each input
976
- prompt and additional model provider-specific output.
975
+ An `LLMResult`, which contains a list of candidate `Generations` for each
976
+ input prompt and additional model provider-specific output.
977
977
 
978
978
  """
979
979
  ls_structured_output_format = kwargs.pop(
@@ -1510,17 +1510,21 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
1510
1510
  If `schema` is a Pydantic class then the model output will be a
1511
1511
  Pydantic instance of that class, and the model-generated fields will be
1512
1512
  validated by the Pydantic class. Otherwise the model output will be a
1513
- dict and will not be validated. See `langchain_core.utils.function_calling.convert_to_openai_tool`
1514
- for more on how to properly specify types and descriptions of
1515
- schema fields when specifying a Pydantic or `TypedDict` class.
1513
+ dict and will not be validated.
1514
+
1515
+ See `langchain_core.utils.function_calling.convert_to_openai_tool` for
1516
+ more on how to properly specify types and descriptions of schema fields
1517
+ when specifying a Pydantic or `TypedDict` class.
1516
1518
 
1517
1519
  include_raw:
1518
1520
  If `False` then only the parsed structured output is returned. If
1519
1521
  an error occurs during model output parsing it will be raised. If `True`
1520
1522
  then both the raw model response (a `BaseMessage`) and the parsed model
1521
1523
  response will be returned. If an error occurs during output parsing it
1522
- will be caught and returned as well. The final output is always a dict
1523
- with keys `'raw'`, `'parsed'`, and `'parsing_error'`.
1524
+ will be caught and returned as well.
1525
+
1526
+ The final output is always a `dict` with keys `'raw'`, `'parsed'`, and
1527
+ `'parsing_error'`.
1524
1528
 
1525
1529
  Raises:
1526
1530
  ValueError: If there are any unsupported `kwargs`.
@@ -1528,97 +1532,99 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
1528
1532
  `with_structured_output()`.
1529
1533
 
1530
1534
  Returns:
1531
- A Runnable that takes same inputs as a `langchain_core.language_models.chat.BaseChatModel`.
1535
+ A `Runnable` that takes same inputs as a
1536
+ `langchain_core.language_models.chat.BaseChatModel`. If `include_raw` is
1537
+ `False` and `schema` is a Pydantic class, `Runnable` outputs an instance
1538
+ of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is
1539
+ `False` then `Runnable` outputs a `dict`.
1532
1540
 
1533
- If `include_raw` is False and `schema` is a Pydantic class, Runnable outputs
1534
- an instance of `schema` (i.e., a Pydantic object).
1541
+ If `include_raw` is `True`, then `Runnable` outputs a `dict` with keys:
1535
1542
 
1536
- Otherwise, if `include_raw` is False then Runnable outputs a dict.
1543
+ - `'raw'`: `BaseMessage`
1544
+ - `'parsed'`: `None` if there was a parsing error, otherwise the type
1545
+ depends on the `schema` as described above.
1546
+ - `'parsing_error'`: `BaseException | None`
1537
1547
 
1538
- If `include_raw` is True, then Runnable outputs a dict with keys:
1548
+ Example: Pydantic schema (`include_raw=False`):
1539
1549
 
1540
- - `'raw'`: BaseMessage
1541
- - `'parsed'`: None if there was a parsing error, otherwise the type
1542
- depends on the `schema` as described above.
1543
- - `'parsing_error'`: BaseException | None
1550
+ ```python
1551
+ from pydantic import BaseModel
1544
1552
 
1545
- Example: Pydantic schema (include_raw=False):
1546
- ```python
1547
- from pydantic import BaseModel
1548
1553
 
1554
+ class AnswerWithJustification(BaseModel):
1555
+ '''An answer to the user question along with justification for the answer.'''
1549
1556
 
1550
- class AnswerWithJustification(BaseModel):
1551
- '''An answer to the user question along with justification for the answer.'''
1557
+ answer: str
1558
+ justification: str
1552
1559
 
1553
- answer: str
1554
- justification: str
1555
1560
 
1561
+ model = ChatModel(model="model-name", temperature=0)
1562
+ structured_model = model.with_structured_output(AnswerWithJustification)
1556
1563
 
1557
- model = ChatModel(model="model-name", temperature=0)
1558
- structured_model = model.with_structured_output(AnswerWithJustification)
1564
+ structured_model.invoke(
1565
+ "What weighs more a pound of bricks or a pound of feathers"
1566
+ )
1559
1567
 
1560
- structured_model.invoke(
1561
- "What weighs more a pound of bricks or a pound of feathers"
1562
- )
1568
+ # -> AnswerWithJustification(
1569
+ # answer='They weigh the same',
1570
+ # justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
1571
+ # )
1572
+ ```
1563
1573
 
1564
- # -> AnswerWithJustification(
1565
- # answer='They weigh the same',
1566
- # justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
1567
- # )
1568
- ```
1574
+ Example: Pydantic schema (`include_raw=True`):
1569
1575
 
1570
- Example: Pydantic schema (include_raw=True):
1571
- ```python
1572
- from pydantic import BaseModel
1576
+ ```python
1577
+ from pydantic import BaseModel
1573
1578
 
1574
1579
 
1575
- class AnswerWithJustification(BaseModel):
1576
- '''An answer to the user question along with justification for the answer.'''
1580
+ class AnswerWithJustification(BaseModel):
1581
+ '''An answer to the user question along with justification for the answer.'''
1577
1582
 
1578
- answer: str
1579
- justification: str
1583
+ answer: str
1584
+ justification: str
1580
1585
 
1581
1586
 
1582
- model = ChatModel(model="model-name", temperature=0)
1583
- structured_model = model.with_structured_output(
1584
- AnswerWithJustification, include_raw=True
1585
- )
1587
+ model = ChatModel(model="model-name", temperature=0)
1588
+ structured_model = model.with_structured_output(
1589
+ AnswerWithJustification, include_raw=True
1590
+ )
1586
1591
 
1587
- structured_model.invoke(
1588
- "What weighs more a pound of bricks or a pound of feathers"
1589
- )
1590
- # -> {
1591
- # 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
1592
- # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
1593
- # 'parsing_error': None
1594
- # }
1595
- ```
1592
+ structured_model.invoke(
1593
+ "What weighs more a pound of bricks or a pound of feathers"
1594
+ )
1595
+ # -> {
1596
+ # 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
1597
+ # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
1598
+ # 'parsing_error': None
1599
+ # }
1600
+ ```
1596
1601
 
1597
- Example: Dict schema (include_raw=False):
1598
- ```python
1599
- from pydantic import BaseModel
1600
- from langchain_core.utils.function_calling import convert_to_openai_tool
1602
+ Example: `dict` schema (`include_raw=False`):
1601
1603
 
1604
+ ```python
1605
+ from pydantic import BaseModel
1606
+ from langchain_core.utils.function_calling import convert_to_openai_tool
1602
1607
 
1603
- class AnswerWithJustification(BaseModel):
1604
- '''An answer to the user question along with justification for the answer.'''
1605
1608
 
1606
- answer: str
1607
- justification: str
1609
+ class AnswerWithJustification(BaseModel):
1610
+ '''An answer to the user question along with justification for the answer.'''
1608
1611
 
1612
+ answer: str
1613
+ justification: str
1609
1614
 
1610
- dict_schema = convert_to_openai_tool(AnswerWithJustification)
1611
- model = ChatModel(model="model-name", temperature=0)
1612
- structured_model = model.with_structured_output(dict_schema)
1613
1615
 
1614
- structured_model.invoke(
1615
- "What weighs more a pound of bricks or a pound of feathers"
1616
- )
1617
- # -> {
1618
- # 'answer': 'They weigh the same',
1619
- # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
1620
- # }
1621
- ```
1616
+ dict_schema = convert_to_openai_tool(AnswerWithJustification)
1617
+ model = ChatModel(model="model-name", temperature=0)
1618
+ structured_model = model.with_structured_output(dict_schema)
1619
+
1620
+ structured_model.invoke(
1621
+ "What weighs more a pound of bricks or a pound of feathers"
1622
+ )
1623
+ # -> {
1624
+ # 'answer': 'They weigh the same',
1625
+ # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
1626
+ # }
1627
+ ```
1622
1628
 
1623
1629
  !!! warning "Behavior changed in 0.2.26"
1624
1630
  Added support for TypedDict class.
@@ -91,13 +91,17 @@ def create_base_retry_decorator(
91
91
  if isinstance(run_manager, AsyncCallbackManagerForLLMRun):
92
92
  coro = run_manager.on_retry(retry_state)
93
93
  try:
94
- loop = asyncio.get_event_loop()
95
- if loop.is_running():
96
- # TODO: Fix RUF006 - this task should have a reference
97
- # and be awaited somewhere
98
- loop.create_task(coro) # noqa: RUF006
99
- else:
94
+ try:
95
+ loop = asyncio.get_event_loop()
96
+ except RuntimeError:
100
97
  asyncio.run(coro)
98
+ else:
99
+ if loop.is_running():
100
+ # TODO: Fix RUF006 - this task should have a reference
101
+ # and be awaited somewhere
102
+ loop.create_task(coro) # noqa: RUF006
103
+ else:
104
+ asyncio.run(coro)
101
105
  except Exception as e:
102
106
  _log_error_once(f"Error in on_retry: {e}")
103
107
  else:
@@ -841,7 +845,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
841
845
  prompts: List of string prompts.
842
846
  stop: Stop words to use when generating. Model output is cut off at the
843
847
  first occurrence of any of these substrings.
844
- callbacks: Callbacks to pass through. Used for executing additional
848
+ callbacks: `Callbacks` to pass through. Used for executing additional
845
849
  functionality, such as logging or streaming, throughout generation.
846
850
  tags: List of tags to associate with each prompt. If provided, the length
847
851
  of the list must match the length of the prompts list.
@@ -861,8 +865,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
861
865
  `run_name` (if provided) does not match the length of prompts.
862
866
 
863
867
  Returns:
864
- An LLMResult, which contains a list of candidate Generations for each input
865
- prompt and additional model provider-specific output.
868
+ An `LLMResult`, which contains a list of candidate `Generations` for each
869
+ input prompt and additional model provider-specific output.
866
870
  """
867
871
  if not isinstance(prompts, list):
868
872
  msg = (
@@ -1111,7 +1115,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
1111
1115
  prompts: List of string prompts.
1112
1116
  stop: Stop words to use when generating. Model output is cut off at the
1113
1117
  first occurrence of any of these substrings.
1114
- callbacks: Callbacks to pass through. Used for executing additional
1118
+ callbacks: `Callbacks` to pass through. Used for executing additional
1115
1119
  functionality, such as logging or streaming, throughout generation.
1116
1120
  tags: List of tags to associate with each prompt. If provided, the length
1117
1121
  of the list must match the length of the prompts list.
@@ -1130,8 +1134,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
1130
1134
  `run_name` (if provided) does not match the length of prompts.
1131
1135
 
1132
1136
  Returns:
1133
- An LLMResult, which contains a list of candidate Generations for each input
1134
- prompt and additional model provider-specific output.
1137
+ An `LLMResult`, which contains a list of candidate `Generations` for each
1138
+ input prompt and additional model provider-specific output.
1135
1139
  """
1136
1140
  if isinstance(metadata, list):
1137
1141
  metadata = [
@@ -38,7 +38,7 @@ def _dump_pydantic_models(obj: Any) -> Any:
38
38
 
39
39
 
40
40
  def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
41
- """Return a json string representation of an object.
41
+ """Return a JSON string representation of an object.
42
42
 
43
43
  Args:
44
44
  obj: The object to dump.
@@ -47,7 +47,7 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
47
47
  **kwargs: Additional arguments to pass to `json.dumps`
48
48
 
49
49
  Returns:
50
- A json string representation of the object.
50
+ A JSON string representation of the object.
51
51
 
52
52
  Raises:
53
53
  ValueError: If `default` is passed as a kwarg.
@@ -71,14 +71,12 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
71
71
  def dumpd(obj: Any) -> Any:
72
72
  """Return a dict representation of an object.
73
73
 
74
- !!! note
75
- Unfortunately this function is not as efficient as it could be because it first
76
- dumps the object to a json string and then loads it back into a dictionary.
77
-
78
74
  Args:
79
75
  obj: The object to dump.
80
76
 
81
77
  Returns:
82
- dictionary that can be serialized to json using json.dumps
78
+ Dictionary that can be serialized to json using `json.dumps`.
83
79
  """
80
+ # Unfortunately this function is not as efficient as it could be because it first
81
+ # dumps the object to a json string and then loads it back into a dictionary.
84
82
  return json.loads(dumps(obj))
@@ -282,7 +282,7 @@ def _convert_to_v1_from_genai(message: AIMessage) -> list[types.ContentBlock]:
282
282
  standard content blocks for returning.
283
283
 
284
284
  Args:
285
- message: The AIMessage or AIMessageChunk to convert.
285
+ message: The `AIMessage` or `AIMessageChunk` to convert.
286
286
 
287
287
  Returns:
288
288
  List of standard content blocks derived from the message content.
@@ -439,8 +439,8 @@ def filter_messages(
439
439
  exclude_ids: Message IDs to exclude.
440
440
  exclude_tool_calls: Tool call IDs to exclude.
441
441
  Can be one of the following:
442
- - `True`: all `AIMessage`s with tool calls and all
443
- `ToolMessage` objects will be excluded.
442
+ - `True`: All `AIMessage` objects with tool calls and all `ToolMessage`
443
+ objects will be excluded.
444
444
  - a sequence of tool call IDs to exclude:
445
445
  - `ToolMessage` objects with the corresponding tool call ID will be
446
446
  excluded.
@@ -1678,11 +1678,11 @@ def count_tokens_approximately(
1678
1678
  messages: List of messages to count tokens for.
1679
1679
  chars_per_token: Number of characters per token to use for the approximation.
1680
1680
  One token corresponds to ~4 chars for common English text.
1681
- You can also specify float values for more fine-grained control.
1681
+ You can also specify `float` values for more fine-grained control.
1682
1682
  [See more here](https://platform.openai.com/tokenizer).
1683
1683
  extra_tokens_per_message: Number of extra tokens to add per message, e.g.
1684
1684
  special tokens, including beginning/end of message.
1685
- You can also specify float values for more fine-grained control.
1685
+ You can also specify `float` values for more fine-grained control.
1686
1686
  [See more here](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb).
1687
1687
  count_name: Whether to include message names in the count.
1688
1688
  Enabled by default.
@@ -122,13 +122,16 @@ def mustache_formatter(template: str, /, **kwargs: Any) -> str:
122
122
  def mustache_template_vars(
123
123
  template: str,
124
124
  ) -> set[str]:
125
- """Get the variables from a mustache template.
125
+ """Get the top-level variables from a mustache template.
126
+
127
+ For nested variables like `{{person.name}}`, only the top-level
128
+ key (`person`) is returned.
126
129
 
127
130
  Args:
128
131
  template: The template string.
129
132
 
130
133
  Returns:
131
- The variables from the template.
134
+ The top-level variables from the template.
132
135
  """
133
136
  variables: set[str] = set()
134
137
  section_depth = 0
@@ -105,9 +105,7 @@ class InMemoryRateLimiter(BaseRateLimiter):
105
105
 
106
106
  from langchain_anthropic import ChatAnthropic
107
107
 
108
- model = ChatAnthropic(
109
- model_name="claude-sonnet-4-5-20250929", rate_limiter=rate_limiter
110
- )
108
+ model = ChatAnthropic(model_name="claude-sonnet-4-5", rate_limiter=rate_limiter)
111
109
 
112
110
  for _ in range(5):
113
111
  tic = time.time()