langchain-core 1.0.0a8__py3-none-any.whl → 1.0.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic; consult the package registry's advisory page for more details.

Files changed (135)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +0 -1
  3. langchain_core/_api/beta_decorator.py +17 -20
  4. langchain_core/_api/deprecation.py +30 -35
  5. langchain_core/_import_utils.py +1 -1
  6. langchain_core/agents.py +7 -6
  7. langchain_core/caches.py +4 -10
  8. langchain_core/callbacks/__init__.py +1 -8
  9. langchain_core/callbacks/base.py +232 -243
  10. langchain_core/callbacks/file.py +33 -33
  11. langchain_core/callbacks/manager.py +353 -416
  12. langchain_core/callbacks/stdout.py +21 -22
  13. langchain_core/callbacks/streaming_stdout.py +32 -32
  14. langchain_core/callbacks/usage.py +54 -51
  15. langchain_core/chat_history.py +42 -57
  16. langchain_core/document_loaders/langsmith.py +21 -21
  17. langchain_core/documents/__init__.py +0 -1
  18. langchain_core/documents/base.py +37 -40
  19. langchain_core/documents/transformers.py +28 -29
  20. langchain_core/embeddings/fake.py +46 -52
  21. langchain_core/exceptions.py +5 -5
  22. langchain_core/indexing/api.py +11 -11
  23. langchain_core/indexing/base.py +24 -24
  24. langchain_core/language_models/__init__.py +0 -2
  25. langchain_core/language_models/_utils.py +51 -53
  26. langchain_core/language_models/base.py +23 -24
  27. langchain_core/language_models/chat_models.py +121 -144
  28. langchain_core/language_models/fake_chat_models.py +5 -5
  29. langchain_core/language_models/llms.py +10 -12
  30. langchain_core/load/dump.py +1 -1
  31. langchain_core/load/load.py +16 -16
  32. langchain_core/load/serializable.py +35 -34
  33. langchain_core/messages/__init__.py +1 -16
  34. langchain_core/messages/ai.py +105 -104
  35. langchain_core/messages/base.py +26 -26
  36. langchain_core/messages/block_translators/__init__.py +17 -17
  37. langchain_core/messages/block_translators/anthropic.py +2 -2
  38. langchain_core/messages/block_translators/bedrock_converse.py +2 -2
  39. langchain_core/messages/block_translators/google_genai.py +2 -2
  40. langchain_core/messages/block_translators/groq.py +117 -21
  41. langchain_core/messages/block_translators/langchain_v0.py +2 -2
  42. langchain_core/messages/block_translators/openai.py +4 -4
  43. langchain_core/messages/chat.py +1 -1
  44. langchain_core/messages/content.py +189 -193
  45. langchain_core/messages/function.py +5 -5
  46. langchain_core/messages/human.py +15 -17
  47. langchain_core/messages/modifier.py +1 -1
  48. langchain_core/messages/system.py +12 -14
  49. langchain_core/messages/tool.py +45 -49
  50. langchain_core/messages/utils.py +384 -396
  51. langchain_core/output_parsers/__init__.py +1 -14
  52. langchain_core/output_parsers/base.py +22 -23
  53. langchain_core/output_parsers/json.py +3 -3
  54. langchain_core/output_parsers/list.py +1 -1
  55. langchain_core/output_parsers/openai_functions.py +46 -44
  56. langchain_core/output_parsers/openai_tools.py +7 -7
  57. langchain_core/output_parsers/pydantic.py +10 -11
  58. langchain_core/output_parsers/string.py +1 -1
  59. langchain_core/output_parsers/transform.py +2 -2
  60. langchain_core/output_parsers/xml.py +1 -1
  61. langchain_core/outputs/__init__.py +1 -1
  62. langchain_core/outputs/chat_generation.py +14 -14
  63. langchain_core/outputs/generation.py +5 -5
  64. langchain_core/outputs/llm_result.py +5 -5
  65. langchain_core/prompt_values.py +5 -5
  66. langchain_core/prompts/__init__.py +3 -23
  67. langchain_core/prompts/base.py +32 -37
  68. langchain_core/prompts/chat.py +216 -222
  69. langchain_core/prompts/dict.py +2 -2
  70. langchain_core/prompts/few_shot.py +76 -83
  71. langchain_core/prompts/few_shot_with_templates.py +6 -8
  72. langchain_core/prompts/image.py +11 -13
  73. langchain_core/prompts/loading.py +1 -1
  74. langchain_core/prompts/message.py +2 -2
  75. langchain_core/prompts/prompt.py +14 -16
  76. langchain_core/prompts/string.py +19 -7
  77. langchain_core/prompts/structured.py +24 -25
  78. langchain_core/rate_limiters.py +36 -38
  79. langchain_core/retrievers.py +41 -182
  80. langchain_core/runnables/base.py +565 -590
  81. langchain_core/runnables/branch.py +7 -7
  82. langchain_core/runnables/config.py +37 -44
  83. langchain_core/runnables/configurable.py +8 -9
  84. langchain_core/runnables/fallbacks.py +8 -8
  85. langchain_core/runnables/graph.py +28 -27
  86. langchain_core/runnables/graph_ascii.py +19 -18
  87. langchain_core/runnables/graph_mermaid.py +20 -31
  88. langchain_core/runnables/graph_png.py +7 -7
  89. langchain_core/runnables/history.py +20 -20
  90. langchain_core/runnables/passthrough.py +8 -8
  91. langchain_core/runnables/retry.py +3 -3
  92. langchain_core/runnables/router.py +1 -1
  93. langchain_core/runnables/schema.py +33 -33
  94. langchain_core/runnables/utils.py +30 -34
  95. langchain_core/stores.py +72 -102
  96. langchain_core/sys_info.py +27 -29
  97. langchain_core/tools/__init__.py +1 -14
  98. langchain_core/tools/base.py +63 -63
  99. langchain_core/tools/convert.py +92 -92
  100. langchain_core/tools/render.py +9 -9
  101. langchain_core/tools/retriever.py +1 -1
  102. langchain_core/tools/simple.py +6 -7
  103. langchain_core/tools/structured.py +17 -18
  104. langchain_core/tracers/__init__.py +1 -9
  105. langchain_core/tracers/base.py +35 -35
  106. langchain_core/tracers/context.py +12 -17
  107. langchain_core/tracers/event_stream.py +3 -3
  108. langchain_core/tracers/langchain.py +8 -8
  109. langchain_core/tracers/log_stream.py +17 -18
  110. langchain_core/tracers/memory_stream.py +2 -2
  111. langchain_core/tracers/schemas.py +0 -129
  112. langchain_core/utils/aiter.py +31 -31
  113. langchain_core/utils/env.py +5 -5
  114. langchain_core/utils/function_calling.py +48 -120
  115. langchain_core/utils/html.py +4 -4
  116. langchain_core/utils/input.py +2 -2
  117. langchain_core/utils/interactive_env.py +1 -1
  118. langchain_core/utils/iter.py +19 -19
  119. langchain_core/utils/json.py +1 -1
  120. langchain_core/utils/json_schema.py +2 -2
  121. langchain_core/utils/mustache.py +5 -5
  122. langchain_core/utils/pydantic.py +17 -17
  123. langchain_core/utils/strings.py +4 -4
  124. langchain_core/utils/utils.py +25 -28
  125. langchain_core/vectorstores/base.py +43 -64
  126. langchain_core/vectorstores/in_memory.py +83 -85
  127. langchain_core/version.py +1 -1
  128. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc1.dist-info}/METADATA +23 -11
  129. langchain_core-1.0.0rc1.dist-info/RECORD +172 -0
  130. langchain_core/memory.py +0 -120
  131. langchain_core/pydantic_v1/__init__.py +0 -30
  132. langchain_core/pydantic_v1/dataclasses.py +0 -23
  133. langchain_core/pydantic_v1/main.py +0 -23
  134. langchain_core-1.0.0a8.dist-info/RECORD +0 -176
  135. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc1.dist-info}/WHEEL +0 -0
@@ -50,7 +50,7 @@ def py_anext(
50
50
 
51
51
  Returns:
52
52
  The next value from the iterator, or the default value
53
- if the iterator is exhausted.
53
+ if the iterator is exhausted.
54
54
 
55
55
  Raises:
56
56
  TypeError: If the iterator is not an async iterator.
@@ -107,7 +107,7 @@ async def tee_peer(
107
107
  """An individual iterator of a `tee`.
108
108
 
109
109
  This function is a generator that yields items from the shared iterator
110
- ``iterator``. It buffers items until the least advanced iterator has
110
+ `iterator`. It buffers items until the least advanced iterator has
111
111
  yielded them as well. The buffer is shared with all other peers.
112
112
 
113
113
  Args:
@@ -153,38 +153,38 @@ async def tee_peer(
153
153
 
154
154
 
155
155
  class Tee(Generic[T]):
156
- """Create ``n`` separate asynchronous iterators over ``iterable``.
156
+ """Create `n` separate asynchronous iterators over `iterable`.
157
157
 
158
- This splits a single ``iterable`` into multiple iterators, each providing
158
+ This splits a single `iterable` into multiple iterators, each providing
159
159
  the same items in the same order.
160
160
  All child iterators may advance separately but share the same items
161
- from ``iterable`` -- when the most advanced iterator retrieves an item,
161
+ from `iterable` -- when the most advanced iterator retrieves an item,
162
162
  it is buffered until the least advanced iterator has yielded it as well.
163
- A ``tee`` works lazily and can handle an infinite ``iterable``, provided
163
+ A `tee` works lazily and can handle an infinite `iterable`, provided
164
164
  that all iterators advance.
165
165
 
166
- .. code-block:: python
167
-
168
- async def derivative(sensor_data):
169
- previous, current = a.tee(sensor_data, n=2)
170
- await a.anext(previous) # advance one iterator
171
- return a.map(operator.sub, previous, current)
166
+ ```python
167
+ async def derivative(sensor_data):
168
+ previous, current = a.tee(sensor_data, n=2)
169
+ await a.anext(previous) # advance one iterator
170
+ return a.map(operator.sub, previous, current)
171
+ ```
172
172
 
173
173
  Unlike `itertools.tee`, `.tee` returns a custom type instead
174
174
  of a :py`tuple`. Like a tuple, it can be indexed, iterated and unpacked
175
175
  to get the child iterators. In addition, its `.tee.aclose` method
176
- immediately closes all children, and it can be used in an ``async with`` context
176
+ immediately closes all children, and it can be used in an `async with` context
177
177
  for the same effect.
178
178
 
179
- If ``iterable`` is an iterator and read elsewhere, ``tee`` will *not*
180
- provide these items. Also, ``tee`` must internally buffer each item until the
179
+ If `iterable` is an iterator and read elsewhere, `tee` will *not*
180
+ provide these items. Also, `tee` must internally buffer each item until the
181
181
  last iterator has yielded it; if the most and least advanced iterator differ
182
182
  by most data, using a :py`list` is more efficient (but not lazy).
183
183
 
184
- If the underlying iterable is concurrency safe (``anext`` may be awaited
184
+ If the underlying iterable is concurrency safe (`anext` may be awaited
185
185
  concurrently) the resulting iterators are concurrency safe as well. Otherwise,
186
186
  the iterators are safe if there is only ever one single "most advanced" iterator.
187
- To enforce sequential use of ``anext``, provide a ``lock``
187
+ To enforce sequential use of `anext`, provide a `lock`
188
188
  - e.g. an :py`asyncio.Lock` instance in an :py:mod:`asyncio` application -
189
189
  and access is automatically synchronised.
190
190
 
@@ -197,13 +197,13 @@ class Tee(Generic[T]):
197
197
  *,
198
198
  lock: AbstractAsyncContextManager[Any] | None = None,
199
199
  ):
200
- """Create a ``tee``.
200
+ """Create a `tee`.
201
201
 
202
202
  Args:
203
203
  iterable: The iterable to split.
204
204
  n: The number of iterators to create. Defaults to 2.
205
205
  lock: The lock to synchronise access to the shared buffers.
206
- Defaults to None.
206
+
207
207
  """
208
208
  self._iterator = iterable.__aiter__() # before 3.10 aiter() doesn't exist
209
209
  self._buffers: list[deque[T]] = [deque() for _ in range(n)]
@@ -269,25 +269,25 @@ atee = Tee
269
269
 
270
270
 
271
271
  class aclosing(AbstractAsyncContextManager): # noqa: N801
272
- """Async context manager to wrap an AsyncGenerator that has a ``aclose()`` method.
272
+ """Async context manager to wrap an AsyncGenerator that has a `aclose()` method.
273
273
 
274
274
  Code like this:
275
275
 
276
- .. code-block:: python
277
-
278
- async with aclosing(<module>.fetch(<arguments>)) as agen:
279
- <block>
276
+ ```python
277
+ async with aclosing(<module>.fetch(<arguments>)) as agen:
278
+ <block>
279
+ ```
280
280
 
281
281
  is equivalent to this:
282
282
 
283
- .. code-block:: python
284
-
285
- agen = <module>.fetch(<arguments>)
286
- try:
287
- <block>
288
- finally:
289
- await agen.aclose()
283
+ ```python
284
+ agen = <module>.fetch(<arguments>)
285
+ try:
286
+ <block>
287
+ finally:
288
+ await agen.aclose()
290
289
 
290
+ ```
291
291
  """
292
292
 
293
293
  def __init__(self, thing: AsyncGenerator[Any, Any] | AsyncIterator[Any]) -> None:
@@ -10,10 +10,10 @@ def env_var_is_set(env_var: str) -> bool:
10
10
  """Check if an environment variable is set.
11
11
 
12
12
  Args:
13
- env_var (str): The name of the environment variable.
13
+ env_var: The name of the environment variable.
14
14
 
15
15
  Returns:
16
- bool: True if the environment variable is set, False otherwise.
16
+ `True` if the environment variable is set, `False` otherwise.
17
17
  """
18
18
  return env_var in os.environ and os.environ[env_var] not in {
19
19
  "",
@@ -38,7 +38,7 @@ def get_from_dict_or_env(
38
38
  env_key: The environment variable to look up if the key is not
39
39
  in the dictionary.
40
40
  default: The default value to return if the key is not in the dictionary
41
- or the environment. Defaults to None.
41
+ or the environment.
42
42
 
43
43
  Returns:
44
44
  The dict value or the environment variable value.
@@ -64,10 +64,10 @@ def get_from_env(key: str, env_key: str, default: str | None = None) -> str:
64
64
  env_key: The environment variable to look up if the key is not
65
65
  in the dictionary.
66
66
  default: The default value to return if the key is not in the dictionary
67
- or the environment. Defaults to None.
67
+ or the environment.
68
68
 
69
69
  Returns:
70
- str: The value of the key.
70
+ The value of the key.
71
71
 
72
72
  Raises:
73
73
  ValueError: If the key is not in the dictionary and no default value is
@@ -27,7 +27,7 @@ from pydantic.v1 import create_model as create_model_v1
27
27
  from typing_extensions import TypedDict, is_typeddict
28
28
 
29
29
  import langchain_core
30
- from langchain_core._api import beta, deprecated
30
+ from langchain_core._api import beta
31
31
  from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
32
32
  from langchain_core.utils.json_schema import dereference_refs
33
33
  from langchain_core.utils.pydantic import is_basemodel_subclass
@@ -72,11 +72,11 @@ def _rm_titles(kv: dict, prev_key: str = "") -> dict:
72
72
  except when a "title" appears within a property definition under "properties".
73
73
 
74
74
  Args:
75
- kv (dict): The input JSON schema as a dictionary.
76
- prev_key (str): The key from the parent dictionary, used to identify context.
75
+ kv: The input JSON schema as a dictionary.
76
+ prev_key: The key from the parent dictionary, used to identify context.
77
77
 
78
78
  Returns:
79
- dict: A new dictionary with appropriate "title" fields removed.
79
+ A new dictionary with appropriate "title" fields removed.
80
80
  """
81
81
  new_kv = {}
82
82
 
@@ -114,7 +114,7 @@ def _convert_json_schema_to_openai_function(
114
114
  used.
115
115
  description: The description of the function. If not provided, the description
116
116
  of the schema will be used.
117
- rm_titles: Whether to remove titles from the schema. Defaults to True.
117
+ rm_titles: Whether to remove titles from the schema. Defaults to `True`.
118
118
 
119
119
  Returns:
120
120
  The function description.
@@ -148,7 +148,7 @@ def _convert_pydantic_to_openai_function(
148
148
  used.
149
149
  description: The description of the function. If not provided, the description
150
150
  of the schema will be used.
151
- rm_titles: Whether to remove titles from the schema. Defaults to True.
151
+ rm_titles: Whether to remove titles from the schema. Defaults to `True`.
152
152
 
153
153
  Raises:
154
154
  TypeError: If the model is not a Pydantic model.
@@ -168,42 +168,6 @@ def _convert_pydantic_to_openai_function(
168
168
  )
169
169
 
170
170
 
171
- convert_pydantic_to_openai_function = deprecated(
172
- "0.1.16",
173
- alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
174
- removal="1.0",
175
- )(_convert_pydantic_to_openai_function)
176
-
177
-
178
- @deprecated(
179
- "0.1.16",
180
- alternative="langchain_core.utils.function_calling.convert_to_openai_tool()",
181
- removal="1.0",
182
- )
183
- def convert_pydantic_to_openai_tool(
184
- model: type[BaseModel],
185
- *,
186
- name: str | None = None,
187
- description: str | None = None,
188
- ) -> ToolDescription:
189
- """Converts a Pydantic model to a function description for the OpenAI API.
190
-
191
- Args:
192
- model: The Pydantic model to convert.
193
- name: The name of the function. If not provided, the title of the schema will be
194
- used.
195
- description: The description of the function. If not provided, the description
196
- of the schema will be used.
197
-
198
- Returns:
199
- The tool description.
200
- """
201
- function = _convert_pydantic_to_openai_function(
202
- model, name=name, description=description
203
- )
204
- return {"type": "function", "function": function}
205
-
206
-
207
171
  def _get_python_function_name(function: Callable) -> str:
208
172
  """Get the name of a Python function."""
209
173
  return function.__name__
@@ -240,13 +204,6 @@ def _convert_python_function_to_openai_function(
240
204
  )
241
205
 
242
206
 
243
- convert_python_function_to_openai_function = deprecated(
244
- "0.1.16",
245
- alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
246
- removal="1.0",
247
- )(_convert_python_function_to_openai_function)
248
-
249
-
250
207
  def _convert_typed_dict_to_openai_function(typed_dict: type) -> FunctionDescription:
251
208
  visited: dict = {}
252
209
 
@@ -368,31 +325,6 @@ def _format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription:
368
325
  }
369
326
 
370
327
 
371
- format_tool_to_openai_function = deprecated(
372
- "0.1.16",
373
- alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
374
- removal="1.0",
375
- )(_format_tool_to_openai_function)
376
-
377
-
378
- @deprecated(
379
- "0.1.16",
380
- alternative="langchain_core.utils.function_calling.convert_to_openai_tool()",
381
- removal="1.0",
382
- )
383
- def format_tool_to_openai_tool(tool: BaseTool) -> ToolDescription:
384
- """Format tool into the OpenAI function API.
385
-
386
- Args:
387
- tool: The tool to format.
388
-
389
- Returns:
390
- The tool description.
391
- """
392
- function = _format_tool_to_openai_function(tool)
393
- return {"type": "function", "function": function}
394
-
395
-
396
328
  def convert_to_openai_function(
397
329
  function: dict[str, Any] | type | Callable | BaseTool,
398
330
  *,
@@ -408,8 +340,8 @@ def convert_to_openai_function(
408
340
  top-level 'title' key specified, an Anthropic format
409
341
  tool, or an Amazon Bedrock Converse format tool.
410
342
  strict:
411
- If True, model output is guaranteed to exactly match the JSON Schema
412
- provided in the function definition. If None, ``strict`` argument will not
343
+ If `True`, model output is guaranteed to exactly match the JSON Schema
344
+ provided in the function definition. If `None`, `strict` argument will not
413
345
  be included in function definition.
414
346
 
415
347
  Returns:
@@ -420,7 +352,7 @@ def convert_to_openai_function(
420
352
  ValueError: If function is not in a supported format.
421
353
 
422
354
  !!! warning "Behavior changed in 0.2.29"
423
- ``strict`` arg added.
355
+ `strict` arg added.
424
356
 
425
357
  !!! warning "Behavior changed in 0.3.13"
426
358
  Support for Anthropic format tools added.
@@ -538,8 +470,8 @@ def convert_to_openai_tool(
538
470
  top-level 'title' key specified, an Anthropic format
539
471
  tool, or an Amazon Bedrock Converse format tool.
540
472
  strict:
541
- If True, model output is guaranteed to exactly match the JSON Schema
542
- provided in the function definition. If None, ``strict`` argument will not
473
+ If `True`, model output is guaranteed to exactly match the JSON Schema
474
+ provided in the function definition. If `None`, `strict` argument will not
543
475
  be included in tool definition.
544
476
 
545
477
  Returns:
@@ -547,7 +479,7 @@ def convert_to_openai_tool(
547
479
  OpenAI tool-calling API.
548
480
 
549
481
  !!! warning "Behavior changed in 0.2.29"
550
- ``strict`` arg added.
482
+ `strict` arg added.
551
483
 
552
484
  !!! warning "Behavior changed in 0.3.13"
553
485
  Support for Anthropic format tools added.
@@ -601,8 +533,8 @@ def convert_to_json_schema(
601
533
 
602
534
  Args:
603
535
  schema: The schema to convert.
604
- strict: If True, model output is guaranteed to exactly match the JSON Schema
605
- provided in the function definition. If None, ``strict`` argument will not
536
+ strict: If `True`, model output is guaranteed to exactly match the JSON Schema
537
+ provided in the function definition. If `None`, `strict` argument will not
606
538
  be included in function definition.
607
539
 
608
540
  Raises:
@@ -649,15 +581,15 @@ def tool_example_to_messages(
649
581
 
650
582
  The list of messages per example by default corresponds to:
651
583
 
652
- 1. ``HumanMessage``: contains the content from which content should be extracted.
653
- 2. ``AIMessage``: contains the extracted information from the model
654
- 3. ``ToolMessage``: contains confirmation to the model that the model requested a
655
- tool correctly.
584
+ 1. `HumanMessage`: contains the content from which content should be extracted.
585
+ 2. `AIMessage`: contains the extracted information from the model
586
+ 3. `ToolMessage`: contains confirmation to the model that the model requested a
587
+ tool correctly.
656
588
 
657
- If ``ai_response`` is specified, there will be a final ``AIMessage`` with that
589
+ If `ai_response` is specified, there will be a final `AIMessage` with that
658
590
  response.
659
591
 
660
- The ``ToolMessage`` is required because some chat models are hyper-optimized for
592
+ The `ToolMessage` is required because some chat models are hyper-optimized for
661
593
  agents rather than for an extraction use case.
662
594
 
663
595
  Args:
@@ -665,50 +597,46 @@ def tool_example_to_messages(
665
597
  tool_calls: Tool calls represented as Pydantic BaseModels
666
598
  tool_outputs: Tool call outputs.
667
599
  Does not need to be provided. If not provided, a placeholder value
668
- will be inserted. Defaults to None.
669
- ai_response: If provided, content for a final ``AIMessage``.
600
+ will be inserted.
601
+ ai_response: If provided, content for a final `AIMessage`.
670
602
 
671
603
  Returns:
672
604
  A list of messages
673
605
 
674
606
  Examples:
607
+ ```python
608
+ from typing import Optional
609
+ from pydantic import BaseModel, Field
610
+ from langchain_openai import ChatOpenAI
675
611
 
676
- .. code-block:: python
677
-
678
- from typing import Optional
679
- from pydantic import BaseModel, Field
680
- from langchain_openai import ChatOpenAI
681
612
 
613
+ class Person(BaseModel):
614
+ '''Information about a person.'''
682
615
 
683
- class Person(BaseModel):
684
- '''Information about a person.'''
685
-
686
- name: Optional[str] = Field(..., description="The name of the person")
687
- hair_color: Optional[str] = Field(
688
- ..., description="The color of the person's hair if known"
689
- )
690
- height_in_meters: Optional[str] = Field(
691
- ..., description="Height in METERS"
692
- )
693
-
616
+ name: str | None = Field(..., description="The name of the person")
617
+ hair_color: str | None = Field(
618
+ ..., description="The color of the person's hair if known"
619
+ )
620
+ height_in_meters: str | None = Field(..., description="Height in METERS")
694
621
 
695
- examples = [
696
- (
697
- "The ocean is vast and blue. It's more than 20,000 feet deep.",
698
- Person(name=None, height_in_meters=None, hair_color=None),
699
- ),
700
- (
701
- "Fiona traveled far from France to Spain.",
702
- Person(name="Fiona", height_in_meters=None, hair_color=None),
703
- ),
704
- ]
705
622
 
623
+ examples = [
624
+ (
625
+ "The ocean is vast and blue. It's more than 20,000 feet deep.",
626
+ Person(name=None, height_in_meters=None, hair_color=None),
627
+ ),
628
+ (
629
+ "Fiona traveled far from France to Spain.",
630
+ Person(name="Fiona", height_in_meters=None, hair_color=None),
631
+ ),
632
+ ]
706
633
 
707
- messages = []
708
634
 
709
- for txt, tool_call in examples:
710
- messages.extend(tool_example_to_messages(txt, [tool_call]))
635
+ messages = []
711
636
 
637
+ for txt, tool_call in examples:
638
+ messages.extend(tool_example_to_messages(txt, [tool_call]))
639
+ ```
712
640
  """
713
641
  messages: list[BaseMessage] = [HumanMessage(content=input)]
714
642
  openai_tool_calls = [
@@ -717,7 +645,7 @@ def tool_example_to_messages(
717
645
  "type": "function",
718
646
  "function": {
719
647
  # The name of the function right now corresponds to the name
720
- # of the pydantic model. This is implicit in the API right now,
648
+ # of the Pydantic model. This is implicit in the API right now,
721
649
  # and will be improved over time.
722
650
  "name": tool_call.__class__.__name__,
723
651
  "arguments": tool_call.model_dump_json(),
@@ -43,7 +43,7 @@ def find_all_links(
43
43
  pattern: Regex to use for extracting links from raw HTML.
44
44
 
45
45
  Returns:
46
- list[str]: all links
46
+ all links
47
47
  """
48
48
  pattern = pattern or DEFAULT_LINK_REGEX
49
49
  return list(set(re.findall(pattern, raw_html)))
@@ -66,14 +66,14 @@ def extract_sub_links(
66
66
  url: the url of the HTML.
67
67
  base_url: the base URL to check for outside links against.
68
68
  pattern: Regex to use for extracting links from raw HTML.
69
- prevent_outside: If True, ignore external links which are not children
69
+ prevent_outside: If `True`, ignore external links which are not children
70
70
  of the base URL.
71
71
  exclude_prefixes: Exclude any URLs that start with one of these prefixes.
72
- continue_on_failure: If True, continue if parsing a specific link raises an
72
+ continue_on_failure: If `True`, continue if parsing a specific link raises an
73
73
  exception. Otherwise, raise the exception.
74
74
 
75
75
  Returns:
76
- list[str]: sub links.
76
+ sub links.
77
77
  """
78
78
  base_url_to_use = base_url if base_url is not None else url
79
79
  parsed_base_url = urlparse(base_url_to_use)
@@ -65,9 +65,9 @@ def print_text(
65
65
 
66
66
  Args:
67
67
  text: The text to print.
68
- color: The color to use. Defaults to None.
68
+ color: The color to use.
69
69
  end: The end character to use. Defaults to "".
70
- file: The file to write to. Defaults to None.
70
+ file: The file to write to.
71
71
  """
72
72
  text_to_print = get_colored_text(text, color) if color else text
73
73
  print(text_to_print, end=end, file=file)
@@ -7,6 +7,6 @@ def is_interactive_env() -> bool:
7
7
  """Determine if running within IPython or Jupyter.
8
8
 
9
9
  Returns:
10
- True if running in an interactive environment, False otherwise.
10
+ True if running in an interactive environment, `False` otherwise.
11
11
  """
12
12
  return hasattr(sys, "ps2")
@@ -43,7 +43,7 @@ def tee_peer(
43
43
  """An individual iterator of a `.tee`.
44
44
 
45
45
  This function is a generator that yields items from the shared iterator
46
- ``iterator``. It buffers items until the least advanced iterator has
46
+ `iterator`. It buffers items until the least advanced iterator has
47
47
  yielded them as well. The buffer is shared with all other peers.
48
48
 
49
49
  Args:
@@ -89,38 +89,38 @@ def tee_peer(
89
89
 
90
90
 
91
91
  class Tee(Generic[T]):
92
- """Create ``n`` separate asynchronous iterators over ``iterable``.
92
+ """Create `n` separate asynchronous iterators over `iterable`.
93
93
 
94
- This splits a single ``iterable`` into multiple iterators, each providing
94
+ This splits a single `iterable` into multiple iterators, each providing
95
95
  the same items in the same order.
96
96
  All child iterators may advance separately but share the same items
97
- from ``iterable`` -- when the most advanced iterator retrieves an item,
97
+ from `iterable` -- when the most advanced iterator retrieves an item,
98
98
  it is buffered until the least advanced iterator has yielded it as well.
99
- A ``tee`` works lazily and can handle an infinite ``iterable``, provided
99
+ A `tee` works lazily and can handle an infinite `iterable`, provided
100
100
  that all iterators advance.
101
101
 
102
- .. code-block:: python
103
-
104
- async def derivative(sensor_data):
105
- previous, current = a.tee(sensor_data, n=2)
106
- await a.anext(previous) # advance one iterator
107
- return a.map(operator.sub, previous, current)
102
+ ```python
103
+ async def derivative(sensor_data):
104
+ previous, current = a.tee(sensor_data, n=2)
105
+ await a.anext(previous) # advance one iterator
106
+ return a.map(operator.sub, previous, current)
107
+ ```
108
108
 
109
109
  Unlike `itertools.tee`, `.tee` returns a custom type instead
110
110
  of a :py`tuple`. Like a tuple, it can be indexed, iterated and unpacked
111
111
  to get the child iterators. In addition, its `.tee.aclose` method
112
- immediately closes all children, and it can be used in an ``async with`` context
112
+ immediately closes all children, and it can be used in an `async with` context
113
113
  for the same effect.
114
114
 
115
- If ``iterable`` is an iterator and read elsewhere, ``tee`` will *not*
116
- provide these items. Also, ``tee`` must internally buffer each item until the
115
+ If `iterable` is an iterator and read elsewhere, `tee` will *not*
116
+ provide these items. Also, `tee` must internally buffer each item until the
117
117
  last iterator has yielded it; if the most and least advanced iterator differ
118
118
  by most data, using a :py`list` is more efficient (but not lazy).
119
119
 
120
- If the underlying iterable is concurrency safe (``anext`` may be awaited
120
+ If the underlying iterable is concurrency safe (`anext` may be awaited
121
121
  concurrently) the resulting iterators are concurrency safe as well. Otherwise,
122
122
  the iterators are safe if there is only ever one single "most advanced" iterator.
123
- To enforce sequential use of ``anext``, provide a ``lock``
123
+ To enforce sequential use of `anext`, provide a `lock`
124
124
  - e.g. an :py`asyncio.Lock` instance in an :py:mod:`asyncio` application -
125
125
  and access is automatically synchronised.
126
126
 
@@ -133,13 +133,13 @@ class Tee(Generic[T]):
133
133
  *,
134
134
  lock: AbstractContextManager[Any] | None = None,
135
135
  ):
136
- """Create a ``tee``.
136
+ """Create a `tee`.
137
137
 
138
138
  Args:
139
139
  iterable: The iterable to split.
140
140
  n: The number of iterators to create. Defaults to 2.
141
141
  lock: The lock to synchronise access to the shared buffers.
142
- Defaults to None.
142
+
143
143
  """
144
144
  self._iterator = iter(iterable)
145
145
  self._buffers: list[deque[T]] = [deque() for _ in range(n)]
@@ -207,7 +207,7 @@ def batch_iterate(size: int | None, iterable: Iterable[T]) -> Iterator[list[T]]:
207
207
  """Utility batching function.
208
208
 
209
209
  Args:
210
- size: The size of the batch. If None, returns a single batch.
210
+ size: The size of the batch. If `None`, returns a single batch.
211
211
  iterable: The iterable to batch.
212
212
 
213
213
  Yields:
@@ -51,7 +51,7 @@ def parse_partial_json(s: str, *, strict: bool = False) -> Any:
51
51
 
52
52
  Args:
53
53
  s: The JSON string to parse.
54
- strict: Whether to use strict parsing. Defaults to False.
54
+ strict: Whether to use strict parsing. Defaults to `False`.
55
55
 
56
56
  Returns:
57
57
  The parsed JSON object as a Python dictionary.
@@ -85,7 +85,7 @@ def _dereference_refs_helper(
85
85
  full_schema: The complete schema containing all definitions
86
86
  processed_refs: Set tracking currently processing refs (for cycle detection)
87
87
  skip_keys: Keys under which to skip recursion
88
- shallow_refs: If True, only break cycles; if False, deep-inline all refs
88
+ shallow_refs: If `True`, only break cycles; if False, deep-inline all refs
89
89
 
90
90
  Returns:
91
91
  The object with $ref properties resolved and merged with other properties.
@@ -184,7 +184,7 @@ def dereference_refs(
184
184
  point to. If not provided, defaults to schema_obj (useful when the
185
185
  schema is self-contained).
186
186
  skip_keys: Controls recursion behavior and reference resolution depth:
187
- - If None (default): Only recurse under '$defs' and use shallow reference
187
+ - If `None` (Default): Only recurse under '$defs' and use shallow reference
188
188
  resolution (break cycles but don't deep-inline nested refs)
189
189
  - If provided (even as []): Recurse under all keys and use deep reference
190
190
  resolution (fully inline all nested references)
@@ -46,7 +46,7 @@ def grab_literal(template: str, l_del: str) -> tuple[str, str]:
46
46
  l_del: The left delimiter.
47
47
 
48
48
  Returns:
49
- tuple[str, str]: The literal and the template.
49
+ The literal and the template.
50
50
  """
51
51
  global _CURRENT_LINE
52
52
 
@@ -76,7 +76,7 @@ def l_sa_check(
76
76
  is_standalone: Whether the tag is standalone.
77
77
 
78
78
  Returns:
79
- bool: Whether the tag could be a standalone.
79
+ Whether the tag could be a standalone.
80
80
  """
81
81
  # If there is a newline, or the previous tag was a standalone
82
82
  if literal.find("\n") != -1 or is_standalone:
@@ -102,7 +102,7 @@ def r_sa_check(
102
102
  is_standalone: Whether the tag is standalone.
103
103
 
104
104
  Returns:
105
- bool: Whether the tag could be a standalone.
105
+ Whether the tag could be a standalone.
106
106
  """
107
107
  # Check right side if we might be a standalone
108
108
  if is_standalone and tag_type not in {"variable", "no escape"}:
@@ -124,7 +124,7 @@ def parse_tag(template: str, l_del: str, r_del: str) -> tuple[tuple[str, str], s
124
124
  r_del: The right delimiter.
125
125
 
126
126
  Returns:
127
- tuple[tuple[str, str], str]: The tag and the template.
127
+ The tag and the template.
128
128
 
129
129
  Raises:
130
130
  ChevronError: If the tag is unclosed.
@@ -329,7 +329,7 @@ def tokenize(
329
329
 
330
330
 
331
331
  def _html_escape(string: str) -> str:
332
- """Return the HTML-escaped string with these characters escaped: ``" & < >``."""
332
+ """Return the HTML-escaped string with these characters escaped: `" & < >`."""
333
333
  html_codes = {
334
334
  '"': "&quot;",
335
335
  "<": "&lt;",