langchain-core 0.3.79__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic; consult the registry's advisory page for more details.

Files changed (165)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +3 -4
  3. langchain_core/_api/beta_decorator.py +23 -26
  4. langchain_core/_api/deprecation.py +52 -65
  5. langchain_core/_api/path.py +3 -6
  6. langchain_core/_import_utils.py +3 -4
  7. langchain_core/agents.py +19 -19
  8. langchain_core/caches.py +53 -63
  9. langchain_core/callbacks/__init__.py +1 -8
  10. langchain_core/callbacks/base.py +323 -334
  11. langchain_core/callbacks/file.py +44 -44
  12. langchain_core/callbacks/manager.py +441 -507
  13. langchain_core/callbacks/stdout.py +29 -30
  14. langchain_core/callbacks/streaming_stdout.py +32 -32
  15. langchain_core/callbacks/usage.py +60 -57
  16. langchain_core/chat_history.py +48 -63
  17. langchain_core/document_loaders/base.py +23 -23
  18. langchain_core/document_loaders/langsmith.py +37 -37
  19. langchain_core/documents/__init__.py +0 -1
  20. langchain_core/documents/base.py +62 -65
  21. langchain_core/documents/compressor.py +4 -4
  22. langchain_core/documents/transformers.py +28 -29
  23. langchain_core/embeddings/fake.py +50 -54
  24. langchain_core/example_selectors/length_based.py +1 -1
  25. langchain_core/example_selectors/semantic_similarity.py +21 -25
  26. langchain_core/exceptions.py +10 -11
  27. langchain_core/globals.py +3 -151
  28. langchain_core/indexing/api.py +61 -66
  29. langchain_core/indexing/base.py +58 -58
  30. langchain_core/indexing/in_memory.py +3 -3
  31. langchain_core/language_models/__init__.py +14 -27
  32. langchain_core/language_models/_utils.py +270 -84
  33. langchain_core/language_models/base.py +55 -162
  34. langchain_core/language_models/chat_models.py +442 -402
  35. langchain_core/language_models/fake.py +11 -11
  36. langchain_core/language_models/fake_chat_models.py +61 -39
  37. langchain_core/language_models/llms.py +123 -231
  38. langchain_core/load/dump.py +4 -5
  39. langchain_core/load/load.py +18 -28
  40. langchain_core/load/mapping.py +2 -4
  41. langchain_core/load/serializable.py +39 -40
  42. langchain_core/messages/__init__.py +61 -22
  43. langchain_core/messages/ai.py +368 -163
  44. langchain_core/messages/base.py +214 -43
  45. langchain_core/messages/block_translators/__init__.py +111 -0
  46. langchain_core/messages/block_translators/anthropic.py +470 -0
  47. langchain_core/messages/block_translators/bedrock.py +94 -0
  48. langchain_core/messages/block_translators/bedrock_converse.py +297 -0
  49. langchain_core/messages/block_translators/google_genai.py +530 -0
  50. langchain_core/messages/block_translators/google_vertexai.py +21 -0
  51. langchain_core/messages/block_translators/groq.py +143 -0
  52. langchain_core/messages/block_translators/langchain_v0.py +301 -0
  53. langchain_core/messages/block_translators/openai.py +1010 -0
  54. langchain_core/messages/chat.py +2 -6
  55. langchain_core/messages/content.py +1423 -0
  56. langchain_core/messages/function.py +6 -10
  57. langchain_core/messages/human.py +41 -38
  58. langchain_core/messages/modifier.py +2 -2
  59. langchain_core/messages/system.py +38 -28
  60. langchain_core/messages/tool.py +96 -103
  61. langchain_core/messages/utils.py +478 -504
  62. langchain_core/output_parsers/__init__.py +1 -14
  63. langchain_core/output_parsers/base.py +58 -61
  64. langchain_core/output_parsers/json.py +7 -8
  65. langchain_core/output_parsers/list.py +5 -7
  66. langchain_core/output_parsers/openai_functions.py +49 -47
  67. langchain_core/output_parsers/openai_tools.py +14 -19
  68. langchain_core/output_parsers/pydantic.py +12 -13
  69. langchain_core/output_parsers/string.py +2 -2
  70. langchain_core/output_parsers/transform.py +15 -17
  71. langchain_core/output_parsers/xml.py +8 -10
  72. langchain_core/outputs/__init__.py +1 -1
  73. langchain_core/outputs/chat_generation.py +18 -18
  74. langchain_core/outputs/chat_result.py +1 -3
  75. langchain_core/outputs/generation.py +8 -8
  76. langchain_core/outputs/llm_result.py +10 -10
  77. langchain_core/prompt_values.py +12 -12
  78. langchain_core/prompts/__init__.py +3 -27
  79. langchain_core/prompts/base.py +45 -55
  80. langchain_core/prompts/chat.py +254 -313
  81. langchain_core/prompts/dict.py +5 -5
  82. langchain_core/prompts/few_shot.py +81 -88
  83. langchain_core/prompts/few_shot_with_templates.py +11 -13
  84. langchain_core/prompts/image.py +12 -14
  85. langchain_core/prompts/loading.py +6 -8
  86. langchain_core/prompts/message.py +3 -3
  87. langchain_core/prompts/prompt.py +24 -39
  88. langchain_core/prompts/string.py +4 -4
  89. langchain_core/prompts/structured.py +42 -50
  90. langchain_core/rate_limiters.py +51 -60
  91. langchain_core/retrievers.py +49 -190
  92. langchain_core/runnables/base.py +1484 -1709
  93. langchain_core/runnables/branch.py +45 -61
  94. langchain_core/runnables/config.py +80 -88
  95. langchain_core/runnables/configurable.py +117 -134
  96. langchain_core/runnables/fallbacks.py +83 -79
  97. langchain_core/runnables/graph.py +85 -95
  98. langchain_core/runnables/graph_ascii.py +27 -28
  99. langchain_core/runnables/graph_mermaid.py +38 -50
  100. langchain_core/runnables/graph_png.py +15 -16
  101. langchain_core/runnables/history.py +135 -148
  102. langchain_core/runnables/passthrough.py +124 -150
  103. langchain_core/runnables/retry.py +46 -51
  104. langchain_core/runnables/router.py +25 -30
  105. langchain_core/runnables/schema.py +79 -74
  106. langchain_core/runnables/utils.py +62 -68
  107. langchain_core/stores.py +81 -115
  108. langchain_core/structured_query.py +8 -8
  109. langchain_core/sys_info.py +27 -29
  110. langchain_core/tools/__init__.py +1 -14
  111. langchain_core/tools/base.py +179 -187
  112. langchain_core/tools/convert.py +131 -139
  113. langchain_core/tools/render.py +10 -10
  114. langchain_core/tools/retriever.py +11 -11
  115. langchain_core/tools/simple.py +19 -24
  116. langchain_core/tools/structured.py +30 -39
  117. langchain_core/tracers/__init__.py +1 -9
  118. langchain_core/tracers/base.py +97 -99
  119. langchain_core/tracers/context.py +29 -52
  120. langchain_core/tracers/core.py +50 -60
  121. langchain_core/tracers/evaluation.py +11 -11
  122. langchain_core/tracers/event_stream.py +115 -70
  123. langchain_core/tracers/langchain.py +21 -21
  124. langchain_core/tracers/log_stream.py +43 -43
  125. langchain_core/tracers/memory_stream.py +3 -3
  126. langchain_core/tracers/root_listeners.py +16 -16
  127. langchain_core/tracers/run_collector.py +2 -4
  128. langchain_core/tracers/schemas.py +0 -129
  129. langchain_core/tracers/stdout.py +3 -3
  130. langchain_core/utils/__init__.py +1 -4
  131. langchain_core/utils/_merge.py +46 -8
  132. langchain_core/utils/aiter.py +57 -61
  133. langchain_core/utils/env.py +9 -9
  134. langchain_core/utils/function_calling.py +89 -191
  135. langchain_core/utils/html.py +7 -8
  136. langchain_core/utils/input.py +6 -6
  137. langchain_core/utils/interactive_env.py +1 -1
  138. langchain_core/utils/iter.py +37 -42
  139. langchain_core/utils/json.py +4 -3
  140. langchain_core/utils/json_schema.py +8 -8
  141. langchain_core/utils/mustache.py +9 -11
  142. langchain_core/utils/pydantic.py +33 -35
  143. langchain_core/utils/strings.py +5 -5
  144. langchain_core/utils/usage.py +1 -1
  145. langchain_core/utils/utils.py +80 -54
  146. langchain_core/vectorstores/base.py +129 -164
  147. langchain_core/vectorstores/in_memory.py +99 -174
  148. langchain_core/vectorstores/utils.py +5 -5
  149. langchain_core/version.py +1 -1
  150. {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/METADATA +28 -27
  151. langchain_core-1.0.0.dist-info/RECORD +172 -0
  152. {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
  153. langchain_core/beta/__init__.py +0 -1
  154. langchain_core/beta/runnables/__init__.py +0 -1
  155. langchain_core/beta/runnables/context.py +0 -447
  156. langchain_core/memory.py +0 -120
  157. langchain_core/messages/content_blocks.py +0 -176
  158. langchain_core/prompts/pipeline.py +0 -138
  159. langchain_core/pydantic_v1/__init__.py +0 -30
  160. langchain_core/pydantic_v1/dataclasses.py +0 -23
  161. langchain_core/pydantic_v1/main.py +0 -23
  162. langchain_core/tracers/langchain_v1.py +0 -31
  163. langchain_core/utils/loading.py +0 -35
  164. langchain_core-0.3.79.dist-info/RECORD +0 -174
  165. langchain_core-0.3.79.dist-info/entry_points.txt +0 -4
@@ -4,26 +4,24 @@ from __future__ import annotations
4
4
 
5
5
  import warnings
6
6
  from abc import ABC, abstractmethod
7
- from collections.abc import Mapping, Sequence
7
+ from collections.abc import Callable, Mapping, Sequence
8
8
  from functools import cache
9
9
  from typing import (
10
10
  TYPE_CHECKING,
11
11
  Any,
12
- Callable,
13
12
  Literal,
14
- Optional,
13
+ TypeAlias,
15
14
  TypeVar,
16
- Union,
17
15
  )
18
16
 
19
17
  from pydantic import BaseModel, ConfigDict, Field, field_validator
20
- from typing_extensions import TypeAlias, TypedDict, override
18
+ from typing_extensions import TypedDict, override
21
19
 
22
- from langchain_core._api import deprecated
23
20
  from langchain_core.caches import BaseCache
24
21
  from langchain_core.callbacks import Callbacks
25
22
  from langchain_core.globals import get_verbose
26
23
  from langchain_core.messages import (
24
+ AIMessage,
27
25
  AnyMessage,
28
26
  BaseMessage,
29
27
  MessageLikeRepresentation,
@@ -35,7 +33,6 @@ from langchain_core.prompt_values import (
35
33
  StringPromptValue,
36
34
  )
37
35
  from langchain_core.runnables import Runnable, RunnableSerializable
38
- from langchain_core.utils import get_pydantic_field_names
39
36
 
40
37
  if TYPE_CHECKING:
41
38
  from langchain_core.outputs import LLMResult
@@ -57,11 +54,11 @@ class LangSmithParams(TypedDict, total=False):
57
54
  """Name of the model."""
58
55
  ls_model_type: Literal["chat", "llm"]
59
56
  """Type of the model. Should be 'chat' or 'llm'."""
60
- ls_temperature: Optional[float]
57
+ ls_temperature: float | None
61
58
  """Temperature for generation."""
62
- ls_max_tokens: Optional[int]
59
+ ls_max_tokens: int | None
63
60
  """Max tokens for generation."""
64
- ls_stop: Optional[list[str]]
61
+ ls_stop: list[str] | None
65
62
  """Stop words for generation."""
66
63
 
67
64
 
@@ -98,10 +95,17 @@ def _get_token_ids_default_method(text: str) -> list[int]:
98
95
  return tokenizer.encode(text)
99
96
 
100
97
 
101
- LanguageModelInput = Union[PromptValue, str, Sequence[MessageLikeRepresentation]]
102
- LanguageModelOutput = Union[BaseMessage, str]
98
+ LanguageModelInput = PromptValue | str | Sequence[MessageLikeRepresentation]
99
+ """Input to a language model."""
100
+
101
+ LanguageModelOutput = BaseMessage | str
102
+ """Output from a language model."""
103
+
103
104
  LanguageModelLike = Runnable[LanguageModelInput, LanguageModelOutput]
104
- LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", BaseMessage, str)
105
+ """Input/output interface for a language model."""
106
+
107
+ LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", AIMessage, str)
108
+ """Type variable for the output of a language model."""
105
109
 
106
110
 
107
111
  def _get_verbosity() -> bool:
@@ -113,30 +117,29 @@ class BaseLanguageModel(
113
117
  ):
114
118
  """Abstract base class for interfacing with language models.
115
119
 
116
- All language model wrappers inherited from ``BaseLanguageModel``.
120
+ All language model wrappers inherited from `BaseLanguageModel`.
117
121
 
118
122
  """
119
123
 
120
- cache: Union[BaseCache, bool, None] = Field(default=None, exclude=True)
124
+ cache: BaseCache | bool | None = Field(default=None, exclude=True)
121
125
  """Whether to cache the response.
122
126
 
123
- * If true, will use the global cache.
124
- * If false, will not use a cache
125
- * If None, will use the global cache if it's set, otherwise no cache.
126
- * If instance of ``BaseCache``, will use the provided cache.
127
+ * If `True`, will use the global cache.
128
+ * If `False`, will not use a cache
129
+ * If `None`, will use the global cache if it's set, otherwise no cache.
130
+ * If instance of `BaseCache`, will use the provided cache.
127
131
 
128
132
  Caching is not currently supported for streaming methods of models.
129
-
130
133
  """
131
134
  verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False)
132
135
  """Whether to print out response text."""
133
136
  callbacks: Callbacks = Field(default=None, exclude=True)
134
137
  """Callbacks to add to the run trace."""
135
- tags: Optional[list[str]] = Field(default=None, exclude=True)
138
+ tags: list[str] | None = Field(default=None, exclude=True)
136
139
  """Tags to add to the run trace."""
137
- metadata: Optional[dict[str, Any]] = Field(default=None, exclude=True)
140
+ metadata: dict[str, Any] | None = Field(default=None, exclude=True)
138
141
  """Metadata to add to the run trace."""
139
- custom_get_token_ids: Optional[Callable[[str], list[int]]] = Field(
142
+ custom_get_token_ids: Callable[[str], list[int]] | None = Field(
140
143
  default=None, exclude=True
141
144
  )
142
145
  """Optional encoder to use for counting tokens."""
@@ -146,10 +149,10 @@ class BaseLanguageModel(
146
149
  )
147
150
 
148
151
  @field_validator("verbose", mode="before")
149
- def set_verbose(cls, verbose: Optional[bool]) -> bool: # noqa: FBT001
150
- """If verbose is None, set it.
152
+ def set_verbose(cls, verbose: bool | None) -> bool: # noqa: FBT001
153
+ """If verbose is `None`, set it.
151
154
 
152
- This allows users to pass in None as verbose to access the global setting.
155
+ This allows users to pass in `None` as verbose to access the global setting.
153
156
 
154
157
  Args:
155
158
  verbose: The verbosity setting to use.
@@ -165,21 +168,17 @@ class BaseLanguageModel(
165
168
  @property
166
169
  @override
167
170
  def InputType(self) -> TypeAlias:
168
- """Get the input type for this runnable."""
171
+ """Get the input type for this `Runnable`."""
169
172
  # This is a version of LanguageModelInput which replaces the abstract
170
173
  # base class BaseMessage with a union of its subclasses, which makes
171
174
  # for a much better schema.
172
- return Union[
173
- str,
174
- Union[StringPromptValue, ChatPromptValueConcrete],
175
- list[AnyMessage],
176
- ]
175
+ return str | StringPromptValue | ChatPromptValueConcrete | list[AnyMessage]
177
176
 
178
177
  @abstractmethod
179
178
  def generate_prompt(
180
179
  self,
181
180
  prompts: list[PromptValue],
182
- stop: Optional[list[str]] = None,
181
+ stop: list[str] | None = None,
183
182
  callbacks: Callbacks = None,
184
183
  **kwargs: Any,
185
184
  ) -> LLMResult:
@@ -193,22 +192,22 @@ class BaseLanguageModel(
193
192
  1. Take advantage of batched calls,
194
193
  2. Need more output from the model than just the top generated value,
195
194
  3. Are building chains that are agnostic to the underlying language model
196
- type (e.g., pure text completion models vs chat models).
195
+ type (e.g., pure text completion models vs chat models).
197
196
 
198
197
  Args:
199
- prompts: List of PromptValues. A PromptValue is an object that can be
200
- converted to match the format of any language model (string for pure
201
- text generation models and BaseMessages for chat models).
198
+ prompts: List of `PromptValue` objects. A `PromptValue` is an object that
199
+ can be converted to match the format of any language model (string for
200
+ pure text generation models and `BaseMessage` objects for chat models).
202
201
  stop: Stop words to use when generating. Model output is cut off at the
203
202
  first occurrence of any of these substrings.
204
- callbacks: Callbacks to pass through. Used for executing additional
203
+ callbacks: `Callbacks` to pass through. Used for executing additional
205
204
  functionality, such as logging or streaming, throughout generation.
206
205
  **kwargs: Arbitrary additional keyword arguments. These are usually passed
207
206
  to the model provider API call.
208
207
 
209
208
  Returns:
210
- An LLMResult, which contains a list of candidate Generations for each input
211
- prompt and additional model provider-specific output.
209
+ An `LLMResult`, which contains a list of candidate `Generation` objects for
210
+ each input prompt and additional model provider-specific output.
212
211
 
213
212
  """
214
213
 
@@ -216,7 +215,7 @@ class BaseLanguageModel(
216
215
  async def agenerate_prompt(
217
216
  self,
218
217
  prompts: list[PromptValue],
219
- stop: Optional[list[str]] = None,
218
+ stop: list[str] | None = None,
220
219
  callbacks: Callbacks = None,
221
220
  **kwargs: Any,
222
221
  ) -> LLMResult:
@@ -230,129 +229,33 @@ class BaseLanguageModel(
230
229
  1. Take advantage of batched calls,
231
230
  2. Need more output from the model than just the top generated value,
232
231
  3. Are building chains that are agnostic to the underlying language model
233
- type (e.g., pure text completion models vs chat models).
232
+ type (e.g., pure text completion models vs chat models).
234
233
 
235
234
  Args:
236
- prompts: List of PromptValues. A PromptValue is an object that can be
237
- converted to match the format of any language model (string for pure
238
- text generation models and BaseMessages for chat models).
235
+ prompts: List of `PromptValue` objects. A `PromptValue` is an object that
236
+ can be converted to match the format of any language model (string for
237
+ pure text generation models and `BaseMessage` objects for chat models).
239
238
  stop: Stop words to use when generating. Model output is cut off at the
240
239
  first occurrence of any of these substrings.
241
- callbacks: Callbacks to pass through. Used for executing additional
240
+ callbacks: `Callbacks` to pass through. Used for executing additional
242
241
  functionality, such as logging or streaming, throughout generation.
243
242
  **kwargs: Arbitrary additional keyword arguments. These are usually passed
244
243
  to the model provider API call.
245
244
 
246
245
  Returns:
247
- An ``LLMResult``, which contains a list of candidate Generations for each
248
- input prompt and additional model provider-specific output.
246
+ An `LLMResult`, which contains a list of candidate `Generation` objects for
247
+ each input prompt and additional model provider-specific output.
249
248
 
250
249
  """
251
250
 
252
251
  def with_structured_output(
253
- self, schema: Union[dict, type], **kwargs: Any
254
- ) -> Runnable[LanguageModelInput, Union[dict, BaseModel]]:
252
+ self, schema: dict | type, **kwargs: Any
253
+ ) -> Runnable[LanguageModelInput, dict | BaseModel]:
255
254
  """Not implemented on this class."""
256
255
  # Implement this on child class if there is a way of steering the model to
257
256
  # generate responses that match a given schema.
258
257
  raise NotImplementedError
259
258
 
260
- @deprecated("0.1.7", alternative="invoke", removal="1.0")
261
- @abstractmethod
262
- def predict(
263
- self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
264
- ) -> str:
265
- """Pass a single string input to the model and return a string.
266
-
267
- Use this method when passing in raw text. If you want to pass in specific types
268
- of chat messages, use predict_messages.
269
-
270
- Args:
271
- text: String input to pass to the model.
272
- stop: Stop words to use when generating. Model output is cut off at the
273
- first occurrence of any of these substrings.
274
- **kwargs: Arbitrary additional keyword arguments. These are usually passed
275
- to the model provider API call.
276
-
277
- Returns:
278
- Top model prediction as a string.
279
-
280
- """
281
-
282
- @deprecated("0.1.7", alternative="invoke", removal="1.0")
283
- @abstractmethod
284
- def predict_messages(
285
- self,
286
- messages: list[BaseMessage],
287
- *,
288
- stop: Optional[Sequence[str]] = None,
289
- **kwargs: Any,
290
- ) -> BaseMessage:
291
- """Pass a message sequence to the model and return a message.
292
-
293
- Use this method when passing in chat messages. If you want to pass in raw text,
294
- use predict.
295
-
296
- Args:
297
- messages: A sequence of chat messages corresponding to a single model input.
298
- stop: Stop words to use when generating. Model output is cut off at the
299
- first occurrence of any of these substrings.
300
- **kwargs: Arbitrary additional keyword arguments. These are usually passed
301
- to the model provider API call.
302
-
303
- Returns:
304
- Top model prediction as a message.
305
-
306
- """
307
-
308
- @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
309
- @abstractmethod
310
- async def apredict(
311
- self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
312
- ) -> str:
313
- """Asynchronously pass a string to the model and return a string.
314
-
315
- Use this method when calling pure text generation models and only the top
316
- candidate generation is needed.
317
-
318
- Args:
319
- text: String input to pass to the model.
320
- stop: Stop words to use when generating. Model output is cut off at the
321
- first occurrence of any of these substrings.
322
- **kwargs: Arbitrary additional keyword arguments. These are usually passed
323
- to the model provider API call.
324
-
325
- Returns:
326
- Top model prediction as a string.
327
-
328
- """
329
-
330
- @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
331
- @abstractmethod
332
- async def apredict_messages(
333
- self,
334
- messages: list[BaseMessage],
335
- *,
336
- stop: Optional[Sequence[str]] = None,
337
- **kwargs: Any,
338
- ) -> BaseMessage:
339
- """Asynchronously pass messages to the model and return a message.
340
-
341
- Use this method when calling chat models and only the top candidate generation
342
- is needed.
343
-
344
- Args:
345
- messages: A sequence of chat messages corresponding to a single model input.
346
- stop: Stop words to use when generating. Model output is cut off at the
347
- first occurrence of any of these substrings.
348
- **kwargs: Arbitrary additional keyword arguments. These are usually passed
349
- to the model provider API call.
350
-
351
- Returns:
352
- Top model prediction as a message.
353
-
354
- """
355
-
356
259
  @property
357
260
  def _identifying_params(self) -> Mapping[str, Any]:
358
261
  """Get the identifying parameters."""
@@ -366,8 +269,7 @@ class BaseLanguageModel(
366
269
 
367
270
  Returns:
368
271
  A list of ids corresponding to the tokens in the text, in order they occur
369
- in the text.
370
-
272
+ in the text.
371
273
  """
372
274
  if self.custom_get_token_ids is not None:
373
275
  return self.custom_get_token_ids(text)
@@ -390,20 +292,20 @@ class BaseLanguageModel(
390
292
  def get_num_tokens_from_messages(
391
293
  self,
392
294
  messages: list[BaseMessage],
393
- tools: Optional[Sequence] = None,
295
+ tools: Sequence | None = None,
394
296
  ) -> int:
395
297
  """Get the number of tokens in the messages.
396
298
 
397
299
  Useful for checking if an input fits in a model's context window.
398
300
 
399
- .. note::
400
- The base implementation of ``get_num_tokens_from_messages`` ignores tool
301
+ !!! note
302
+ The base implementation of `get_num_tokens_from_messages` ignores tool
401
303
  schemas.
402
304
 
403
305
  Args:
404
306
  messages: The message inputs to tokenize.
405
- tools: If provided, sequence of dict, ``BaseModel``, function, or
406
- ``BaseTools`` to be converted to tool schemas.
307
+ tools: If provided, sequence of dict, `BaseModel`, function, or
308
+ `BaseTool` objects to be converted to tool schemas.
407
309
 
408
310
  Returns:
409
311
  The sum of the number of tokens across the messages.
@@ -415,12 +317,3 @@ class BaseLanguageModel(
415
317
  stacklevel=2,
416
318
  )
417
319
  return sum(self.get_num_tokens(get_buffer_string([m])) for m in messages)
418
-
419
- @classmethod
420
- def _all_required_field_names(cls) -> set:
421
- """DEPRECATED: Kept for backwards compatibility.
422
-
423
- Use ``get_pydantic_field_names``.
424
-
425
- """
426
- return get_pydantic_field_names(cls)