langchain-core 0.3.74__py3-none-any.whl → 0.3.75__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (41)
  1. langchain_core/_api/beta_decorator.py +2 -2
  2. langchain_core/_api/deprecation.py +1 -1
  3. langchain_core/beta/runnables/context.py +1 -1
  4. langchain_core/callbacks/file.py +13 -2
  5. langchain_core/callbacks/manager.py +55 -16
  6. langchain_core/chat_history.py +6 -6
  7. langchain_core/documents/base.py +1 -1
  8. langchain_core/documents/compressor.py +9 -6
  9. langchain_core/indexing/base.py +2 -2
  10. langchain_core/language_models/base.py +33 -19
  11. langchain_core/language_models/chat_models.py +39 -20
  12. langchain_core/language_models/fake_chat_models.py +5 -4
  13. langchain_core/load/dump.py +3 -4
  14. langchain_core/messages/ai.py +4 -1
  15. langchain_core/messages/modifier.py +1 -1
  16. langchain_core/messages/tool.py +3 -3
  17. langchain_core/messages/utils.py +18 -17
  18. langchain_core/output_parsers/openai_tools.py +2 -0
  19. langchain_core/output_parsers/transform.py +2 -2
  20. langchain_core/output_parsers/xml.py +4 -3
  21. langchain_core/prompts/chat.py +1 -3
  22. langchain_core/runnables/base.py +507 -451
  23. langchain_core/runnables/branch.py +1 -1
  24. langchain_core/runnables/config.py +2 -2
  25. langchain_core/runnables/fallbacks.py +4 -4
  26. langchain_core/runnables/graph.py +3 -3
  27. langchain_core/runnables/history.py +1 -1
  28. langchain_core/runnables/passthrough.py +3 -3
  29. langchain_core/runnables/retry.py +1 -1
  30. langchain_core/runnables/router.py +1 -1
  31. langchain_core/structured_query.py +3 -7
  32. langchain_core/tools/structured.py +1 -1
  33. langchain_core/tracers/_streaming.py +6 -7
  34. langchain_core/tracers/event_stream.py +1 -1
  35. langchain_core/tracers/log_stream.py +1 -1
  36. langchain_core/utils/function_calling.py +12 -10
  37. langchain_core/version.py +1 -1
  38. {langchain_core-0.3.74.dist-info → langchain_core-0.3.75.dist-info}/METADATA +6 -8
  39. {langchain_core-0.3.74.dist-info → langchain_core-0.3.75.dist-info}/RECORD +41 -41
  40. {langchain_core-0.3.74.dist-info → langchain_core-0.3.75.dist-info}/WHEEL +0 -0
  41. {langchain_core-0.3.74.dist-info → langchain_core-0.3.75.dist-info}/entry_points.txt +0 -0
@@ -57,8 +57,8 @@ class LangSmithParams(TypedDict, total=False):
 def get_tokenizer() -> Any:
     """Get a GPT-2 tokenizer instance.
 
-    This function is cached to avoid re-loading the tokenizer
-    every time it is called.
+    This function is cached to avoid re-loading the tokenizer every time it is called.
+
     """
     try:
         from transformers import GPT2TokenizerFast  # type: ignore[import-not-found]
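
For reference (editorial sketch, not part of the diff; requires the optional transformers dependency):

    from langchain_core.language_models.base import get_tokenizer

    # First call loads GPT2TokenizerFast.from_pretrained("gpt2");
    # later calls return the cached instance.
    tokenizer = get_tokenizer()
    token_ids = tokenizer.encode("Hello, world!")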
@@ -99,7 +99,8 @@ class BaseLanguageModel(
 ):
     """Abstract base class for interfacing with language models.
 
-    All language model wrappers inherited from BaseLanguageModel.
+    All language model wrappers inherited from ``BaseLanguageModel``.
+
     """
 
     cache: Union[BaseCache, bool, None] = Field(default=None, exclude=True)
@@ -108,9 +109,10 @@ class BaseLanguageModel(
     * If true, will use the global cache.
     * If false, will not use a cache
     * If None, will use the global cache if it's set, otherwise no cache.
-    * If instance of BaseCache, will use the provided cache.
+    * If instance of ``BaseCache``, will use the provided cache.
 
     Caching is not currently supported for streaming methods of models.
+
     """
     verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False)
     """Whether to print out response text."""
@@ -140,6 +142,7 @@ class BaseLanguageModel(
 
         Returns:
             The verbosity setting to use.
+
         """
         if verbose is None:
             return _get_verbosity()
@@ -195,7 +198,8 @@ class BaseLanguageModel(
 
         Returns:
             An LLMResult, which contains a list of candidate Generations for each input
-                prompt and additional model provider-specific output.
+            prompt and additional model provider-specific output.
+
         """
 
     @abstractmethod
@@ -229,8 +233,9 @@ class BaseLanguageModel(
                 to the model provider API call.
 
         Returns:
-            An LLMResult, which contains a list of candidate Generations for each input
-            prompt and additional model provider-specific output.
+            An ``LLMResult``, which contains a list of candidate Generations for each
+            input prompt and additional model provider-specific output.
+
         """
 
     def with_structured_output(
@@ -248,8 +253,8 @@ class BaseLanguageModel(
     ) -> str:
         """Pass a single string input to the model and return a string.
 
-        Use this method when passing in raw text. If you want to pass in specific
-        types of chat messages, use predict_messages.
+        Use this method when passing in raw text. If you want to pass in specific types
+        of chat messages, use predict_messages.
 
         Args:
             text: String input to pass to the model.
@@ -260,6 +265,7 @@ class BaseLanguageModel(
 
         Returns:
             Top model prediction as a string.
+
         """
 
     @deprecated("0.1.7", alternative="invoke", removal="1.0")
@@ -274,7 +280,7 @@ class BaseLanguageModel(
         """Pass a message sequence to the model and return a message.
 
         Use this method when passing in chat messages. If you want to pass in raw text,
-            use predict.
+        use predict.
 
         Args:
             messages: A sequence of chat messages corresponding to a single model input.
@@ -285,6 +291,7 @@ class BaseLanguageModel(
 
         Returns:
             Top model prediction as a message.
+
         """
 
     @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
@@ -295,7 +302,7 @@ class BaseLanguageModel(
         """Asynchronously pass a string to the model and return a string.
 
         Use this method when calling pure text generation models and only the top
-            candidate generation is needed.
+        candidate generation is needed.
 
         Args:
             text: String input to pass to the model.
@@ -306,6 +313,7 @@ class BaseLanguageModel(
 
         Returns:
             Top model prediction as a string.
+
         """
 
     @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
@@ -319,8 +327,8 @@ class BaseLanguageModel(
     ) -> BaseMessage:
         """Asynchronously pass messages to the model and return a message.
 
-        Use this method when calling chat models and only the top
-        candidate generation is needed.
+        Use this method when calling chat models and only the top candidate generation
+        is needed.
 
         Args:
             messages: A sequence of chat messages corresponding to a single model input.
@@ -331,6 +339,7 @@ class BaseLanguageModel(
 
         Returns:
             Top model prediction as a message.
+
         """
 
     @property
@@ -346,7 +355,8 @@ class BaseLanguageModel(
 
         Returns:
             A list of ids corresponding to the tokens in the text, in order they occur
-                in the text.
+            in the text.
+
         """
         if self.custom_get_token_ids is not None:
             return self.custom_get_token_ids(text)
@@ -362,6 +372,7 @@ class BaseLanguageModel(
 
         Returns:
             The integer number of tokens in the text.
+
         """
         return len(self.get_token_ids(text))
 
@@ -374,16 +385,18 @@ class BaseLanguageModel(
 
         Useful for checking if an input fits in a model's context window.
 
-        **Note**: the base implementation of get_num_tokens_from_messages ignores
-        tool schemas.
+        .. note::
+            The base implementation of ``get_num_tokens_from_messages`` ignores tool
+            schemas.
 
         Args:
             messages: The message inputs to tokenize.
-            tools: If provided, sequence of dict, BaseModel, function, or BaseTools
-                to be converted to tool schemas.
+            tools: If provided, sequence of dict, ``BaseModel``, function, or
+                ``BaseTools`` to be converted to tool schemas.
 
         Returns:
             The sum of the number of tokens across the messages.
+
         """
         if tools is not None:
             warnings.warn(
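
As an editorial aside (not part of the diff): the base counting path can be exercised with one of the fake models that ship in this package. It uses the GPT-2 tokenizer above, so the optional transformers dependency must be installed.

    from langchain_core.language_models.fake_chat_models import FakeListChatModel
    from langchain_core.messages import HumanMessage

    model = FakeListChatModel(responses=["ok"])
    # The base implementation ignores tool schemas, per the new note above.
    n = model.get_num_tokens_from_messages([HumanMessage(content="Hello!")])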
@@ -396,6 +409,7 @@ class BaseLanguageModel(
     def _all_required_field_names(cls) -> set:
         """DEPRECATED: Kept for backwards compatibility.
 
-        Use get_pydantic_field_names.
+        Use ``get_pydantic_field_names``.
+
         """
         return get_pydantic_field_names(cls)
@@ -78,6 +78,11 @@ def _generate_response_from_error(error: BaseException) -> list[ChatGeneration]:
     if hasattr(error, "response"):
         response = error.response
         metadata: dict = {}
+        if hasattr(response, "json"):
+            try:
+                metadata["body"] = response.json()
+            except Exception:
+                metadata["body"] = getattr(response, "text", None)
         if hasattr(response, "headers"):
             try:
                 metadata["headers"] = dict(response.headers)
@@ -97,17 +102,18 @@ def _generate_response_from_error(error: BaseException) -> list[ChatGeneration]:
 
 
 def _format_for_tracing(messages: list[BaseMessage]) -> list[BaseMessage]:
-    """Format messages for tracing in on_chat_model_start.
+    """Format messages for tracing in ``on_chat_model_start``.
 
     - Update image content blocks to OpenAI Chat Completions format (backward
       compatibility).
-    - Add "type" key to content blocks that have a single key.
+    - Add ``type`` key to content blocks that have a single key.
 
     Args:
         messages: List of messages to format.
 
     Returns:
         List of messages formatted for tracing.
+
     """
     messages_to_trace = []
     for message in messages:
@@ -153,10 +159,11 @@ def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
     """Generate from a stream.
 
     Args:
-        stream: Iterator of ChatGenerationChunk.
+        stream: Iterator of ``ChatGenerationChunk``.
 
     Returns:
         ChatResult: Chat result.
+
     """
     generation = next(stream, None)
     if generation:
@@ -180,10 +187,11 @@ async def agenerate_from_stream(
     """Async generate from a stream.
 
     Args:
-        stream: Iterator of ChatGenerationChunk.
+        stream: Iterator of ``ChatGenerationChunk``.
 
     Returns:
         ChatResult: Chat result.
+
     """
     chunks = [chunk async for chunk in stream]
     return await run_in_executor(None, generate_from_stream, iter(chunks))
@@ -311,15 +319,16 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
       provided. This offers the best of both worlds.
     - If False (default), will always use streaming case if available.
 
-    The main reason for this flag is that code might be written using ``.stream()`` and
+    The main reason for this flag is that code might be written using ``stream()`` and
     a user may want to swap out a given model for another model whose the implementation
     does not properly support streaming.
+
    """

    @model_validator(mode="before")
    @classmethod
    def raise_deprecation(cls, values: dict) -> Any:
-        """Raise deprecation warning if callback_manager is used.
+        """Raise deprecation warning if ``callback_manager`` is used.
 
        Args:
            values (Dict): Values to validate.
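
A quick sketch of the ``disable_streaming`` flag documented in the hunk above (editorial example, not part of the diff; ``FakeListChatModel`` stands in for a real provider):

    from langchain_core.language_models.fake_chat_models import FakeListChatModel

    # With disable_streaming=True, stream() falls back to invoke() and yields
    # the whole response as a single chunk.
    model = FakeListChatModel(responses=["hello"], disable_streaming=True)
    for chunk in model.stream("hi"):
        print(chunk.content)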
@@ -328,7 +337,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
            Dict: Validated values.
 
        Raises:
-            DeprecationWarning: If callback_manager is used.
+            DeprecationWarning: If ``callback_manager`` is used.
+
        """
        if values.get("callback_manager") is not None:
            warnings.warn(
@@ -528,7 +538,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             generations = [generations_with_error_metadata]
             run_manager.on_llm_error(
                 e,
-                response=LLMResult(generations=generations),  # type: ignore[arg-type]
+                response=LLMResult(generations=generations),
             )
             raise
 
@@ -622,7 +632,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             generations = [generations_with_error_metadata]
             await run_manager.on_llm_error(
                 e,
-                response=LLMResult(generations=generations),  # type: ignore[arg-type]
+                response=LLMResult(generations=generations),
             )
             raise
 
@@ -653,6 +663,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
 
         Returns:
             List of ChatGeneration objects.
+
         """
         converted_generations = []
         for gen in cache_val:
@@ -778,7 +789,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
 
         Returns:
             An LLMResult, which contains a list of candidate Generations for each input
-                prompt and additional model provider-specific output.
+            prompt and additional model provider-specific output.
+
         """
         ls_structured_output_format = kwargs.pop(
             "ls_structured_output_format", None
@@ -835,17 +847,17 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
                 run_managers[i].on_llm_error(
                     e,
                     response=LLMResult(
-                        generations=[generations_with_error_metadata]  # type: ignore[list-item]
+                        generations=[generations_with_error_metadata]
                     ),
                 )
                 raise
         flattened_outputs = [
-            LLMResult(generations=[res.generations], llm_output=res.llm_output)  # type: ignore[list-item]
+            LLMResult(generations=[res.generations], llm_output=res.llm_output)
             for res in results
         ]
         llm_output = self._combine_llm_outputs([res.llm_output for res in results])
         generations = [res.generations for res in results]
-        output = LLMResult(generations=generations, llm_output=llm_output)  # type: ignore[arg-type]
+        output = LLMResult(generations=generations, llm_output=llm_output)
         if run_managers:
             run_infos = []
             for manager, flattened_output in zip(run_managers, flattened_outputs):
@@ -892,7 +904,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
 
         Returns:
             An LLMResult, which contains a list of candidate Generations for each input
-                prompt and additional model provider-specific output.
+            prompt and additional model provider-specific output.
+
         """
         ls_structured_output_format = kwargs.pop(
             "ls_structured_output_format", None
@@ -954,7 +967,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
                 await run_managers[i].on_llm_error(
                     res,
                     response=LLMResult(
-                        generations=[generations_with_error_metadata]  # type: ignore[list-item]
+                        generations=[generations_with_error_metadata]
                     ),
                 )
                 exceptions.append(res)
@@ -964,7 +977,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             *[
                 run_manager.on_llm_end(
                     LLMResult(
-                        generations=[res.generations],  # type: ignore[list-item, union-attr]
+                        generations=[res.generations],  # type: ignore[union-attr]
                         llm_output=res.llm_output,  # type: ignore[union-attr]
                     )
                 )
@@ -974,12 +987,12 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         )
         raise exceptions[0]
     flattened_outputs = [
-        LLMResult(generations=[res.generations], llm_output=res.llm_output)  # type: ignore[list-item, union-attr]
+        LLMResult(generations=[res.generations], llm_output=res.llm_output)  # type: ignore[union-attr]
         for res in results
     ]
     llm_output = self._combine_llm_outputs([res.llm_output for res in results])  # type: ignore[union-attr]
     generations = [res.generations for res in results]  # type: ignore[union-attr]
-    output = LLMResult(generations=generations, llm_output=llm_output)  # type: ignore[arg-type]
+    output = LLMResult(generations=generations, llm_output=llm_output)
     await asyncio.gather(
         *[
             run_manager.on_llm_end(flattened_output)
@@ -1248,6 +1261,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
 
         Returns:
             The model output message.
+
         """
         generation = self.generate(
             [messages], stop=stop, callbacks=callbacks, **kwargs
@@ -1288,6 +1302,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
 
         Returns:
             The model output string.
+
         """
         return self.predict(message, stop=stop, **kwargs)
 
@@ -1307,6 +1322,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
 
         Returns:
             The predicted output string.
+
         """
         stop_ = None if stop is None else list(stop)
         result = self([HumanMessage(content=text)], stop=stop_, **kwargs)
@@ -1382,6 +1398,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
 
         Returns:
             A Runnable that returns a message.
+
         """
         raise NotImplementedError
 
@@ -1544,8 +1561,10 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
 class SimpleChatModel(BaseChatModel):
     """Simplified implementation for a chat model to inherit from.
 
-    **Note** This implementation is primarily here for backwards compatibility.
-    For new implementations, please use `BaseChatModel` directly.
+    .. note::
+        This implementation is primarily here for backwards compatibility. For new
+        implementations, please use ``BaseChatModel`` directly.
+
     """
 
     def _generate(
@@ -223,11 +223,12 @@ class GenericFakeChatModel(BaseChatModel):
     This can be expanded to accept other types like Callables / dicts / strings
     to make the interface more generic if needed.
 
-    Note: if you want to pass a list, you can use `iter` to convert it to an iterator.
+    .. note::
+        if you want to pass a list, you can use ``iter`` to convert it to an iterator.
 
-    Please note that streaming is not implemented yet. We should try to implement it
-    in the future by delegating to invoke and then breaking the resulting output
-    into message chunks.
+    .. warning::
+        Streaming is not implemented yet. We should try to implement it in the future by
+        delegating to invoke and then breaking the resulting output into message chunks.
     """
 
     @override
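
Illustrating the note above (editorial sketch, not part of the diff):

    from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
    from langchain_core.messages import AIMessage

    # The messages field expects an iterator, so wrap a list with iter().
    model = GenericFakeChatModel(messages=iter([AIMessage(content="hello")]))
    print(model.invoke("hi").content)  # "hello"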
@@ -73,10 +73,9 @@ def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
 def dumpd(obj: Any) -> Any:
     """Return a dict representation of an object.
 
-    Note:
-        Unfortunately this function is not as efficient as it could be
-        because it first dumps the object to a json string and then loads it
-        back into a dictionary.
+    .. note::
+        Unfortunately this function is not as efficient as it could be because it first
+        dumps the object to a json string and then loads it back into a dictionary.
 
     Args:
         obj: The object to dump.
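
The inefficiency the note describes is the round trip through JSON, roughly equivalent to this sketch (editorial example, not part of the diff):

    import json

    from langchain_core.load.dump import dumpd, dumps
    from langchain_core.messages import HumanMessage

    msg = HumanMessage(content="hi")
    # dumpd(obj) behaves like json.loads(dumps(obj)): serialize to a JSON
    # string, then parse back into a plain dict.
    assert dumpd(msg) == json.loads(dumps(msg))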
@@ -358,7 +358,10 @@ class AIMessageChunk(AIMessage, BaseMessageChunk):
 
         for chunk in self.tool_call_chunks:
             try:
-                args_ = parse_partial_json(chunk["args"]) if chunk["args"] != "" else {}  # type: ignore[arg-type]
+                if chunk["args"] is not None and chunk["args"] != "":
+                    args_ = parse_partial_json(chunk["args"])
+                else:
+                    args_ = {}
                 if isinstance(args_, dict):
                     tool_calls.append(
                         create_tool_call(
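
For context on the rewritten branch (editorial example, not part of the diff): ``parse_partial_json`` best-effort-completes a truncated JSON string, which is why empty and None args are now short-circuited to ``{}`` before it is called.

    from langchain_core.utils.json import parse_partial_json

    # A tool-call args string cut off mid-stream:
    print(parse_partial_json('{"city": "Par'))  # {'city': 'Par'}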
@@ -13,7 +13,7 @@ class RemoveMessage(BaseMessage):
 
     def __init__(
         self,
-        id: str,  # noqa: A002
+        id: str,
         **kwargs: Any,
     ) -> None:
         """Create a RemoveMessage.
@@ -212,7 +212,7 @@ def tool_call(
     *,
     name: str,
     args: dict[str, Any],
-    id: Optional[str],  # noqa: A002
+    id: Optional[str],
 ) -> ToolCall:
     """Create a tool call.
 
@@ -260,7 +260,7 @@ def tool_call_chunk(
     *,
     name: Optional[str] = None,
     args: Optional[str] = None,
-    id: Optional[str] = None,  # noqa: A002
+    id: Optional[str] = None,
     index: Optional[int] = None,
 ) -> ToolCallChunk:
     """Create a tool call chunk.
@@ -298,7 +298,7 @@ def invalid_tool_call(
     *,
     name: Optional[str] = None,
     args: Optional[str] = None,
-    id: Optional[str] = None,  # noqa: A002
+    id: Optional[str] = None,
     error: Optional[str] = None,
 ) -> InvalidToolCall:
     """Create an invalid tool call.
@@ -213,7 +213,7 @@ def _create_message_from_message_type(
     name: Optional[str] = None,
     tool_call_id: Optional[str] = None,
     tool_calls: Optional[list[dict[str, Any]]] = None,
-    id: Optional[str] = None,  # noqa: A002
+    id: Optional[str] = None,
     **additional_kwargs: Any,
 ) -> BaseMessage:
     """Create a message from a message type and content string.
@@ -656,22 +656,23 @@ def trim_messages(
     properties:
 
     1. The resulting chat history should be valid. Most chat models expect that chat
-       history starts with either (1) a `HumanMessage` or (2) a `SystemMessage` followed
-       by a `HumanMessage`. To achieve this, set `start_on="human"`.
-       In addition, generally a `ToolMessage` can only appear after an `AIMessage`
+       history starts with either (1) a ``HumanMessage`` or (2) a ``SystemMessage`` followed
+       by a ``HumanMessage``. To achieve this, set ``start_on="human"``.
+       In addition, generally a ``ToolMessage`` can only appear after an ``AIMessage``
        that involved a tool call.
        Please see the following link for more information about messages:
        https://python.langchain.com/docs/concepts/#messages
     2. It includes recent messages and drops old messages in the chat history.
-       To achieve this set the `strategy="last"`.
-    3. Usually, the new chat history should include the `SystemMessage` if it
-       was present in the original chat history since the `SystemMessage` includes
-       special instructions to the chat model. The `SystemMessage` is almost always
+       To achieve this set the ``strategy="last"``.
+    3. Usually, the new chat history should include the ``SystemMessage`` if it
+       was present in the original chat history since the ``SystemMessage`` includes
+       special instructions to the chat model. The ``SystemMessage`` is almost always
        the first message in the history if present. To achieve this set the
-       `include_system=True`.
+       ``include_system=True``.
 
-    **Note** The examples below show how to configure `trim_messages` to achieve
-    a behavior consistent with the above properties.
+    .. note::
+        The examples below show how to configure ``trim_messages`` to achieve a behavior
+        consistent with the above properties.
 
     Args:
         messages: Sequence of Message-like objects to trim.
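
Taken together, the three properties correspond to a call like this (editorial sketch, not part of the diff; the token budget is arbitrary):

    from langchain_core.messages import (
        AIMessage,
        HumanMessage,
        SystemMessage,
        trim_messages,
    )
    from langchain_core.messages.utils import count_tokens_approximately

    history = [
        SystemMessage(content="You are a helpful assistant."),
        HumanMessage(content="What is LangChain?"),
        AIMessage(content="A framework for building LLM applications."),
        HumanMessage(content="Does it support streaming?"),
    ]
    # Keep the SystemMessage, drop the oldest messages, and ensure the
    # trimmed history starts on a HumanMessage.
    trimmed = trim_messages(
        history,
        strategy="last",
        start_on="human",
        include_system=True,
        max_tokens=25,
        token_counter=count_tokens_approximately,
    )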
@@ -1580,26 +1581,26 @@ def count_tokens_approximately(
         chars_per_token: Number of characters per token to use for the approximation.
             Default is 4 (one token corresponds to ~4 chars for common English text).
             You can also specify float values for more fine-grained control.
-            See more here: https://platform.openai.com/tokenizer
+            `See more here. <https://platform.openai.com/tokenizer>`__
         extra_tokens_per_message: Number of extra tokens to add per message.
             Default is 3 (special tokens, including beginning/end of message).
             You can also specify float values for more fine-grained control.
-            See more here:
-            https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
+            `See more here. <https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb>`__
         count_name: Whether to include message names in the count.
             Enabled by default.
 
     Returns:
         Approximate number of tokens in the messages.
 
-    Note:
-        This is a simple approximation that may not match the exact token count
-        used by specific models. For accurate counts, use model-specific tokenizers.
+    .. note::
+        This is a simple approximation that may not match the exact token count used by
+        specific models. For accurate counts, use model-specific tokenizers.
 
     Warning:
         This function does not currently support counting image tokens.
 
     .. versionadded:: 0.3.46
+
     """
     token_count = 0.0
     for message in convert_to_messages(messages):
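
With the documented defaults (4 chars per token, 3 extra tokens per message), the approximation works out as in this sketch (editorial example, not part of the diff):

    from langchain_core.messages import HumanMessage
    from langchain_core.messages.utils import count_tokens_approximately

    # "Hello, world!" is 13 chars: 13 / 4 = 3.25 tokens for the content,
    # plus 3 per-message tokens = 6.25, rounded up to 7.
    print(count_tokens_approximately([HumanMessage(content="Hello, world!")]))  # 7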
@@ -246,6 +246,8 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
             _ = tool_call.pop("id")
         else:
             try:
+                # This exists purely for backward compatibility / cached messages
+                # All new messages should use `message.tool_calls`
                 raw_tool_calls = copy.deepcopy(message.additional_kwargs["tool_calls"])
             except KeyError:
                 if self.first_tool_only:
@@ -32,7 +32,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
 
     def _transform(
         self,
-        input: Iterator[Union[str, BaseMessage]],  # noqa: A002
+        input: Iterator[Union[str, BaseMessage]],
     ) -> Iterator[T]:
         for chunk in input:
             if isinstance(chunk, BaseMessage):
@@ -42,7 +42,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]):
 
     async def _atransform(
         self,
-        input: AsyncIterator[Union[str, BaseMessage]],  # noqa: A002
+        input: AsyncIterator[Union[str, BaseMessage]],
     ) -> AsyncIterator[T]:
         async for chunk in input:
             if isinstance(chunk, BaseMessage):
@@ -105,10 +105,11 @@ class _StreamingParser:
         self.buffer = ""
         # yield all events
         try:
-            for event, elem in self.pull_parser.read_events():
+            events = self.pull_parser.read_events()
+            for event, elem in events:  # type: ignore[misc]
                 if event == "start":
                     # update current path
-                    self.current_path.append(elem.tag)
+                    self.current_path.append(elem.tag)  # type: ignore[union-attr]
                     self.current_path_has_children = False
                 elif event == "end":
                     # remove last element from current path
@@ -116,7 +117,7 @@ class _StreamingParser:
                     self.current_path.pop()
                     # yield element
                     if not self.current_path_has_children:
-                        yield nested_element(self.current_path, elem)
+                        yield nested_element(self.current_path, elem)  # type: ignore[arg-type]
                     # prevent yielding of parent element
                     if self.current_path:
                         self.current_path_has_children = True
@@ -155,9 +155,7 @@ class MessagesPlaceholder(BaseMessagePromptTemplate):
         """
         # mypy can't detect the init which is defined in the parent class
         # b/c these are BaseModel classes.
-        super().__init__(  # type: ignore[call-arg]
-            variable_name=variable_name, optional=optional, **kwargs
-        )
+        super().__init__(variable_name=variable_name, optional=optional, **kwargs)
 
     def format_messages(self, **kwargs: Any) -> list[BaseMessage]:
         """Format messages from kwargs.