langchain-core 0.3.75__py3-none-any.whl → 0.3.77__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic. Click here for more details.

Files changed (119) hide show
  1. langchain_core/_api/beta_decorator.py +22 -44
  2. langchain_core/_api/deprecation.py +30 -17
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/beta/runnables/context.py +1 -2
  7. langchain_core/callbacks/base.py +28 -15
  8. langchain_core/callbacks/manager.py +83 -71
  9. langchain_core/callbacks/usage.py +6 -4
  10. langchain_core/chat_history.py +29 -21
  11. langchain_core/document_loaders/base.py +34 -9
  12. langchain_core/document_loaders/langsmith.py +4 -1
  13. langchain_core/documents/base.py +35 -10
  14. langchain_core/documents/transformers.py +4 -2
  15. langchain_core/embeddings/fake.py +8 -5
  16. langchain_core/env.py +2 -3
  17. langchain_core/example_selectors/base.py +12 -0
  18. langchain_core/exceptions.py +7 -0
  19. langchain_core/globals.py +17 -28
  20. langchain_core/indexing/api.py +88 -76
  21. langchain_core/indexing/base.py +5 -8
  22. langchain_core/indexing/in_memory.py +23 -3
  23. langchain_core/language_models/__init__.py +3 -2
  24. langchain_core/language_models/base.py +31 -20
  25. langchain_core/language_models/chat_models.py +98 -27
  26. langchain_core/language_models/fake_chat_models.py +10 -9
  27. langchain_core/language_models/llms.py +52 -18
  28. langchain_core/load/dump.py +2 -3
  29. langchain_core/load/load.py +15 -1
  30. langchain_core/load/serializable.py +39 -44
  31. langchain_core/memory.py +7 -3
  32. langchain_core/messages/ai.py +53 -24
  33. langchain_core/messages/base.py +43 -22
  34. langchain_core/messages/chat.py +4 -1
  35. langchain_core/messages/content_blocks.py +23 -2
  36. langchain_core/messages/function.py +9 -5
  37. langchain_core/messages/human.py +13 -10
  38. langchain_core/messages/modifier.py +1 -0
  39. langchain_core/messages/system.py +11 -8
  40. langchain_core/messages/tool.py +60 -29
  41. langchain_core/messages/utils.py +250 -131
  42. langchain_core/output_parsers/base.py +5 -2
  43. langchain_core/output_parsers/json.py +4 -4
  44. langchain_core/output_parsers/list.py +7 -22
  45. langchain_core/output_parsers/openai_functions.py +3 -0
  46. langchain_core/output_parsers/openai_tools.py +6 -1
  47. langchain_core/output_parsers/pydantic.py +4 -0
  48. langchain_core/output_parsers/string.py +5 -1
  49. langchain_core/output_parsers/xml.py +19 -19
  50. langchain_core/outputs/chat_generation.py +25 -10
  51. langchain_core/outputs/generation.py +14 -3
  52. langchain_core/outputs/llm_result.py +8 -1
  53. langchain_core/prompt_values.py +16 -6
  54. langchain_core/prompts/base.py +4 -9
  55. langchain_core/prompts/chat.py +89 -57
  56. langchain_core/prompts/dict.py +16 -8
  57. langchain_core/prompts/few_shot.py +12 -11
  58. langchain_core/prompts/few_shot_with_templates.py +5 -1
  59. langchain_core/prompts/image.py +12 -5
  60. langchain_core/prompts/message.py +5 -6
  61. langchain_core/prompts/pipeline.py +13 -8
  62. langchain_core/prompts/prompt.py +22 -8
  63. langchain_core/prompts/string.py +18 -10
  64. langchain_core/prompts/structured.py +7 -2
  65. langchain_core/rate_limiters.py +2 -2
  66. langchain_core/retrievers.py +7 -6
  67. langchain_core/runnables/base.py +406 -186
  68. langchain_core/runnables/branch.py +14 -19
  69. langchain_core/runnables/config.py +9 -15
  70. langchain_core/runnables/configurable.py +34 -19
  71. langchain_core/runnables/fallbacks.py +20 -13
  72. langchain_core/runnables/graph.py +48 -38
  73. langchain_core/runnables/graph_ascii.py +41 -18
  74. langchain_core/runnables/graph_mermaid.py +54 -25
  75. langchain_core/runnables/graph_png.py +27 -31
  76. langchain_core/runnables/history.py +55 -58
  77. langchain_core/runnables/passthrough.py +44 -21
  78. langchain_core/runnables/retry.py +44 -23
  79. langchain_core/runnables/router.py +9 -8
  80. langchain_core/runnables/schema.py +2 -0
  81. langchain_core/runnables/utils.py +51 -89
  82. langchain_core/stores.py +19 -31
  83. langchain_core/sys_info.py +9 -8
  84. langchain_core/tools/base.py +37 -28
  85. langchain_core/tools/convert.py +26 -15
  86. langchain_core/tools/simple.py +36 -8
  87. langchain_core/tools/structured.py +25 -12
  88. langchain_core/tracers/base.py +2 -2
  89. langchain_core/tracers/context.py +5 -1
  90. langchain_core/tracers/core.py +109 -39
  91. langchain_core/tracers/evaluation.py +22 -26
  92. langchain_core/tracers/event_stream.py +45 -34
  93. langchain_core/tracers/langchain.py +12 -3
  94. langchain_core/tracers/langchain_v1.py +10 -2
  95. langchain_core/tracers/log_stream.py +56 -17
  96. langchain_core/tracers/root_listeners.py +4 -20
  97. langchain_core/tracers/run_collector.py +6 -16
  98. langchain_core/tracers/schemas.py +5 -1
  99. langchain_core/utils/aiter.py +15 -7
  100. langchain_core/utils/env.py +3 -0
  101. langchain_core/utils/function_calling.py +50 -28
  102. langchain_core/utils/interactive_env.py +6 -2
  103. langchain_core/utils/iter.py +12 -4
  104. langchain_core/utils/json.py +12 -3
  105. langchain_core/utils/json_schema.py +156 -40
  106. langchain_core/utils/loading.py +5 -1
  107. langchain_core/utils/mustache.py +24 -15
  108. langchain_core/utils/pydantic.py +38 -9
  109. langchain_core/utils/utils.py +25 -9
  110. langchain_core/vectorstores/base.py +7 -20
  111. langchain_core/vectorstores/in_memory.py +23 -17
  112. langchain_core/vectorstores/utils.py +18 -12
  113. langchain_core/version.py +1 -1
  114. langchain_core-0.3.77.dist-info/METADATA +67 -0
  115. langchain_core-0.3.77.dist-info/RECORD +174 -0
  116. langchain_core-0.3.75.dist-info/METADATA +0 -106
  117. langchain_core-0.3.75.dist-info/RECORD +0 -174
  118. {langchain_core-0.3.75.dist-info → langchain_core-0.3.77.dist-info}/WHEEL +0 -0
  119. {langchain_core-0.3.75.dist-info → langchain_core-0.3.77.dist-info}/entry_points.txt +0 -0
@@ -71,7 +71,7 @@ class _TracerCore(ABC):
71
71
  for streaming events.
72
72
  - 'original+chat' is a format that is the same as 'original'
73
73
  except it does NOT raise an attribute error on_chat_model_start
74
- kwargs: Additional keyword arguments that will be passed to
74
+ **kwargs: Additional keyword arguments that will be passed to
75
75
  the superclass.
76
76
  """
77
77
  super().__init__(**kwargs)
@@ -82,7 +82,7 @@ class _TracerCore(ABC):
82
82
  """Map of run ID to (trace_id, dotted_order). Cleared when tracer GCed."""
83
83
 
84
84
  @abstractmethod
85
- def _persist_run(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]:
85
+ def _persist_run(self, run: Run) -> Union[Coroutine[Any, Any, None], None]:
86
86
  """Persist a run."""
87
87
 
88
88
  @staticmethod
@@ -108,7 +108,7 @@ class _TracerCore(ABC):
108
108
  except: # noqa: E722
109
109
  return msg
110
110
 
111
- def _start_trace(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # type: ignore[return]
111
+ def _start_trace(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # type: ignore[return]
112
112
  current_dotted_order = run.start_time.strftime("%Y%m%dT%H%M%S%fZ") + str(run.id)
113
113
  if run.parent_run_id:
114
114
  if parent := self.order_map.get(run.parent_run_id):
@@ -531,27 +531,43 @@ class _TracerCore(ABC):
531
531
  return retrieval_run
532
532
 
533
533
  def __deepcopy__(self, memo: dict) -> _TracerCore:
534
- """Deepcopy the tracer."""
534
+ """Return self deepcopied."""
535
535
  return self
536
536
 
537
537
  def __copy__(self) -> _TracerCore:
538
- """Copy the tracer."""
538
+ """Return self copied."""
539
539
  return self
540
540
 
541
- def _end_trace(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
542
- """End a trace for a run."""
541
+ def _end_trace(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
542
+ """End a trace for a run.
543
+
544
+ Args:
545
+ run: The run.
546
+ """
543
547
  return None
544
548
 
545
- def _on_run_create(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
546
- """Process a run upon creation."""
549
+ def _on_run_create(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
550
+ """Process a run upon creation.
551
+
552
+ Args:
553
+ run: The created run.
554
+ """
547
555
  return None
548
556
 
549
- def _on_run_update(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
550
- """Process a run upon update."""
557
+ def _on_run_update(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
558
+ """Process a run upon update.
559
+
560
+ Args:
561
+ run: The updated run.
562
+ """
551
563
  return None
552
564
 
553
- def _on_llm_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
554
- """Process the LLM Run upon start."""
565
+ def _on_llm_start(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
566
+ """Process the LLM Run upon start.
567
+
568
+ Args:
569
+ run: The LLM run.
570
+ """
555
571
  return None
556
572
 
557
573
  def _on_llm_new_token(
@@ -559,54 +575,108 @@ class _TracerCore(ABC):
559
575
  run: Run, # noqa: ARG002
560
576
  token: str, # noqa: ARG002
561
577
  chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]], # noqa: ARG002
562
- ) -> Union[None, Coroutine[Any, Any, None]]:
563
- """Process new LLM token."""
578
+ ) -> Union[Coroutine[Any, Any, None], None]:
579
+ """Process new LLM token.
580
+
581
+ Args:
582
+ run: The LLM run.
583
+ token: The new token.
584
+ chunk: Optional chunk.
585
+ """
564
586
  return None
565
587
 
566
- def _on_llm_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
567
- """Process the LLM Run."""
588
+ def _on_llm_end(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
589
+ """Process the LLM Run.
590
+
591
+ Args:
592
+ run: The LLM run.
593
+ """
568
594
  return None
569
595
 
570
- def _on_llm_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
571
- """Process the LLM Run upon error."""
596
+ def _on_llm_error(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
597
+ """Process the LLM Run upon error.
598
+
599
+ Args:
600
+ run: The LLM run.
601
+ """
572
602
  return None
573
603
 
574
- def _on_chain_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
575
- """Process the Chain Run upon start."""
604
+ def _on_chain_start(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
605
+ """Process the Chain Run upon start.
606
+
607
+ Args:
608
+ run: The chain run.
609
+ """
576
610
  return None
577
611
 
578
- def _on_chain_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
579
- """Process the Chain Run."""
612
+ def _on_chain_end(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
613
+ """Process the Chain Run.
614
+
615
+ Args:
616
+ run: The chain run.
617
+ """
580
618
  return None
581
619
 
582
- def _on_chain_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
583
- """Process the Chain Run upon error."""
620
+ def _on_chain_error(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
621
+ """Process the Chain Run upon error.
622
+
623
+ Args:
624
+ run: The chain run.
625
+ """
584
626
  return None
585
627
 
586
- def _on_tool_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
587
- """Process the Tool Run upon start."""
628
+ def _on_tool_start(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
629
+ """Process the Tool Run upon start.
630
+
631
+ Args:
632
+ run: The tool run.
633
+ """
588
634
  return None
589
635
 
590
- def _on_tool_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
591
- """Process the Tool Run."""
636
+ def _on_tool_end(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
637
+ """Process the Tool Run.
638
+
639
+ Args:
640
+ run: The tool run.
641
+ """
592
642
  return None
593
643
 
594
- def _on_tool_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
595
- """Process the Tool Run upon error."""
644
+ def _on_tool_error(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
645
+ """Process the Tool Run upon error.
646
+
647
+ Args:
648
+ run: The tool run.
649
+ """
596
650
  return None
597
651
 
598
- def _on_chat_model_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
599
- """Process the Chat Model Run upon start."""
652
+ def _on_chat_model_start(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
653
+ """Process the Chat Model Run upon start.
654
+
655
+ Args:
656
+ run: The chat model run.
657
+ """
600
658
  return None
601
659
 
602
- def _on_retriever_start(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
603
- """Process the Retriever Run upon start."""
660
+ def _on_retriever_start(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
661
+ """Process the Retriever Run upon start.
662
+
663
+ Args:
664
+ run: The retriever run.
665
+ """
604
666
  return None
605
667
 
606
- def _on_retriever_end(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
607
- """Process the Retriever Run."""
668
+ def _on_retriever_end(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
669
+ """Process the Retriever Run.
670
+
671
+ Args:
672
+ run: The retriever run.
673
+ """
608
674
  return None
609
675
 
610
- def _on_retriever_error(self, run: Run) -> Union[None, Coroutine[Any, Any, None]]: # noqa: ARG002
611
- """Process the Retriever Run upon error."""
676
+ def _on_retriever_error(self, run: Run) -> Union[Coroutine[Any, Any, None], None]: # noqa: ARG002
677
+ """Process the Retriever Run upon error.
678
+
679
+ Args:
680
+ run: The retriever run.
681
+ """
612
682
  return None
@@ -38,24 +38,27 @@ class EvaluatorCallbackHandler(BaseTracer):
38
38
  """Tracer that runs a run evaluator whenever a run is persisted.
39
39
 
40
40
  Attributes:
41
- example_id : Union[UUID, None]
42
- The example ID associated with the runs.
43
41
  client : Client
44
42
  The LangSmith client instance used for evaluating the runs.
45
- evaluators : Sequence[RunEvaluator]
46
- The sequence of run evaluators to be executed.
47
- executor : ThreadPoolExecutor
48
- The thread pool executor used for running the evaluators.
49
- futures : set[Future]
50
- The set of futures representing the running evaluators.
51
- skip_unfinished : bool
52
- Whether to skip runs that are not finished or raised
53
- an error.
54
- project_name : Optional[str]
55
- The LangSmith project name to be organize eval chain runs under.
56
43
  """
57
44
 
58
45
  name: str = "evaluator_callback_handler"
46
+ example_id: Optional[UUID] = None
47
+ """The example ID associated with the runs."""
48
+ client: langsmith.Client
49
+ """The LangSmith client instance used for evaluating the runs."""
50
+ evaluators: Sequence[langsmith.RunEvaluator] = ()
51
+ """The sequence of run evaluators to be executed."""
52
+ executor: Optional[ThreadPoolExecutor] = None
53
+ """The thread pool executor used for running the evaluators."""
54
+ futures: weakref.WeakSet[Future] = weakref.WeakSet()
55
+ """The set of futures representing the running evaluators."""
56
+ skip_unfinished: bool = True
57
+ """Whether to skip runs that are not finished or raised an error."""
58
+ project_name: Optional[str] = None
59
 + """The LangSmith project name to organize eval chain runs under."""
60
+ logged_eval_results: dict[tuple[str, str], list[EvaluationResult]]
61
+ lock: threading.Lock
59
62
 
60
63
  def __init__(
61
64
  self,
@@ -91,7 +94,7 @@ class EvaluatorCallbackHandler(BaseTracer):
91
94
  self.client = client or langchain_tracer.get_client()
92
95
  self.evaluators = evaluators
93
96
  if max_concurrency is None:
94
- self.executor: Optional[ThreadPoolExecutor] = _get_executor()
97
+ self.executor = _get_executor()
95
98
  elif max_concurrency > 0:
96
99
  self.executor = ThreadPoolExecutor(max_workers=max_concurrency)
97
100
  weakref.finalize(
@@ -100,10 +103,10 @@ class EvaluatorCallbackHandler(BaseTracer):
100
103
  )
101
104
  else:
102
105
  self.executor = None
103
- self.futures: weakref.WeakSet[Future] = weakref.WeakSet()
106
+ self.futures = weakref.WeakSet()
104
107
  self.skip_unfinished = skip_unfinished
105
108
  self.project_name = project_name
106
- self.logged_eval_results: dict[tuple[str, str], list[EvaluationResult]] = {}
109
+ self.logged_eval_results = {}
107
110
  self.lock = threading.Lock()
108
111
  _TRACERS.add(self)
109
112
 
@@ -111,12 +114,8 @@ class EvaluatorCallbackHandler(BaseTracer):
111
114
  """Evaluate the run in the project.
112
115
 
113
116
  Args:
114
- ----------
115
- run : Run
116
- The run to be evaluated.
117
- evaluator : RunEvaluator
118
- The evaluator to use for evaluating the run.
119
-
117
+ run: The run to be evaluated.
118
+ evaluator: The evaluator to use for evaluating the run.
120
119
  """
121
120
  try:
122
121
  if self.project_name is None:
@@ -202,10 +201,7 @@ class EvaluatorCallbackHandler(BaseTracer):
202
201
  """Run the evaluator on the run.
203
202
 
204
203
  Args:
205
- ----------
206
- run : Run
207
- The run to be evaluated.
208
-
204
+ run: The run to be evaluated.
209
205
  """
210
206
  if self.skip_unfinished and not run.outputs:
211
207
  logger.debug("Skipping unfinished run %s", run.id)
@@ -9,21 +9,23 @@ from typing import (
9
9
  TYPE_CHECKING,
10
10
  Any,
11
11
  Optional,
12
+ TypedDict,
12
13
  TypeVar,
13
14
  Union,
14
15
  cast,
15
16
  )
16
17
  from uuid import UUID, uuid4
17
18
 
18
- from typing_extensions import NotRequired, TypedDict, override
19
+ from typing_extensions import NotRequired, override
19
20
 
20
- from langchain_core.callbacks.base import AsyncCallbackHandler
21
+ from langchain_core.callbacks.base import AsyncCallbackHandler, BaseCallbackManager
21
22
  from langchain_core.messages import AIMessageChunk, BaseMessage, BaseMessageChunk
22
23
  from langchain_core.outputs import (
23
24
  ChatGenerationChunk,
24
25
  GenerationChunk,
25
26
  LLMResult,
26
27
  )
28
+ from langchain_core.runnables import ensure_config
27
29
  from langchain_core.runnables.schema import (
28
30
  CustomStreamEvent,
29
31
  EventData,
@@ -36,6 +38,11 @@ from langchain_core.runnables.utils import (
36
38
  _RootEventFilter,
37
39
  )
38
40
  from langchain_core.tracers._streaming import _StreamingCallbackHandler
41
+ from langchain_core.tracers.log_stream import (
42
+ LogStreamCallbackHandler,
43
+ RunLog,
44
+ _astream_log_implementation,
45
+ )
39
46
  from langchain_core.tracers.memory_stream import _MemoryStream
40
47
  from langchain_core.utils.aiter import aclosing, py_anext
41
48
 
@@ -53,22 +60,20 @@ class RunInfo(TypedDict):
53
60
  """Information about a run.
54
61
 
55
62
  This is used to keep track of the metadata associated with a run.
56
-
57
- Parameters:
58
- name: The name of the run.
59
- tags: The tags associated with the run.
60
- metadata: The metadata associated with the run.
61
- run_type: The type of the run.
62
- inputs: The inputs to the run.
63
- parent_run_id: The ID of the parent run.
64
63
  """
65
64
 
66
65
  name: str
66
+ """The name of the run."""
67
67
  tags: list[str]
68
+ """The tags associated with the run."""
68
69
  metadata: dict[str, Any]
70
+ """The metadata associated with the run."""
69
71
  run_type: str
72
+ """The type of the run."""
70
73
  inputs: NotRequired[Any]
74
+ """The inputs to the run."""
71
75
  parent_run_id: Optional[UUID]
76
+ """The ID of the parent run."""
72
77
 
73
78
 
74
79
  def _assign_name(name: Optional[str], serialized: Optional[dict[str, Any]]) -> str:
@@ -155,7 +160,11 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
155
160
  self.send_stream.send_nowait(event)
156
161
 
157
162
  def __aiter__(self) -> AsyncIterator[Any]:
158
- """Iterate over the receive stream."""
163
+ """Iterate over the receive stream.
164
+
165
+ Returns:
166
+ An async iterator over the receive stream.
167
+ """
159
168
  return self.receive_stream.__aiter__()
160
169
 
161
170
  async def tap_output_aiter(
@@ -215,7 +224,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
215
224
  yield chunk
216
225
 
217
226
  def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]:
218
- """Tap the output aiter.
227
+ """Tap the output iter.
219
228
 
220
229
  Args:
221
230
  run_id: The ID of the run.
@@ -306,7 +315,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
306
315
  name: Optional[str] = None,
307
316
  **kwargs: Any,
308
317
  ) -> None:
309
- """Start a trace for an LLM run."""
318
+ """Start a trace for a chat model run."""
310
319
  name_ = _assign_name(name, serialized)
311
320
  run_type = "chat_model"
312
321
 
@@ -348,7 +357,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
348
357
  name: Optional[str] = None,
349
358
  **kwargs: Any,
350
359
  ) -> None:
351
- """Start a trace for an LLM run."""
360
+ """Start a trace for a (non-chat model) LLM run."""
352
361
  name_ = _assign_name(name, serialized)
353
362
  run_type = "llm"
354
363
 
@@ -412,7 +421,10 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
412
421
  parent_run_id: Optional[UUID] = None,
413
422
  **kwargs: Any,
414
423
  ) -> None:
415
- """Run on new LLM token. Only available when streaming is enabled."""
424
+ """Run on new output token. Only available when streaming is enabled.
425
+
426
+ For both chat models and non-chat models (legacy LLMs).
427
+ """
416
428
  run_info = self.run_map.get(run_id)
417
429
  chunk_: Union[GenerationChunk, BaseMessageChunk]
418
430
 
@@ -458,9 +470,15 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
458
470
  async def on_llm_end(
459
471
  self, response: LLMResult, *, run_id: UUID, **kwargs: Any
460
472
  ) -> None:
461
- """End a trace for an LLM run."""
473
+ """End a trace for a model run.
474
+
475
+ For both chat models and non-chat models (legacy LLMs).
476
+
477
+ Raises:
478
+ ValueError: If the run type is not ``'llm'`` or ``'chat_model'``.
479
+ """
462
480
  run_info = self.run_map.pop(run_id)
463
- inputs_ = run_info["inputs"]
481
+ inputs_ = run_info.get("inputs")
464
482
 
465
483
  generations: Union[list[list[GenerationChunk]], list[list[ChatGenerationChunk]]]
466
484
  output: Union[dict, BaseMessage] = {}
@@ -636,7 +654,11 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
636
654
 
637
655
  @override
638
656
  async def on_tool_end(self, output: Any, *, run_id: UUID, **kwargs: Any) -> None:
639
- """End a trace for a tool run."""
657
+ """End a trace for a tool run.
658
+
659
+ Raises:
660
 + AssertionError: If the run corresponds to a tool call but has no recorded inputs.
661
+ """
640
662
  run_info = self.run_map.pop(run_id)
641
663
  if "inputs" not in run_info:
642
664
  msg = (
@@ -718,7 +740,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
718
740
  "event": "on_retriever_end",
719
741
  "data": {
720
742
  "output": documents,
721
- "input": run_info["inputs"],
743
+ "input": run_info.get("inputs"),
722
744
  },
723
745
  "run_id": str(run_id),
724
746
  "name": run_info["name"],
@@ -730,11 +752,11 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
730
752
  )
731
753
 
732
754
  def __deepcopy__(self, memo: dict) -> _AstreamEventsCallbackHandler:
733
- """Deepcopy the tracer."""
755
+ """Return self."""
734
756
  return self
735
757
 
736
758
  def __copy__(self) -> _AstreamEventsCallbackHandler:
737
- """Copy the tracer."""
759
+ """Return self."""
738
760
  return self
739
761
 
740
762
 
@@ -751,14 +773,6 @@ async def _astream_events_implementation_v1(
751
773
  exclude_tags: Optional[Sequence[str]] = None,
752
774
  **kwargs: Any,
753
775
  ) -> AsyncIterator[StandardStreamEvent]:
754
- from langchain_core.runnables import ensure_config
755
- from langchain_core.runnables.utils import _RootEventFilter
756
- from langchain_core.tracers.log_stream import (
757
- LogStreamCallbackHandler,
758
- RunLog,
759
- _astream_log_implementation,
760
- )
761
-
762
776
  stream = LogStreamCallbackHandler(
763
777
  auto_close=False,
764
778
  include_names=include_names,
@@ -838,12 +852,12 @@ async def _astream_events_implementation_v1(
838
852
  # Usually they will NOT be available for components that operate
839
853
  # on streams, since those components stream the input and
840
854
  # don't know its final value until the end of the stream.
841
- inputs = log_entry["inputs"]
855
+ inputs = log_entry.get("inputs")
842
856
  if inputs is not None:
843
857
  data["input"] = inputs
844
858
 
845
859
  if event_type == "end":
846
- inputs = log_entry["inputs"]
860
+ inputs = log_entry.get("inputs")
847
861
  if inputs is not None:
848
862
  data["input"] = inputs
849
863
 
@@ -936,9 +950,6 @@ async def _astream_events_implementation_v2(
936
950
  **kwargs: Any,
937
951
  ) -> AsyncIterator[StandardStreamEvent]:
938
952
  """Implementation of the astream events API for V2 runnables."""
939
- from langchain_core.callbacks.base import BaseCallbackManager
940
- from langchain_core.runnables import ensure_config
941
-
942
953
  event_streamer = _AstreamEventsCallbackHandler(
943
954
  include_names=include_names,
944
955
  include_types=include_types,
@@ -8,7 +8,7 @@ from datetime import datetime, timezone
8
8
  from typing import TYPE_CHECKING, Any, Optional, Union
9
9
  from uuid import UUID
10
10
 
11
- from langsmith import Client
11
+ from langsmith import Client, get_tracing_context
12
12
  from langsmith import run_trees as rt
13
13
  from langsmith import utils as ls_utils
14
14
  from tenacity import (
@@ -53,7 +53,11 @@ def wait_for_all_tracers() -> None:
53
53
 
54
54
 
55
55
  def get_client() -> Client:
56
- """Get the client."""
56
+ """Get the client.
57
+
58
+ Returns:
59
+ The LangSmith client.
60
+ """
57
61
  return rt.get_cached_client()
58
62
 
59
63
 
@@ -109,6 +113,8 @@ class LangChainTracer(BaseTracer):
109
113
  super()._start_trace(run)
110
114
  if run.ls_client is None:
111
115
  run.ls_client = self.client
116
+ if get_tracing_context().get("enabled") is False:
117
+ run.extra["__disabled"] = True
112
118
 
113
119
  def on_chat_model_start(
114
120
  self,
@@ -201,6 +207,8 @@ class LangChainTracer(BaseTracer):
201
207
 
202
208
  def _persist_run_single(self, run: Run) -> None:
203
209
  """Persist a run."""
210
+ if run.extra.get("__disabled"):
211
+ return
204
212
  try:
205
213
  run.extra["runtime"] = get_runtime_environment()
206
214
  run.tags = self._get_tags(run)
@@ -214,6 +222,8 @@ class LangChainTracer(BaseTracer):
214
222
 
215
223
  def _update_run_single(self, run: Run) -> None:
216
224
  """Update a run."""
225
+ if run.extra.get("__disabled"):
226
+ return
217
227
  try:
218
228
  run.patch(exclude_inputs=run.extra.get("inputs_is_truthy", False))
219
229
  except Exception as e:
@@ -235,7 +245,6 @@ class LangChainTracer(BaseTracer):
235
245
  chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
236
246
  parent_run_id: Optional[UUID] = None,
237
247
  ) -> Run:
238
- """Append token event to LLM run and return the run."""
239
248
  run_id_str = str(run_id)
240
249
  if run_id_str not in self.run_has_token_event_map:
241
250
  self.run_has_token_event_map[run_id_str] = True
@@ -7,7 +7,11 @@ from typing import Any
7
7
 
8
8
 
9
9
  def get_headers(*args: Any, **kwargs: Any) -> Any: # noqa: ARG001
10
- """Throw an error because this has been replaced by get_headers."""
10
 + """Throw an error because this function is no longer supported.
11
+
12
+ Raises:
13
+ RuntimeError: Always, because this function is deprecated.
14
+ """
11
15
  msg = (
12
16
  "get_headers for LangChainTracerV1 is no longer supported. "
13
17
  "Please use LangChainTracer instead."
@@ -16,7 +20,11 @@ def get_headers(*args: Any, **kwargs: Any) -> Any: # noqa: ARG001
16
20
 
17
21
 
18
22
  def LangChainTracerV1(*args: Any, **kwargs: Any) -> Any: # noqa: N802,ARG001
19
- """Throw an error because this has been replaced by LangChainTracer."""
23
+ """Throw an error because this has been replaced by ``LangChainTracer``.
24
+
25
+ Raises:
26
+ RuntimeError: Always, because this class is deprecated.
27
+ """
20
28
  msg = (
21
29
  "LangChainTracerV1 is no longer supported. Please use LangChainTracer instead."
22
30
  )